From 654cd6c70c4ec9d2fb165e3596ff8a5c4ed13ebf Mon Sep 17 00:00:00 2001 From: Travis CI User Date: Sun, 26 Jan 2025 03:03:16 -0500 Subject: [PATCH] Preparing for https://github.com/turbonomic/kubeturbo-deploy --- .dockerignore | 3 + .golangci.yml | 40 + Dockerfile | 51 + Makefile | 639 +++++ PROJECT | 33 + README.md | 218 ++ api/v1/groupversion_info.go | 36 + api/v1/kubeturbo_types.go | 485 ++++ api/v1/zz_generated.deepcopy.go | 720 +++++ api/v1alpha1/groupversion_info.go | 36 + api/v1alpha1/kubeturbo_types.go | 64 + api/v1alpha1/zz_generated.deepcopy.go | 114 + .../manifests/bases/annotations.yaml | 11 + ...turbo-certified.clusterserviceversion.yaml | 233 ++ .../cluster_permissions_automation.py | 29 + .../manifests/kustomization.yaml | 4 + cmd/main.go | 211 ++ .../bases/charts.helm.k8s.io_kubeturbos.yaml | 1330 ++++++++++ config/crd/kustomization.yaml | 25 + config/crd/kustomizeconfig.yaml | 19 + .../crd/patches/api-approved-annotation.yaml | 7 + config/default/kustomization.yaml | 142 + config/default/manager_auth_proxy_patch.yaml | 39 + config/default/manager_config_patch.yaml | 10 + config/manager/kustomization.yaml | 8 + config/manager/manager.yaml | 112 + config/manifests/kustomization.yaml | 28 + config/prometheus/kustomization.yaml | 2 + config/prometheus/monitor.yaml | 25 + .../rbac/auth_proxy_client_clusterrole.yaml | 16 + config/rbac/auth_proxy_role.yaml | 24 + config/rbac/auth_proxy_role_binding.yaml | 19 + config/rbac/auth_proxy_service.yaml | 21 + .../rbac/kubeturbo-operator-cluster-role.yaml | 81 + config/rbac/kubeturbo_editor_role.yaml | 31 + config/rbac/kubeturbo_viewer_role.yaml | 27 + config/rbac/kustomization.yaml | 18 + config/rbac/leader_election_role.yaml | 44 + config/rbac/leader_election_role_binding.yaml | 19 + config/rbac/role.yaml | 32 + config/rbac/role_binding.yaml | 19 + config/rbac/service_account.yaml | 12 + config/samples/charts_v1_kubeturbo.yaml | 120 + config/samples/charts_v1alpha1_kubeturbo.yaml | 50 + 
config/samples/kustomization.yaml | 5 + config/scorecard/bases/config.yaml | 7 + config/scorecard/kustomization.yaml | 16 + config/scorecard/patches/basic.config.yaml | 10 + config/scorecard/patches/olm.config.yaml | 50 + deploy/README.md | 33 + deploy/kubeturbo/.helmignore | 22 + deploy/kubeturbo/Chart.yaml | 5 + deploy/kubeturbo/HELM_README.md | 16 + deploy/kubeturbo/templates/_helpers.tpl | 32 + deploy/kubeturbo/templates/configmap.yaml | 88 + deploy/kubeturbo/templates/deployment.yaml | 135 + .../kubeturbo/templates/serviceaccount.yaml | 238 ++ deploy/kubeturbo/values.yaml | 177 ++ .../kubeturbo_crd.yaml | 1330 ++++++++++ .../kubeturbo_operator_full.yaml | 330 +++ .../kubeturbo_operator_least_admin_full.yaml | 330 +++ .../kubeturbo_operator_reader_full.yaml | 330 +++ .../kubeturbo_sample_cr.yaml | 120 + .../operator-bundle.yaml | 1528 +++++++++++ deploy/kubeturbo_yamls/YAMLS_README.md | 26 + deploy/kubeturbo_yamls/kubeturbo_full.yaml | 237 ++ .../kubeturbo_least_admin_full.yaml | 373 +++ ...bo_namespace_turbo_credentials_secret.yaml | 18 + .../kubeturbo_reader_full.yaml | 305 +++ .../kubeturbo_yamls/pasadena_kubeturbo.yaml | 126 + .../step1_turbo_namespace.yaml | 7 + .../step2_turbo_serviceAccount_sample.yaml | 8 + ...erviceAccountRoleBinding_admin_sample.yaml | 22 + ...oleBinding_turbo-cluster-admin_sample.yaml | 22 + ...leBinding_turbo-cluster-reader_sample.yaml | 22 + .../step4_turbo_configMap_sample.yaml | 78 + .../step5_turbo_kubeturboDeploy.yaml | 135 + deploy/kubeturbo_yamls/turbo-admin.yaml | 124 + deploy/kubeturbo_yamls/turbo-reader.yaml | 67 + .../turbo_kubeturbo_operator_full.yaml | 330 +++ ...o_kubeturbo_operator_least_admin_full.yaml | 330 +++ .../turbo_kubeturbo_operator_reader_full.yaml | 330 +++ ...urbo_opsmgr_credentials_secret_sample.yaml | 11 + deploy/tsc_operator_yamls/README | 10 + .../tsc_operator_yamls/operator-bundle.yaml | 2318 +++++++++++++++++ go.mod | 73 + go.sum | 226 ++ hack/boilerplate.go.txt | 15 + 
internal/api/kubeturbo/reconciler.go | 753 ++++++ internal/api/kubeturbo/request.go | 59 + internal/constants/constants.go | 28 + internal/controller/kubeturbo_controller.go | 207 ++ .../controller/kubeturbo_controller_test.go | 84 + internal/controller/suite_test.go | 90 + internal/kubeturbo/annotator/annotator.go | 70 + internal/kubeturbo/informer/informer.go | 62 + internal/kubeturbo/types.go | 72 + internal/kubeturbo/zz_generated.deepcopy.go | 83 + internal/reconcile/reconcile.go | 45 + internal/request/request.go | 84 + internal/runnable/CRDCheck.go | 205 ++ internal/utils/errors.go | 16 + internal/utils/errors_test.go | 68 + internal/utils/json.go | 3 + internal/utils/mapbuilder.go | 33 + internal/utils/mapbuilder_test.go | 60 + internal/utils/pointers.go | 7 + internal/utils/pointers_test.go | 43 + .../utils/test/kubernetesclient/directives.go | 4 + .../kubernetesclientfakes/fake_client.go | 1019 ++++++++ internal/utils/utils.go | 37 + internal/utils/utils_suite_test.go | 13 + internal/utils/utils_test.go | 28 + scripts/download_tools.sh | 51 + scripts/export_yamls.sh | 111 + scripts/install_kubeturbo_via_operator.sh | 868 ++++++ scripts/kubeturbo_deployment_helm_test.sh | 100 + scripts/kubeturbo_deployment_yaml_test.sh | 117 + scripts/multi-node-kind-cluster.yaml | 7 + scripts/public_repo_update.sh | 105 + test/e2e/e2e_kt_reconcile.go | 258 ++ test/e2e/e2e_operator_lifecycle.go | 153 ++ test/e2e/e2e_suite_test.go | 32 + test/utils/utils.go | 430 +++ 124 files changed, 20627 insertions(+) create mode 100644 .dockerignore create mode 100644 .golangci.yml create mode 100644 Dockerfile create mode 100644 Makefile create mode 100644 PROJECT create mode 100644 README.md create mode 100644 api/v1/groupversion_info.go create mode 100644 api/v1/kubeturbo_types.go create mode 100644 api/v1/zz_generated.deepcopy.go create mode 100644 api/v1alpha1/groupversion_info.go create mode 100644 api/v1alpha1/kubeturbo_types.go create mode 100644 
api/v1alpha1/zz_generated.deepcopy.go create mode 100644 certified-bundle-config/manifests/bases/annotations.yaml create mode 100644 certified-bundle-config/manifests/bases/kubeturbo-certified.clusterserviceversion.yaml create mode 100755 certified-bundle-config/manifests/cluster_permissions_automation.py create mode 100644 certified-bundle-config/manifests/kustomization.yaml create mode 100644 cmd/main.go create mode 100644 config/crd/bases/charts.helm.k8s.io_kubeturbos.yaml create mode 100644 config/crd/kustomization.yaml create mode 100644 config/crd/kustomizeconfig.yaml create mode 100644 config/crd/patches/api-approved-annotation.yaml create mode 100644 config/default/kustomization.yaml create mode 100644 config/default/manager_auth_proxy_patch.yaml create mode 100644 config/default/manager_config_patch.yaml create mode 100644 config/manager/kustomization.yaml create mode 100644 config/manager/manager.yaml create mode 100644 config/manifests/kustomization.yaml create mode 100644 config/prometheus/kustomization.yaml create mode 100644 config/prometheus/monitor.yaml create mode 100644 config/rbac/auth_proxy_client_clusterrole.yaml create mode 100644 config/rbac/auth_proxy_role.yaml create mode 100644 config/rbac/auth_proxy_role_binding.yaml create mode 100644 config/rbac/auth_proxy_service.yaml create mode 100644 config/rbac/kubeturbo-operator-cluster-role.yaml create mode 100644 config/rbac/kubeturbo_editor_role.yaml create mode 100644 config/rbac/kubeturbo_viewer_role.yaml create mode 100644 config/rbac/kustomization.yaml create mode 100644 config/rbac/leader_election_role.yaml create mode 100644 config/rbac/leader_election_role_binding.yaml create mode 100644 config/rbac/role.yaml create mode 100644 config/rbac/role_binding.yaml create mode 100644 config/rbac/service_account.yaml create mode 100644 config/samples/charts_v1_kubeturbo.yaml create mode 100644 config/samples/charts_v1alpha1_kubeturbo.yaml create mode 100644 config/samples/kustomization.yaml 
create mode 100644 config/scorecard/bases/config.yaml create mode 100644 config/scorecard/kustomization.yaml create mode 100644 config/scorecard/patches/basic.config.yaml create mode 100644 config/scorecard/patches/olm.config.yaml create mode 100644 deploy/README.md create mode 100644 deploy/kubeturbo/.helmignore create mode 100644 deploy/kubeturbo/Chart.yaml create mode 100644 deploy/kubeturbo/HELM_README.md create mode 100644 deploy/kubeturbo/templates/_helpers.tpl create mode 100644 deploy/kubeturbo/templates/configmap.yaml create mode 100644 deploy/kubeturbo/templates/deployment.yaml create mode 100644 deploy/kubeturbo/templates/serviceaccount.yaml create mode 100644 deploy/kubeturbo/values.yaml create mode 100644 deploy/kubeturbo_operator_yamls/kubeturbo_crd.yaml create mode 100644 deploy/kubeturbo_operator_yamls/kubeturbo_operator_full.yaml create mode 100644 deploy/kubeturbo_operator_yamls/kubeturbo_operator_least_admin_full.yaml create mode 100644 deploy/kubeturbo_operator_yamls/kubeturbo_operator_reader_full.yaml create mode 100644 deploy/kubeturbo_operator_yamls/kubeturbo_sample_cr.yaml create mode 100644 deploy/kubeturbo_operator_yamls/operator-bundle.yaml create mode 100644 deploy/kubeturbo_yamls/YAMLS_README.md create mode 100644 deploy/kubeturbo_yamls/kubeturbo_full.yaml create mode 100644 deploy/kubeturbo_yamls/kubeturbo_least_admin_full.yaml create mode 100644 deploy/kubeturbo_yamls/kubeturbo_namespace_turbo_credentials_secret.yaml create mode 100644 deploy/kubeturbo_yamls/kubeturbo_reader_full.yaml create mode 100644 deploy/kubeturbo_yamls/pasadena_kubeturbo.yaml create mode 100644 deploy/kubeturbo_yamls/step1_turbo_namespace.yaml create mode 100644 deploy/kubeturbo_yamls/step2_turbo_serviceAccount_sample.yaml create mode 100644 deploy/kubeturbo_yamls/step3_turbo_serviceAccountRoleBinding_admin_sample.yaml create mode 100644 deploy/kubeturbo_yamls/step3_turbo_serviceAccountRoleBinding_turbo-cluster-admin_sample.yaml create mode 100644 
deploy/kubeturbo_yamls/step3_turbo_serviceAccountRoleBinding_turbo-cluster-reader_sample.yaml create mode 100644 deploy/kubeturbo_yamls/step4_turbo_configMap_sample.yaml create mode 100644 deploy/kubeturbo_yamls/step5_turbo_kubeturboDeploy.yaml create mode 100644 deploy/kubeturbo_yamls/turbo-admin.yaml create mode 100644 deploy/kubeturbo_yamls/turbo-reader.yaml create mode 100644 deploy/kubeturbo_yamls/turbo_kubeturbo_operator_full.yaml create mode 100644 deploy/kubeturbo_yamls/turbo_kubeturbo_operator_least_admin_full.yaml create mode 100644 deploy/kubeturbo_yamls/turbo_kubeturbo_operator_reader_full.yaml create mode 100644 deploy/kubeturbo_yamls/turbo_opsmgr_credentials_secret_sample.yaml create mode 100644 deploy/tsc_operator_yamls/README create mode 100644 deploy/tsc_operator_yamls/operator-bundle.yaml create mode 100644 go.mod create mode 100644 go.sum create mode 100644 hack/boilerplate.go.txt create mode 100644 internal/api/kubeturbo/reconciler.go create mode 100644 internal/api/kubeturbo/request.go create mode 100644 internal/constants/constants.go create mode 100644 internal/controller/kubeturbo_controller.go create mode 100644 internal/controller/kubeturbo_controller_test.go create mode 100644 internal/controller/suite_test.go create mode 100644 internal/kubeturbo/annotator/annotator.go create mode 100644 internal/kubeturbo/informer/informer.go create mode 100644 internal/kubeturbo/types.go create mode 100644 internal/kubeturbo/zz_generated.deepcopy.go create mode 100644 internal/reconcile/reconcile.go create mode 100644 internal/request/request.go create mode 100644 internal/runnable/CRDCheck.go create mode 100644 internal/utils/errors.go create mode 100644 internal/utils/errors_test.go create mode 100644 internal/utils/json.go create mode 100644 internal/utils/mapbuilder.go create mode 100644 internal/utils/mapbuilder_test.go create mode 100644 internal/utils/pointers.go create mode 100644 internal/utils/pointers_test.go create mode 100644 
internal/utils/test/kubernetesclient/directives.go create mode 100644 internal/utils/test/kubernetesclient/kubernetesclientfakes/fake_client.go create mode 100644 internal/utils/utils.go create mode 100644 internal/utils/utils_suite_test.go create mode 100644 internal/utils/utils_test.go create mode 100644 scripts/download_tools.sh create mode 100755 scripts/export_yamls.sh create mode 100755 scripts/install_kubeturbo_via_operator.sh create mode 100755 scripts/kubeturbo_deployment_helm_test.sh create mode 100755 scripts/kubeturbo_deployment_yaml_test.sh create mode 100644 scripts/multi-node-kind-cluster.yaml create mode 100755 scripts/public_repo_update.sh create mode 100644 test/e2e/e2e_kt_reconcile.go create mode 100644 test/e2e/e2e_operator_lifecycle.go create mode 100644 test/e2e/e2e_suite_test.go create mode 100644 test/utils/utils.go diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..a3aab7a --- /dev/null +++ b/.dockerignore @@ -0,0 +1,3 @@ +# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file +# Ignore build and test binaries. 
+bin/ diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..aed8644 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,40 @@ +run: + deadline: 5m + allow-parallel-runners: true + +issues: + # don't skip warning about doc comments + # don't exclude the default set of lint + exclude-use-default: false + # restore some of the defaults + # (fill in the rest as needed) + exclude-rules: + - path: "api/*" + linters: + - lll + - path: "internal/*" + linters: + - dupl + - lll +linters: + disable-all: true + enable: + - dupl + - errcheck + - exportloopref + - goconst + - gocyclo + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - lll + - misspell + - nakedret + - prealloc + - staticcheck + - typecheck + - unconvert + - unparam + - unused diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..62710a8 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,51 @@ +# Build the manager binary +FROM golang:1.21.8 AS builder +ARG TARGETOS +ARG TARGETARCH + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY cmd/main.go cmd/main.go +COPY api/ api/ +COPY internal/ internal/ + +# Build +# the GOARCH has not a default value to allow the binary be built according to the host where the command +# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO +# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, +# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. 
+RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot + +# Required OpenShift Labels +ARG VERSION +ARG DEFAULT_KUBETURBO_VERSION +ENV DEFAULT_KUBETURBO_VERSION="${DEFAULT_KUBETURBO_VERSION}" +LABEL name="Kubeturbo Operator" \ + vendor="IBM" \ + version="${VERSION}" \ + release="${VERSION}" \ + summary="This is the kubeturbo operator." \ + description="This operator will deploy an instance of kubeturbo." \ +### Required labels above - recommended below + url="https://www.ibm.com/products/turbonomic" \ + io.k8s.description="Turbonomic Workload Automation Platform simultaneously optimizes performance, compliance, and cost in real-time. Workloads are precisely resourced, automatically, to perform while satisfying business constraints. " \ + io.k8s.display-name="Kubeturbo Operator" \ + io.openshift.expose-services="" \ + io.openshift.tags="turbonomic, Multicloud Container" + +WORKDIR / +COPY --from=builder /workspace/manager . +USER 65532:65532 + +ENTRYPOINT ["/manager"] diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..1b592a8 --- /dev/null +++ b/Makefile @@ -0,0 +1,639 @@ +# VERSION defines the project version for the bundle. +# Update this value when you upgrade the version of your project. 
+# To re-generate a bundle for another specific version without changing the standard setup, you can: +# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) +# - use environment variables to overwrite this value (e.g export VERSION=0.0.2) +export VERSION ?= 8.15.1-SNAPSHOT +export DEFAULT_KUBETURBO_VERSION=$(shell echo $(VERSION) | sed -E 's/([1-9][0-9]*)\.([1-9][0-9]*)\.([0-9]+)00(.*)/\1.\2.\3\4/') + +# build info +REMOTE_URL=$(shell git config --get remote.origin.url) +GIT_COMMIT=$(shell git rev-parse HEAD) +BRANCH=$(shell git rev-parse --abbrev-ref HEAD) +REVISION=$(shell git show -s --format=%cd --date=format:'%Y%m%d%H%M%S000') +BUILD_TIMESTAMP=$(shell date +'%Y%m%d%H%M%S000') + +# CHANNELS define the bundle channels used in the bundle. +# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") +# To re-generate a bundle for other specific channels without changing the standard setup, you can: +# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable) +# - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable") +ifneq ($(origin CHANNELS), undefined) +BUNDLE_CHANNELS := --channels=$(CHANNELS) +endif + +# DEFAULT_CHANNEL defines the default channel used in the bundle. +# Add a new line here if you would like to change its default config. 
(E.g DEFAULT_CHANNEL = "stable") +# To re-generate a bundle for any other default channel without changing the default setup, you can: +# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable) +# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable") +ifneq ($(origin DEFAULT_CHANNEL), undefined) +BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) +endif +BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) + +# IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images. +# This variable is used to construct full image tags for bundle and catalog images. +# +# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both +# *-bundle:$VERSION and *-catalog:$VERSION. +export REGISTRY ?= icr.io/cpopen +# temporarily adding "-new" surffix to help on transition, we might need to remove it in the end +export OPERATOR_NAME ?= kubeturbo-operator + +# Use this value to set the Docker image registry +ifneq ($(origin REGISTRY), undefined) +_REGISTRY_PREFIX := $(REGISTRY)/ +endif + +IMAGE_TAG_BASE ?= $(_REGISTRY_PREFIX)$(OPERATOR_NAME) + +# BUNDLE_IMG defines the image:tag used for the bundle. +# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=/:) +BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION) + +# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command +BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) + +# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests +# You can enable this value if you would like to use SHA Based Digests +# To enable set flag to true +USE_IMAGE_DIGESTS ?= false +ifeq ($(USE_IMAGE_DIGESTS), true) + BUNDLE_GEN_FLAGS += --use-image-digests +endif + +# Set the Operator SDK version to use. By default, what is installed on the system is used. 
+# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. +OPERATOR_SDK_VERSION ?= v1.34.1 + +# Image URL to use all building/pushing image targets +IMG ?= $(IMAGE_TAG_BASE):$(VERSION) +# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. +ENVTEST_K8S_VERSION = 1.28.3 + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +# CONTAINER_TOOL defines the container tool to be used for building images. +# Be aware that the target commands are only tested with Docker which is +# scaffolded by default. However, you might want to replace it to use other +# tools. (i.e. podman) +CONTAINER_TOOL ?= docker + +# Setting SHELL to bash allows bash commands to be executed by recipes. +# Options are set to exit when a recipe line exits non-zero or a piped command fails. +SHELL = /usr/bin/env bash -o pipefail +.SHELLFLAGS = -ec + +.PHONY: all +all: build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk command is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. 
+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: manifests +manifests: controller-gen kustomize ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. + $(CONTROLLER_GEN) rbac:roleName=kubeturbo-operator crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + $(KUSTOMIZE) build config/crd -o config/crd/bases/charts.helm.k8s.io_kubeturbos.yaml + +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." + +.PHONY: export_yaml +export_yaml: export_operator_yaml_bundle + sh ./scripts/export_yamls.sh + +export YAML_BUNDLE_DIR ?= deploy/kubeturbo_operator_yamls +.PHONY: export_operator_yaml_bundle +export_operator_yaml_bundle: manifests kustomize + mkdir -p $(YAML_BUNDLE_DIR) + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | sed 's|__NAMESPACE__|$(NAMESPACE)|g' > $(YAML_BUNDLE_DIR)/operator-bundle.yaml + +.PHONY: fmt +fmt: ## Run go fmt against code. + go fmt ./... + +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: test +test: manifests generate fmt vet envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out + +# Utilize Kind or modify the e2e tests to load the image locally, enabling compatibility with other vendors. +export TESTING_LOGGING_LEVEL ?= WARN +.PHONY: test-e2e # Run the e2e tests against a Kind k8s instance that is spun up. 
+test-e2e: create-kind-cluster kubectl + go test ./test/e2e/ -v -ginkgo.v + +GOLANGCI_LINT = $(LOCALBIN)/golangci-lint +GOLANGCI_LINT_VERSION ?= v1.54.2 +golangci-lint: $(LOCALBIN) + @[ -f $(GOLANGCI_LINT) ] || { \ + set -e ;\ + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell dirname $(GOLANGCI_LINT)) $(GOLANGCI_LINT_VERSION) ;\ + } + +.PHONY: lint +lint: golangci-lint ## Run golangci-lint linter & yamllint + $(GOLANGCI_LINT) run + +.PHONY: lint-fix +lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes + $(GOLANGCI_LINT) run --fix + +##@ Build + +.PHONY: build +build: manifests generate fmt vet ## Build manager binary. + go build -o bin/manager cmd/main.go + +.PHONY: run +run: manifests generate fmt vet ## Run a controller from your host. + go run ./cmd/main.go + +.PHONY: buildInfo +buildInfo: + $(shell test -f git.properties && rm -rf git.properties) + @echo 'turbo-version.remote.origin.url=$(REMOTE_URL)' >> git.properties + @echo 'turbo-version.commit.id=$(GIT_COMMIT)' >> git.properties + @echo 'turbo-version.branch=$(BRANCH)' >> git.properties + @echo 'turbo-version.branch.version=$(VERSION)' >> git.properties + @echo 'turbo-version.commit.time=$(REVISION)' >> git.properties + @echo 'turbo-version.build.time=$(BUILD_TIMESTAMP)' >> git.properties + +# If you wish to build the manager image targeting other platforms you can use the --platform flag. +# (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it. +# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +.PHONY: docker-build +docker-build: docker-precheck ## Build docker image with the manager. + $(CONTAINER_TOOL) build --no-cache --build-arg VERSION=$(VERSION) --build-arg DEFAULT_KUBETURBO_VERSION=$(DEFAULT_KUBETURBO_VERSION) -t ${IMG} . + +.PHONY: docker-push +docker-push: ## Push docker image with the manager. 
+ $(CONTAINER_TOOL) push ${IMG} + +# PLATFORMS defines the target platforms for the manager image be built to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ +# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=> then the export will fail) +# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: docker-precheck ## Build and push docker image for the manager for cross-platform support + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - $(CONTAINER_TOOL) buildx create --name $(OPERATOR_NAME)-builder + $(CONTAINER_TOOL) buildx use $(OPERATOR_NAME)-builder + - $(CONTAINER_TOOL) buildx build --label "git-commit=$(GIT_COMMIT)" --label "git-version=$(VERSION)" --build-arg VERSION=$(VERSION) --build-arg DEFAULT_KUBETURBO_VERSION=$(DEFAULT_KUBETURBO_VERSION) --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . + - $(CONTAINER_TOOL) buildx rm $(OPERATOR_NAME)-builder + rm Dockerfile.cross + +##@ Deployment + +ifndef ignore-not-found + ignore-not-found = false +endif + +.PHONY: install +install: manifests kustomize kubectl ## Install CRDs into the K8s cluster specified in ~/.kube/config. 
+ $(KUSTOMIZE) build config/crd | $(KUBECTL) $(KUBECONFIG_STR) apply -f - + +.PHONY: uninstall +uninstall: manifests kustomize kubectl ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/crd | $(KUBECTL) $(KUBECONFIG_STR) delete --ignore-not-found=$(ignore-not-found) -f - + +# Developer Edit: added NAMESPACE variable and sed command +export NAMESPACE ?= turbo +.PHONY: deploy +deploy: manifests kustomize kubectl ## Deploy controller to the K8s cluster specified in ~/.kube/config. + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | sed 's|__NAMESPACE__|$(NAMESPACE)|g' | $(KUBECTL) $(KUBECONFIG_STR) apply -f - + +.PHONY: undeploy +undeploy: kustomize kubectl ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. 
+ $(KUSTOMIZE) build config/default | sed 's|__NAMESPACE__|$(NAMESPACE)|g' | $(KUBECTL) $(KUBECONFIG_STR) delete --ignore-not-found=$(ignore-not-found) -f - + +##@ Build Dependencies + +## Location copy api folder and the internal folder for docker build +docker-precheck: + mkdir -p api + mkdir -p internal/controller/ + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +## Tool Binaries, those exported vars are required in test while using make cil +export KUBECTL ?= $(shell command -v kubectl >/dev/null 2>&1 && echo kubectl || echo $(LOCALBIN)/kubectl) +export KIND ?= $(shell command -v kind >/dev/null 2>&1 && echo kind || echo $(LOCALBIN)/kind) +export KIND_CLUSTER ?= $(OPERATOR_NAME)-kind +export KIND_KUBECONFIG ?= $(HOME)/.kube/kind-config +export HELM ?= $(LOCALBIN)/helm +export KUSTOMIZE ?= $(LOCALBIN)/kustomize +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen +ENVTEST ?= $(LOCALBIN)/setup-envtest + + +## Tool Versions +KUSTOMIZE_VERSION ?= v5.2.1 +CONTROLLER_TOOLS_VERSION ?= v0.15.0 + +.PHONY: kubectl +kubectl: $(LOCALBIN) ## Download kubectl locally if necessary. + @if ! command -v kubectl >/dev/null 2>&1 ; then \ + test -s $(LOCALBIN)/kubectl || \ + curl -Lo $(LOCALBIN)/kubectl "https://storage.googleapis.com/kubernetes-release/release/$(shell curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/$(shell go env GOOS)/$(shell go env GOARCH)/kubectl" && \ + chmod +x "$(LOCALBIN)/kubectl"; \ + fi + +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. If wrong version is installed, it will be removed before downloading. +$(KUSTOMIZE): $(LOCALBIN) + @if test -x $(LOCALBIN)/kustomize && ! $(LOCALBIN)/kustomize version | grep -q $(KUSTOMIZE_VERSION); then \ + echo "$(LOCALBIN)/kustomize version is not expected $(KUSTOMIZE_VERSION). 
Removing it before installing."; \ + rm -rf $(LOCALBIN)/kustomize; \ + fi + test -s $(LOCALBIN)/kustomize || GOBIN=$(LOCALBIN) GO111MODULE=on go install sigs.k8s.io/kustomize/kustomize/v5@$(KUSTOMIZE_VERSION) + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. If wrong version is installed, it will be overwritten. +$(CONTROLLER_GEN): $(LOCALBIN) + test -s $(LOCALBIN)/controller-gen && $(LOCALBIN)/controller-gen --version | grep -q $(CONTROLLER_TOOLS_VERSION) || \ + GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) + +.PHONY: envtest +envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. +$(ENVTEST): $(LOCALBIN) + test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest + +.PHONY: kind +kind: $(LOCALBIN) ## Download kind locally if necessary. + @if ! command -v kind >/dev/null 2>&1 ; then \ + test -s $(LOCALBIN)/kind || GOBIN=$(LOCALBIN) go install sigs.k8s.io/kind@latest; \ + fi + +.PHONY: operator-sdk +OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk +operator-sdk: ## Download operator-sdk locally if necessary. +ifeq (,$(wildcard $(OPERATOR_SDK))) +ifeq (, $(shell which operator-sdk 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPERATOR_SDK)) ;\ + OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ + curl -sSLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk_$${OS}_$${ARCH} ;\ + chmod +x $(OPERATOR_SDK) ;\ + } +else +OPERATOR_SDK = $(shell which operator-sdk) +endif +endif + +.PHONY: bundle +bundle: manifests kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files. 
+ $(OPERATOR_SDK) generate kustomize manifests -q + cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) + $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS) + $(OPERATOR_SDK) bundle validate ./bundle + +.PHONY: bundle-build +bundle-build: ## Build the bundle image. + docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . + +.PHONY: bundle-push +bundle-push: ## Push the bundle image. + $(MAKE) docker-push IMG=$(BUNDLE_IMG) + +YQ ?= $(LOCALBIN)/yq +YQ_TOOLS_VERSION ?= v4.30.4 + +.PHONY: yq +yq: $(YQ) ## Download yq locally if necessary. +$(YQ): $(LOCALBIN) + test -s $(LOCALBIN)/yq || GOBIN=$(LOCALBIN) go install github.com/mikefarah/yq/v4@$(YQ_TOOLS_VERSION) + + +PYTHON = $(LOCALBIN)/python3 + +python: $(PYTHON) ## Install Python locally if necessary. Darwin OS is specific to mac users if running locally +$(PYTHON): + @if ! command -v python3 >/dev/null 2>&1; then \ + mkdir -p $(LOCALBIN); \ + if [ `uname -s` = "Darwin" ]; then \ + brew install python@3; \ + else \ + sudo apt update && sudo apt install python3; \ + fi \ + fi + # Ensure the bin directory exists before linking + @mkdir -p $(LOCALBIN) + ln -sf `command -v python3` $(PYTHON) + + + +# This parameter adjusts the patch version of the operator release. 
It suffixes the patch number with a zero (x.y.z - 'z' is the operator version patch number) +# This change in patch number incrementation strategy offers flexibility and room for post release fixes of operator bundle +OPERATOR_RELEASE_VERSION_PATCH := $(shell echo $(OPERATOR_RELEASE_VERSION) | sed -E 's/(^[0-9]+\.[0-9]+\.)([1-9])$$/\1\20/') +# This parameter adjusts the OLM inclusive range of the operator version +OPERATOR_OLM_INCLUSIVE_RANGE_VERSION ?= 8.7.5 +# This parameter adjusts the OLM inclusive range to prefix with '-beta.1' of operator version if the release channel is beta +OPERATOR_OLM_INCLUSIVE_BETA_VERSION ?= beta.1 +# This parameter is a placeholder for 'beta' keyword +OPERATOR_BETA_RELEASE_FILTER ?= beta +# This parameter is a placeholder for 'SNAPSHOT' keyword to pull the SNAPSHOT image version for beta release +OPERATOR_BETA_RELEASE_VERSION_SNAPSHOT ?= SNAPSHOT +OPERATOR_CERTIFIED ?= kubeturbo-certified +OPERATOR_BUNDLE_DIR ?= certified-operator-bundle +OPERATOR_BUNDLE_CONFIG_DIR ?= certified-bundle-config +# This is a path to copy the crd into operator bundle +OPERATOR_CRD_FILE_PATH ?= config/crd/bases/charts.helm.k8s.io_kubeturbos.yaml +# This is a path to copy cluster role permission into csv +CLUSTER_PERMISSION_ROLE_YAML_FILE_PATH ?= config/rbac/kubeturbo-operator-cluster-role.yaml +CERTIFIED_OPERATOR_CLUSTER_SERVICE_VERSION_YAML_FILE_PATH ?= $(OPERATOR_BUNDLE_DIR)/manifests/kubeturbo-certified.clusterserviceversion.yaml +# This is a path to github repo to verify the existing operator bundle versions released +GITHUB_REPO_URL := https://api.github.com/repos/turbonomic/certified-operators/contents/operators/kubeturbo-certified +.PHONY: build-certified-operator-bundle +build-certified-operator-bundle:yq python operator-sdk verify_bundle_creation_parameters create_certified_operator_bundle_directory update_image_digest_in_operator_bundle update_operator_version_and_olm_skipRange_in_operator_bundle update_cluster_permissions_in_operator_bundle 
update_release_channel_in_operator_bundle validate_operator_bundle +## Verify bundle creation parameters +.PHONY: verify_bundle_creation_parameters +verify_bundle_creation_parameters: verify_operator_release_versions verify_operator_release_channel verify_stable_operator_release_version verify_image_digest_version +## Verify either the operator release version or the operator release version patch is not empty +verify_operator_release_versions: + @if [ -z "$(OPERATOR_RELEASE_VERSION)" ] || [ -z "$(OPERATOR_RELEASE_VERSION_PATCH)" ]; then \ + echo "Error: The operator release version is empty, cannot proceed with $(OPERATOR_CERTIFIED)-operator bundle release."; \ + exit 1; \ + fi +## verify operator release channel, to allow only valid releases +verify_operator_release_channel: + ifneq ($(filter $(OPERATOR_RELEASE_CHANNEL),stable beta),$(OPERATOR_RELEASE_CHANNEL)) + $(error Invalid operator release channel parameter - $(OPERATOR_RELEASE_CHANNEL). valid release channels are either "stable" or "beta only".) 
+ endif +## verify operator release version on stable channel, to avoid multiple releases of same version +verify_stable_operator_release_version: + if [ "$(OPERATOR_RELEASE_CHANNEL)" = "stable" ]; then \ + echo "Checking if the stable release version $(OPERATOR_RELEASE_VERSION_PATCH) exists..."; \ + if [ -n "$$(curl -s "$(GITHUB_REPO_URL)" | jq -r 'map(select(.type == "dir" and .name == "$(OPERATOR_RELEASE_VERSION_PATCH)")) | .[].name')" ]; then \ + echo "Error: The operator release version already exists for stable channel: $(OPERATOR_RELEASE_VERSION_PATCH)."; \ + exit 1; \ + fi; \ + fi +## verify if the version field value is present in the image to proceed, if empty exit the execution +verify_image_digest_version: + @echo "Verify Image Digest version field value" + $(eval OPERATOR_IMAGE_RELEASE_VERSION := $(if $(filter $(OPERATOR_BETA_RELEASE_FILTER),$(OPERATOR_RELEASE_CHANNEL)),$(OPERATOR_RELEASE_VERSION)-$(OPERATOR_BETA_RELEASE_VERSION_SNAPSHOT),$(OPERATOR_RELEASE_VERSION))) + docker pull $(REGISTRY)/$(OPERATOR_NAME):$(OPERATOR_IMAGE_RELEASE_VERSION) + version=$$(docker inspect $(REGISTRY)/$(OPERATOR_NAME):$(OPERATOR_IMAGE_RELEASE_VERSION) | grep '"version":' | awk '{print $$2}' | tr -d '",'); \ + if [ -z "$$version" ]; then \ + echo "Error: Image digest version field is empty, cannot procced with $(OPERATOR_CERTIFIED)-operator bundle release."; \ + exit 1; \ + elif [ "$$version" != "$(OPERATOR_IMAGE_RELEASE_VERSION)" ]; then \ + echo "Error: Image digest version field: ($$version) does not match operator release version: ($(OPERATOR_IMAGE_RELEASE_VERSION))."; \ + exit 1; \ + else \ + echo "Image digest validation successful, proceeding with next steps."; \ + fi + +## create certified operator bundle dir and copy the base files to update the clusterserviceversion and metadata contents as required for releasing +create_certified_operator_bundle_directory: + @echo "Creating certified operator bundle files for $(OPERATOR_CERTIFIED) clusterserviceversion..." 
+ mkdir -p $(OPERATOR_BUNDLE_DIR)/manifests/ + cp $(OPERATOR_CRD_FILE_PATH) $(OPERATOR_BUNDLE_DIR)/manifests/kubeturbos.charts.helm.k8s.io.crd.yaml + cp $(OPERATOR_BUNDLE_CONFIG_DIR)/manifests/bases/$(OPERATOR_CERTIFIED).clusterserviceversion.yaml $(OPERATOR_BUNDLE_DIR)/manifests/$(OPERATOR_CERTIFIED).clusterserviceversion.yaml + mkdir -p $(OPERATOR_BUNDLE_DIR)/metadata/ + cp $(OPERATOR_BUNDLE_CONFIG_DIR)/manifests/bases/annotations.yaml $(OPERATOR_BUNDLE_DIR)/metadata/annotations.yaml + @echo "$(OPERATOR_CERTIFIED)-operator bundle directory created successfully." + +## update image digest key +update_image_digest_in_operator_bundle: + @echo "Updating image digest in $(OPERATOR_CERTIFIED)-clusterserviceversion..." + $(eval OPERATOR_IMAGE_RELEASE_VERSION := $(if $(filter $(OPERATOR_BETA_RELEASE_FILTER),$(OPERATOR_RELEASE_CHANNEL)),$(OPERATOR_RELEASE_VERSION)-$(OPERATOR_BETA_RELEASE_VERSION_SNAPSHOT),$(OPERATOR_RELEASE_VERSION))) + digest=$$(docker inspect --format='{{index .RepoDigests 0}}' $(REGISTRY)/$(OPERATOR_NAME):$(OPERATOR_IMAGE_RELEASE_VERSION) | awk -F@ '{print $$2}'); \ + if [ -z "$$digest" ]; then \ + echo "Error: Image digest is empty, cannot proceed with $(OPERATOR_CERTIFIED)-operator bundle release."; \ + exit 1; \ + else \ + $(YQ) eval -i '.spec.install.spec.deployments[0].spec.template.spec.containers[0].image |= sub("sha256:.*", "'$$digest'") | .spec.relatedImages[0].image |= sub("sha256:.*", "'$$digest'")' \ + $(OPERATOR_BUNDLE_DIR)/manifests/$(OPERATOR_CERTIFIED).clusterserviceversion.yaml; \ + echo "$(OPERATOR_CERTIFIED)-clusterserviceversion image digest updated."; \ + fi + +## Update release version, olm.skipRange, and check beta release version +update_operator_version_and_olm_skipRange_in_operator_bundle: + @echo "Checking the minor version in the beta release channel, and updating release versions as well as the olm.skipRange value in $(OPERATOR_CERTIFIED)-clusterserviceversion..." 
+## Check the release candidate version for the beta channel; if it exists, increment the minor version (beta.x). + $(eval OPERATOR_VERSION_BETA := $(shell \ + OPERATOR_BETA_RELEASE_MINOR_VERSION=$$(curl -s "$(GITHUB_REPO_URL)" | \ + jq -r 'map(select(.type == "dir")) | .[].name | match("$(OPERATOR_RELEASE_VERSION_PATCH)-beta\\.[0-9]+") | try .string catch "0"' | \ + awk -F'.' 'BEGIN{max=0} {n=substr($$0, index($$0, "beta.")+5)+0; if (n>max) max=n} END{print max}'); \ + if [ -z "$$OPERATOR_BETA_RELEASE_MINOR_VERSION" ]; then \ + OPERATOR_BETA_RELEASE_MINOR_VERSION=1; \ + else \ + OPERATOR_BETA_RELEASE_MINOR_VERSION=$$(($$OPERATOR_BETA_RELEASE_MINOR_VERSION + 1)); \ + fi; \ + OPERATOR_VERSION_BETA=beta.$$OPERATOR_BETA_RELEASE_MINOR_VERSION; \ + echo "$$OPERATOR_VERSION_BETA" \ + )) + $(eval OPERATOR_RELEASE_CHANNEL_VERSION := $(if $(filter $(OPERATOR_BETA_RELEASE_FILTER),$(OPERATOR_RELEASE_CHANNEL)),$(OPERATOR_RELEASE_VERSION_PATCH)-$(OPERATOR_VERSION_BETA),$(OPERATOR_RELEASE_VERSION_PATCH))) + $(eval OLMRANGE_LOWER_BOUND := $(if $(filter $(OPERATOR_BETA_RELEASE_FILTER),$(OPERATOR_RELEASE_CHANNEL)),$(OPERATOR_OLM_INCLUSIVE_RANGE_VERSION)-$(OPERATOR_OLM_INCLUSIVE_BETA_VERSION),$(OPERATOR_OLM_INCLUSIVE_RANGE_VERSION))) + $(eval OLMRANGE_UPPER_BOUND := $(if $(filter $(OPERATOR_BETA_RELEASE_FILTER),$(OPERATOR_RELEASE_CHANNEL)),$(OPERATOR_RELEASE_VERSION_PATCH)-$(OPERATOR_VERSION_BETA),$(OPERATOR_RELEASE_VERSION_PATCH))) +## Update the release version in clusterserviceversion + $(YQ) eval -i '.metadata.name |= sub("kubeturbo-operator.v.*", "kubeturbo-operator.v$(OPERATOR_RELEASE_CHANNEL_VERSION)") | .spec.version = "$(OPERATOR_RELEASE_CHANNEL_VERSION)"' \ + $(OPERATOR_BUNDLE_DIR)/manifests/$(OPERATOR_CERTIFIED).clusterserviceversion.yaml +## Update skipRange based on inclusive and exclusive release versions set in clusterserviceversion + $(YQ) eval -i '.metadata.annotations."olm.skipRange" |= sub(">=[^<]+", ">=$(OLMRANGE_LOWER_BOUND)") | 
.metadata.annotations."olm.skipRange" |= sub("<[^<]+", " <$(OLMRANGE_UPPER_BOUND)")' \ + $(OPERATOR_BUNDLE_DIR)/manifests/$(OPERATOR_CERTIFIED).clusterserviceversion.yaml + @echo "$(OPERATOR_CERTIFIED)-clusterserviceversion release version, olm.skipRange are updated successfully." + +## update cluster permissions roles +update_cluster_permissions_in_operator_bundle: + @echo "Updating cluster permissions roles in $(OPERATOR_CERTIFIED)-clusterserviceversion..." + $(PYTHON) $(OPERATOR_BUNDLE_CONFIG_DIR)/manifests/cluster_permissions_automation.py \ + $(CLUSTER_PERMISSION_ROLE_YAML_FILE_PATH) \ + $(CERTIFIED_OPERATOR_CLUSTER_SERVICE_VERSION_YAML_FILE_PATH) + @echo "$(OPERATOR_CERTIFIED)-clusterserviceversion cluster permissions roles updated successfully." + +## update release channel +update_release_channel_in_operator_bundle: + @echo "Updating release channel in $(OPERATOR_CERTIFIED)-annotations..." + $(YQ) eval -i '.annotations."operators.operatorframework.io.bundle.channels.v1" = "$(OPERATOR_RELEASE_CHANNEL)"' \ + $(OPERATOR_BUNDLE_DIR)/metadata/annotations.yaml + @echo "$(OPERATOR_CERTIFIED)-annotations release channel updated successfully." + +## validate operator bundle +validate_operator_bundle: + @echo "Validating $(OPERATOR_BUNDLE_DIR) through Operator SDK:$(OPERATOR_SDK_VERSION) for compliance and correctness..." + $(OPERATOR_SDK) bundle validate ./$(OPERATOR_BUNDLE_DIR) + + +.PHONY: opm +OPM = $(LOCALBIN)/opm +opm: ## Download opm locally if necessary. +ifeq (,$(wildcard $(OPM))) +ifeq (,$(shell which opm 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPM)) ;\ + OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ + curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.23.0/$${OS}-$${ARCH}-opm ;\ + chmod +x $(OPM) ;\ + } +else +OPM = $(shell which opm) +endif +endif + +# A comma-separated list of bundle images (e.g. 
make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0). +# These images MUST exist in a registry and be pull-able. +BUNDLE_IMGS ?= $(BUNDLE_IMG) + +# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0). +CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION) + +# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image. +ifneq ($(origin CATALOG_BASE_IMG), undefined) +FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG) +endif + +# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'. +# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see: +# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator +.PHONY: catalog-build +catalog-build: opm ## Build a catalog image. + $(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) + +# Push the catalog image. +.PHONY: catalog-push +catalog-push: ## Push a catalog image. 
+ $(MAKE) docker-push IMG=$(CATALOG_IMG) + +## Custom targets + +# Create a kind cluster if not exist +.PHONY: create-kind-cluster +create-kind-cluster: kind + $(KIND) get clusters | grep "^$(KIND_CLUSTER)$$" || \ + $(KIND) create cluster \ + --name $(KIND_CLUSTER) \ + --kubeconfig $(KIND_KUBECONFIG) \ + --config ./scripts/multi-node-kind-cluster.yaml + +.PHONY: describe-vars +describe-vars: + # REGISTRY: $(REGISTRY) + # OPERATOR_NAME: $(OPERATOR_NAME) + # VERSION: $(VERSION) + # NAMESPACE: $(NAMESPACE) + # KUBECTL: $(KUBECTL) + # KIND: $(KIND) + # KIND_CLUSTER: $(KIND_CLUSTER) + # KIND_KUBECONFIG: $(KIND_KUBECONFIG) + +.PHONY: go-mod-tidy +go-mod-tidy: ## Add missing and remove unused Go modules + go mod tidy + +.PHONY: go-generate +go-generate: ## Run go code generation + go get github.com/maxbrunsfeld/counterfeiter/v6 + go generate ./... + +.PHONY: git-check-generated-items +git-check-generated-items: manifests generate export_yaml run-shellcheck + @echo "Checking if all 'make manifests generate export_yaml' items are commited ..." + $(eval result = $(shell git status --untracked-files=all | grep -oE '\s+(config|api|deploy)(/[a-zA-Z0-9._-]+)*' | sed -e 's/\t//g' -e 's/ //g')) + @if [[ -n "$(result)" ]] ; then \ + echo "Here are some uncommitted auto-generated files:"; \ + for it in $(result); do echo "* $$it"; done; \ + echo "Please run 'make git-check-generated-items' before pushing the branch."; \ + exit 1; \ + fi + @echo "Done for checking auto-generated files!" + +.PHONY: helm +helm: $(LOCALBIN) ## Download helm locally if necessary. + @if ! 
command -v $(HELM) >/dev/null 2>&1 ; then \ + OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ + curl -Lo "helm-v3.16.1-$${OS}-$${ARCH}.tar.gz" "https://get.helm.sh/helm-v3.16.1-$${OS}-$${ARCH}.tar.gz" && \ + tar -zxvf "helm-v3.16.1-$${OS}-$${ARCH}.tar.gz" && \ + mv $${OS}-$${ARCH}/helm $(LOCALBIN)/ && \ + rm -f "helm-v3.16.1-$${OS}-$${ARCH}.tar.gz" && \ + rm -rf "$${OS}-$${ARCH}/"; \ + fi + $(HELM) version + +HELM_LINTER := docker run --rm --workdir=/data --volume $(shell pwd):/data quay.io/helmpack/chart-testing:v3.11.0 ct + +.PHONY:helm-lint +helm-lint: + $(HELM_LINTER) lint --charts deploy/kubeturbo --validate-maintainers=false --target-branch staging + +.PHONY:public-repo-update +public-repo-update: helm + @if [[ "$(VERSION)" =~ ^[0-9]+\.[0-9]+\.[0-9]+$$ ]] ; then \ + ./scripts/public_repo_update.sh ${VERSION}; \ + fi + +.PHONY: helm-test +helm-test: helm-lint helm create-kind-cluster kubectl + VERSION=${DEFAULT_KUBETURBO_VERSION} KUBECONFIG=${KIND_KUBECONFIG} ./scripts/kubeturbo_deployment_helm_test.sh + +.PHONY: yaml-test +yaml-test: create-kind-cluster kubectl + VERSION=${DEFAULT_KUBETURBO_VERSION} KUBECONFIG=${KIND_KUBECONFIG} ./scripts/kubeturbo_deployment_yaml_test.sh +# Minimum severity of errors to consider (error, warning, info, style) +SHELLCHECK_SEVERITY ?= "warning" +SHELLCHECK_FOLDER ?= "scripts" +SHELLCHECK ?= $(LOCALBIN)/shellcheck + +.PHONY: shellcheck +shellcheck: $(SHELLCHECK) ## Download shellcheck locally if necessary. + +# shellcheck tool refers to https://github.com/koalaman/shellcheck +$(SHELLCHECK): $(LOCALBIN) + LOCALBIN=$(LOCALBIN) sh ./scripts/download_tools.sh + +.PHONY: run-shellcheck +run-shellcheck: shellcheck ## Run shellcheck against bash files to check the error syntax + @echo "Running shellcheck against all *.sh files under the $(SHELLCHECK_FOLDER) folder..." + @echo "Note: In case of failure, please run 'make run-shellcheck' for more info!" 
+ @find $(SHELLCHECK_FOLDER) -type f -name "*.sh" -exec $(SHELLCHECK) --severity=$(SHELLCHECK_SEVERITY) {} +; + @echo "Done for shellcheck scan!" + +.PHONY: run-shellcheck-docker ## In case shellcheck cil not working then use docker image for instead +run-shellcheck-docker: ## Run shellcheck against bash files to check the error syntax using docker image + @echo "Running shellcheck against all *.sh files under the $(SHELLCHECK_FOLDER) folder..." + @find $(SHELLCHECK_FOLDER) -type f -name "*.sh" -exec docker run --rm -v "$(shell pwd):/mnt" koalaman/shellcheck:stable --severity=$(SHELLCHECK_SEVERITY) {} +; + @echo "Done for shellcheck scan!" + +.PHONY: CI +CI: docker-build + +CD: docker-buildx diff --git a/PROJECT b/PROJECT new file mode 100644 index 0000000..7a33d85 --- /dev/null +++ b/PROJECT @@ -0,0 +1,33 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. +# More info: https://book.kubebuilder.io/reference/project-config.html +domain: helm.k8s.io +layout: +- go.kubebuilder.io/v4 +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} +projectName: kubeturbo-deploy +repo: github.ibm.com/turbonomic/kubeturbo-deploy +resources: +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: helm.k8s.io + group: charts + kind: Kubeturbo + path: github.ibm.com/turbonomic/kubeturbo-deploy/api/v1 + plural: kubeturbos + version: v1 +- api: + crdVersion: v1 + namespaced: true + domain: helm.k8s.io + group: charts + kind: Kubeturbo + path: github.ibm.com/turbonomic/kubeturbo-deploy/api/v1alpha1 + plural: kubeturbos + version: v1alpha1 +version: "3" diff --git a/README.md b/README.md new file mode 100644 index 0000000..cfdfc1e --- /dev/null +++ b/README.md @@ -0,0 +1,218 @@ +# kubeturbo-deploy + +[![Made with Operator 
SDK](https://img.shields.io/badge/Made%20with-Operator%20SDK%20-EE0000?logo=data:image/svg%2bxml;base64,PHN2ZyBjbGFzcz0iYmkgYmktbGlnaHRuaW5nLWNoYXJnZS1maWxsIiBmaWxsPSIjRkZGIiBoZWlnaHQ9IjE2IiB2aWV3Qm94PSIwIDAgMTYgMTYiIHdpZHRoPSIxNiIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj48cGF0aCBkPSJNMTEuMjUxLjA2OGEuNS41IDAgMCAxIC4yMjcuNThMOS42NzcgNi41SDEzYS41LjUgMCAwIDEgLjM2NC44NDNsLTggOC41YS41LjUgMCAwIDEtLjg0Mi0uNDlMNi4zMjMgOS41SDNhLjUuNSAwIDAgMS0uMzY0LS44NDNsOC04LjVhLjUuNSAwIDAgMSAuNjE1LS4wOXoiLz48L3N2Zz4=)](https://sdk.operatorframework.io/) [![Operator SDK Version](https://img.shields.io/badge/Operator%20SDK%20version-1.34.1%20-EE0000)](https://github.com/operator-framework/operator-sdk/releases/tag/v1.34.1) [![Go Version](https://img.shields.io/badge/Go%20version-1.21.8%20-00ADD8)](https://go.dev/) + +## Description + +This project aims to follow the Kubernetes [Operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) and this operator is used to install [Kubeturbo](https://github.com/turbonomic/kubeturbo/tree/master/deploy/kubeturbo). + +It uses [Controllers](https://kubernetes.io/docs/concepts/architecture/controller/) +which provides a reconcile function responsible for synchronizing resources until the desired state is reached on the cluster + + +## Getting Started + +You’ll need a Kubernetes cluster to run against. It is recommended that you run against a remote cluster. + +**Note:** Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows). + +If you are using a Turbonomic VM, it is also recommended that you use your VM's docker environment when building the Operator docker image. + +### Running on the cluster + +1. Build the Operator image: + +```sh +# Build local docker image +make docker-build + +# Build multi arch image and push to registry +make docker-buildx +``` + +2. Deploy the Operator to the cluster. 
You can set the docker image version by setting the following environment variable before running the commands. + +If `VERSION` is not set, the default value from the Makefile will be used. See the Makefile for other variables that can be set, such as `OPERATOR_NAME`, `REGISTRY` and `NAMESPACE`.: + +```sh +# Deploy operator with the latest push tag +make deploy + +# example of deploying the operator in turbo ns with a specific version +NAMESPACE=turbo VERSION=tmp-8.12.5-SNAPSHOT make deploy +``` + +**Note:** This assumes that the Operator docker image is accessible to the cluster + +3. Install a Custom Resource instance: + +Since the customize resources definition might not be available at the cluster you +work with, it's not a bad idea to always install the CRDs before applying your CR: + +```sh +make install +``` + +Sample Kubeturbo CR can be found in `config/samples/`. You can also create your own Kubeturbo CR of your choice. Install the instance using `kubectl apply`, e.g.: + +```sh +kubectl apply -f +``` + +#### Uninstall CRDs + +To delete the CRDs from the cluster: + +```sh +make uninstall +``` + +#### Undeploy Operator + +UnDeploy the operator to the cluster: + +```sh +make undeploy + +# Similar to make deploy, if you specific to install the operator +# in a specific namespace please export that namespace as well +NAMESPACE=turbo make undeploy +``` + +### Running the go based operator on local machine + +1. Install the CRDs into the cluster: + +```sh +make install +``` + +2. Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running): + +```sh +make run +``` + +**NOTE:** You can also run this in one step by running: `make install run` + +You can also use your IDE to run the operator, here is a sample for VSCode for debug purposes + +```json +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. 
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Debug Kubeturbo operator", + "type": "go", + "request": "launch", + "mode": "debug", + "program": "${workspaceFolder}/cmd", + "args": [], + "env": { + "WATCH_NAMESPACE": "turbonomic" + }, + } + ] +} +``` + +#### Testing e2e + +The end to end testing is to test if the Kubeturbo operator can be deploy successfully and to verify if the Kubeturbo can be applied as well. The test utilizes to use `Kind` cluster a the host cluster and then : +1. do `make install deploy` to install the operator +2. check if the operator is running +3. [deploy the sample Kubeturbo CR](https://github.ibm.com/turbonomic/kubeturbo-deploy/blob/staging/config/samples/charts_v1_kubeturbo.yaml) in the same namespace where the operator locates +4. + +You will need to start a kind cluster before running the e2e test +```bash +# install required tools +make kind kubectl + +# +make describe-vars +# REGISTRY: icr.io/cpopen +# OPERATOR_NAME: kubeturbo-operator +# VERSION: tmp-8.12.5-SNAPSHOT +# NAMESPACE: turbonomic +# KUBECTL: kubectl +# KIND: /root/repo/kubeturbo-deploy/bin/kind +# KIND_CLUSTER: kubeturbo-operator-kind +# KIND_KUBECONFIG: /root/.kube/kind-config + +# KIND_CLUSTER is optional, and it's default to kind +KIND_CLUSTER= make create-kind-cluster +``` + +To run the e2e test, you can simply do `make e2e-test` + +To run the e2e test in a debug mode you can use the following config in your VSCode +```json +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. 
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Debug e2e test", + "type": "go", + "request": "launch", + "mode": "test", + "program": "${workspaceFolder}/test/e2e", + "args": [ + "-ginkgo.v" + ], + "env": { + "KUBECTL": "", + "KIND": "", + "KIND_CLUSTER": "", + "KIND_KUBECONFIG": "" + }, + }, + ] +} +``` + + +### Modifying the API definitions + +If you are editing the API definitions, generate the manifests such as CRs or CRDs using: + +```sh +make manifests +``` + +**NOTE:** Run `make help` for more information on all potential `make` targets + +More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html) + +## License + +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +### Educational Resources + +- https://sdk.operatorframework.io/docs/building-operators/golang/ +- https://book.kubebuilder.io/ +- https://book.kubebuilder.io/reference/markers/crd.html +- https://book.kubebuilder.io/reference/markers/rbac.html +- https://kubectl.docs.kubernetes.io/references/kustomize/ +- https://kubernetes.io/docs/reference/access-authn-authz/rbac/ +- https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/ +- https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.0.md diff --git a/api/v1/groupversion_info.go b/api/v1/groupversion_info.go new file mode 100644 index 0000000..498e4dd --- /dev/null +++ b/api/v1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains API Schema definitions for the charts v1 API group +// +kubebuilder:object:generate=true +// +groupName=charts.helm.k8s.io +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "charts.helm.k8s.io", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1/kubeturbo_types.go b/api/v1/kubeturbo_types.go new file mode 100644 index 0000000..3948dec --- /dev/null +++ b/api/v1/kubeturbo_types.go @@ -0,0 +1,485 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + "reflect" + "strings" + + "github.ibm.com/turbonomic/kubeturbo-deploy/internal/utils" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + RoleTypeReadOnly string = "turbo-cluster-reader" + RoleTypeAdmin string = "turbo-cluster-admin" + RoleTypeClusterAdmin string = "cluster-admin" + DefaultVersion string = "VERSION" + DefaultAnnotationKey string = "kubeturbo.io/controllable" + DefaultAnnotationVal string = "false" +) + +var ( + defaultKtVersion = "" + defaultSysWlNsPatterns = []string{"kube-.*", "openshift-.*", "cattle.*"} +) + +// NB - if a block is marked as omitempty, it must be given a default value in order for inner fields to be populated with their defaults +// a single inner field value will suffice + +// KubeturboSpec defines the desired state of Kubeturbo +type KubeturboSpec struct { + // You can use this configuration to define how daemon pods are identified. + // Note if you do not enable daemonPodDetectors, the default is to identify all pods running as kind = daemonSet + // Any entry for daemonPodDetectors would overwrite default. Recommend you do not use this parameter. 
	// Detect daemon pods by pod-name and/or namespace patterns.
	// +kubebuilder:default={}
	DaemonPodDetectors DaemonPodDetectors `json:"daemonPodDetectors,omitempty"` // no default
	// The annotationWhitelist allows users to define regular expressions to allow kubeturbo to collect
	// matching annotations for the specified entity type. By default, no annotations are collected.
	// These regular expressions accept the RE2 syntax (except for \C) as defined here: https://github.com/google/re2/wiki/Syntax
	AnnotationWhitelist AnnotationWhitelist `json:"annotationWhitelist,omitempty"` // no default
	// Annotations to apply; by default only the kubeturbo.io/controllable marker is set.
	// +kubebuilder:default={"kubeturbo.io/controllable":"false"}
	Annotations map[string]string `json:"annotations,omitempty"` // default: kubeturbo.io/controllable: "false"

	// Specify 'turbo-cluster-reader' or 'turbo-cluster-admin' as role name instead of the default using
	// the 'cluster-admin' role. A cluster role with this name will be created during deployment
	// If using a role name other than the pre-defined role names, cluster role will not be created. This role should be
	// existing in the cluster and should have the necessary permissions required for kubeturbo to work accurately.
	// +kubebuilder:default=cluster-admin
	// +kubebuilder:validation:Pattern="^[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?::[a-z0-9](?:[-a-z0-9]*[a-z0-9])?)*$"
	RoleName string `json:"roleName,omitempty"`

	// The name of cluster role binding. Default is turbo-all-binding. If role binding is updated from an existing kubeturbo instance,
	// the operator will not delete the existing role binding in the cluster. Therefore, the user may want to manually delete the old
	// clusterrolebinding from the cluster so that the service account is no longer tied to the previous role binding.
	// +kubebuilder:default=turbo-all-binding
	RoleBinding string `json:"roleBinding,omitempty"` // default: "turbo-all-binding"

	// The name of the service account name. Default is turbo-user
	// +kubebuilder:default=turbo-user
	ServiceAccountName string `json:"serviceAccountName,omitempty"` // default: "turbo-user"

	// Kubeturbo replicaCount
	ReplicaCount *int32 `json:"replicaCount,omitempty"` // default: 1
	// Kubeturbo image details for deployments outside of RH Operator Hub
	// +kubebuilder:default={repository:icr.io/cpopen/turbonomic/kubeturbo, pullPolicy:IfNotPresent}
	Image KubeturboImage `json:"image,omitempty"`
	// Configuration for Turbo Server
	// +kubebuilder:default={turboServer:"https://Turbo_server_URL"}
	ServerMeta KubeturboServerMeta `json:"serverMeta,omitempty"`
	// Credentials to register probe with Turbo Server
	// +kubebuilder:default={turbonomicCredentialsSecretName:turbonomic-credentials}
	RestAPIConfig KubeturboRestAPIConfig `json:"restAPIConfig,omitempty"`
	// Configurations to register probe with Turbo Server
	// +kubebuilder:default={registrationTimeoutSec:300, restartOnRegistrationTimeout:true}
	SdkProtocolConfig KubeturboSdkProtocolConfig `json:"sdkProtocolConfig,omitempty"`
	// Enable or disable features
	FeatureGates map[string]bool `json:"featureGates,omitempty"`
	// Create HA placement policy for Node to Hypervisor by node role. Master is default
	// +kubebuilder:default={nodeRoles:"\"master\""}
	HANodeConfig KubeturboHANodeConfig `json:"HANodeConfig,omitempty"`
	// Optional target configuration
	TargetConfig KubeturboTargetConfig `json:"targetConfig,omitempty"`
	// Kubeturbo command line arguments
	// +kubebuilder:default={logginglevel:2}
	Args KubeturboArgs `json:"args,omitempty"`
	// Kubeturbo resource configuration
	Resources *ResourceRequirements `json:"resources,omitempty"`

	// Optional logging level configuration. Changing this value does not require restart of Kubeturbo but takes about 1 minute to take effect
	// +kubebuilder:default={level:2}
	Logging Logging `json:"logging,omitempty"`

	// Optional node pool configuration. Changing this value does not require restart of Kubeturbo but takes about 1 minute to take effect
	// +kubebuilder:default={min:1, max: 1000}
	NodePoolSize NodePoolSize `json:"nodePoolSize,omitempty"`

	// Cluster Role rules for ORM owners. It's required when using ORM with ClusterRole 'turbo-cluster-admin'. It's recommended to use ORM with ClusterRole 'cluster-admin'
	OrmOwners OrmOwners `json:"ormOwners,omitempty"`

	// Flag system workloads such as those defined in kube-system, openshift-system, etc. Kubeturbo will not generate actions for workloads that match the supplied patterns
	// +kubebuilder:default={namespacePatterns:{kube-.*, openshift-.*, cattle.*}}
	SystemWorkloadDetectors SystemWorkloadDetectors `json:"systemWorkloadDetectors,omitempty"`

	// Identify operator-controlled workloads by name or namespace using regular expressions
	ExclusionDetectors ExclusionDetectors `json:"exclusionDetectors,omitempty"`

	// WireMock mode configuration
	// +kubebuilder:default={enabled:false, url: "wiremock:8080"}
	Wiremock Wiremock `json:"wiremock,omitempty"`

	// Discovery-related configurations
	// +kubebuilder:default={chunkSendDelayMillis: 0, numObjectsPerChunk: 5000}
	Discovery Discovery `json:"discovery,omitempty"`

	// Specify one or more kubeturbo pod scheduling constraints in the cluster.
	// See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ for examples on nodeSelector, affinity, tolerations
	KubeturboPodScheduling KubeturboPodScheduling `json:"kubeturboPodScheduling,omitempty"`
}

// KubeturboPodScheduling groups the standard Kubernetes pod scheduling
// constraints (nodeSelector, affinity, tolerations) applied to the kubeturbo pod.
type KubeturboPodScheduling struct {
	// NodeSelector is a selector which must be true for the pod to fit on a node.
	// Selector which must match a node's labels for the pod to be scheduled on that node.
	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
	// +optional
	// +mapType=atomic
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
	// If specified, the pod's scheduling constraints
	// +optional
	Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
	// The pod this Toleration is attached to tolerates any taint that matches
	// the triple <key,value,effect> using the matching operator <operator>.
	// +optional
	Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"`
}

// KubeturboImage describes the kubeturbo container image to deploy.
type KubeturboImage struct {
	// Container repository
	// +kubebuilder:default=icr.io/cpopen/turbonomic/kubeturbo
	Repository string `json:"repository,omitempty"` // default: icr.io/cpopen/turbonomic/kubeturbo
	// Kubeturbo container image tag
	Tag *string `json:"tag,omitempty"` // no default
	// Busybox repository. default is busybox. This is overridden by cpufreqgetterRepository
	BusyboxRepository *string `json:"busyboxRepository,omitempty"` // no default
	// Repository used to get node cpufrequency.
	CpufreqgetterRepository *string `json:"cpufreqgetterRepository,omitempty"` // no default
	// +kubebuilder:default=IfNotPresent
	PullPolicy *corev1.PullPolicy `json:"pullPolicy,omitempty"` // default: IfNotPresent
	// Define the secret used to authenticate to the container image registry
	ImagePullSecret *string `json:"imagePullSecret,omitempty"` // no default
}

// KubeturboServerMeta identifies the Turbo Server endpoint to connect to.
type KubeturboServerMeta struct {
	// Turbo Server major version
	Version *string `json:"version,omitempty"` // no default
	// URL for Turbo Server endpoint
	// +kubebuilder:default="https://Turbo_server_URL"
	TurboServer string `json:"turboServer,omitempty"` // default: "https://Turbo_server_URL"
	// Proxy server address
	Proxy *string `json:"proxy,omitempty"` // no default
}

// KubeturboRestAPIConfig carries the credentials used against the Turbo Server REST API.
type KubeturboRestAPIConfig struct {
	// Name of k8s secret that contains the turbo credentials
	// +kubebuilder:default=turbonomic-credentials
	TurbonomicCredentialsSecretName string `json:"turbonomicCredentialsSecretName,omitempty"` // default: "turbonomic-credentials"
	// Turbo admin user id
	OpsManagerUserName *string `json:"opsManagerUserName,omitempty"` // default: "Turbo_username" let's not add default to CRD
	// Turbo admin user password
	OpsManagerPassword *string `json:"opsManagerPassword,omitempty"` // default: "Turbo_password"
}

// KubeturboSdkProtocolConfig controls probe registration with the Turbo Server.
type KubeturboSdkProtocolConfig struct {
	// Time in seconds to wait for registration response from the Turbo Server
	// +kubebuilder:default=300
	RegistrationTimeoutSec *int `json:"registrationTimeoutSec,omitempty"` // default: 300
	// Restart probe container on registration timeout
	// +kubebuilder:default=true
	RestartOnRegistrationTimeout *bool `json:"restartOnRegistrationTimeout,omitempty"` // default: true
}

// KubeturboHANodeConfig names the node roles used for the HA placement policy.
type KubeturboHANodeConfig struct {
	// Node role names
	// +kubebuilder:default="\"master\""
	NodeRoles string `json:"nodeRoles,omitempty"` // default: "\"master\""
}

// KubeturboTargetConfig is the optional target configuration.
type KubeturboTargetConfig struct {
	// Name of the target; no default.
	TargetName *string `json:"targetName,omitempty"` // no default
	// TargetType *string `json:"targetType,omitempty"` // no default
}

// KubeturboArgs maps to kubeturbo command line arguments.
type KubeturboArgs struct {
	// Define logging level, default is info = 2
	// +kubebuilder:default=2
	Logginglevel *int `json:"logginglevel,omitempty"` // default: 2
	// Identify if kubelet requires https
	// +kubebuilder:default=true
	Kubelethttps *bool `json:"kubelethttps,omitempty"` // default: true
	// Identify kubelet port
	// +kubebuilder:default=10250
	Kubeletport *int `json:"kubeletport,omitempty"` // default: 10250
	// Allow kubeturbo to execute actions in OCP
	Sccsupport *string `json:"sccsupport,omitempty"` // no default
	// Readiness retry threshold; no default here (60 in the kubeturbo pod).
	ReadinessRetryThreshold *int32 `json:"readinessRetryThreshold,omitempty"` // no default (60 in kt pod)
	// Allow kubeturbo to reschedule pods with volumes attached
	FailVolumePodMoves *bool `json:"failVolumePodMoves,omitempty"` // no default (true in kt pod)
	// Do not run busybox on these nodes to discover the cpu frequency with k8s 1.18 and later, default is either of kubernetes.io/os=windows or beta.kubernetes.io/os=windows present as node label
	BusyboxExcludeNodeLabels *string `json:"busyboxExcludeNodeLabels,omitempty"` // no default, comma separated list of key=value node label pairs
	// Identify if using uuid or ip for stitching
	// +kubebuilder:default=true
	Stitchuuid *bool `json:"stitchuuid,omitempty"` // default: true
	// +kubebuilder:default=false
	Pre16K8sVersion *bool `json:"pre16k8sVersion,omitempty"` // default: false; CANDIDATE FOR REMOVAL
	// Identify if cleanup the resources created for scc impersonation, default is true
	// +kubebuilder:default=true
	CleanupSccImpersonationResources *bool `json:"cleanupSccImpersonationResources,omitempty"` // default: true
	// Skip creating the resources for scc impersonation
	// +kubebuilder:default=false
	SkipCreatingSccImpersonationResources *bool `json:"skipCreatingSccImpersonationResources,omitempty"`
	// The email to be used to push
	// changes to git with ArgoCD integration
	GitEmail *string `json:"gitEmail,omitempty"` // no default
	// The username to be used to push changes to git with ArgoCD integration
	GitUsername *string `json:"gitUsername,omitempty"` // no default
	// The name of the secret which holds the git credentials to be used with ArgoCD integration
	GitSecretName *string `json:"gitSecretName,omitempty"` // no default
	// The namespace of the secret which holds the git credentials to be used with ArgoCD integration
	GitSecretNamespace *string `json:"gitSecretNamespace,omitempty"` // no default
	// The commit mode that should be used for git action executions with ArgoCD Integration. One of request or direct. Defaults to direct.
	GitCommitMode *string `json:"gitCommitMode,omitempty"` // no default
	// The IBM cloud satellite location provider, it only support azure as of today
	SatelliteLocationProvider *string `json:"satelliteLocationProvider,omitempty"`
	// The discovery interval in seconds
	// +kubebuilder:default=600
	DiscoveryIntervalSec *int `json:"discoveryIntervalSec,omitempty"`
	// The discovery interval in seconds to collect additional resource usage data samples from kubelet. This should be no smaller than 10 seconds.
	// +kubebuilder:default=60
	DiscoverySampleIntervalSec *int `json:"discoverySampleIntervalSec,omitempty"`
	// The number of resource usage data samples to be collected from kubelet in each full discovery cycle. This should be no larger than 60.
	// +kubebuilder:default=10
	DiscoverySamples *int `json:"discoverySamples,omitempty"`
	// The discovery timeout in seconds for each discovery worker. Default value is 180 seconds
	// +kubebuilder:default=180
	DiscoveryTimeoutSec *int `json:"discoveryTimeoutSec,omitempty"`
	// The garbage collection interval in minutes for potentially leaked pods due to failed actions and kubeturbo restarts. Default value is 10 minutes
	// +kubebuilder:default=10
	GarbageCollectionIntervalMin *int `json:"garbageCollectionIntervalMin,omitempty"`
	// The number of discovery workers. Default is 10
	// +kubebuilder:default=10
	DiscoveryWorkers *int `json:"discoveryWorkers,omitempty"`
}

// DaemonPodDetectors holds patterns identifying daemon pods by pod name
// and/or namespace.
type DaemonPodDetectors struct {
	// Patterns matched against pod names.
	PodNamePatterns []string `json:"podNamePatterns,omitempty"`
	// Patterns matched against namespace names.
	NamespacePatterns []string `json:"namespacePatterns,omitempty"`
}

// AnnotationWhitelist holds per-entity-type regular expressions selecting which
// annotations kubeturbo collects; by default none are collected.
type AnnotationWhitelist struct {
	// Regex selecting container-spec annotations to collect.
	ContainerSpec *string `json:"containerSpec,omitempty"`
	// Regex selecting namespace annotations to collect.
	Namespace *string `json:"namespace,omitempty"`
	// Regex selecting workload-controller annotations to collect.
	WorkloadController *string `json:"workloadController,omitempty"`
}

// Logging is the dynamically adjustable logging configuration.
type Logging struct {
	// Define logging level
	// +kubebuilder:default=2
	Level *int `json:"level,omitempty"`
}

// NodePoolSize bounds the number of nodes allowed in the node pool.
type NodePoolSize struct {
	// minimum number of nodes allowed in the node pool
	// +kubebuilder:default=1
	Min *int `json:"min,omitempty"`
	// maximum number of nodes allowed in the node pool
	// +kubebuilder:default=1000
	Max *int `json:"max,omitempty"`
}

// OrmOwners lists cluster-role rule targets for ORM owner resources.
type OrmOwners struct {
	// API group for ORM owners
	ApiGroup []string `json:"apiGroup,omitempty"`
	// resources for ORM owners
	Resources []string `json:"resources,omitempty"`
}

// SystemWorkloadDetectors flags system workloads kubeturbo must not act on.
type SystemWorkloadDetectors struct {
	// A list of regular expressions that match the namespace names for system workloads
	// +kubebuilder:default={kube-.*, openshift-.*, cattle.*}
	NamespacePatterns []string `json:"namespacePatterns,omitempty"`
}

// ExclusionDetectors identifies operator-controlled workload controllers that
// are excluded from action generation.
type ExclusionDetectors struct {
	// A list of regular expressions representing operator-controlled Workload Controllers. Workload Controllers that match the supplied expression will not have actions generated against them.
	OperatorControlledWorkloadsPatterns []string `json:"operatorControlledWorkloadsPatterns,omitempty"`
	// A list of regular expressions representing namespaces containing operator-controlled Workload Controllers. Workload Controllers deployed within the matching namespaces will not have actions generated against them.
	OperatorControlledNamespacePatterns []string `json:"operatorControlledNamespacePatterns,omitempty"`
}

// Wiremock is the WireMock mode configuration.
type Wiremock struct {
	// Enable WireMock mode
	// +kubebuilder:default=false
	Enabled *bool `json:"enabled,omitempty"`
	// WireMock service URL
	// +kubebuilder:default="wiremock:8080"
	URL *string `json:"url,omitempty"`
}

// Discovery tunes how discovery data is chunked and transmitted.
type Discovery struct {
	// time delay (in milliseconds) between transmissions of chunked discovery data
	// +kubebuilder:default=0
	ChunkSendDelayMillis *int32 `json:"chunkSendDelayMillis,omitempty"`
	// Desired size (in number of DTOs) of discovery data chunks (default = 5,000)
	// +kubebuilder:default=5000
	NumObjectsPerChunk *int32 `json:"numObjectsPerChunk,omitempty"`
}

// +kubebuilder:pruning:PreserveUnknownFields
// KubeturboStatus defines the observed state of Kubeturbo
type KubeturboStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Timestamp of the last sync up
	LastUpdatedTimestamp string `json:"lastUpdatedTimestamp,omitempty"`
	// Hash of the constructed turbo.config file
	ConfigHash string `json:"configHash,omitempty"`
}

//+kubebuilder:object:root=true
//+kubebuilder:resource:path=kubeturbos,shortName=kt
//+kubebuilder:subresource:status
//+kubebuilder:storageversion

// Kubeturbo is the Schema for the kubeturbos API
type Kubeturbo struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// +kubebuilder:default={}
	Spec   KubeturboSpec   `json:"spec"`
	Status KubeturboStatus `json:"status,omitempty"`
}

// SetSpecDefault patches operator-side defaults into the CR spec (image tag,
// server version, default annotation, system-workload namespace patterns) and
// then verifies that the critical fields are present.
func (kt *Kubeturbo) SetSpecDefault() error {
	var err error

	// If CR doesn't specify a version then use the DEFAULT_KUBETURBO_VERSION
	// as Kubeturbo's version tag.
This behavior ensures the Kubeturbo pod will + // always up-to-date when the operator bumping up its version. Won't affect + // the scenario if the client want to use a fixed version that specified in + // the CR. + if defaultKtVersion == "" { + defaultKtVersion, err = utils.GetDefaultKubeturboVersion() + if err != nil { + return err + } + } + + // Patch default version if the value is not specified + if kt.Spec.Image.Tag == nil || *kt.Spec.Image.Tag == DefaultVersion { + kt.Spec.Image.Tag = &defaultKtVersion + } + if kt.Spec.ServerMeta.Version == nil || *kt.Spec.ServerMeta.Version == DefaultVersion { + kt.Spec.ServerMeta.Version = &defaultKtVersion + } + + // Patch default annotations if the value is not specified + if _, ok := kt.Spec.Annotations[DefaultAnnotationKey]; !ok { + kt.Spec.Annotations = map[string]string{DefaultAnnotationKey: DefaultAnnotationVal} + } + + // Patch default namespace patterns for SystemWorkloadDetectors if not specified + if kt.Spec.SystemWorkloadDetectors.NamespacePatterns == nil { + kt.Spec.SystemWorkloadDetectors.NamespacePatterns = defaultSysWlNsPatterns + } + + return kt.VerifySubfields() +} + +// Verify if the fetched Kubeturbo type contains all necessary fields +func (kt *Kubeturbo) VerifySubfields() error { + // Following are the fields that cause the Kubeturbo pod unable to launch + // Pause the reconcilation loop if any of the field is missing + checkList := []string{ + "Spec.RoleName", + "Spec.RoleBinding", + "Spec.ServiceAccountName", + "Spec.Image.Repository", + "Spec.ServerMeta.TurboServer", + "Spec.RestAPIConfig.TurbonomicCredentialsSecretName", + "Spec.HANodeConfig.NodeRoles", + } + + errorMessages := []string{} + val := reflect.ValueOf(kt) + + // Ensure the input is a struct or a pointer to a struct + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + if val.Kind() != reflect.Struct { + return nil + } + + var scanFields func(reflect.Value, string) + scanFields = func(v reflect.Value, parent string) { + for i := 0; 
i < v.NumField(); i++ { + field := v.Type().Field(i) + fieldValue := v.Field(i) + + // Construct the full field name + fieldName := field.Name + if parent != "" { + fieldName = parent + "." + fieldName + } + + // Skip unexported fields + if !fieldValue.CanInterface() { + continue + } + + if utils.StringInSlice(fieldName, checkList) { + // Check if the field is a non-pointer and has a zero value + if fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() { + errorMessages = append(errorMessages, fieldName) + } else if fieldValue.Kind() == reflect.String && fieldValue.String() == "" { + errorMessages = append(errorMessages, fieldName) + } + } + + // Recursively check nested structs + if fieldValue.Kind() == reflect.Struct { + scanFields(fieldValue, fieldName) + } + } + } + scanFields(val, "") + + // Summarize errors + if len(errorMessages) > 0 { + return fmt.Errorf("stopping reconciliation for Kubeturbo CR due to missing critical field(s): %s. Please review your CR and ensure the latest CRD is applied before proceeding", strings.Join(errorMessages, ", ")) + } + return nil +} + +//+kubebuilder:object:root=true + +// KubeturboList contains a list of Kubeturbo +type KubeturboList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Kubeturbo `json:"items"` +} + +type ResourceRequirements struct { + Limits map[corev1.ResourceName]resource.Quantity `json:"limits,omitempty"` + Requests map[corev1.ResourceName]resource.Quantity `json:"requests,omitempty"` +} + +func (rr ResourceRequirements) Internalize() corev1.ResourceRequirements { + return corev1.ResourceRequirements{ + Limits: rr.Limits, + Requests: rr.Requests, + } +} + +func init() { + SchemeBuilder.Register(&Kubeturbo{}, &KubeturboList{}) +} diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000..c17b6aa --- /dev/null +++ b/api/v1/zz_generated.deepcopy.go @@ -0,0 +1,720 @@ +//go:build !ignore_autogenerated + +/* 
+Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnnotationWhitelist) DeepCopyInto(out *AnnotationWhitelist) { + *out = *in + if in.ContainerSpec != nil { + in, out := &in.ContainerSpec, &out.ContainerSpec + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.WorkloadController != nil { + in, out := &in.WorkloadController, &out.WorkloadController + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnnotationWhitelist. +func (in *AnnotationWhitelist) DeepCopy() *AnnotationWhitelist { + if in == nil { + return nil + } + out := new(AnnotationWhitelist) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DaemonPodDetectors) DeepCopyInto(out *DaemonPodDetectors) { + *out = *in + if in.PodNamePatterns != nil { + in, out := &in.PodNamePatterns, &out.PodNamePatterns + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NamespacePatterns != nil { + in, out := &in.NamespacePatterns, &out.NamespacePatterns + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonPodDetectors. +func (in *DaemonPodDetectors) DeepCopy() *DaemonPodDetectors { + if in == nil { + return nil + } + out := new(DaemonPodDetectors) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Discovery) DeepCopyInto(out *Discovery) { + *out = *in + if in.ChunkSendDelayMillis != nil { + in, out := &in.ChunkSendDelayMillis, &out.ChunkSendDelayMillis + *out = new(int32) + **out = **in + } + if in.NumObjectsPerChunk != nil { + in, out := &in.NumObjectsPerChunk, &out.NumObjectsPerChunk + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Discovery. +func (in *Discovery) DeepCopy() *Discovery { + if in == nil { + return nil + } + out := new(Discovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExclusionDetectors) DeepCopyInto(out *ExclusionDetectors) { + *out = *in + if in.OperatorControlledWorkloadsPatterns != nil { + in, out := &in.OperatorControlledWorkloadsPatterns, &out.OperatorControlledWorkloadsPatterns + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OperatorControlledNamespacePatterns != nil { + in, out := &in.OperatorControlledNamespacePatterns, &out.OperatorControlledNamespacePatterns + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExclusionDetectors. +func (in *ExclusionDetectors) DeepCopy() *ExclusionDetectors { + if in == nil { + return nil + } + out := new(ExclusionDetectors) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Kubeturbo) DeepCopyInto(out *Kubeturbo) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kubeturbo. +func (in *Kubeturbo) DeepCopy() *Kubeturbo { + if in == nil { + return nil + } + out := new(Kubeturbo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Kubeturbo) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeturboArgs) DeepCopyInto(out *KubeturboArgs) { + *out = *in + if in.Logginglevel != nil { + in, out := &in.Logginglevel, &out.Logginglevel + *out = new(int) + **out = **in + } + if in.Kubelethttps != nil { + in, out := &in.Kubelethttps, &out.Kubelethttps + *out = new(bool) + **out = **in + } + if in.Kubeletport != nil { + in, out := &in.Kubeletport, &out.Kubeletport + *out = new(int) + **out = **in + } + if in.Sccsupport != nil { + in, out := &in.Sccsupport, &out.Sccsupport + *out = new(string) + **out = **in + } + if in.ReadinessRetryThreshold != nil { + in, out := &in.ReadinessRetryThreshold, &out.ReadinessRetryThreshold + *out = new(int32) + **out = **in + } + if in.FailVolumePodMoves != nil { + in, out := &in.FailVolumePodMoves, &out.FailVolumePodMoves + *out = new(bool) + **out = **in + } + if in.BusyboxExcludeNodeLabels != nil { + in, out := &in.BusyboxExcludeNodeLabels, &out.BusyboxExcludeNodeLabels + *out = new(string) + **out = **in + } + if in.Stitchuuid != nil { + in, out := &in.Stitchuuid, &out.Stitchuuid + *out = new(bool) + **out = **in + } + if in.Pre16K8sVersion != nil { + in, out := &in.Pre16K8sVersion, &out.Pre16K8sVersion + *out = new(bool) + **out = **in + } + if in.CleanupSccImpersonationResources != nil { + in, out := &in.CleanupSccImpersonationResources, &out.CleanupSccImpersonationResources + *out = new(bool) + **out = **in + } + if in.SkipCreatingSccImpersonationResources != nil { + in, out := &in.SkipCreatingSccImpersonationResources, &out.SkipCreatingSccImpersonationResources + *out = new(bool) + **out = **in + } + if in.GitEmail != nil { + in, out := &in.GitEmail, &out.GitEmail + *out = new(string) + **out = **in + } + if in.GitUsername != nil { + in, out := &in.GitUsername, &out.GitUsername + *out = new(string) + **out = **in + } + if in.GitSecretName != nil { + in, out := &in.GitSecretName, &out.GitSecretName + *out = new(string) + **out = **in + } + if in.GitSecretNamespace != nil { + in, out := &in.GitSecretNamespace, 
&out.GitSecretNamespace + *out = new(string) + **out = **in + } + if in.GitCommitMode != nil { + in, out := &in.GitCommitMode, &out.GitCommitMode + *out = new(string) + **out = **in + } + if in.SatelliteLocationProvider != nil { + in, out := &in.SatelliteLocationProvider, &out.SatelliteLocationProvider + *out = new(string) + **out = **in + } + if in.DiscoveryIntervalSec != nil { + in, out := &in.DiscoveryIntervalSec, &out.DiscoveryIntervalSec + *out = new(int) + **out = **in + } + if in.DiscoverySampleIntervalSec != nil { + in, out := &in.DiscoverySampleIntervalSec, &out.DiscoverySampleIntervalSec + *out = new(int) + **out = **in + } + if in.DiscoverySamples != nil { + in, out := &in.DiscoverySamples, &out.DiscoverySamples + *out = new(int) + **out = **in + } + if in.DiscoveryTimeoutSec != nil { + in, out := &in.DiscoveryTimeoutSec, &out.DiscoveryTimeoutSec + *out = new(int) + **out = **in + } + if in.GarbageCollectionIntervalMin != nil { + in, out := &in.GarbageCollectionIntervalMin, &out.GarbageCollectionIntervalMin + *out = new(int) + **out = **in + } + if in.DiscoveryWorkers != nil { + in, out := &in.DiscoveryWorkers, &out.DiscoveryWorkers + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeturboArgs. +func (in *KubeturboArgs) DeepCopy() *KubeturboArgs { + if in == nil { + return nil + } + out := new(KubeturboArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeturboHANodeConfig) DeepCopyInto(out *KubeturboHANodeConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeturboHANodeConfig. 
+func (in *KubeturboHANodeConfig) DeepCopy() *KubeturboHANodeConfig { + if in == nil { + return nil + } + out := new(KubeturboHANodeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeturboImage) DeepCopyInto(out *KubeturboImage) { + *out = *in + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(string) + **out = **in + } + if in.BusyboxRepository != nil { + in, out := &in.BusyboxRepository, &out.BusyboxRepository + *out = new(string) + **out = **in + } + if in.CpufreqgetterRepository != nil { + in, out := &in.CpufreqgetterRepository, &out.CpufreqgetterRepository + *out = new(string) + **out = **in + } + if in.PullPolicy != nil { + in, out := &in.PullPolicy, &out.PullPolicy + *out = new(corev1.PullPolicy) + **out = **in + } + if in.ImagePullSecret != nil { + in, out := &in.ImagePullSecret, &out.ImagePullSecret + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeturboImage. +func (in *KubeturboImage) DeepCopy() *KubeturboImage { + if in == nil { + return nil + } + out := new(KubeturboImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeturboList) DeepCopyInto(out *KubeturboList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Kubeturbo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeturboList. 
+func (in *KubeturboList) DeepCopy() *KubeturboList { + if in == nil { + return nil + } + out := new(KubeturboList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeturboList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeturboPodScheduling) DeepCopyInto(out *KubeturboPodScheduling) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeturboPodScheduling. +func (in *KubeturboPodScheduling) DeepCopy() *KubeturboPodScheduling { + if in == nil { + return nil + } + out := new(KubeturboPodScheduling) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeturboRestAPIConfig) DeepCopyInto(out *KubeturboRestAPIConfig) { + *out = *in + if in.OpsManagerUserName != nil { + in, out := &in.OpsManagerUserName, &out.OpsManagerUserName + *out = new(string) + **out = **in + } + if in.OpsManagerPassword != nil { + in, out := &in.OpsManagerPassword, &out.OpsManagerPassword + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeturboRestAPIConfig. +func (in *KubeturboRestAPIConfig) DeepCopy() *KubeturboRestAPIConfig { + if in == nil { + return nil + } + out := new(KubeturboRestAPIConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeturboSdkProtocolConfig) DeepCopyInto(out *KubeturboSdkProtocolConfig) { + *out = *in + if in.RegistrationTimeoutSec != nil { + in, out := &in.RegistrationTimeoutSec, &out.RegistrationTimeoutSec + *out = new(int) + **out = **in + } + if in.RestartOnRegistrationTimeout != nil { + in, out := &in.RestartOnRegistrationTimeout, &out.RestartOnRegistrationTimeout + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeturboSdkProtocolConfig. +func (in *KubeturboSdkProtocolConfig) DeepCopy() *KubeturboSdkProtocolConfig { + if in == nil { + return nil + } + out := new(KubeturboSdkProtocolConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeturboServerMeta) DeepCopyInto(out *KubeturboServerMeta) { + *out = *in + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeturboServerMeta. +func (in *KubeturboServerMeta) DeepCopy() *KubeturboServerMeta { + if in == nil { + return nil + } + out := new(KubeturboServerMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeturboSpec) DeepCopyInto(out *KubeturboSpec) { + *out = *in + in.DaemonPodDetectors.DeepCopyInto(&out.DaemonPodDetectors) + in.AnnotationWhitelist.DeepCopyInto(&out.AnnotationWhitelist) + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ReplicaCount != nil { + in, out := &in.ReplicaCount, &out.ReplicaCount + *out = new(int32) + **out = **in + } + in.Image.DeepCopyInto(&out.Image) + in.ServerMeta.DeepCopyInto(&out.ServerMeta) + in.RestAPIConfig.DeepCopyInto(&out.RestAPIConfig) + in.SdkProtocolConfig.DeepCopyInto(&out.SdkProtocolConfig) + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.HANodeConfig = in.HANodeConfig + in.TargetConfig.DeepCopyInto(&out.TargetConfig) + in.Args.DeepCopyInto(&out.Args) + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + in.Logging.DeepCopyInto(&out.Logging) + in.NodePoolSize.DeepCopyInto(&out.NodePoolSize) + in.OrmOwners.DeepCopyInto(&out.OrmOwners) + 
in.SystemWorkloadDetectors.DeepCopyInto(&out.SystemWorkloadDetectors) + in.ExclusionDetectors.DeepCopyInto(&out.ExclusionDetectors) + in.Wiremock.DeepCopyInto(&out.Wiremock) + in.Discovery.DeepCopyInto(&out.Discovery) + in.KubeturboPodScheduling.DeepCopyInto(&out.KubeturboPodScheduling) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeturboSpec. +func (in *KubeturboSpec) DeepCopy() *KubeturboSpec { + if in == nil { + return nil + } + out := new(KubeturboSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeturboStatus) DeepCopyInto(out *KubeturboStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeturboStatus. +func (in *KubeturboStatus) DeepCopy() *KubeturboStatus { + if in == nil { + return nil + } + out := new(KubeturboStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeturboTargetConfig) DeepCopyInto(out *KubeturboTargetConfig) { + *out = *in + if in.TargetName != nil { + in, out := &in.TargetName, &out.TargetName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeturboTargetConfig. +func (in *KubeturboTargetConfig) DeepCopy() *KubeturboTargetConfig { + if in == nil { + return nil + } + out := new(KubeturboTargetConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Logging) DeepCopyInto(out *Logging) { + *out = *in + if in.Level != nil { + in, out := &in.Level, &out.Level + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Logging. +func (in *Logging) DeepCopy() *Logging { + if in == nil { + return nil + } + out := new(Logging) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolSize) DeepCopyInto(out *NodePoolSize) { + *out = *in + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = new(int) + **out = **in + } + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolSize. +func (in *NodePoolSize) DeepCopy() *NodePoolSize { + if in == nil { + return nil + } + out := new(NodePoolSize) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OrmOwners) DeepCopyInto(out *OrmOwners) { + *out = *in + if in.ApiGroup != nil { + in, out := &in.ApiGroup, &out.ApiGroup + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrmOwners. +func (in *OrmOwners) DeepCopy() *OrmOwners { + if in == nil { + return nil + } + out := new(OrmOwners) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(map[corev1.ResourceName]resource.Quantity, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(map[corev1.ResourceName]resource.Quantity, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements. +func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { + if in == nil { + return nil + } + out := new(ResourceRequirements) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemWorkloadDetectors) DeepCopyInto(out *SystemWorkloadDetectors) { + *out = *in + if in.NamespacePatterns != nil { + in, out := &in.NamespacePatterns, &out.NamespacePatterns + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemWorkloadDetectors. +func (in *SystemWorkloadDetectors) DeepCopy() *SystemWorkloadDetectors { + if in == nil { + return nil + } + out := new(SystemWorkloadDetectors) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Wiremock) DeepCopyInto(out *Wiremock) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Wiremock. 
+func (in *Wiremock) DeepCopy() *Wiremock { + if in == nil { + return nil + } + out := new(Wiremock) + in.DeepCopyInto(out) + return out +} diff --git a/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go new file mode 100644 index 0000000..76346bb --- /dev/null +++ b/api/v1alpha1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains API Schema definitions for the charts v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=charts.helm.k8s.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "charts.helm.k8s.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1alpha1/kubeturbo_types.go b/api/v1alpha1/kubeturbo_types.go new file mode 100644 index 0000000..8698c1c --- /dev/null +++ b/api/v1alpha1/kubeturbo_types.go @@ -0,0 +1,64 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NB - if a block is marked as omitempty, it must be given a default value in order for inner fields to be populated with their defaults +// a single inner field value will suffice + +// +kubebuilder:pruning:PreserveUnknownFields +// KubeturboSpec defines the desired state of Kubeturbo +type KubeturboSpec struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:pruning:PreserveUnknownFields +// KubeturboStatus defines the observed state of Kubeturbo +type KubeturboStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +//+kubebuilder:object:root=true +//+kubebuilder:resource:path=kubeturbos,shortName=kt +//+kubebuilder:subresource:status + +// Kubeturbo is the Schema for the kubeturbos API +type Kubeturbo struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec KubeturboSpec `json:"spec"` + Status KubeturboStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// KubeturboList contains a list of Kubeturbo +type KubeturboList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Kubeturbo `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Kubeturbo{}, &KubeturboList{}) +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go 
b/api/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..ebf4623 --- /dev/null +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,114 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Kubeturbo) DeepCopyInto(out *Kubeturbo) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kubeturbo. +func (in *Kubeturbo) DeepCopy() *Kubeturbo { + if in == nil { + return nil + } + out := new(Kubeturbo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Kubeturbo) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeturboList) DeepCopyInto(out *KubeturboList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Kubeturbo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeturboList. +func (in *KubeturboList) DeepCopy() *KubeturboList { + if in == nil { + return nil + } + out := new(KubeturboList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeturboList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeturboSpec) DeepCopyInto(out *KubeturboSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeturboSpec. +func (in *KubeturboSpec) DeepCopy() *KubeturboSpec { + if in == nil { + return nil + } + out := new(KubeturboSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeturboStatus) DeepCopyInto(out *KubeturboStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeturboStatus. 
+func (in *KubeturboStatus) DeepCopy() *KubeturboStatus { + if in == nil { + return nil + } + out := new(KubeturboStatus) + in.DeepCopyInto(out) + return out +} diff --git a/certified-bundle-config/manifests/bases/annotations.yaml b/certified-bundle-config/manifests/bases/annotations.yaml new file mode 100644 index 0000000..d8e95fd --- /dev/null +++ b/certified-bundle-config/manifests/bases/annotations.yaml @@ -0,0 +1,11 @@ +annotations: + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: kubeturbo-certified + operators.operatorframework.io.bundle.channels.v1: xxx + operators.operatorframework.io.bundle.channel.default.v1: stable + operators.operatorframework.io.metrics.builder: operator-sdk-v1.34.1 + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v4 + com.redhat.openshift.versions: v4.9 \ No newline at end of file diff --git a/certified-bundle-config/manifests/bases/kubeturbo-certified.clusterserviceversion.yaml b/certified-bundle-config/manifests/bases/kubeturbo-certified.clusterserviceversion.yaml new file mode 100644 index 0000000..5fc620c --- /dev/null +++ b/certified-bundle-config/manifests/bases/kubeturbo-certified.clusterserviceversion.yaml @@ -0,0 +1,233 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: '[{"apiVersion":"charts.helm.k8s.io/v1","kind":"Kubeturbo","metadata":{"name":"kubeturbo-release"},"spec":{"serverMeta":{"turboServer":"https://Turbo_server_URL"},"targetConfig":{"targetName":"Cluster_Name"}}}]' + capabilities: Basic Install + categories: Monitoring + certified: "false" + createdAt: "" + description: Turbonomic Workload Automation for Multicloud simultaneously optimizes performance, compliance, 
and cost in real-time. Workloads are precisely resourced, automatically, to perform while satisfying business constraints. + features.operators.openshift.io/cnf: "false" + features.operators.openshift.io/cni: "false" + features.operators.openshift.io/csi: "false" + features.operators.openshift.io/disconnected: "true" + features.operators.openshift.io/fips-compliant: "false" + features.operators.openshift.io/proxy-aware: "false" + features.operators.openshift.io/tls-profiles: "false" + features.operators.openshift.io/token-auth-aws: "false" + features.operators.openshift.io/token-auth-azure: "false" + features.operators.openshift.io/token-auth-gcp: "false" + operators.operatorframework.io/builder: operator-sdk-v1.34.1 + operators.operatorframework.io/project_layout: go.kubebuilder.io/v4 + repository: https://github.com/turbonomic/kubeturbo-deploy + support: Turbonomic, Inc. + olm.skipRange: ">=xxx matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + tolerations: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. 
+ If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + logging: + default: + level: 2 + description: Optional logging level configuration. Changing this value + does not require restart of Kubeturbo but takes about 1 minute to + take effect + properties: + level: + default: 2 + description: Define logging level + type: integer + type: object + nodePoolSize: + default: + max: 1000 + min: 1 + description: Optional node pool configuration. Changing this value + does not require restart of Kubeturbo but takes about 1 minute to + take effect + properties: + max: + default: 1000 + description: maximum number of nodes allowed in the node pool + type: integer + min: + default: 1 + description: minimum number of nodes allowed in the node pool + type: integer + type: object + ormOwners: + description: Cluster Role rules for ORM owners. It's required when + using ORM with ClusterRole 'turbo-cluster-admin'. 
 It's recommended + to use ORM with ClusterRole 'cluster-admin' + properties: + apiGroup: + description: API group for ORM owners + items: + type: string + type: array + resources: + description: resources for ORM owners + items: + type: string + type: array + type: object + replicaCount: + description: Kubeturbo replicaCount + format: int32 + type: integer + resources: + description: Kubeturbo resource configuration + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restAPIConfig: + default: + turbonomicCredentialsSecretName: turbonomic-credentials + description: Credentials to register probe with Turbo Server + properties: + opsManagerPassword: + description: Turbo admin user password + type: string + opsManagerUserName: + description: Turbo admin user id + type: string + turbonomicCredentialsSecretName: + default: turbonomic-credentials + description: Name of k8s secret that contains the turbo credentials + type: string + type: object + roleBinding: + default: turbo-all-binding + description: |- + The name of cluster role binding. Default is turbo-all-binding. If role binding is updated from an existing kubeturbo instance, + the operator will not delete the existing role binding in the cluster. Therefore, the user may want to manually delete the old + clusterrolebinding from the cluster so that the service account is no longer tied to the previous role binding. 
+ type: string + roleName: + default: cluster-admin + description: |- + Specify 'turbo-cluster-reader' or 'turbo-cluster-admin' as role name instead of the default using + the 'cluster-admin' role. A cluster role with this name will be created during deployment + If using a role name other than the pre-defined role names, cluster role will not be created. This role should be + existing in the cluster and should have the necessary permissions required for kubeturbo to work accurately. + pattern: ^[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?::[a-z0-9](?:[-a-z0-9]*[a-z0-9])?)*$ + type: string + sdkProtocolConfig: + default: + registrationTimeoutSec: 300 + restartOnRegistrationTimeout: true + description: Configurations to register probe with Turbo Server + properties: + registrationTimeoutSec: + default: 300 + description: Time in seconds to wait for registration response + from the Turbo Server + type: integer + restartOnRegistrationTimeout: + default: true + description: Restart probe container on registration timeout + type: boolean + type: object + serverMeta: + default: + turboServer: https://Turbo_server_URL + description: Configuration for Turbo Server + properties: + proxy: + description: Proxy server address + type: string + turboServer: + default: https://Turbo_server_URL + description: URL for Turbo Server endpoint + type: string + version: + description: Turbo Server major version + type: string + type: object + serviceAccountName: + default: turbo-user + description: The name of the service account name. Default is turbo-user + type: string + systemWorkloadDetectors: + default: + namespacePatterns: + - kube-.* + - openshift-.* + - cattle.* + description: Flag system workloads such as those defined in kube-system, + openshift-system, etc. 
Kubeturbo will not generate actions for workloads + that match the supplied patterns + properties: + namespacePatterns: + default: + - kube-.* + - openshift-.* + - cattle.* + description: A list of regular expressions that match the namespace + names for system workloads + items: + type: string + type: array + type: object + targetConfig: + description: Optional target configuration + properties: + targetName: + type: string + type: object + wiremock: + default: + enabled: false + url: wiremock:8080 + description: WireMock mode configuration + properties: + enabled: + default: false + description: Enable WireMock mode + type: boolean + url: + default: wiremock:8080 + description: WireMock service URL + type: string + type: object + type: object + status: + description: KubeturboStatus defines the observed state of Kubeturbo + properties: + configHash: + description: Hash of the constructed turbo.config file + type: string + lastUpdatedTimestamp: + description: Timestamp of the last sync up + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Kubeturbo is the Schema for the kubeturbos API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KubeturboSpec defines the desired state of Kubeturbo + type: object + x-kubernetes-preserve-unknown-fields: true + status: + description: KubeturboStatus defines the observed state of Kubeturbo + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml new file mode 100644 index 0000000..212077c --- /dev/null +++ b/config/crd/kustomization.yaml @@ -0,0 +1,25 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/charts.helm.k8s.io_kubeturbos.yaml +#+kubebuilder:scaffold:crdkustomizeresource + +patches: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +#- path: patches/webhook_in_kubeturbos.yaml +#+kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. +# patches here are for enabling the CA injection for each CRD +#- path: patches/cainjection_in_kubeturbos.yaml +#+kubebuilder:scaffold:crdkustomizecainjectionpatch + +# [WEBHOOK] To enable webhook, uncomment the following section +# the following config is for teaching kustomize how to do kustomization for CRDs. 
+ +- path: patches/api-approved-annotation.yaml + +#configurations: +#- kustomizeconfig.yaml diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml new file mode 100644 index 0000000..ec5c150 --- /dev/null +++ b/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/config/crd/patches/api-approved-annotation.yaml b/config/crd/patches/api-approved-annotation.yaml new file mode 100644 index 0000000..61df1fb --- /dev/null +++ b/config/crd/patches/api-approved-annotation.yaml @@ -0,0 +1,7 @@ +# The following patch adds the api-approved.kubernetes.io annotation, which is required for CRDs in *.k8s.io API groups +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: "https://github.com/operator-framework/operator-sdk/pull/2703" + name: kubeturbos.charts.helm.k8s.io \ No newline at end of file diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml new file mode 100644 index 0000000..920d6a7 --- /dev/null +++ b/config/default/kustomization.yaml @@ -0,0 +1,142 @@ +# Adds namespace to all resources. +namespace: __NAMESPACE__ + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +# namePrefix: kubeturbo-deploy- + +# Labels to add to all resources and selectors. 
+#labels: +#- includeSelectors: true +# pairs: +# someName: someValue + +resources: +- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus + +patches: +# Protect the /metrics endpoint by putting it behind auth. +# If you want your kubeturbo-operator to expose the /metrics +# endpoint w/o any authn/z, please comment the following line. +# - path: manager_auth_proxy_patch.yaml + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- path: manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. +# 'CERTMANAGER' needs to be enabled to use ca injection +#- path: webhookcainjection_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
+# Uncomment the following replacements to add the cert-manager CA injection annotations +#replacements: +# - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # namespace of the certificate CR +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldPath: .metadata.name +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - source: # Add cert-manager annotation to the webhook Service +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.name # namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# 
fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - source: +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.namespace # namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 1 +# create: true diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml new file mode 100644 index 0000000..dbfb962 --- /dev/null +++ b/config/default/manager_auth_proxy_patch.yaml @@ -0,0 +1,39 @@ +# This patch inject a sidecar container which is a HTTP proxy for the +# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kubeturbo-operator + namespace: system +spec: + template: + spec: + containers: + - name: kube-rbac-proxy + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0 + args: + - "--secure-listen-address=0.0.0.0:8443" + - "--upstream=http://127.0.0.1:8080/" + - "--logtostderr=true" + - "--v=0" + ports: + - containerPort: 8443 + protocol: TCP + name: https + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + - name: manager + args: + - "--health-probe-bind-address=:8081" + - "--metrics-bind-address=127.0.0.1:8080" + - "--leader-elect" diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml new file mode 100644 index 0000000..3dc592a --- /dev/null +++ b/config/default/manager_config_patch.yaml @@ -0,0 +1,10 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kubeturbo-operator + namespace: system +spec: + template: + spec: + containers: + - name: manager diff --git a/config/manager/kustomization.yaml 
b/config/manager/kustomization.yaml new file mode 100644 index 0000000..f5b3cab --- /dev/null +++ b/config/manager/kustomization.yaml @@ -0,0 +1,8 @@ +resources: +- manager.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: icr.io/cpopen/kubeturbo-operator + newTag: 8.15.1-SNAPSHOT diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml new file mode 100644 index 0000000..f53c160 --- /dev/null +++ b/config/manager/manager.yaml @@ -0,0 +1,112 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + name: kubeturbo-operator + app.kubernetes.io/name: namespace + app.kubernetes.io/instance: system + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kubeturbo-operator + namespace: system + labels: + name: kubeturbo-operator + app.kubernetes.io/name: deployment + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + name: kubeturbo-operator + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: kubeturbo-operator + labels: + name: kubeturbo-operator + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. 
+ # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + runAsNonRoot: true + # TODO(user): For common cases that do not require escalating privileges + # it is recommended to ensure that all your Pods/Containers are restrictive. + # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + # Please uncomment the following code if your project does NOT have to work on old Kubernetes + # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). + # seccompProfile: + # type: RuntimeDefault + containers: + - command: + - /manager + args: + - --leader-elect + image: controller:latest + name: kubeturbo-operator + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + # watch the namespace in which the operator is deployed + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources accordingly based on the project requirements. 
+ # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + serviceAccountName: kubeturbo-operator + terminationGracePeriodSeconds: 10 diff --git a/config/manifests/kustomization.yaml b/config/manifests/kustomization.yaml new file mode 100644 index 0000000..5c97d2a --- /dev/null +++ b/config/manifests/kustomization.yaml @@ -0,0 +1,28 @@ +# These resources constitute the fully configured set of manifests +# used to generate the 'manifests/' directory in a bundle. +resources: +- bases/kubeturbo-deploy.clusterserviceversion.yaml +- ../default +- ../samples +- ../scorecard + +# [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix. +# Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager. +# These patches remove the unnecessary "cert" volume and its manager container volumeMount. +#patchesJson6902: +#- target: +# group: apps +# version: v1 +# kind: Deployment +# name: kubeturbo-operator +# namespace: system +# patch: |- +# # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs. +# # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment. +# - op: remove + +# path: /spec/template/spec/containers/0/volumeMounts/0 +# # Remove the "cert" volume, since OLM will create and mount a set of certs. +# # Update the indices in this path if adding or removing volumes in the manager's Deployment. 
+# - op: remove +# path: /spec/template/spec/volumes/0 diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml new file mode 100644 index 0000000..ed13716 --- /dev/null +++ b/config/prometheus/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- monitor.yaml diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml new file mode 100644 index 0000000..1992f8d --- /dev/null +++ b/config/prometheus/monitor.yaml @@ -0,0 +1,25 @@ +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + name: kubeturbo-operator + app.kubernetes.io/name: servicemonitor + app.kubernetes.io/instance: kubeturbo-operator-metrics-monitor + app.kubernetes.io/component: metrics + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + name: kubeturbo-operator-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + insecureSkipVerify: true + selector: + matchLabels: + name: kubeturbo-operator diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml new file mode 100644 index 0000000..b060681 --- /dev/null +++ b/config/rbac/auth_proxy_client_clusterrole.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: metrics-reader + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + name: metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml new file mode 100644 index 0000000..acc05c6 
--- /dev/null +++ b/config/rbac/auth_proxy_role.yaml @@ -0,0 +1,24 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: proxy-role + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + name: proxy-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml new file mode 100644 index 0000000..1ca2c6d --- /dev/null +++ b/config/rbac/auth_proxy_role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: proxy-rolebinding + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + name: proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: proxy-role +subjects: +- kind: ServiceAccount + name: kubeturbo-operator + namespace: system diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml new file mode 100644 index 0000000..ee14c39 --- /dev/null +++ b/config/rbac/auth_proxy_service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + name: kubeturbo-operator + app.kubernetes.io/name: service + app.kubernetes.io/instance: kubeturbo-operator-metrics-service + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + name: 
 kubeturbo-operator-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + name: kubeturbo-operator diff --git a/config/rbac/kubeturbo-operator-cluster-role.yaml b/config/rbac/kubeturbo-operator-cluster-role.yaml new file mode 100644 index 0000000..234bb70 --- /dev/null +++ b/config/rbac/kubeturbo-operator-cluster-role.yaml @@ -0,0 +1,81 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kubeturbo-operator +rules: + - verbs: + - '*' + apiGroups: + - '' + - apps + - extensions + resources: + - nodes + - pods + - configmaps + - endpoints + - events + - deployments + - persistentvolumeclaims + - replicasets + - replicationcontrollers + - services + - secrets + - serviceaccounts + - verbs: + - get + - list + - watch + apiGroups: + - '' + - apps + - extensions + - policy + resources: + - daemonsets + - endpoints + - limitranges + - namespaces + - persistentvolumes + - persistentvolumeclaims + - poddisruptionbudgets + - resourcequotas + - services + - statefulsets + - verbs: + - get + apiGroups: + - '' + resources: + - nodes/spec + - nodes/stats + - verbs: + - '*' + apiGroups: + - charts.helm.k8s.io + resources: + - '*' + - verbs: + - '*' + apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + - verbs: + - create + - get + - list + - update + apiGroups: + - coordination.k8s.io + resources: + - leases + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - watch + - get + - list diff --git a/config/rbac/kubeturbo_editor_role.yaml b/config/rbac/kubeturbo_editor_role.yaml new file mode 100644 index 0000000..3b7794e --- /dev/null +++ b/config/rbac/kubeturbo_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit kubeturbos. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: kubeturbo-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + name: kubeturbo-editor-role +rules: +- apiGroups: + - charts.helm.k8s.io + resources: + - kubeturbos + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - charts.helm.k8s.io + resources: + - kubeturbos/status + verbs: + - get diff --git a/config/rbac/kubeturbo_viewer_role.yaml b/config/rbac/kubeturbo_viewer_role.yaml new file mode 100644 index 0000000..d2edc2c --- /dev/null +++ b/config/rbac/kubeturbo_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view kubeturbos. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: kubeturbo-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + name: kubeturbo-viewer-role +rules: +- apiGroups: + - charts.helm.k8s.io + resources: + - kubeturbos + verbs: + - get + - list + - watch +- apiGroups: + - charts.helm.k8s.io + resources: + - kubeturbos/status + verbs: + - get diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml new file mode 100644 index 0000000..a5be160 --- /dev/null +++ b/config/rbac/kustomization.yaml @@ -0,0 +1,18 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. 
+- service_account.yaml +- kubeturbo-operator-cluster-role.yaml +- role_binding.yaml +# - leader_election_role.yaml +# - leader_election_role_binding.yaml +# Comment the following 4 lines if you want to disable +# the auth proxy (https://github.com/brancz/kube-rbac-proxy) +# which protects your /metrics endpoint. +# - auth_proxy_service.yaml +# - auth_proxy_role.yaml +# - auth_proxy_role_binding.yaml +# - auth_proxy_client_clusterrole.yaml diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000..6aeba7a --- /dev/null +++ b/config/rbac/leader_election_role.yaml @@ -0,0 +1,44 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: role + app.kubernetes.io/instance: leader-election-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000..714702c --- /dev/null +++ b/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: rolebinding + app.kubernetes.io/instance: leader-election-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + name: 
leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: kubeturbo-operator + namespace: system diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml new file mode 100644 index 0000000..46eab4c --- /dev/null +++ b/config/rbac/role.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kubeturbo-operator +rules: +- apiGroups: + - charts.helm.k8s.io + resources: + - kubeturbos + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - charts.helm.k8s.io + resources: + - kubeturbos/finalizers + verbs: + - update +- apiGroups: + - charts.helm.k8s.io + resources: + - kubeturbos/status + verbs: + - get + - patch + - update diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml new file mode 100644 index 0000000..649daa9 --- /dev/null +++ b/config/rbac/role_binding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + name: kubeturbo-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubeturbo-operator +subjects: +- kind: ServiceAccount + name: kubeturbo-operator + namespace: system diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml new file mode 100644 index 0000000..1f68229 --- /dev/null +++ b/config/rbac/service_account.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/instance: kubeturbo-operator-sa + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: 
kubeturbo-deploy + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + name: kubeturbo-operator + namespace: system diff --git a/config/samples/charts_v1_kubeturbo.yaml b/config/samples/charts_v1_kubeturbo.yaml new file mode 100644 index 0000000..34bb122 --- /dev/null +++ b/config/samples/charts_v1_kubeturbo.yaml @@ -0,0 +1,120 @@ +apiVersion: charts.helm.k8s.io/v1 +kind: Kubeturbo +metadata: + labels: + app.kubernetes.io/name: kubeturbo + app.kubernetes.io/instance: kubeturbo-release + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: kubeturbo-deploy + name: kubeturbo-release + namespace: turbo +spec: + serverMeta: + turboServer: "https://" + + restAPIConfig: + turbonomicCredentialsSecretName: turbonomic-credentials + + # Supply a targetName for user friendly identification of the k8s cluster + targetConfig: + targetName: + + # Specify custom turbo-cluster-reader or turbo-cluster-admin role instead of the default cluster-admin role + roleName: cluster-admin + + image: + repository: icr.io/cpopen/turbonomic/kubeturbo + tag: "" + # imagePullSecret: "" + # Uncomment to use an image from RHCC for cpu-frequency getter job - predefined in OCP Operator Hub version + # busyboxRepository: registry.access.redhat.com/ubi9/ubi-minimal + + # Assigning Kubeturbo to node, see + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + # + # kubeturboPodScheduling: + # nodeSelector: + # kubernetes.io/hostname: worker0 + # + # Or, use affinity: + # + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/hostname + # operator: In + # values: + # - worker1 + # + # Or, use taints and tolerations + # + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "mytaint" + # effect: "NoSchedule" + + # Configurations to register probe with Turbo Server + # 
sdkProtocolConfig: + # registrationTimeoutSec: 300 + # restartOnRegistrationTimeout: true + + # Uncomment out lines to configure HA Node to ESX policies by node role. Default is master + # Add more roles using format "\"foo\"\,\"bar\"" + # HANodeConfig: + # nodeRoles: "\"master\"" + + # Uncomment next lines to use dynamic logging level + # Changing this value does not require restart of Kubeturbo but takes about 1 minute to take effect + # logging: + # level: 2 + # nodePoolSize: + # min: 1 + # max: 1000 + + # Uncomment out to allow execution in OCP environments + #args: + # sccsupport: "*" + + # Uncomment out to specify kubeturbo container specifications when needed (quotas set on ns) + #resources: + # limits: + # memory: 4Gi + # cpu: "2" + # requests: + # memory: 512Mi + # cpu: "1" + + # Cluster Role rules for ORM owners. + # It's required when using ORM with ClusterRole 'turbo-cluster-admin'. + # It's recommended to use ORM with ClusterRole 'cluster-admin'. + ormOwners: + apiGroup: + # - redis.redis.opstreelabs.in + # - charts.helm.k8s.io + resources: + # - redis + # - xls + # Flag system workloads such as those defined in kube-system, openshift-system, etc. + # Kubeturbo will not generate actions for workloads that match the supplied patterns. + systemWorkloadDetectors: + # A list of regular expressions that match the namespace names for system workloads. + namespacePatterns: + - kube-.* + - openshift-.* + - cattle.* + # List operator-controlled workloads by name or namespace (using regular expressions) + # that should be excluded from the operator-controlled WorkloadController resize policy. + # By default, matching workloads will generate actions that are not in Recommend mode. + # exclusionDetectors: + # A list of regular expressions representing operator-controlled Workload Controllers. 
+ # operatorControlledNamespacePatterns: + # - example-.* + # - .*-example + # A list of regular expressions representing namespaces containing operator-controlled + # Workload Controllers. + # operatorControlledWorkloadsPatterns: + # - .*-example.* diff --git a/config/samples/charts_v1alpha1_kubeturbo.yaml b/config/samples/charts_v1alpha1_kubeturbo.yaml new file mode 100644 index 0000000..d81eae7 --- /dev/null +++ b/config/samples/charts_v1alpha1_kubeturbo.yaml @@ -0,0 +1,50 @@ +apiVersion: charts.helm.k8s.io/v1alpha1 +kind: Kubeturbo +metadata: + labels: + app.kubernetes.io/name: kubeturbo + app.kubernetes.io/instance: kubeturbo-sample + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: kubeturbo-deploy + name: kubeturbo-sample + namespace: turbo +spec: + image: + tag: "8.14.4-SNAPSHOT" + serverMeta: + turboServer: "https://" + version: "8.14.4" + restAPIConfig: + turbonomicCredentialsSecretName: turbonomic-credentials + targetConfig: + targetName: + + # Assigning Kubeturbo to node, see + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + # + # kubeturboPodScheduling: + # nodeSelector: + # kubernetes.io/hostname: worker0 + # + # Or, use affinity: + # + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/hostname + # operator: In + # values: + # - worker1 + # + # Or, use taints and tolerations + # + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "mytaint" + # effect: "NoSchedule" + + # TODO(user): Add fields here diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml new file mode 100644 index 0000000..cec36e6 --- /dev/null +++ b/config/samples/kustomization.yaml @@ -0,0 +1,5 @@ +## Append samples of your project ## +resources: +- charts_v1_kubeturbo.yaml +- charts_v1alpha1_kubeturbo.yaml 
+#+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/scorecard/bases/config.yaml b/config/scorecard/bases/config.yaml new file mode 100644 index 0000000..c770478 --- /dev/null +++ b/config/scorecard/bases/config.yaml @@ -0,0 +1,7 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: [] diff --git a/config/scorecard/kustomization.yaml b/config/scorecard/kustomization.yaml new file mode 100644 index 0000000..50cd2d0 --- /dev/null +++ b/config/scorecard/kustomization.yaml @@ -0,0 +1,16 @@ +resources: +- bases/config.yaml +patchesJson6902: +- path: patches/basic.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +- path: patches/olm.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +#+kubebuilder:scaffold:patchesJson6902 diff --git a/config/scorecard/patches/basic.config.yaml b/config/scorecard/patches/basic.config.yaml new file mode 100644 index 0000000..78ad61a --- /dev/null +++ b/config/scorecard/patches/basic.config.yaml @@ -0,0 +1,10 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.34.1 + labels: + suite: basic + test: basic-check-spec-test diff --git a/config/scorecard/patches/olm.config.yaml b/config/scorecard/patches/olm.config.yaml new file mode 100644 index 0000000..69dda63 --- /dev/null +++ b/config/scorecard/patches/olm.config.yaml @@ -0,0 +1,50 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.34.1 + labels: + suite: olm + test: olm-bundle-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-validation + image: 
quay.io/operator-framework/scorecard-test:v1.34.1 + labels: + suite: olm + test: olm-crds-have-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.34.1 + labels: + suite: olm + test: olm-crds-have-resources-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.34.1 + labels: + suite: olm + test: olm-spec-descriptors-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.34.1 + labels: + suite: olm + test: olm-status-descriptors-test diff --git a/deploy/README.md b/deploy/README.md new file mode 100644 index 0000000..5473f6f --- /dev/null +++ b/deploy/README.md @@ -0,0 +1,33 @@ +**Documentation** is being maintained on the Wiki for this project. Visit the [Kubeturbo Deployment Overview](https://github.com/turbonomic/kubeturbo/wiki/Deployment-Options). Visit [Kubeturbo Wiki](https://github.com/turbonomic/kubeturbo/wiki) for the full documentation, examples and guides. + +## Kubeturbo Deployment Options # + +The kubeturbo pod can be easily deployed one of 3 ways: +1. [Helm Chart](https://github.com/turbonomic/kubeturbo/wiki/Helm-Deployment-Details) +2. [Deploy Resources via yaml](https://github.com/turbonomic/kubeturbo/wiki/Yaml-Deployment-Details) +3. [Operator](https://github.com/turbonomic/kubeturbo/wiki/Operator-Details) + + +## Prerequisites + +Visit [Kubeturbo Prerequisites](https://github.com/turbonomic/kubeturbo/wiki/Prerequisites) + +## Helm Chart + +Helm charts are an easy way to deploy and update kubeturbo. We provide you a helm chart that you can download locally, and install specifying a few parameters. 
+ +For more details go to the [Helm Chart option](https://github.com/turbonomic/kubeturbo/wiki/Helm-Deployment-Details). + + +## Deploy with YAMLs + +You can deploy the kubeturbo pod using yamls that define the resources required. For more information, go to [Deploy Resources via yaml](https://github.com/turbonomic/kubeturbo/wiki/Yaml-Deployment-Details) + +Strongly advise you to use the sample yamls provided [here](https://github.com/turbonomic/kubeturbo/tree/master/deploy/kubeturbo_yamls). + + +## Deploy with an Operator + +You can leverage an Operator to call the Helm charts to deploy kubeturbo. For details go to [Kubeturbo Operator deployment](https://github.com/turbonomic/kubeturbo/wiki/Operator-Details). + +There's no place like home... go back to the [Turbonomic Overview](https://github.com/turbonomic/kubeturbo/wiki/Overview). diff --git a/deploy/kubeturbo/.helmignore b/deploy/kubeturbo/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/deploy/kubeturbo/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deploy/kubeturbo/Chart.yaml b/deploy/kubeturbo/Chart.yaml new file mode 100644 index 0000000..7a43fb5 --- /dev/null +++ b/deploy/kubeturbo/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "8.0" +description: A Helm chart for Kubernetes +name: kubeturbo +version: 1.0.0 diff --git a/deploy/kubeturbo/HELM_README.md b/deploy/kubeturbo/HELM_README.md new file mode 100644 index 0000000..e22625f --- /dev/null +++ b/deploy/kubeturbo/HELM_README.md @@ -0,0 +1,16 @@ +**Documentation** is being maintained on the Wiki for this project. 
Visit the [Helm Chart details here](https://github.com/turbonomic/kubeturbo/wiki/Helm-Deployment-Details). Visit [Kubeturbo Wiki](https://github.com/turbonomic/kubeturbo/wiki) for the full documentation, examples and guides. + +## Kubeturbo Deploy via Helm Charts + +[Helm](https://helm.sh/) is a kubernetes package manager that allows you to more easily manage charts, which are a way to package all resources associated with an application. Helm provides a way to package, deploy and update using simple commands, and provides a way to customize or update parameters of your resources, without the worry of yaml formatting. For more info see: [Helm: The Kubernetes Package Manager](https://github.com/helm/helm) + +To use this method, you will already have a helm client and tiller server installed, and are familiar with how to use helm and chart repositories. Go to [Helm Docs](https://helm.sh/docs/using_helm/%23quickstart-guide) to get started. + +The Helm Chart provided [here](https://github.com/turbonomic/kubeturbo/tree/master/deploy/kubeturbo) will deploy kubeturbo and create the following resources: +1. Create a Namespace or Project (default is "turbo") +1. Service Account and binding to cluster-admin clusterrole (default is "turbo-user" with "turbo-all-binding" role) +1. ConfigMap for kubeturbo to connect to the Turbonomic server +1. Deploy kubeturbo Pod + + +There's no place like home... go back to the [Turbonomic Overview](https://github.com/turbonomic/kubeturbo/wiki/Overview). diff --git a/deploy/kubeturbo/templates/_helpers.tpl b/deploy/kubeturbo/templates/_helpers.tpl new file mode 100644 index 0000000..763fad3 --- /dev/null +++ b/deploy/kubeturbo/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "kubeturbo.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kubeturbo.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" $name .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kubeturbo.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/deploy/kubeturbo/templates/configmap.yaml b/deploy/kubeturbo/templates/configmap.yaml new file mode 100644 index 0000000..f4a68e8 --- /dev/null +++ b/deploy/kubeturbo/templates/configmap.yaml @@ -0,0 +1,88 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: turbo-config-{{ .Release.Name }} +data: + turbo.config: |- + { + "communicationConfig": { + "serverMeta": { + {{- if .Values.serverMeta.proxy }} + "proxy": "{{ .Values.serverMeta.proxy }}", + {{- end }} + "version": "{{ .Values.serverMeta.version }}", + "turboServer": "{{ .Values.serverMeta.turboServer }}" + }, + "restAPIConfig": { + "opsManagerUserName": "{{ .Values.restAPIConfig.opsManagerUserName }}", + "opsManagerPassword": "{{ .Values.restAPIConfig.opsManagerPassword }}" + }, + "sdkProtocolConfig": { + "registrationTimeoutSec": {{ .Values.sdkProtocolConfig.registrationTimeoutSec }}, + "restartOnRegistrationTimeout": {{ .Values.sdkProtocolConfig.restartOnRegistrationTimeout }} + } + }, + {{- if .Values.featureGates }} + "featureGates": {{ .Values.featureGates | toJson }}, + {{- end }} + "HANodeConfig": { + "nodeRoles": [{{ .Values.HANodeConfig.nodeRoles }}] + {{- if 
.Values.targetConfig }} + {{- if or .Values.targetConfig.targetName .Values.targetConfig.targetType }} + }, + "targetConfig": { + {{- if and .Values.targetConfig.targetName .Values.targetConfig.targetType }} + "targetName": "{{ .Values.targetConfig.targetName }}", + "targetType": "{{ .Values.targetConfig.targetType }}" + {{- else if .Values.targetConfig.targetName }} + "targetName": "{{ .Values.targetConfig.targetName }}" + {{- else }} + "targetType": "{{ .Values.targetConfig.targetType }}" + {{- end}} + {{- end }} + {{- end }} + {{- if .Values.annotationWhitelist }} + }, + "annotationWhitelist": { + "containerSpec": "{{ default "" .Values.annotationWhitelist.containerSpec }}", + "namespace": "{{ default "" .Values.annotationWhitelist.namespace }}", + "workloadController": "{{ default "" .Values.annotationWhitelist.workloadController }}" + {{- end }} + } + } + turbo-autoreload.config: |- + { + "logging": { + "level": {{ .Values.logging.level }} + }, + "nodePoolSize": { + "min": {{ .Values.nodePoolSize.min }}, + "max": {{ .Values.nodePoolSize.max }} + }, + {{- if .Values.wiremock.enabled }} + "wiremock": { + "enabled": {{ .Values.wiremock.enabled }}, + "url": "{{ .Values.wiremock.url }}" + }, + {{- end }} + {{- $systemWorkloadDetectors := .Values.systemWorkloadDetectors | default dict }} + {{- $exclusionDetectors := .Values.exclusionDetectors | default dict }} + "systemWorkloadDetectors": { + "namespacePatterns": {{ $systemWorkloadDetectors.namespacePatterns | default list | toJson }} + }, + "exclusionDetectors": { + "operatorControlledWorkloadsPatterns": {{ $exclusionDetectors.operatorControlledWorkloadsPatterns | default list | toJson }}, + "operatorControlledNamespacePatterns": {{ $exclusionDetectors.operatorControlledNamespacePatterns | default list | toJson }} + }, + "daemonPodDetectors": { + "namespaces": {{ .Values.daemonPodDetectors.namespacePatterns | default list | toJson }}, + "podNamePatterns": {{ .Values.daemonPodDetectors.podNamePatterns | default list 
| toJson }} + } + {{- if .Values.discovery }} + , + "discovery": { + "chunkSendDelayMillis": {{ default 0 .Values.discovery.chunkSendDelayMillis }}, + "numObjectsPerChunk": {{ default 5000 .Values.discovery.numObjectsPerChunk }} + } + {{- end }} + } diff --git a/deploy/kubeturbo/templates/deployment.yaml b/deploy/kubeturbo/templates/deployment.yaml new file mode 100644 index 0000000..06525d5 --- /dev/null +++ b/deploy/kubeturbo/templates/deployment.yaml @@ -0,0 +1,135 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "kubeturbo.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "kubeturbo.name" . }} + helm.sh/chart: {{ include "kubeturbo.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "kubeturbo.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + strategy: + type: Recreate + template: + metadata: + annotations: + {{- with .Values.annotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app.kubernetes.io/name: {{ include "kubeturbo.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + serviceAccountName: {{ .Values.serviceAccountName }} +{{- if .Values.image.imagePullSecret }} + imagePullSecrets: + - name: {{ .Values.image.imagePullSecret }} +{{- end }} +{{- if .Values.kubeturboPodScheduling.nodeSelector }} + nodeSelector: {{- .Values.kubeturboPodScheduling.nodeSelector | toYaml | nindent 8 }} +{{- end }} +{{- if .Values.kubeturboPodScheduling.affinity }} + affinity: {{- .Values.kubeturboPodScheduling.affinity | toYaml | nindent 8 }} +{{- end }} +{{- if .Values.kubeturboPodScheduling.tolerations }} + tolerations: {{- .Values.kubeturboPodScheduling.tolerations | toYaml | nindent 8 }} +{{- end }} + containers: + - name: {{ .Chart.Name }} + env: + - name: KUBETURBO_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - --turboconfig=/etc/kubeturbo/turbo.config + - --v={{ .Values.args.logginglevel }} + {{- if .Values.args.kubelethttps }} + - --kubelet-https={{ .Values.args.kubelethttps }} + - --kubelet-port={{ .Values.args.kubeletport }} + {{- end }} + {{- if .Values.args.discoveryIntervalSec }} + - --discovery-interval-sec={{ .Values.args.discoveryIntervalSec }} + {{- end }} + {{- if .Values.args.discoverySampleIntervalSec }} + - --discovery-sample-interval={{ .Values.args.discoverySampleIntervalSec }} + {{- end }} + {{- if .Values.args.discoverySamples }} + - --discovery-samples={{ .Values.args.discoverySamples }} + {{- end }} + {{- if .Values.args.discoveryTimeoutSec }} + - --discovery-timeout-sec={{ .Values.args.discoveryTimeoutSec }} + {{- end }} + {{- if .Values.args.garbageCollectionIntervalMin }} + - --garbage-collection-interval={{ .Values.args.garbageCollectionIntervalMin }} + {{- end }} + {{- if .Values.args.discoveryWorkers }} + - --discovery-workers={{ .Values.args.discoveryWorkers }} + {{- end }} + {{- if .Values.args.sccsupport }} + - 
--scc-support={{ .Values.args.sccsupport }} + {{- end }} + {{- if .Values.args.readinessRetryThreshold }} + - --readiness-retry-threshold={{ .Values.args.readinessRetryThreshold }} + {{- end }} + {{- if .Values.args.failVolumePodMoves }} + - --fail-volume-pod-moves={{ .Values.args.failVolumePodMoves }} + {{- end }} + {{- if .Values.image.busyboxRepository }} + - --busybox-image={{ .Values.image.busyboxRepository }} + {{- end }} + {{- if .Values.image.imagePullSecret }} + - --busybox-image-pull-secret={{ .Values.image.imagePullSecret }} + - --cpufreqgetter-image-pull-secret={{ .Values.image.imagePullSecret }} + {{- end }} + {{- if .Values.image.cpufreqgetterRepository }} + - --cpufreqgetter-image={{ .Values.image.cpufreqgetterRepository }} + {{- end }} + {{- if .Values.args.busyboxExcludeNodeLabels }} + - --cpufreq-job-exclude-node-labels={{ .Values.args.busyboxExcludeNodeLabels }} + {{- end }} + {{- if not .Values.args.stitchuuid }} + - --stitch-uuid={{ .Values.args.stitchuuid }} + {{- end }} + {{- if .Values.args.pre16k8sVersion }} + - --k8sVersion=1.5 + {{- end }} + {{- if not .Values.args.cleanupSccImpersonationResources }} + - --cleanup-scc-impersonation-resources={{ .Values.args.cleanupSccImpersonationResources }} + {{- end }} + {{- if .Values.args.skipCreatingSccImpersonationResources }} + - --skip-creating-scc-impersonation-resources={{ .Values.args.skipCreatingSccImpersonationResources }} + {{- end }} + {{- if .Values.args.satelliteLocationProvider }} + - --satellite-location-provider={{ .Values.args.satelliteLocationProvider }} + {{- end }} + resources: +{{ toYaml .Values.resources | indent 12 }} + volumeMounts: + - name: turbo-volume + mountPath: /etc/kubeturbo + readOnly: true + - name: turbonomic-credentials-volume + mountPath: /etc/turbonomic-credentials + readOnly: true + - name: varlog + mountPath: /var/log + volumes: + - name: turbo-volume + configMap: + name: turbo-config-{{ .Release.Name }} + - name: turbonomic-credentials-volume + secret: + 
defaultMode: 420 + optional: true + secretName: {{ .Values.restAPIConfig.turbonomicCredentialsSecretName | default "turbonomic-credentials" | quote }} + - name: varlog + emptyDir: {} + restartPolicy: Always diff --git a/deploy/kubeturbo/templates/serviceaccount.yaml b/deploy/kubeturbo/templates/serviceaccount.yaml new file mode 100644 index 0000000..df6a478 --- /dev/null +++ b/deploy/kubeturbo/templates/serviceaccount.yaml @@ -0,0 +1,238 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccountName }} +{{- if eq .Values.roleName "turbo-cluster-reader" }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Values.roleName }}-{{ .Release.Name }}-{{ .Release.Namespace }} +rules: + - apiGroups: + - "" + - apps + - app.k8s.io + - apps.openshift.io + - batch + - extensions + - turbonomic.com # Need it for backward compatibility with ORM v1 + - devops.turbonomic.io + - config.openshift.io + resources: + - nodes + - pods + - deployments + - replicasets + - replicationcontrollers + - services + - endpoints + - namespaces + - limitranges + - resourcequotas + - persistentvolumes + - persistentvolumeclaims + - applications + - jobs + - cronjobs + - statefulsets + - daemonsets + - deploymentconfigs + - operatorresourcemappings + - clusterversions + verbs: + - get + - watch + - list + - apiGroups: + - machine.openshift.io + resources: + - machines + - machinesets + verbs: + - get + - list + - apiGroups: + - "" + resources: + - nodes/spec + - nodes/stats + - nodes/metrics + - nodes/proxy + verbs: + - get + - apiGroups: + - policy.turbonomic.io + resources: + - slohorizontalscales + - containerverticalscales + - policybindings + verbs: + - get + - list + - watch +{{- end }} +{{- if eq .Values.roleName "turbo-cluster-admin" }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Values.roleName }}-{{ .Release.Name }}-{{ .Release.Namespace }} +rules: + - apiGroups: + - "" + - batch + 
resources: + - pods + - jobs + verbs: + - '*' + - apiGroups: + - "" + - apps + - apps.openshift.io + - extensions + - turbonomic.com # Need it for backward compatibility with ORM v1 + - devops.turbonomic.io + - charts.helm.k8s.io + {{- range .Values.ormOwners.apiGroup }} + - {{.}} + {{- end }} + resources: + - deployments + - replicasets + - replicationcontrollers + - statefulsets + - daemonsets + - deploymentconfigs + - resourcequotas + - operatorresourcemappings + - operatorresourcemappings/status + - xls + {{- range .Values.ormOwners.resources }} + - {{.}} + {{- end }} + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - "" + - apps + - batch + - extensions + - policy + - app.k8s.io + - argoproj.io + - apiextensions.k8s.io + - config.openshift.io + resources: + - nodes + - services + - endpoints + - namespaces + - limitranges + - persistentvolumes + - persistentvolumeclaims + - poddisruptionbudget + - cronjobs + - applications + - customresourcedefinitions + - clusterversions + verbs: + - get + - list + - watch + - apiGroups: + - machine.openshift.io + resources: + - machines + - machinesets + verbs: + - get + - list + - update + - apiGroups: + - "" + resources: + - nodes/spec + - nodes/stats + - nodes/metrics + - nodes/proxy + - pods/log + verbs: + - get + - apiGroups: + - policy.turbonomic.io + resources: + - slohorizontalscales + - containerverticalscales + - policybindings + verbs: + - get + - list + - watch + # Need it for SCC impersonation + - apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - list + - use + # Need it for SCC impersonation + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + {{- if not .Values.args.skipCreatingSccImpersonationResources }} + - create + - delete + {{- else }} + - get + {{- end }} + - impersonate + # Need it for SCC impersonation + - apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + - clusterroles + - 
clusterrolebindings + verbs: + {{- if not .Values.args.skipCreatingSccImpersonationResources }} + - create + - delete + - update + {{- else }} + - get + {{- end }} +{{- end }} +--- +kind: ClusterRoleBinding +# For OpenShift 3.4-3.7 use apiVersion: v1 +# For kubernetes 1.9 use rbac.authorization.k8s.io/v1 +# For kubernetes 1.8 use rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.roleBinding }}-{{ .Release.Name }}-{{ .Release.Namespace }} +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccountName }} + namespace: {{ .Release.Namespace }} +roleRef: + # User creating this resource must have permissions to add this policy to the SA + kind: ClusterRole + {{- if eq .Values.roleName "cluster-admin" }} + name: "cluster-admin" + {{- else if or (eq .Values.roleName "turbo-cluster-admin") (eq .Values.roleName "turbo-cluster-reader") }} + name: {{ .Values.roleName }}-{{ .Release.Name }}-{{ .Release.Namespace }} + {{- else }} + name: {{ .Values.roleName }} + {{- end }} + # For OpenShift v3.4 remove apiGroup line + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/deploy/kubeturbo/values.yaml b/deploy/kubeturbo/values.yaml new file mode 100644 index 0000000..ddc77c7 --- /dev/null +++ b/deploy/kubeturbo/values.yaml @@ -0,0 +1,177 @@ +# Default values for kubeturbo. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 + +# Replace the image with desired version +image: + repository: icr.io/cpopen/turbonomic/kubeturbo + tag: + pullPolicy: IfNotPresent +# busyboxRepository: busybox +# imagePullSecret: "" +# cpufreqgetterRepository: icr.io/cpopen/turbonomic/cpufreqgetter + +annotations: + kubeturbo.io/controllable: "false" + +# nameOverride: "" +# fullnameOverride: "" + +# Specify one or more kubeturbo pod scheduling constraints in the cluster +# See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ for examples on +# nodeSelector, affinity, tolerations +kubeturboPodScheduling: + nodeSelector: + affinity: + tolerations: + +# Specify 'turbo-cluster-reader' or 'turbo-cluster-admin' as role name instead of the default using +# the 'cluster-admin' role. A cluster role with this name will be created during deployment +# If using a role name other than the pre-defined role names, cluster role will not be created. This role should be +# existing in the cluster and should have the necessary permissions required for kubeturbo to work accurately. 
+roleName: "cluster-admin" + +# Specify the name of clusterrolebinding +roleBinding: "turbo-all-binding" + +# Specify the name of the serviceaccount +serviceAccountName: "turbo-user" + +# Turbo server address +serverMeta: + turboServer: https:// +# proxy: http://username:password@proxyserver:proxyport or http://proxyserver:proxyport + +# Turbo server api user and password stored in a secret or optionally specified as username and password +# The opsManagerUserName requires Turbo administrator role +restAPIConfig: + turbonomicCredentialsSecretName: "turbonomic-credentials" + # opsManagerUserName: + # opsManagerPassword: + +# Turbo server registration process configuration +sdkProtocolConfig: + registrationTimeoutSec: 300 + restartOnRegistrationTimeout: true + +# For targetConfig, targetName provides better group naming to identify k8s clusters in UI +# - If no targetConfig is specified, a default targetName will be created from the apiserver URL in +# the kubeconfig. +# - Specify a targetName only will register a probe with type Kubernetes-, as well as +# adding your cluster as a target with the name Kubernetes-. +# - Specify a targetType only will register a probe without adding your cluster as a target. +# The probe will appear as a Cloud Native probe in the UI with a type Kubernetes-. +# +targetConfig: + targetName: +# targetType: Target_Type + +# In kubeturbo 6.4.3+, you can define what nodes should stay high-available based on the node role +# Master nodes are by default populated by --set HANodeConfig.nodeRoles="\"foo\"\,\"bar\"" +HANodeConfig: + nodeRoles: '"master"' + +# In kubeturbo 6.3+, you can define how daemon pods are identified. Use either or both +# Note if you do not enable daemonPodDetectors, the default is to identify all pods running as kind = daemonSet +# Any entry for daemonPodDetectors would overwrite default. Recommend you do not use this parameter. 
+daemonPodDetectors: + namespacePatterns: [] + podNamePatterns: [] + +# The annotationWhitelist allows users to define regular expressions to allow kubeturbo to collect +# matching annotations for the specified entity type. By default, no annotations are collected. +# These regular expressions accept the RE2 syntax (except for \C) as defined here: https://github.com/google/re2/wiki/Syntax +# annotationWhitelist: +# containerSpec: "" +# namespace: "" +# workloadController: "" + +# The featureGates property defines a map of string-to-boolean values that provides users with a mechanism +# to enable/disable features. For a list of supported feature gates, see +# https://github.com/turbonomic/kubeturbo/blob/master/pkg/features/features.go +# featureGates: +# PersistentVolumes: true (default: true) +# ThrottlingMetrics: false (default: true) +# HonorAzLabelPvAffinity: true (default: false) +# GitopsApps: false (default: true) +# GoMemLimit: true (default: false) + +# Dynamic configuration. Changing this value does not require restart of Kubeturbo but takes about 1 minute to take effect +# logging level +logging: + level: 2 +# Define the default values for your cluster configuration. +# `nodePoolSize.min`: This parameter defines the minimum number of nodes allowed in the node pool. +# It ensures that the node pool remains resilient and can continue its normal operations even if some nodes become +# unavailable due to hardware failures or other issues. The minimum number of nodes should be set based on the desired +# level of resiliency and the specific requirements of the applications running in the node pool. +# `nodePoolSize.max`: This parameter defines the maximum number of nodes allowed in the node pool. It prevents the cluster from growing +# uncontrollably and helps manage the available resources efficiently.
The maximum number of nodes should be set based +# on the available resources in the environment, such as IP addresses, CPU, memory, storage capacity, and networking bandwidth. +# It should also consider the application requirements and performance characteristics of the workloads running on the node pool. +nodePoolSize: + min: 1 + max: 1000 + +# Cluster Role rules for ORM owners. +# It's required when using ORM with ClusterRole 'turbo-cluster-admin'. +# It's recommended to use ORM with ClusterRole 'cluster-admin'. +ormOwners: + apiGroup: + # - redis.redis.opstreelabs.in + # - charts.helm.k8s.io + resources: + # - redis + # - xls + +# Flag system workloads such as those defined in kube-system, openshift-system, etc. +# Kubeturbo will not generate actions for workloads that match the supplied patterns. +systemWorkloadDetectors: + # A list of regular expressions that match the namespace names for system workloads. + namespacePatterns: + - kube-.* + - openshift-.* + - cattle.* + +# List operator-controlled workloads by name or namespace (using regular expressions) +# that should be excluded from the operator-controlled WorkloadController resize policy. +# By default, matching workloads will generate actions that are not in Recommend mode. +# exclusionDetectors: +# # A list of regular expressions representing operator-controlled Workload Controllers. +# operatorControlledWorkloadsPatterns: +# - example-.* +# - .*-example +# # A list of regular expressions representing namespaces containing operator-controlled Workload Controllers. 
+# operatorControlledNamespacePatterns: +# - .*-example.* + +args: + # logging level + logginglevel: 2 + # If running on k8s 1.10.x or older, set https to false and change port 10255 for kubelet + kubelethttps: true + kubeletport: 10250 + # set to false if using IP for stitching + stitchuuid: true + # if Kubernetes version is older than 1.6, then add another arg for move/resize action + pre16k8sVersion: false + # cleanup the resources for scc impersonation by default + cleanupSccImpersonationResources: true + # Kubeturbo creates the resources for scc impersonation by default + skipCreatingSccImpersonationResources: false + # required for OCP cluster + # sccsupport: "*" + +resources: {} + +wiremock: + enabled: false + url: wiremock:8080 +# Discovery-related configurations +# discovery: +# # time delay (in milliseconds) between transmissions of chunked discovery data +# chunkSendDelayMillis: 0 +# # desired size (in number of DTOs) of discovery data chunks +# numObjectsPerChunk: 5000 diff --git a/deploy/kubeturbo_operator_yamls/kubeturbo_crd.yaml b/deploy/kubeturbo_operator_yamls/kubeturbo_crd.yaml new file mode 100644 index 0000000..26899ec --- /dev/null +++ b/deploy/kubeturbo_operator_yamls/kubeturbo_crd.yaml @@ -0,0 +1,1330 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/operator-framework/operator-sdk/pull/2703 + controller-gen.kubebuilder.io/version: v0.15.0 + name: kubeturbos.charts.helm.k8s.io +spec: + group: charts.helm.k8s.io + names: + kind: Kubeturbo + listKind: KubeturboList + plural: kubeturbos + shortNames: + - kt + singular: kubeturbo + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Kubeturbo is the Schema for the kubeturbos API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + default: {} + description: KubeturboSpec defines the desired state of Kubeturbo + properties: + HANodeConfig: + default: + nodeRoles: '"master"' + description: Create HA placement policy for Node to Hypervisor by + node role. Master is default + properties: + nodeRoles: + default: '"master"' + description: Node role names + type: string + type: object + annotationWhitelist: + description: |- + The annotationWhitelist allows users to define regular expressions to allow kubeturbo to collect + matching annotations for the specified entity type. By default, no annotations are collected. 
+ These regular expressions accept the RE2 syntax (except for \C) as defined here: https://github.com/google/re2/wiki/Syntax + properties: + containerSpec: + type: string + namespace: + type: string + workloadController: + type: string + type: object + annotations: + additionalProperties: + type: string + default: + kubeturbo.io/controllable: "false" + type: object + args: + default: + logginglevel: 2 + description: Kubeturbo command line arguments + properties: + busyboxExcludeNodeLabels: + description: Do not run busybox on these nodes to discover the + cpu frequency with k8s 1.18 and later, default is either of + kubernetes.io/os=windows or beta.kubernetes.io/os=windows present + as node label + type: string + cleanupSccImpersonationResources: + default: true + description: Identify if cleanup the resources created for scc + impersonation, default is true + type: boolean + discoveryIntervalSec: + default: 600 + description: The discovery interval in seconds + type: integer + discoverySampleIntervalSec: + default: 60 + description: The discovery interval in seconds to collect additional + resource usage data samples from kubelet. This should be no + smaller than 10 seconds. + type: integer + discoverySamples: + default: 10 + description: The number of resource usage data samples to be collected + from kubelet in each full discovery cycle. This should be no + larger than 60. + type: integer + discoveryTimeoutSec: + default: 180 + description: The discovery timeout in seconds for each discovery + worker. Default value is 180 seconds + type: integer + discoveryWorkers: + default: 10 + description: The number of discovery workers. Default is 10 + type: integer + failVolumePodMoves: + description: Allow kubeturbo to reschedule pods with volumes attached + type: boolean + garbageCollectionIntervalMin: + default: 10 + description: The garbage collection interval in minutes for potentially + leaked pods due to failed actions and kubeturbo restarts. 
Default + value is 10 minutes + type: integer + gitCommitMode: + description: The commit mode that should be used for git action + executions with ArgoCD Integration. One of request or direct. + Defaults to direct. + type: string + gitEmail: + description: The email to be used to push changes to git with + ArgoCD integration + type: string + gitSecretName: + description: The name of the secret which holds the git credentials + to be used with ArgoCD integration + type: string + gitSecretNamespace: + description: The namespace of the secret which holds the git credentials + to be used with ArgoCD integration + type: string + gitUsername: + description: The username to be used to push changes to git with + ArgoCD integration + type: string + kubelethttps: + default: true + description: Identify if kubelet requires https + type: boolean + kubeletport: + default: 10250 + description: Identify kubelet port + type: integer + logginglevel: + default: 2 + description: Define logging level, default is info = 2 + type: integer + pre16k8sVersion: + default: false + type: boolean + readinessRetryThreshold: + format: int32 + type: integer + satelliteLocationProvider: + description: The IBM cloud satellite location provider, it only + support azure as of today + type: string + sccsupport: + description: Allow kubeturbo to execute actions in OCP + type: string + skipCreatingSccImpersonationResources: + default: false + description: Skip creating the resources for scc impersonation + type: boolean + stitchuuid: + default: true + description: Identify if using uuid or ip for stitching + type: boolean + type: object + daemonPodDetectors: + default: {} + description: |- + You can use this configuration to define how daemon pods are identified. + Note if you do not enable daemonPodDetectors, the default is to identify all pods running as kind = daemonSet + Any entry for daemonPodDetectors would overwrite default. Recommend you do not use this parameter. 
+ properties: + namespacePatterns: + items: + type: string + type: array + podNamePatterns: + items: + type: string + type: array + type: object + discovery: + default: + chunkSendDelayMillis: 0 + numObjectsPerChunk: 5000 + description: Discovery-related configurations + properties: + chunkSendDelayMillis: + default: 0 + description: time delay (in milliseconds) between transmissions + of chunked discovery data + format: int32 + type: integer + numObjectsPerChunk: + default: 5000 + description: Desired size (in number of DTOs) of discovery data + chunks (default = 5,000) + format: int32 + type: integer + type: object + exclusionDetectors: + description: Identify operator-controlled workloads by name or namespace + using regular expressions + properties: + operatorControlledNamespacePatterns: + description: A list of regular expressions representing namespaces + containing operator-controlled Workload Controllers. Workload + Controllers deployed within the matching namespaces will not + have actions generated against them. + items: + type: string + type: array + operatorControlledWorkloadsPatterns: + description: A list of regular expressions representing operator-controlled + Workload Controllers. Workload Controllers that match the supplied + expression will not have actions generated against them. + items: + type: string + type: array + type: object + featureGates: + additionalProperties: + type: boolean + description: Enable or disable features + type: object + image: + default: + pullPolicy: IfNotPresent + repository: icr.io/cpopen/turbonomic/kubeturbo + description: Kubeturbo image details for deployments outside of RH + Operator Hub + properties: + busyboxRepository: + description: Busybox repository. default is busybox. This is overridden + by cpufreqgetterRepository + type: string + cpufreqgetterRepository: + description: Repository used to get node cpu frequency. 
+ type: string + imagePullSecret: + description: Define the secret used to authenticate to the container + image registry + type: string + pullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + repository: + default: icr.io/cpopen/turbonomic/kubeturbo + description: Container repository + type: string + tag: + description: Kubeturbo container image tag + type: string + type: object + kubeturboPodScheduling: + description: |- + Specify one or more kubeturbo pod scheduling constraints in the cluster. + See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ for examples on nodeSelector, affinity, tolerations + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. 
+ properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. 
avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. 
+ The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + tolerations: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. 
+ type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + logging: + default: + level: 2 + description: Optional logging level configuration. Changing this value + does not require restart of Kubeturbo but takes about 1 minute to + take effect + properties: + level: + default: 2 + description: Define logging level + type: integer + type: object + nodePoolSize: + default: + max: 1000 + min: 1 + description: Optional node pool configuration. Changing this value + does not require restart of Kubeturbo but takes about 1 minute to + take effect + properties: + max: + default: 1000 + description: maximum number of nodes allowed in the node pool + type: integer + min: + default: 1 + description: minimum number of nodes allowed in the node pool + type: integer + type: object + ormOwners: + description: Cluster Role rules for ORM owners. It's required when + using ORM with ClusterRole 'turbo-cluster-admin'. 
It's recommended + to use ORM with ClusterRole 'cluster-admin' + properties: + apiGroup: + description: API group for ORM owners + items: + type: string + type: array + resources: + description: resources for ORM owners + items: + type: string + type: array + type: object + replicaCount: + description: Kubeturbo replicaCount + format: int32 + type: integer + resources: + description: Kubeturbo resource configuration + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restAPIConfig: + default: + turbonomicCredentialsSecretName: turbonomic-credentials + description: Credentials to register probe with Turbo Server + properties: + opsManagerPassword: + description: Turbo admin user password + type: string + opsManagerUserName: + description: Turbo admin user id + type: string + turbonomicCredentialsSecretName: + default: turbonomic-credentials + description: Name of k8s secret that contains the turbo credentials + type: string + type: object + roleBinding: + default: turbo-all-binding + description: |- + The name of cluster role binding. Default is turbo-all-binding. If role binding is updated from an existing kubeturbo instance, + the operator will not delete the existing role binding in the cluster. Therefore, the user may want to manually delete the old + clusterrolebinding from the cluster so that the service account is no longer tied to the previous role binding. 
+ type: string + roleName: + default: cluster-admin + description: |- + Specify 'turbo-cluster-reader' or 'turbo-cluster-admin' as role name instead of the default using + the 'cluster-admin' role. A cluster role with this name will be created during deployment + If using a role name other than the pre-defined role names, cluster role will not be created. This role should be + existing in the cluster and should have the necessary permissions required for kubeturbo to work accurately. + pattern: ^[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?::[a-z0-9](?:[-a-z0-9]*[a-z0-9])?)*$ + type: string + sdkProtocolConfig: + default: + registrationTimeoutSec: 300 + restartOnRegistrationTimeout: true + description: Configurations to register probe with Turbo Server + properties: + registrationTimeoutSec: + default: 300 + description: Time in seconds to wait for registration response + from the Turbo Server + type: integer + restartOnRegistrationTimeout: + default: true + description: Restart probe container on registration timeout + type: boolean + type: object + serverMeta: + default: + turboServer: https://Turbo_server_URL + description: Configuration for Turbo Server + properties: + proxy: + description: Proxy server address + type: string + turboServer: + default: https://Turbo_server_URL + description: URL for Turbo Server endpoint + type: string + version: + description: Turbo Server major version + type: string + type: object + serviceAccountName: + default: turbo-user + description: The name of the service account name. Default is turbo-user + type: string + systemWorkloadDetectors: + default: + namespacePatterns: + - kube-.* + - openshift-.* + - cattle.* + description: Flag system workloads such as those defined in kube-system, + openshift-system, etc. 
Kubeturbo will not generate actions for workloads + that match the supplied patterns + properties: + namespacePatterns: + default: + - kube-.* + - openshift-.* + - cattle.* + description: A list of regular expressions that match the namespace + names for system workloads + items: + type: string + type: array + type: object + targetConfig: + description: Optional target configuration + properties: + targetName: + type: string + type: object + wiremock: + default: + enabled: false + url: wiremock:8080 + description: WireMock mode configuration + properties: + enabled: + default: false + description: Enable WireMock mode + type: boolean + url: + default: wiremock:8080 + description: WireMock service URL + type: string + type: object + type: object + status: + description: KubeturboStatus defines the observed state of Kubeturbo + properties: + configHash: + description: Hash of the constructed turbo.config file + type: string + lastUpdatedTimestamp: + description: Timestamp of the last sync up + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Kubeturbo is the Schema for the kubeturbos API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KubeturboSpec defines the desired state of Kubeturbo + type: object + x-kubernetes-preserve-unknown-fields: true + status: + description: KubeturboStatus defines the observed state of Kubeturbo + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} diff --git a/deploy/kubeturbo_operator_yamls/kubeturbo_operator_full.yaml b/deploy/kubeturbo_operator_yamls/kubeturbo_operator_full.yaml new file mode 100644 index 0000000..4787bb6 --- /dev/null +++ b/deploy/kubeturbo_operator_yamls/kubeturbo_operator_full.yaml @@ -0,0 +1,330 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: system + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: namespace + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + name: turbo +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator-sa + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + namespace: turbo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kubeturbo-operator +rules: +- apiGroups: + - "" + - apps + - extensions + resources: + - nodes + - pods + - configmaps + - endpoints + - events + - deployments + - persistentvolumeclaims + - replicasets + - replicationcontrollers + - services + - secrets + - serviceaccounts + verbs: + - '*' +- apiGroups: + - "" + - apps + - extensions + - policy + resources: + - daemonsets + - endpoints 
+ - limitranges + - namespaces + - persistentvolumes + - persistentvolumeclaims + - poddisruptionbudgets + - resourcequotas + - services + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes/spec + - nodes/stats + verbs: + - get +- apiGroups: + - charts.helm.k8s.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - update +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - watch + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubeturbo-operator +subjects: +- kind: ServiceAccount + name: kubeturbo-operator + namespace: turbo +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + name: kubeturbo-operator + namespace: turbo +spec: + replicas: 1 + selector: + matchLabels: + name: kubeturbo-operator + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: kubeturbo-operator + labels: + name: kubeturbo-operator + spec: + containers: + - args: + - --leader-elect + command: + - /manager + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: 
+ fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: icr.io/cpopen/kubeturbo-operator:8.15.1-SNAPSHOT + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: kubeturbo-operator + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + serviceAccountName: kubeturbo-operator + terminationGracePeriodSeconds: 10 +--- +apiVersion: v1 +kind: Secret +metadata: + name: turbonomic-credentials + namespace: turbo +type: Opaque +data: + # username: + # password: + clientid: + clientsecret: +--- +apiVersion: charts.helm.k8s.io/v1 +kind: Kubeturbo +metadata: + labels: + app.kubernetes.io/name: kubeturbo + app.kubernetes.io/instance: kubeturbo-release + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: kubeturbo-deploy + name: kubeturbo-release + namespace: turbo +spec: + serverMeta: + turboServer: "https://" + + restAPIConfig: + turbonomicCredentialsSecretName: turbonomic-credentials + + # Supply a targetName for user friendly identification of the k8s cluster + targetConfig: + targetName: + + # Specify custom turbo-cluster-reader or turbo-cluster-admin role instead of the default cluster-admin role + roleName: cluster-admin + + image: + repository: icr.io/cpopen/turbonomic/kubeturbo + tag: "" + # imagePullSecret: "" + # Uncomment to use an image from RHCC for cpu-frequency getter job - predefined in OCP Operator Hub version + # busyboxRepository: registry.access.redhat.com/ubi9/ubi-minimal + + # Assigning Kubeturbo to node, see + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + # + # kubeturboPodScheduling: + # 
nodeSelector: + # kubernetes.io/hostname: worker0 + # + # Or, use affinity: + # + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/hostname + # operator: In + # values: + # - worker1 + # + # Or, use taints and tolerations + # + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "mytaint" + # effect: "NoSchedule" + + # Configurations to register probe with Turbo Server + # sdkProtocolConfig: + # registrationTimeoutSec: 300 + # restartOnRegistrationTimeout: true + + # Uncomment out lines to configure HA Node to ESX policies by node role. Default is master + # Add more roles using format "\"foo\"\,\"bar\"" + # HANodeConfig: + # nodeRoles: "\"master\"" + + # Uncomment next lines to use dynamic logging level + # Changing this value does not require restart of Kubeturbo but takes about 1 minute to take effect + # logging: + # level: 2 + # nodePoolSize: + # min: 1 + # max: 1000 + + # Uncomment out to allow execution in OCP environments + #args: + # sccsupport: "*" + + # Uncomment out to specify kubeturbo container specifications when needed (quotas set on ns) + #resources: + # limits: + # memory: 4Gi + # cpu: "2" + # requests: + # memory: 512Mi + # cpu: "1" + + # Cluster Role rules for ORM owners. + # It's required when using ORM with ClusterRole 'turbo-cluster-admin'. + # It's recommended to use ORM with ClusterRole 'cluster-admin'. + ormOwners: + apiGroup: + # - redis.redis.opstreelabs.in + # - charts.helm.k8s.io + resources: + # - redis + # - xls + # Flag system workloads such as those defined in kube-system, openshift-system, etc. + # Kubeturbo will not generate actions for workloads that match the supplied patterns. + systemWorkloadDetectors: + # A list of regular expressions that match the namespace names for system workloads. 
+ namespacePatterns: + - kube-.* + - openshift-.* + - cattle.* + # List operator-controlled workloads by name or namespace (using regular expressions) + # that should be excluded from the operator-controlled WorkloadController resize policy. + # By default, matching workloads will generate actions that are not in Recommend mode. + # exclusionDetectors: + # A list of regular expressions representing operator-controlled Workload Controllers. + # operatorControlledNamespacePatterns: + # - example-.* + # - .*-example + # A list of regular expressions representing namespaces containing operator-controlled + # Workload Controllers. + # operatorControlledWorkloadsPatterns: + # - .*-example.* diff --git a/deploy/kubeturbo_operator_yamls/kubeturbo_operator_least_admin_full.yaml b/deploy/kubeturbo_operator_yamls/kubeturbo_operator_least_admin_full.yaml new file mode 100644 index 0000000..44f284f --- /dev/null +++ b/deploy/kubeturbo_operator_yamls/kubeturbo_operator_least_admin_full.yaml @@ -0,0 +1,330 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: system + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: namespace + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + name: turbo +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator-sa + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + namespace: turbo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kubeturbo-operator +rules: +- apiGroups: + - "" + - apps + - extensions + resources: + - nodes + - pods + - configmaps + - endpoints + - events + - deployments + - 
persistentvolumeclaims + - replicasets + - replicationcontrollers + - services + - secrets + - serviceaccounts + verbs: + - '*' +- apiGroups: + - "" + - apps + - extensions + - policy + resources: + - daemonsets + - endpoints + - limitranges + - namespaces + - persistentvolumes + - persistentvolumeclaims + - poddisruptionbudgets + - resourcequotas + - services + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes/spec + - nodes/stats + verbs: + - get +- apiGroups: + - charts.helm.k8s.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - update +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - watch + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubeturbo-operator +subjects: +- kind: ServiceAccount + name: kubeturbo-operator + namespace: turbo +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + name: kubeturbo-operator + namespace: turbo +spec: + replicas: 1 + selector: + matchLabels: + name: kubeturbo-operator + template: + metadata: + annotations: + 
kubectl.kubernetes.io/default-container: kubeturbo-operator + labels: + name: kubeturbo-operator + spec: + containers: + - args: + - --leader-elect + command: + - /manager + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: icr.io/cpopen/kubeturbo-operator:8.15.1-SNAPSHOT + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: kubeturbo-operator + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + serviceAccountName: kubeturbo-operator + terminationGracePeriodSeconds: 10 +--- +apiVersion: v1 +kind: Secret +metadata: + name: turbonomic-credentials + namespace: turbo +type: Opaque +data: + # username: + # password: + clientid: + clientsecret: +--- +apiVersion: charts.helm.k8s.io/v1 +kind: Kubeturbo +metadata: + labels: + app.kubernetes.io/name: kubeturbo + app.kubernetes.io/instance: kubeturbo-release + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: kubeturbo-deploy + name: kubeturbo-release + namespace: turbo +spec: + serverMeta: + turboServer: "https://" + + restAPIConfig: + turbonomicCredentialsSecretName: turbonomic-credentials + + # Supply a targetName for user friendly identification of the k8s cluster + targetConfig: + targetName: + + # Specify custom turbo-cluster-reader or turbo-cluster-admin role instead of the default cluster-admin role + roleName: turbo-cluster-admin + + image: + repository: icr.io/cpopen/turbonomic/kubeturbo + tag: "" + # imagePullSecret: "" + # Uncomment to use an image from RHCC for cpu-frequency getter job - predefined in OCP 
Operator Hub version + # busyboxRepository: registry.access.redhat.com/ubi9/ubi-minimal + + # Assigning Kubeturbo to node, see + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + # + # kubeturboPodScheduling: + # nodeSelector: + # kubernetes.io/hostname: worker0 + # + # Or, use affinity: + # + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/hostname + # operator: In + # values: + # - worker1 + # + # Or, use taints and tolerations + # + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "mytaint" + # effect: "NoSchedule" + + # Configurations to register probe with Turbo Server + # sdkProtocolConfig: + # registrationTimeoutSec: 300 + # restartOnRegistrationTimeout: true + + # Uncomment out lines to configure HA Node to ESX policies by node role. Default is master + # Add more roles using format "\"foo\"\,\"bar\"" + # HANodeConfig: + # nodeRoles: "\"master\"" + + # Uncomment next lines to use dynamic logging level + # Changing this value does not require restart of Kubeturbo but takes about 1 minute to take effect + # logging: + # level: 2 + # nodePoolSize: + # min: 1 + # max: 1000 + + # Uncomment out to allow execution in OCP environments + #args: + # sccsupport: "*" + + # Uncomment out to specify kubeturbo container specifications when needed (quotas set on ns) + #resources: + # limits: + # memory: 4Gi + # cpu: "2" + # requests: + # memory: 512Mi + # cpu: "1" + + # Cluster Role rules for ORM owners. + # It's required when using ORM with ClusterRole 'turbo-cluster-admin'. + # It's recommended to use ORM with ClusterRole 'cluster-admin'. + ormOwners: + apiGroup: + # - redis.redis.opstreelabs.in + # - charts.helm.k8s.io + resources: + # - redis + # - xls + # Flag system workloads such as those defined in kube-system, openshift-system, etc. 
+ # Kubeturbo will not generate actions for workloads that match the supplied patterns. + systemWorkloadDetectors: + # A list of regular expressions that match the namespace names for system workloads. + namespacePatterns: + - kube-.* + - openshift-.* + - cattle.* + # List operator-controlled workloads by name or namespace (using regular expressions) + # that should be excluded from the operator-controlled WorkloadController resize policy. + # By default, matching workloads will generate actions that are not in Recommend mode. + # exclusionDetectors: + # A list of regular expressions representing operator-controlled Workload Controllers. + # operatorControlledNamespacePatterns: + # - example-.* + # - .*-example + # A list of regular expressions representing namespaces containing operator-controlled + # Workload Controllers. + # operatorControlledWorkloadsPatterns: + # - .*-example.* diff --git a/deploy/kubeturbo_operator_yamls/kubeturbo_operator_reader_full.yaml b/deploy/kubeturbo_operator_yamls/kubeturbo_operator_reader_full.yaml new file mode 100644 index 0000000..49830b8 --- /dev/null +++ b/deploy/kubeturbo_operator_yamls/kubeturbo_operator_reader_full.yaml @@ -0,0 +1,330 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: system + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: namespace + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + name: turbo +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator-sa + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + namespace: turbo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + 
name: kubeturbo-operator +rules: +- apiGroups: + - "" + - apps + - extensions + resources: + - nodes + - pods + - configmaps + - endpoints + - events + - deployments + - persistentvolumeclaims + - replicasets + - replicationcontrollers + - services + - secrets + - serviceaccounts + verbs: + - '*' +- apiGroups: + - "" + - apps + - extensions + - policy + resources: + - daemonsets + - endpoints + - limitranges + - namespaces + - persistentvolumes + - persistentvolumeclaims + - poddisruptionbudgets + - resourcequotas + - services + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes/spec + - nodes/stats + verbs: + - get +- apiGroups: + - charts.helm.k8s.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - update +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - watch + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubeturbo-operator +subjects: +- kind: ServiceAccount + name: kubeturbo-operator + namespace: turbo +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: kubeturbo-deploy + name: 
kubeturbo-operator + name: kubeturbo-operator + namespace: turbo +spec: + replicas: 1 + selector: + matchLabels: + name: kubeturbo-operator + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: kubeturbo-operator + labels: + name: kubeturbo-operator + spec: + containers: + - args: + - --leader-elect + command: + - /manager + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: icr.io/cpopen/kubeturbo-operator:8.15.1-SNAPSHOT + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: kubeturbo-operator + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + serviceAccountName: kubeturbo-operator + terminationGracePeriodSeconds: 10 +--- +apiVersion: v1 +kind: Secret +metadata: + name: turbonomic-credentials + namespace: turbo +type: Opaque +data: + # username: + # password: + clientid: + clientsecret: +--- +apiVersion: charts.helm.k8s.io/v1 +kind: Kubeturbo +metadata: + labels: + app.kubernetes.io/name: kubeturbo + app.kubernetes.io/instance: kubeturbo-release + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: kubeturbo-deploy + name: kubeturbo-release + namespace: turbo +spec: + serverMeta: + turboServer: "https://" + + restAPIConfig: + turbonomicCredentialsSecretName: turbonomic-credentials + + # Supply a targetName for user friendly identification of the k8s cluster + targetConfig: + targetName: + + # Specify custom turbo-cluster-reader or turbo-cluster-admin role instead of the default cluster-admin role + roleName: turbo-cluster-reader + + 
image: + repository: icr.io/cpopen/turbonomic/kubeturbo + tag: "" + # imagePullSecret: "" + # Uncomment to use an image from RHCC for cpu-frequency getter job - predefined in OCP Operator Hub version + # busyboxRepository: registry.access.redhat.com/ubi9/ubi-minimal + + # Assigning Kubeturbo to node, see + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + # + # kubeturboPodScheduling: + # nodeSelector: + # kubernetes.io/hostname: worker0 + # + # Or, use affinity: + # + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/hostname + # operator: In + # values: + # - worker1 + # + # Or, use taints and tolerations + # + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "mytaint" + # effect: "NoSchedule" + + # Configurations to register probe with Turbo Server + # sdkProtocolConfig: + # registrationTimeoutSec: 300 + # restartOnRegistrationTimeout: true + + # Uncomment out lines to configure HA Node to ESX policies by node role. Default is master + # Add more roles using format "\"foo\"\,\"bar\"" + # HANodeConfig: + # nodeRoles: "\"master\"" + + # Uncomment next lines to use dynamic logging level + # Changing this value does not require restart of Kubeturbo but takes about 1 minute to take effect + # logging: + # level: 2 + # nodePoolSize: + # min: 1 + # max: 1000 + + # Uncomment out to allow execution in OCP environments + #args: + # sccsupport: "*" + + # Uncomment out to specify kubeturbo container specifications when needed (quotas set on ns) + #resources: + # limits: + # memory: 4Gi + # cpu: "2" + # requests: + # memory: 512Mi + # cpu: "1" + + # Cluster Role rules for ORM owners. + # It's required when using ORM with ClusterRole 'turbo-cluster-admin'. + # It's recommended to use ORM with ClusterRole 'cluster-admin'. 
+ ormOwners:
+ apiGroup:
+ # - redis.redis.opstreelabs.in
+ # - charts.helm.k8s.io
+ resources:
+ # - redis
+ # - xls
+ # Flag system workloads such as those defined in kube-system, openshift-system, etc.
+ # Kubeturbo will not generate actions for workloads that match the supplied patterns.
+ systemWorkloadDetectors:
+ # A list of regular expressions that match the namespace names for system workloads.
+ namespacePatterns:
+ - kube-.*
+ - openshift-.*
+ - cattle.*
+ # List operator-controlled workloads by name or namespace (using regular expressions)
+ # that should be excluded from the operator-controlled WorkloadController resize policy.
+ # By default, matching workloads will generate actions that are not in Recommend mode.
+ # exclusionDetectors:
+ # A list of regular expressions representing namespaces containing operator-controlled
+ # Workload Controllers.
+ # operatorControlledNamespacePatterns:
+ # - example-.*
+ # - .*-example
+ # A list of regular expressions representing operator-controlled Workload Controllers.
+ # operatorControlledWorkloadsPatterns: + # - .*-example.* diff --git a/deploy/kubeturbo_operator_yamls/kubeturbo_sample_cr.yaml b/deploy/kubeturbo_operator_yamls/kubeturbo_sample_cr.yaml new file mode 100644 index 0000000..34bb122 --- /dev/null +++ b/deploy/kubeturbo_operator_yamls/kubeturbo_sample_cr.yaml @@ -0,0 +1,120 @@ +apiVersion: charts.helm.k8s.io/v1 +kind: Kubeturbo +metadata: + labels: + app.kubernetes.io/name: kubeturbo + app.kubernetes.io/instance: kubeturbo-release + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: kubeturbo-deploy + name: kubeturbo-release + namespace: turbo +spec: + serverMeta: + turboServer: "https://" + + restAPIConfig: + turbonomicCredentialsSecretName: turbonomic-credentials + + # Supply a targetName for user friendly identification of the k8s cluster + targetConfig: + targetName: + + # Specify custom turbo-cluster-reader or turbo-cluster-admin role instead of the default cluster-admin role + roleName: cluster-admin + + image: + repository: icr.io/cpopen/turbonomic/kubeturbo + tag: "" + # imagePullSecret: "" + # Uncomment to use an image from RHCC for cpu-frequency getter job - predefined in OCP Operator Hub version + # busyboxRepository: registry.access.redhat.com/ubi9/ubi-minimal + + # Assigning Kubeturbo to node, see + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + # + # kubeturboPodScheduling: + # nodeSelector: + # kubernetes.io/hostname: worker0 + # + # Or, use affinity: + # + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/hostname + # operator: In + # values: + # - worker1 + # + # Or, use taints and tolerations + # + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "mytaint" + # effect: "NoSchedule" + + # Configurations to register probe with Turbo Server + # sdkProtocolConfig: + # 
registrationTimeoutSec: 300
+ # restartOnRegistrationTimeout: true
+
+ # Uncomment out lines to configure HA Node to ESX policies by node role. Default is master
+ # Add more roles using format "\"foo\"\,\"bar\""
+ # HANodeConfig:
+ # nodeRoles: "\"master\""
+
+ # Uncomment next lines to use dynamic logging level
+ # Changing this value does not require restart of Kubeturbo but takes about 1 minute to take effect
+ # logging:
+ # level: 2
+ # nodePoolSize:
+ # min: 1
+ # max: 1000
+
+ # Uncomment out to allow execution in OCP environments
+ #args:
+ # sccsupport: "*"
+
+ # Uncomment out to specify kubeturbo container specifications when needed (quotas set on ns)
+ #resources:
+ # limits:
+ # memory: 4Gi
+ # cpu: "2"
+ # requests:
+ # memory: 512Mi
+ # cpu: "1"
+
+ # Cluster Role rules for ORM owners.
+ # It's required when using ORM with ClusterRole 'turbo-cluster-admin'.
+ # It's recommended to use ORM with ClusterRole 'cluster-admin'.
+ ormOwners:
+ apiGroup:
+ # - redis.redis.opstreelabs.in
+ # - charts.helm.k8s.io
+ resources:
+ # - redis
+ # - xls
+ # Flag system workloads such as those defined in kube-system, openshift-system, etc.
+ # Kubeturbo will not generate actions for workloads that match the supplied patterns.
+ systemWorkloadDetectors:
+ # A list of regular expressions that match the namespace names for system workloads.
+ namespacePatterns:
+ - kube-.*
+ - openshift-.*
+ - cattle.*
+ # List operator-controlled workloads by name or namespace (using regular expressions)
+ # that should be excluded from the operator-controlled WorkloadController resize policy.
+ # By default, matching workloads will generate actions that are not in Recommend mode.
+ # exclusionDetectors:
+ # A list of regular expressions representing namespaces containing operator-controlled
+ # Workload Controllers.
+ # operatorControlledNamespacePatterns:
+ # - example-.*
+ # - .*-example
+ # A list of regular expressions representing operator-controlled Workload Controllers.
+ # operatorControlledWorkloadsPatterns: + # - .*-example.* diff --git a/deploy/kubeturbo_operator_yamls/operator-bundle.yaml b/deploy/kubeturbo_operator_yamls/operator-bundle.yaml new file mode 100644 index 0000000..baacb86 --- /dev/null +++ b/deploy/kubeturbo_operator_yamls/operator-bundle.yaml @@ -0,0 +1,1528 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: system + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: namespace + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + name: turbo +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/operator-framework/operator-sdk/pull/2703 + controller-gen.kubebuilder.io/version: v0.15.0 + name: kubeturbos.charts.helm.k8s.io +spec: + group: charts.helm.k8s.io + names: + kind: Kubeturbo + listKind: KubeturboList + plural: kubeturbos + shortNames: + - kt + singular: kubeturbo + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Kubeturbo is the Schema for the kubeturbos API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + default: {} + description: KubeturboSpec defines the desired state of Kubeturbo + properties: + HANodeConfig: + default: + nodeRoles: '"master"' + description: Create HA placement policy for Node to Hypervisor by + node role. Master is default + properties: + nodeRoles: + default: '"master"' + description: Node role names + type: string + type: object + annotationWhitelist: + description: |- + The annotationWhitelist allows users to define regular expressions to allow kubeturbo to collect + matching annotations for the specified entity type. By default, no annotations are collected. + These regular expressions accept the RE2 syntax (except for \C) as defined here: https://github.com/google/re2/wiki/Syntax + properties: + containerSpec: + type: string + namespace: + type: string + workloadController: + type: string + type: object + annotations: + additionalProperties: + type: string + default: + kubeturbo.io/controllable: "false" + type: object + args: + default: + logginglevel: 2 + description: Kubeturbo command line arguments + properties: + busyboxExcludeNodeLabels: + description: Do not run busybox on these nodes to discover the + cpu frequency with k8s 1.18 and later, default is either of + kubernetes.io/os=windows or beta.kubernetes.io/os=windows present + as node label + type: string + cleanupSccImpersonationResources: + default: true + description: Identify if cleanup the resources created for scc + impersonation, default is true + type: boolean + discoveryIntervalSec: + default: 600 + description: The discovery interval in seconds + type: integer + discoverySampleIntervalSec: + default: 60 + description: The discovery interval in seconds to collect additional + resource usage data samples from kubelet. This should be no + smaller than 10 seconds. 
+ type: integer + discoverySamples: + default: 10 + description: The number of resource usage data samples to be collected + from kubelet in each full discovery cycle. This should be no + larger than 60. + type: integer + discoveryTimeoutSec: + default: 180 + description: The discovery timeout in seconds for each discovery + worker. Default value is 180 seconds + type: integer + discoveryWorkers: + default: 10 + description: The number of discovery workers. Default is 10 + type: integer + failVolumePodMoves: + description: Allow kubeturbo to reschedule pods with volumes attached + type: boolean + garbageCollectionIntervalMin: + default: 10 + description: The garbage collection interval in minutes for potentially + leaked pods due to failed actions and kubeturbo restarts. Default + value is 10 minutes + type: integer + gitCommitMode: + description: The commit mode that should be used for git action + executions with ArgoCD Integration. One of request or direct. + Defaults to direct. + type: string + gitEmail: + description: The email to be used to push changes to git with + ArgoCD integration + type: string + gitSecretName: + description: The name of the secret which holds the git credentials + to be used with ArgoCD integration + type: string + gitSecretNamespace: + description: The namespace of the secret which holds the git credentials + to be used with ArgoCD integration + type: string + gitUsername: + description: The username to be used to push changes to git with + ArgoCD integration + type: string + kubelethttps: + default: true + description: Identify if kubelet requires https + type: boolean + kubeletport: + default: 10250 + description: Identify kubelet port + type: integer + logginglevel: + default: 2 + description: Define logging level, default is info = 2 + type: integer + pre16k8sVersion: + default: false + type: boolean + readinessRetryThreshold: + format: int32 + type: integer + satelliteLocationProvider: + description: The IBM cloud satellite 
location provider, it only
+ support azure as of today
+ type: string
+ sccsupport:
+ description: Allow kubeturbo to execute actions in OCP
+ type: string
+ skipCreatingSccImpersonationResources:
+ default: false
+ description: Skip creating the resources for scc impersonation
+ type: boolean
+ stitchuuid:
+ default: true
+ description: Identify if using uuid or ip for stitching
+ type: boolean
+ type: object
+ daemonPodDetectors:
+ default: {}
+ description: |-
+ You can use this configuration to define how daemon pods are identified.
+ Note if you do not enable daemonPodDetectors, the default is to identify all pods running as kind = daemonSet
+ Any entry for daemonPodDetectors would overwrite default. Recommend you do not use this parameter.
+ properties:
+ namespacePatterns:
+ items:
+ type: string
+ type: array
+ podNamePatterns:
+ items:
+ type: string
+ type: array
+ type: object
+ discovery:
+ default:
+ chunkSendDelayMillis: 0
+ numObjectsPerChunk: 5000
+ description: Discovery-related configurations
+ properties:
+ chunkSendDelayMillis:
+ default: 0
+ description: time delay (in milliseconds) between transmissions
+ of chunked discovery data
+ format: int32
+ type: integer
+ numObjectsPerChunk:
+ default: 5000
+ description: Desired size (in number of DTOs) of discovery data
+ chunks (default = 5,000)
+ format: int32
+ type: integer
+ type: object
+ exclusionDetectors:
+ description: Identify operator-controlled workloads by name or namespace
+ using regular expressions
+ properties:
+ operatorControlledNamespacePatterns:
+ description: A list of regular expressions representing namespaces
+ containing operator-controlled Workload Controllers. Workload
+ Controllers deployed within the matching namespaces will not
+ have actions generated against them.
+ items:
+ type: string
+ type: array
+ operatorControlledWorkloadsPatterns:
+ description: A list of regular expressions representing operator-controlled 
Workload Controllers that match the supplied + expression will not have actions generated against them. + items: + type: string + type: array + type: object + featureGates: + additionalProperties: + type: boolean + description: Enable or disable features + type: object + image: + default: + pullPolicy: IfNotPresent + repository: icr.io/cpopen/turbonomic/kubeturbo + description: Kubeturbo image details for deployments outside of RH + Operator Hub + properties: + busyboxRepository: + description: Busybox repository. default is busybox. This is overridden + by cpufreqgetterRepository + type: string + cpufreqgetterRepository: + description: Repository used to get node cpufrequency. + type: string + imagePullSecret: + description: Define the secret used to authenticate to the container + image registry + type: string + pullPolicy: + default: IfNotPresent + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + repository: + default: icr.io/cpopen/turbonomic/kubeturbo + description: Container repository + type: string + tag: + description: Kubeturbo container image tag + type: string + type: object + kubeturboPodScheduling: + description: |- + Specify one or more kubeturbo pod scheduling constraints in the cluster. + See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ for examples on nodeSelector, affinity, tolerations + properties: + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. 
+ type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. 
+ type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. 
+ The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the pod to fit on a node. + Selector which must match a node's labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + tolerations: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
+ type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + logging: + default: + level: 2 + description: Optional logging level configuration. Changing this value + does not require restart of Kubeturbo but takes about 1 minute to + take effect + properties: + level: + default: 2 + description: Define logging level + type: integer + type: object + nodePoolSize: + default: + max: 1000 + min: 1 + description: Optional node pool configuration. Changing this value + does not require restart of Kubeturbo but takes about 1 minute to + take effect + properties: + max: + default: 1000 + description: maximum number of nodes allowed in the node pool + type: integer + min: + default: 1 + description: minimum number of nodes allowed in the node pool + type: integer + type: object + ormOwners: + description: Cluster Role rules for ORM owners. 
It's required when + using ORM with ClusterRole 'turbo-cluster-admin'. It's recommended + to use ORM with ClusterRole 'cluster-admin' + properties: + apiGroup: + description: API group for ORM owners + items: + type: string + type: array + resources: + description: resources for ORM owners + items: + type: string + type: array + type: object + replicaCount: + description: Kubeturbo replicaCount + format: int32 + type: integer + resources: + description: Kubeturbo resource configuration + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restAPIConfig: + default: + turbonomicCredentialsSecretName: turbonomic-credentials + description: Credentials to register probe with Turbo Server + properties: + opsManagerPassword: + description: Turbo admin user password + type: string + opsManagerUserName: + description: Turbo admin user id + type: string + turbonomicCredentialsSecretName: + default: turbonomic-credentials + description: Name of k8s secret that contains the turbo credentials + type: string + type: object + roleBinding: + default: turbo-all-binding + description: |- + The name of cluster role binding. Default is turbo-all-binding. If role binding is updated from an existing kubeturbo instance, + the operator will not delete the existing role binding in the cluster. Therefore, the user may want to manually delete the old + clusterrolebinding from the cluster so that the service account is no longer tied to the previous role binding. 
+ type: string + roleName: + default: cluster-admin + description: |- + Specify 'turbo-cluster-reader' or 'turbo-cluster-admin' as role name instead of the default using + the 'cluster-admin' role. A cluster role with this name will be created during deployment + If using a role name other than the pre-defined role names, cluster role will not be created. This role should be + existing in the cluster and should have the necessary permissions required for kubeturbo to work accurately. + pattern: ^[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?::[a-z0-9](?:[-a-z0-9]*[a-z0-9])?)*$ + type: string + sdkProtocolConfig: + default: + registrationTimeoutSec: 300 + restartOnRegistrationTimeout: true + description: Configurations to register probe with Turbo Server + properties: + registrationTimeoutSec: + default: 300 + description: Time in seconds to wait for registration response + from the Turbo Server + type: integer + restartOnRegistrationTimeout: + default: true + description: Restart probe container on registration timeout + type: boolean + type: object + serverMeta: + default: + turboServer: https://Turbo_server_URL + description: Configuration for Turbo Server + properties: + proxy: + description: Proxy server address + type: string + turboServer: + default: https://Turbo_server_URL + description: URL for Turbo Server endpoint + type: string + version: + description: Turbo Server major version + type: string + type: object + serviceAccountName: + default: turbo-user + description: The name of the service account name. Default is turbo-user + type: string + systemWorkloadDetectors: + default: + namespacePatterns: + - kube-.* + - openshift-.* + - cattle.* + description: Flag system workloads such as those defined in kube-system, + openshift-system, etc. 
Kubeturbo will not generate actions for workloads + that match the supplied patterns + properties: + namespacePatterns: + default: + - kube-.* + - openshift-.* + - cattle.* + description: A list of regular expressions that match the namespace + names for system workloads + items: + type: string + type: array + type: object + targetConfig: + description: Optional target configuration + properties: + targetName: + type: string + type: object + wiremock: + default: + enabled: false + url: wiremock:8080 + description: WireMock mode configuration + properties: + enabled: + default: false + description: Enable WireMock mode + type: boolean + url: + default: wiremock:8080 + description: WireMock service URL + type: string + type: object + type: object + status: + description: KubeturboStatus defines the observed state of Kubeturbo + properties: + configHash: + description: Hash of the constructed turbo.config file + type: string + lastUpdatedTimestamp: + description: Timestamp of the last sync up + type: string + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Kubeturbo is the Schema for the kubeturbos API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KubeturboSpec defines the desired state of Kubeturbo + type: object + x-kubernetes-preserve-unknown-fields: true + status: + description: KubeturboStatus defines the observed state of Kubeturbo + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator-sa + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + namespace: turbo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kubeturbo-operator +rules: +- apiGroups: + - "" + - apps + - extensions + resources: + - nodes + - pods + - configmaps + - endpoints + - events + - deployments + - persistentvolumeclaims + - replicasets + - replicationcontrollers + - services + - secrets + - serviceaccounts + verbs: + - '*' +- apiGroups: + - "" + - apps + - extensions + - policy + resources: + - daemonsets + - endpoints + - limitranges + - namespaces + - persistentvolumes + - persistentvolumeclaims + - poddisruptionbudgets + - resourcequotas + - services + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes/spec + - nodes/stats + verbs: + - get +- apiGroups: + - charts.helm.k8s.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - update +- apiGroups: + - apiextensions.k8s.io + resources: 
+ - customresourcedefinitions + verbs: + - watch + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubeturbo-operator +subjects: +- kind: ServiceAccount + name: kubeturbo-operator + namespace: turbo +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + name: kubeturbo-operator + namespace: turbo +spec: + replicas: 1 + selector: + matchLabels: + name: kubeturbo-operator + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: kubeturbo-operator + labels: + name: kubeturbo-operator + spec: + containers: + - args: + - --leader-elect + command: + - /manager + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: icr.io/cpopen/kubeturbo-operator:8.15.1-SNAPSHOT + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: kubeturbo-operator + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + 
serviceAccountName: kubeturbo-operator + terminationGracePeriodSeconds: 10 diff --git a/deploy/kubeturbo_yamls/YAMLS_README.md b/deploy/kubeturbo_yamls/YAMLS_README.md new file mode 100644 index 0000000..aff6692 --- /dev/null +++ b/deploy/kubeturbo_yamls/YAMLS_README.md @@ -0,0 +1,26 @@ +**Documentation** is being maintained on the Wiki for this project. Visit the [Deploy Resources via yaml](https://github.com/turbonomic/kubeturbo/wiki/Yaml-Deployment-Details). Visit [Kubeturbo Wiki](https://github.com/turbonomic/kubeturbo/wiki) for the full documentation, examples and guides. + +## Kubeturbo Deploy via YAMLs + + +This document describes the resources you will create to deploy kubeturbo, and values you would want to change for your deployment. It is **strongly advised** you start with the sample yamls provided [here](https://github.com/turbonomic/kubeturbo/tree/master/deploy/kubeturbo_yamls). + +#### Resources Overview +**1.** Create a namespace + +Use an existing namespace, or create one where to deploy kubeturbo. The yaml examples all will use `turbo`. + +**2.** Create a service account, and add the role of cluster-admin + +Note: This cluster-admin role can be view only, which will allow for metrics but will not allow for actions to execute. For examples of a customized admin role narrowed to minimum resources and verbs, see the sample [turbo-admin.yaml](https://github.com/turbonomic/kubeturbo/blob/master/deploy/kubeturbo_yamls/turbo-admin.yaml). For a minimal admin with read only see [turbo-reader.yaml](https://github.com/turbonomic/kubeturbo/blob/master/deploy/kubeturbo_yamls/turbo-reader.yaml) + +**3.** Create a configMap for kubeturbo. Running CWOM? Refer to [Server Versions and Kubeturbo Tag Mappings](https://github.com/turbonomic/kubeturbo/wiki/Server-Versions-and-Kubeturbo-Tag-Mappings) for the mapping of CWOM to Turbo versions. +The ConfigMap serves two functions, depending on the kubeturbo image being used. +1. 
Defines how to connect to the Turbonomic Server. The Turbonomic Server instance details are defined under “communicationConfig”, and optionally what the cluster is identified as in the Turbo UI under “targetConfig”. +2. How to identify nodes by role and create HA policies. + +**4.** Create a deployment for kubeturbo. + + + +There's no place like home... go back to the [Turbonomic Overview](https://github.com/turbonomic/kubeturbo/wiki/Overview). diff --git a/deploy/kubeturbo_yamls/kubeturbo_full.yaml b/deploy/kubeturbo_yamls/kubeturbo_full.yaml new file mode 100644 index 0000000..c0d1ab2 --- /dev/null +++ b/deploy/kubeturbo_yamls/kubeturbo_full.yaml @@ -0,0 +1,237 @@ +apiVersion: v1 +kind: Namespace +metadata: + # turbo is default value used in the samples provided + name: turbo +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + # Update the namespace value if required + name: turbo-user + namespace: turbo +--- +#option to use secret for Turbo credentials +apiVersion: v1 +kind: Secret +metadata: + name: turbonomic-credentials + namespace: turbo +type: Opaque +data: + # username: + # password: + clientid: + clientsecret: +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # use this yaml to create a binding that will assign cluster-admin to your turbo ServiceAccount + # Provide a value for the binding name: and update namespace if needed + # The name should be unique for Kubeturbo instance + name: turbo-all-binding-kubeturbo-turbo + namespace: turbo +subjects: +- kind: ServiceAccount + # Provide the correct value for service account name: and namespace if needed + name: turbo-user + namespace: turbo +roleRef: + # User creating this resource must have permissions to add this policy to the SA + kind: ClusterRole +# for other limited cluster admin roles, see samples provided + name: cluster-admin + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: v1 +kind: ConfigMap +metadata: + # use this yaml to provide details kubeturbo 
will use to connect to the Turbo Server + # requires Turbo Server and kubeturbo pod 6.4.3 and higher + # Provide a value for the config name: and update namespace if needed + name: turbo-config + namespace: turbo +data: + # Update the values for version, turboServer, opsManagerUserName, opsManagerPassword + # For version, use Turbo Server Version, even when running CWOM + # The opsManagerUserName requires Turbo administrator role + # + # For targetConfig, targetName provides better group naming to identify k8s clusters in UI + # - If no targetConfig is specified, a default targetName will be created from the apiserver URL in + # the kubeconfig. + # - Specify a targetName only will register a probe with type Kubernetes-, as well as + # adding your cluster as a target with the name Kubernetes-. + # - Specify a targetType only will register a probe without adding your cluster as a target. + # The probe will appear as a Cloud Native probe in the UI with a type Kubernetes-. + # + # Define node groups by node role, and automatically enable placement policies to limit to 1 per host + # DaemonSets are identified by default. Use daemonPodDetectors to identify by name patterns using regex or by namespace. 
+ # + # serverMeta.proxy format for authenticated and non-authenticated "http://username:password@proxyserver:proxyport or http://proxyserver:proxyport" + turbo-autoreload.config: |- + { + "logging": { + "level": 2 + }, + "nodePoolSize": { + "min": 1, + "max": 1000 + }, + "systemWorkloadDetectors": { + "namespacePatterns": ["kube-.*","openshift-.*","cattle.*"] + }, + "exclusionDetectors": { + "operatorControlledWorkloadsPatterns": [], + "operatorControlledNamespacePatterns": [] + }, + "daemonPodDetectors": { + "namespaces": [], + "podNamePatterns": [] + } + } + turbo.config: |- + { + "communicationConfig": { + "serverMeta": { + "version": "", + "turboServer": "" + }, + "restAPIConfig": { + "turbonomicCredentialsSecretName": "turbonomic-credentials" + } + }, + "targetConfig": { + "targetName":"" + }, + "HANodeConfig": { + "nodeRoles": [ "master"] + } + } +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + # use this yaml to deploy the kubeturbo pod + # Provide a value for the deploy/pod name: and update namespace if needed + name: kubeturbo + namespace: turbo +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: kubeturbo + strategy: + type: Recreate + template: + metadata: + annotations: + kubeturbo.io/monitored: "false" + labels: + app.kubernetes.io/name: kubeturbo + spec: + # Update serviceAccount if needed + serviceAccount: turbo-user + # Assigning Kubeturbo to node, see + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + # + # nodeSelector: + # kubernetes.io/hostname: worker0 + # + # Or, use affinity: + # + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/hostname + # operator: In + # values: + # - worker1 + # + # Or, use taints and tolerations + # + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "mytaint" + # effect: "NoSchedule" + containers: + - name: kubeturbo + # Replace the image 
version with matching Turbo Server version such as 8.13.0 + image: icr.io/cpopen/turbonomic/kubeturbo: + env: + - name: KUBETURBO_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - --turboconfig=/etc/kubeturbo/turbo.config + - --v=2 + # Comment out the following two args if running in k8s 1.10 or older, or + # change to https=false and port=10255 if unsecure kubelet read only is configured + - --kubelet-https=true + - --kubelet-port=10250 + # Uncomment for pod moves in OpenShift + #- --scc-support=* + # Uncomment for pod moves with pvs + #- --fail-volume-pod-moves=false + # Uncomment to override default, and specify your own location + #- --busybox-image=docker.io/busybox + # or uncomment below to pull from RHCC + #- --busybox-image=registry.access.redhat.com/ubi9/ubi-minimal + # Uncomment to specify the secret name which holds the credentials to busybox image + #- --busybox-image-pull-secret= + # Specify nodes to exclude from cpu frequency getter job. + # Note kubernetes.io/os=windows and/or beta.kubernetes.io/os=windows labels will be automatically excluded by default. + # If specified all the labels will be used to select the node ignoring the default. + #- --cpufreq-job-exclude-node-labels=kubernetes.io/key=value + # The complete cpufreqgetter image uri used for fallback node cpu frequency getter job. + #- --cpufreqgetter-image=icr.io/cpopen/turbonomic/cpufreqgetter + # The name of the secret that stores the image pull credentials for cpufreqgetter image. + #- --cpufreqgetter-image-pull-secret= + # Uncomment to stitch using IP, or if using Openstack, Hyper-V/VMM + #- --stitch-uuid=false + # Uncomment to customize readiness retry threshold. Kubeturbo will try readiness-retry-threshold times before giving up. Default is 60. The retry interval is 10s. + #- --readiness-retry-threshold=60 + # Uncomment to disable the cleanup of the resources which are created by kubeturbo for the scc impersonation. 
+ #- --cleanup-scc-impersonation-resources=false + # Uncomment to skip creating the resources for the scc impersonation + #- --skip-creating-scc-impersonation-resources=true + # [ArgoCD integration] The email to be used to push changes to git + #- --git-email="" + # [ArgoCD integration] The username to be used to push changes to git + #- --git-username="" + # [ArgoCD integration] The name of the secret which holds the git credentials + #- --git-secret-name="" + # [ArgoCD integration] The namespace of the secret which holds the git credentials + #- --git-secret-namespace="" + # [ArgoCD integration] The commit mode that should be used for git action executions. One of {request|direct}. Defaults to direct + #- --git-commit-mode="" + volumeMounts: + # volume will be created, any name will work and must match below + - name: turbo-volume + mountPath: /etc/kubeturbo + readOnly: true + - name: turbonomic-credentials-volume + # This mount path cannot be changed + mountPath: /etc/turbonomic-credentials + readOnly: true + - name: varlog + mountPath: /var/log + volumes: + - name: turbo-volume + configMap: + # Update configMap name if needed + name: turbo-config + - name: turbonomic-credentials-volume + secret: + defaultMode: 420 + optional: true + # Update secret name if needed + secretName: turbonomic-credentials + - name: varlog + emptyDir: {} + restartPolicy: Always +--- diff --git a/deploy/kubeturbo_yamls/kubeturbo_least_admin_full.yaml b/deploy/kubeturbo_yamls/kubeturbo_least_admin_full.yaml new file mode 100644 index 0000000..60bf7a4 --- /dev/null +++ b/deploy/kubeturbo_yamls/kubeturbo_least_admin_full.yaml @@ -0,0 +1,373 @@ +apiVersion: v1 +kind: Namespace +metadata: + # turbo is default value used in the samples provided + name: turbo +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + # Update the namespace value if required + name: turbo-user + namespace: turbo +--- +#option to use secret for Turbo credentials +apiVersion: v1 +kind: Secret +metadata: + name: 
turbonomic-credentials + namespace: turbo +type: Opaque +data: + # username: + # password: + clientid: + clientsecret: +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: turbo-cluster-admin +rules: + - apiGroups: + - "" + - batch + resources: + - pods + - jobs + verbs: + - '*' + - apiGroups: + - "" + - apps + - apps.openshift.io + - extensions + - turbonomic.com # Need it for backward compatibility with ORM v1 + - devops.turbonomic.io + # API groups for ORM owners + # It's required when using ORM with ClusterRole 'turbo-cluster-admin'. + # It's recommended to use ORM with ClusterRole 'cluster-admin'. + # - redis.redis.opstreelabs.in + # - charts.helm.k8s.io + resources: + - deployments + - replicasets + - replicationcontrollers + - statefulsets + - daemonsets + - deploymentconfigs + - resourcequotas + - operatorresourcemappings + - operatorresourcemappings/status + + # Resources for ORM owners + # It's required when using ORM with ClusterRole 'turbo-cluster-admin'. + # It's recommended to use ORM with ClusterRole 'cluster-admin'. 
+ # - redis + # - xls + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - "" + - apps + - batch + - extensions + - policy + - app.k8s.io + - argoproj.io + - apiextensions.k8s.io + - config.openshift.io + resources: + - nodes + - services + - endpoints + - namespaces + - limitranges + - persistentvolumes + - persistentvolumeclaims + - poddisruptionbudget + - cronjobs + - applications + - customresourcedefinitions + - clusterversions + verbs: + - get + - list + - watch + - apiGroups: + - machine.openshift.io + resources: + - machines + - machinesets + verbs: + - get + - list + - update + - apiGroups: + - "" + resources: + - nodes/spec + - nodes/stats + - nodes/metrics + - nodes/proxy + - pods/log + verbs: + - get + - apiGroups: + - policy.turbonomic.io + resources: + - slohorizontalscales + - containerverticalscales + - policybindings + verbs: + - get + - list + - watch + # Need it for SCC impersonation + - apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - list + - use + # Need it for SCC impersonation + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - create # It should be commented out if the SCC resources created externally. + - delete # It should be commented out if the SCC resources created externally. + - impersonate + # Need it for SCC impersonation + # It should be commented out if the SCC resources created externally. 
+ - apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + - clusterroles + - clusterrolebindings + verbs: + - get + - create + - delete + - update +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # use this yaml to create a binding that will assign cluster-admin to your turbo ServiceAccount + # Provide a value for the binding name: and update namespace if needed + # The name should be unique for Kubeturbo instance + name: turbo-all-binding-kubeturbo-turbo + namespace: turbo +subjects: +- kind: ServiceAccount + # Provide the correct value for service account name: and namespace if needed + name: turbo-user + namespace: turbo +roleRef: + # User creating this resource must have permissions to add this policy to the SA + kind: ClusterRole +# for other limited cluster admin roles, see samples provided + name: turbo-cluster-admin + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: v1 +kind: ConfigMap +metadata: + # use this yaml to provide details kubeturbo will use to connect to the Turbo Server + # requires Turbo Server and kubeturbo pod 6.4.3 and higher + # Provide a value for the config name: and update namespace if needed + name: turbo-config + namespace: turbo +data: + # Update the values for version, turboServer, opsManagerUserName, opsManagerPassword + # For version, use Turbo Server Version, even when running CWOM + # The opsManagerUserName requires Turbo administrator role + # + # For targetConfig, targetName provides better group naming to identify k8s clusters in UI + # - If no targetConfig is specified, a default targetName will be created from the apiserver URL in + # the kubeconfig. + # - Specify a targetName only will register a probe with type Kubernetes-, as well as + # adding your cluster as a target with the name Kubernetes-. + # - Specify a targetType only will register a probe without adding your cluster as a target. 
+ # The probe will appear as a Cloud Native probe in the UI with a type Kubernetes-. + # + # Define node groups by node role, and automatically enable placement policies to limit to 1 per host + # DaemonSets are identified by default. Use daemonPodDetectors to identify by name patterns using regex or by namespace. + # + # serverMeta.proxy format for authenticated and non-authenticated "http://username:password@proxyserver:proxyport or http://proxyserver:proxyport" + turbo-autoreload.config: |- + { + "logging": { + "level": 2 + }, + "nodePoolSize": { + "min": 1, + "max": 1000 + }, + "systemWorkloadDetectors": { + "namespacePatterns": ["kube-.*","openshift-.*","cattle.*"] + }, + "exclusionDetectors": { + "operatorControlledWorkloadsPatterns": [], + "operatorControlledNamespacePatterns": [] + }, + "daemonPodDetectors": { + "namespaces": [], + "podNamePatterns": [] + } + } + turbo.config: |- + { + "communicationConfig": { + "serverMeta": { + "version": "", + "turboServer": "" + }, + "restAPIConfig": { + "turbonomicCredentialsSecretName": "turbonomic-credentials" + } + }, + "targetConfig": { + "targetName":"" + }, + "HANodeConfig": { + "nodeRoles": [ "master"] + } + } +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + # use this yaml to deploy the kubeturbo pod + # Provide a value for the deploy/pod name: and update namespace if needed + name: kubeturbo + namespace: turbo +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: kubeturbo + strategy: + type: Recreate + template: + metadata: + annotations: + kubeturbo.io/monitored: "false" + labels: + app.kubernetes.io/name: kubeturbo + spec: + # Update serviceAccount if needed + serviceAccount: turbo-user + # Assigning Kubeturbo to node, see + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + # + # nodeSelector: + # kubernetes.io/hostname: worker0 + # + # Or, use affinity: + # + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # 
nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/hostname + # operator: In + # values: + # - worker1 + # + # Or, use taints and tolerations + # + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "mytaint" + # effect: "NoSchedule" + containers: + - name: kubeturbo + # Replace the image version with matching Turbo Server version such as 8.13.0 + image: icr.io/cpopen/turbonomic/kubeturbo: + env: + - name: KUBETURBO_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - --turboconfig=/etc/kubeturbo/turbo.config + - --v=2 + # Comment out the following two args if running in k8s 1.10 or older, or + # change to https=false and port=10255 if unsecure kubelet read only is configured + - --kubelet-https=true + - --kubelet-port=10250 + # Uncomment for pod moves in OpenShift + #- --scc-support=* + # Uncomment for pod moves with pvs + #- --fail-volume-pod-moves=false + # Uncomment to override default, and specify your own location + #- --busybox-image=docker.io/busybox + # or uncomment below to pull from RHCC + #- --busybox-image=registry.access.redhat.com/ubi9/ubi-minimal + # Uncomment to specify the secret name which holds the credentials to busybox image + #- --busybox-image-pull-secret= + # Specify nodes to exclude from cpu frequency getter job. + # Note kubernetes.io/os=windows and/or beta.kubernetes.io/os=windows labels will be automatically excluded by default. + # If specified all the labels will be used to select the node ignoring the default. + #- --cpufreq-job-exclude-node-labels=kubernetes.io/key=value + # The complete cpufreqgetter image uri used for fallback node cpu frequency getter job. + #- --cpufreqgetter-image=icr.io/cpopen/turbonomic/cpufreqgetter + # The name of the secret that stores the image pull credentials for cpufreqgetter image. 
+ #- --cpufreqgetter-image-pull-secret= + # Uncomment to stitch using IP, or if using Openstack, Hyper-V/VMM + #- --stitch-uuid=false + # Uncomment to customize readiness retry threshold. Kubeturbo will try readiness-retry-threshold times before giving up. Default is 60. The retry interval is 10s. + #- --readiness-retry-threshold=60 + # Uncomment to disable the cleanup of the resources which are created by kubeturbo for the scc impersonation. + #- --cleanup-scc-impersonation-resources=false + # Uncomment to skip creating the resources for the scc impersonation + #- --skip-creating-scc-impersonation-resources=true + # [ArgoCD integration] The email to be used to push changes to git + #- --git-email="" + # [ArgoCD integration] The username to be used to push changes to git + #- --git-username="" + # [ArgoCD integration] The name of the secret which holds the git credentials + #- --git-secret-name="" + # [ArgoCD integration] The namespace of the secret which holds the git credentials + #- --git-secret-namespace="" + # [ArgoCD integration] The commit mode that should be used for git action executions. One of {request|direct}. 
Defaults to direct + #- --git-commit-mode="" + volumeMounts: + # volume will be created, any name will work and must match below + - name: turbo-volume + mountPath: /etc/kubeturbo + readOnly: true + - name: turbonomic-credentials-volume + # This mount path cannot be changed + mountPath: /etc/turbonomic-credentials + readOnly: true + - name: varlog + mountPath: /var/log + volumes: + - name: turbo-volume + configMap: + # Update configMap name if needed + name: turbo-config + - name: turbonomic-credentials-volume + secret: + defaultMode: 420 + optional: true + # Update secret name if needed + secretName: turbonomic-credentials + - name: varlog + emptyDir: {} + restartPolicy: Always +--- diff --git a/deploy/kubeturbo_yamls/kubeturbo_namespace_turbo_credentials_secret.yaml b/deploy/kubeturbo_yamls/kubeturbo_namespace_turbo_credentials_secret.yaml new file mode 100644 index 0000000..12ea507 --- /dev/null +++ b/deploy/kubeturbo_yamls/kubeturbo_namespace_turbo_credentials_secret.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Namespace +metadata: + # turbo is default value used in the samples provided + name: turbo +--- +apiVersion: v1 +kind: Secret +metadata: + name: turbonomic-credentials + namespace: turbo +type: Opaque +data: + # username: + # password: + clientid: + clientsecret: +--- diff --git a/deploy/kubeturbo_yamls/kubeturbo_reader_full.yaml b/deploy/kubeturbo_yamls/kubeturbo_reader_full.yaml new file mode 100644 index 0000000..3d7a266 --- /dev/null +++ b/deploy/kubeturbo_yamls/kubeturbo_reader_full.yaml @@ -0,0 +1,305 @@ +apiVersion: v1 +kind: Namespace +metadata: + # turbo is default value used in the samples provided + name: turbo +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + # Update the namespace value if required + name: turbo-user + namespace: turbo +--- +#option to use secret for Turbo credentials +apiVersion: v1 +kind: Secret +metadata: + name: turbonomic-credentials + namespace: turbo +type: Opaque +data: + # username: + # password: + clientid: + 
clientsecret: +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: turbo-cluster-reader +rules: + - apiGroups: + - "" + - apps + - app.k8s.io + - apps.openshift.io + - batch + - extensions + - turbonomic.com # Need it for backward compatibility with ORM v1 + - devops.turbonomic.io + - config.openshift.io + resources: + - nodes + - pods + - deployments + - replicasets + - replicationcontrollers + - services + - endpoints + - namespaces + - limitranges + - resourcequotas + - persistentvolumes + - persistentvolumeclaims + - applications + - jobs + - cronjobs + - statefulsets + - daemonsets + - deploymentconfigs + - operatorresourcemappings + - clusterversions + verbs: + - get + - watch + - list + - apiGroups: + - machine.openshift.io + resources: + - machines + - machinesets + verbs: + - get + - list + - apiGroups: + - "" + resources: + - nodes/spec + - nodes/stats + - nodes/metrics + - nodes/proxy + verbs: + - get + - apiGroups: + - policy.turbonomic.io + resources: + - slohorizontalscales + - containerverticalscales + - policybindings + verbs: + - get + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # use this yaml to create a binding that will assign cluster-admin to your turbo ServiceAccount + # Provide a value for the binding name: and update namespace if needed + # The name should be unique for Kubeturbo instance + name: turbo-all-binding-kubeturbo-turbo + namespace: turbo +subjects: +- kind: ServiceAccount + # Provide the correct value for service account name: and namespace if needed + name: turbo-user + namespace: turbo +roleRef: + # User creating this resource must have permissions to add this policy to the SA + kind: ClusterRole +# for other limited cluster admin roles, see samples provided + name: turbo-cluster-reader + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: v1 +kind: ConfigMap +metadata: + # use this yaml to provide details kubeturbo will use to connect 
to the Turbo Server + # requires Turbo Server and kubeturbo pod 6.4.3 and higher + # Provide a value for the config name: and update namespace if needed + name: turbo-config + namespace: turbo +data: + # Update the values for version, turboServer, opsManagerUserName, opsManagerPassword + # For version, use Turbo Server Version, even when running CWOM + # The opsManagerUserName requires Turbo administrator role + # + # For targetConfig, targetName provides better group naming to identify k8s clusters in UI + # - If no targetConfig is specified, a default targetName will be created from the apiserver URL in + # the kubeconfig. + # - Specify a targetName only will register a probe with type Kubernetes-, as well as + # adding your cluster as a target with the name Kubernetes-. + # - Specify a targetType only will register a probe without adding your cluster as a target. + # The probe will appear as a Cloud Native probe in the UI with a type Kubernetes-. + # + # Define node groups by node role, and automatically enable placement policies to limit to 1 per host + # DaemonSets are identified by default. Use daemonPodDetectors to identify by name patterns using regex or by namespace. 
+ # + # serverMeta.proxy format for authenticated and non-authenticated "http://username:password@proxyserver:proxyport or http://proxyserver:proxyport" + turbo-autoreload.config: |- + { + "logging": { + "level": 2 + }, + "nodePoolSize": { + "min": 1, + "max": 1000 + }, + "systemWorkloadDetectors": { + "namespacePatterns": ["kube-.*","openshift-.*","cattle.*"] + }, + "exclusionDetectors": { + "operatorControlledWorkloadsPatterns": [], + "operatorControlledNamespacePatterns": [] + }, + "daemonPodDetectors": { + "namespaces": [], + "podNamePatterns": [] + } + } + turbo.config: |- + { + "communicationConfig": { + "serverMeta": { + "version": "", + "turboServer": "" + }, + "restAPIConfig": { + "turbonomicCredentialsSecretName": "turbonomic-credentials" + } + }, + "targetConfig": { + "targetName":"" + }, + "HANodeConfig": { + "nodeRoles": [ "master"] + } + } +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + # use this yaml to deploy the kubeturbo pod + # Provide a value for the deploy/pod name: and update namespace if needed + name: kubeturbo + namespace: turbo +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: kubeturbo + strategy: + type: Recreate + template: + metadata: + annotations: + kubeturbo.io/monitored: "false" + labels: + app.kubernetes.io/name: kubeturbo + spec: + # Update serviceAccount if needed + serviceAccount: turbo-user + # Assigning Kubeturbo to node, see + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + # + # nodeSelector: + # kubernetes.io/hostname: worker0 + # + # Or, use affinity: + # + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/hostname + # operator: In + # values: + # - worker1 + # + # Or, use taints and tolerations + # + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "mytaint" + # effect: "NoSchedule" + containers: + - name: kubeturbo + # Replace the image 
version with matching Turbo Server version such as 8.13.0 + image: icr.io/cpopen/turbonomic/kubeturbo: + env: + - name: KUBETURBO_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - --turboconfig=/etc/kubeturbo/turbo.config + - --v=2 + # Comment out the following two args if running in k8s 1.10 or older, or + # change to https=false and port=10255 if unsecure kubelet read only is configured + - --kubelet-https=true + - --kubelet-port=10250 + # Uncomment for pod moves in OpenShift + #- --scc-support=* + # Uncomment for pod moves with pvs + #- --fail-volume-pod-moves=false + # Uncomment to override default, and specify your own location + #- --busybox-image=docker.io/busybox + # or uncomment below to pull from RHCC + #- --busybox-image=registry.access.redhat.com/ubi9/ubi-minimal + # Uncomment to specify the secret name which holds the credentials to busybox image + #- --busybox-image-pull-secret= + # Specify nodes to exclude from cpu frequency getter job. + # Note kubernetes.io/os=windows and/or beta.kubernetes.io/os=windows labels will be automatically excluded by default. + # If specified all the labels will be used to select the node ignoring the default. + #- --cpufreq-job-exclude-node-labels=kubernetes.io/key=value + # The complete cpufreqgetter image uri used for fallback node cpu frequency getter job. + #- --cpufreqgetter-image=icr.io/cpopen/turbonomic/cpufreqgetter + # The name of the secret that stores the image pull credentials for cpufreqgetter image. + #- --cpufreqgetter-image-pull-secret= + # Uncomment to stitch using IP, or if using Openstack, Hyper-V/VMM + #- --stitch-uuid=false + # Uncomment to customize readiness retry threshold. Kubeturbo will try readiness-retry-threshold times before giving up. Default is 60. The retry interval is 10s. + #- --readiness-retry-threshold=60 + # Uncomment to disable the cleanup of the resources which are created by kubeturbo for the scc impersonation. 
+ #- --cleanup-scc-impersonation-resources=false + # Uncomment to skip creating the resources for the scc impersonation + #- --skip-creating-scc-impersonation-resources=true + # [ArgoCD integration] The email to be used to push changes to git + #- --git-email="" + # [ArgoCD integration] The username to be used to push changes to git + #- --git-username="" + # [ArgoCD integration] The name of the secret which holds the git credentials + #- --git-secret-name="" + # [ArgoCD integration] The namespace of the secret which holds the git credentials + #- --git-secret-namespace="" + # [ArgoCD integration] The commit mode that should be used for git action executions. One of {request|direct}. Defaults to direct + #- --git-commit-mode="" + volumeMounts: + # volume will be created, any name will work and must match below + - name: turbo-volume + mountPath: /etc/kubeturbo + readOnly: true + - name: turbonomic-credentials-volume + # This mount path cannot be changed + mountPath: /etc/turbonomic-credentials + readOnly: true + - name: varlog + mountPath: /var/log + volumes: + - name: turbo-volume + configMap: + # Update configMap name if needed + name: turbo-config + - name: turbonomic-credentials-volume + secret: + defaultMode: 420 + optional: true + # Update secret name if needed + secretName: turbonomic-credentials + - name: varlog + emptyDir: {} + restartPolicy: Always +--- diff --git a/deploy/kubeturbo_yamls/pasadena_kubeturbo.yaml b/deploy/kubeturbo_yamls/pasadena_kubeturbo.yaml new file mode 100644 index 0000000..c5d26da --- /dev/null +++ b/deploy/kubeturbo_yamls/pasadena_kubeturbo.yaml @@ -0,0 +1,126 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: iks +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: turbo-user + namespace: iks +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # The name should be unique for Kubeturbo instance + name: turbo-all-binding-kubeturbo-iks + namespace: iks +subjects: +- kind: ServiceAccount + 
name: turbo-user + namespace: iks +roleRef: + kind: ClusterRole +# for other limited cluster admin roles, see samples provided + name: cluster-admin + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: turbo-config + namespace: iks +data: + turbo.config: |- + { + "communicationConfig": { + "serverMeta": { + "version": "8.0", + "turboServer": "http://topology-processor:8080", + "proxy": "http://localhost:9004" + }, + "restAPIConfig": { + "turbonomicCredentialsSecretName": "turbonomic-credentials" + } + }, + "targetConfig": { + "targetName":"iks-cluster" + } + } +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kubeturbo + namespace: iks +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: kubeturbo + strategy: + type: Recreate + template: + metadata: + annotations: + kubeturbo.io/monitored: "false" + labels: + app.kubernetes.io/name: kubeturbo + spec: + serviceAccountName: turbo-user + # Assigning Kubeturbo to node, see + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + # + # nodeSelector: + # kubernetes.io/hostname: worker0 + # + # Or, use affinity: + # + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/hostname + # operator: In + # values: + # - worker1 + # + # Or, use taints and tolerations + # + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "mytaint" + # effect: "NoSchedule" + containers: + - name: kubeturbo + # Replace the image with desired version:8.7.5 or snapshot version:8.7.5-SNAPSHOT from icr.io + image: icr.io/cpopen/turbonomic/kubeturbo:8.7.5 + args: + - --turboconfig=/etc/kubeturbo/turbo.config + - --v=2 + # Comment out the following two args if running in k8s 1.10 or older, or + # change to https=false and port=10255 if unsecure kubelet read only is configured + - --kubelet-https=true + - --kubelet-port=10250 + # Uncomment to 
stitch using IP, or if using Openstack, Hyper-V/VMM + #- --stitch-uuid=false + volumeMounts: + - name: turbo-volume + mountPath: /etc/kubeturbo + readOnly: true + - name: varlog + mountPath: /var/log + - name: iks-device-connector + image: dockerhub.cisco.com/cspg-docker/andromeda/pasadena:latest + volumeMounts: + - name: varlog + mountPath: /cisco/pasadena/logs + volumes: + - name: turbo-volume + configMap: + name: turbo-config + - name: varlog + emptyDir: {} + restartPolicy: Always diff --git a/deploy/kubeturbo_yamls/step1_turbo_namespace.yaml b/deploy/kubeturbo_yamls/step1_turbo_namespace.yaml new file mode 100644 index 0000000..1acd7fe --- /dev/null +++ b/deploy/kubeturbo_yamls/step1_turbo_namespace.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + # use this yaml to create a namespace where you will deploy kubeturbo. + # Provide a value for name: + # turbo is default value used in the samples provided + name: turbo diff --git a/deploy/kubeturbo_yamls/step2_turbo_serviceAccount_sample.yaml b/deploy/kubeturbo_yamls/step2_turbo_serviceAccount_sample.yaml new file mode 100644 index 0000000..a38f297 --- /dev/null +++ b/deploy/kubeturbo_yamls/step2_turbo_serviceAccount_sample.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + # use this yaml to create a service account to run kubeturbo. 
+ # Provide a value for name: + # Update the namespace value if required + name: turbo-user + namespace: turbo diff --git a/deploy/kubeturbo_yamls/step3_turbo_serviceAccountRoleBinding_admin_sample.yaml b/deploy/kubeturbo_yamls/step3_turbo_serviceAccountRoleBinding_admin_sample.yaml new file mode 100644 index 0000000..8ee6594 --- /dev/null +++ b/deploy/kubeturbo_yamls/step3_turbo_serviceAccountRoleBinding_admin_sample.yaml @@ -0,0 +1,22 @@ +kind: ClusterRoleBinding +# For OpenShift 3.4-3.7 use apiVersion: v1 +# For kubernetes 1.9 use rbac.authorization.k8s.io/v1 +# For kubernetes 1.8 use rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # use this yaml to create a binding that will assign cluster-admin to your turbo ServiceAccount + # Provide a value for the binding name: and update namespace if needed + # The name should be unique for Kubeturbo instance + name: turbo-all-binding-kubeturbo-turbo +subjects: +- kind: ServiceAccount + # Provide the correct value for service account name: and namespace if needed + name: turbo-user + namespace: turbo +roleRef: + # User creating this resource must have permissions to add this policy to the SA + kind: ClusterRole +# accepted values turbo-cluster-reader and turbo-cluster-admin + name: cluster-admin + # For OpenShift v3.4 remove apiGroup line + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/kubeturbo_yamls/step3_turbo_serviceAccountRoleBinding_turbo-cluster-admin_sample.yaml b/deploy/kubeturbo_yamls/step3_turbo_serviceAccountRoleBinding_turbo-cluster-admin_sample.yaml new file mode 100644 index 0000000..a18fb5d --- /dev/null +++ b/deploy/kubeturbo_yamls/step3_turbo_serviceAccountRoleBinding_turbo-cluster-admin_sample.yaml @@ -0,0 +1,22 @@ +kind: ClusterRoleBinding +# For OpenShift 3.4-3.7 use apiVersion: v1 +# For kubernetes 1.9 use rbac.authorization.k8s.io/v1 +# For kubernetes 1.8 use rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # 
use this yaml to create a binding that will assign turbo-cluster-admin to your turbo ServiceAccount + # Provide a value for the binding name: and update namespace if needed + # The name should be unique for Kubeturbo instance + name: turbo-all-binding-kubeturbo-turbo +subjects: +- kind: ServiceAccount + # Provide the correct value for service account name: and namespace if needed + name: turbo-user + namespace: turbo +roleRef: + # User creating this resource must have permissions to add this policy to the SA + kind: ClusterRole +# accepted values turbo-cluster-reader and turbo-cluster-admin + name: turbo-cluster-admin + # For OpenShift v3.4 remove apiGroup line + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/kubeturbo_yamls/step3_turbo_serviceAccountRoleBinding_turbo-cluster-reader_sample.yaml b/deploy/kubeturbo_yamls/step3_turbo_serviceAccountRoleBinding_turbo-cluster-reader_sample.yaml new file mode 100644 index 0000000..59acc8f --- /dev/null +++ b/deploy/kubeturbo_yamls/step3_turbo_serviceAccountRoleBinding_turbo-cluster-reader_sample.yaml @@ -0,0 +1,22 @@ +kind: ClusterRoleBinding +# For OpenShift 3.4-3.7 use apiVersion: v1 +# For kubernetes 1.9 use rbac.authorization.k8s.io/v1 +# For kubernetes 1.8 use rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # use this yaml to create a binding that will assign turbo-cluster-reader to your turbo ServiceAccount + # Provide a value for the binding name: and update namespace if needed + # The name should be unique for Kubeturbo instance + name: turbo-all-binding-kubeturbo-turbo +subjects: +- kind: ServiceAccount + # Provide the correct value for service account name: and namespace if needed + name: turbo-user + namespace: turbo +roleRef: + # User creating this resource must have permissions to add this policy to the SA + kind: ClusterRole +# accepted values turbo-cluster-reader and turbo-cluster-admin + name: turbo-cluster-reader + # For OpenShift v3.4 remove apiGroup line + apiGroup: 
rbac.authorization.k8s.io diff --git a/deploy/kubeturbo_yamls/step4_turbo_configMap_sample.yaml b/deploy/kubeturbo_yamls/step4_turbo_configMap_sample.yaml new file mode 100644 index 0000000..3e44717 --- /dev/null +++ b/deploy/kubeturbo_yamls/step4_turbo_configMap_sample.yaml @@ -0,0 +1,78 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + # use this yaml to create a config that kubeturbo will use to connect to the Turbo Server + # requires Turbo Server and kubeturbo pod 6.4.3 and higher + # Provide a value for the config name: and update namespace if needed + name: turbo-config + namespace: turbo +data: + # Update the values for version, turboServer, opsManagerUserName, opsManagerPassword + # For version, use Turbo Server Version, even when running CWOM + # The opsManagerUserName requires Turbo administrator role + # + # For targetConfig, targetName provides better group naming to identify k8s clusters in UI + # - If no targetConfig is specified, a default targetName will be created from the apiserver URL in + # the kubeconfig. + # - Specify a targetName only will register a probe with type Kubernetes-, as well as + # adding your cluster as a target with the name Kubernetes-. + # - Specify a targetType only will register a probe without adding your cluster as a target. + # The probe will appear as a Cloud Native probe in the UI with a type Kubernetes-. + # + # Define node groups by node role, and automatically enable placement policies to limit to 1 per host + # DaemonSets are identified by default. Use daemonPodDetectors to identify by name patterns using regex or by namespace. + # + # The annotationWhitelist provides a mechanism for discovering annotations for kubernetes objects. + # By default, no annotations are collected. In order to collect annotations, provide a regular + # expression for each entity type for which the annotations are desired. 
+ # + # serverMeta.proxy format for authenticated and non-authenticated "http://username:password@proxyserver:proxyport or http://proxyserver:proxyport" + turbo.config: |- + { + "communicationConfig": { + "serverMeta": { + "version": "", + "turboServer": "https://" + }, + "restAPIConfig": { + "turbonomicCredentialsSecretName": "turbonomic-credentials" + }, + "sdkProtocolConfig": { + "registrationTimeoutSec": 300, + "restartOnRegistrationTimeout": true + } + }, + "targetConfig": { + "targetName": "whateverYouWant" + }, + "HANodeConfig": { + "nodeRoles": [ "master" ] + }, + "annotationWhitelist": { + "containerSpec": "", + "namespace": "", + "workloadController": "" + } + } + # Autoreload configuration will be applied without pod restart but takes about 1 minute to take effect + turbo-autoreload.config: |- + { + "logging": { + "level": 2 + }, + "nodePoolSize": { + "min": 1, + "max": 1000 + }, + "systemWorkloadDetectors": { + "namespacePatterns": ["kube-.*","openshift-.*","cattle.*"] + }, + "exclusionDetectors": { + "operatorControlledWorkloadsPatterns": [], + "operatorControlledNamespacePatterns": [] + }, + "daemonPodDetectors": { + "namespaces": [], + "podNamePatterns": [] + } + } diff --git a/deploy/kubeturbo_yamls/step5_turbo_kubeturboDeploy.yaml b/deploy/kubeturbo_yamls/step5_turbo_kubeturboDeploy.yaml new file mode 100644 index 0000000..cc064ef --- /dev/null +++ b/deploy/kubeturbo_yamls/step5_turbo_kubeturboDeploy.yaml @@ -0,0 +1,135 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + # use this yaml to deploy the kubeturbo pod + # Provide a value for the deploy/pod name: and update namespace if needed + name: kubeturbo + namespace: turbo +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: kubeturbo + strategy: + type: Recreate + template: + metadata: + annotations: + kubeturbo.io/monitored: "false" + labels: + app.kubernetes.io/name: kubeturbo + spec: + # Update serviceAccount if needed + serviceAccountName: turbo-user + # Assigning 
Kubeturbo to node, see + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + # + # nodeSelector: + # kubernetes.io/hostname: worker0 + # + # Or, use affinity: + # + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/hostname + # operator: In + # values: + # - worker1 + # + # Or, use taints and tolerations + # + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "mytaint" + # effect: "NoSchedule" + containers: + - name: kubeturbo + image: icr.io/cpopen/turbonomic/kubeturbo: + env: + - name: KUBETURBO_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - --turboconfig=/etc/kubeturbo/turbo.config + - --v=2 + # Comment out the following two args if running in k8s 1.10 or older, or + # change to https=false and port=10255 if insecure kubelet read only is configured + - --kubelet-https=true + - --kubelet-port=10250 + # Uncomment for pod moves in OpenShift + #- --scc-support=* + # Uncomment for discovery interval in seconds, default value is 600 + #- --discovery-interval-sec=600 + # Uncomment for discovery sample interval in seconds to collect additional resource usage data samples from kubelet. This should be no smaller than 10 seconds. Default value is 60 + #- --discovery-sample-interval=60 + # Uncomment for the number of resource usage data samples to be collected from kubelet in each full discovery cycle. This should be no larger than 60. Default is 10 + #- --discovery-samples=10 + # Uncomment for discovery time out in seconds, default value is 180 + #- --discovery-timeout-sec=180 + # Uncomment for garbage collection interval in minutes, default is 10 + #- --garbage-collection-interval=10 + # Uncomment for the number of discovery workers. 
Default is 10 + #- --discovery-workers=10 + # Uncomment for pod moves with pvs + #- --fail-volume-pod-moves=false + # Uncomment to override default, and specify your own location + #- --busybox-image=docker.io/busybox + # or uncomment below to pull from RHCC + #- --busybox-image=registry.access.redhat.com/ubi9/ubi-minimal + # Uncomment to specify the secret name which holds the credentials to busybox image + #- --busybox-image-pull-secret= + # Specify nodes to exclude from cpu frequency getter job. + # Note kubernetes.io/os=windows and/or beta.kubernetes.io/os=windows labels will be automatically excluded by default. + # If specified all the labels will be used to select the node ignoring the default. + #- --cpufreq-job-exclude-node-labels=kubernetes.io/key=value + # The complete cpufreqgetter image uri used for fallback node cpu frequency getter job. + #- --cpufreqgetter-image=icr.io/cpopen/turbonomic/cpufreqgetter + # The name of the secret that stores the image pull credentials for cpufreqgetter image. + #- --cpufreqgetter-image-pull-secret= + # Uncomment to stitch using IP, or if using Openstack, Hyper-V/VMM + #- --stitch-uuid=false + # Uncomment to customize readiness retry threshold. Kubeturbo will try readiness-retry-threshold times before giving up. Default is 60. The retry interval is 10s. 
+ #- --readiness-retry-threshold=60 + # Uncomment to disable the cleanup of the resources which are created by kubeturbo for the scc impersonation + #- --cleanup-scc-impersonation-resources=false + # Uncomment to skip creating the resources for the scc impersonation + #- --skip-creating-scc-impersonation-resources=true + # [ArgoCD integration] The email to be used to push changes to git + #- --git-email="" + # [ArgoCD integration] The username to be used to push changes to git + #- --git-username="" + # [ArgoCD integration] The name of the secret which holds the git credentials + #- --git-secret-name="" + # [ArgoCD integration] The namespace of the secret which holds the git credentials + #- --git-secret-namespace="" + # [ArgoCD integration] The commit mode that should be used for git action executions. One of {request|direct}. Defaults to direct + #- --git-commit-mode="" + volumeMounts: + # volume will be created, any name will work and must match below + - name: turbo-volume + mountPath: /etc/kubeturbo + readOnly: true + - name: turbonomic-credentials-volume + mountPath: /etc/turbonomic-credentials + readOnly: true + - name: varlog + mountPath: /var/log + volumes: + - name: turbo-volume + configMap: + # Update configMap name if needed + name: turbo-config + - name: turbonomic-credentials-volume + secret: + defaultMode: 420 + optional: true + # Update secret name if needed + secretName: turbonomic-credentials + - name: varlog + emptyDir: {} + restartPolicy: Always diff --git a/deploy/kubeturbo_yamls/turbo-admin.yaml b/deploy/kubeturbo_yamls/turbo-admin.yaml new file mode 100644 index 0000000..cca7622 --- /dev/null +++ b/deploy/kubeturbo_yamls/turbo-admin.yaml @@ -0,0 +1,124 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: turbo-cluster-admin +rules: + - apiGroups: + - "" + - batch + resources: + - pods + - jobs + verbs: + - '*' + - apiGroups: + - "" + - apps + - apps.openshift.io + - extensions + - turbonomic.com + - 
devops.turbonomic.io + - redis.redis.opstreelabs.in + - charts.helm.k8s.io + resources: + - deployments + - replicasets + - replicationcontrollers + - statefulsets + - daemonsets + - deploymentconfigs + - resourcequotas + - operatorresourcemappings + - operatorresourcemappings/status + - redis + - xls + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - "" + - apps + - batch + - extensions + - policy + - app.k8s.io + - argoproj.io + - apiextensions.k8s.io + - config.openshift.io + resources: + - nodes + - services + - endpoints + - namespaces + - limitranges + - persistentvolumes + - persistentvolumeclaims + - poddisruptionbudget + - cronjobs + - applications + - customresourcedefinitions + - clusterversions + verbs: + - get + - list + - watch + - apiGroups: + - machine.openshift.io + resources: + - machines + - machinesets + verbs: + - get + - list + - update + - apiGroups: + - "" + resources: + - nodes/spec + - nodes/stats + - nodes/metrics + - nodes/proxy + - pods/log + verbs: + - get + - apiGroups: + - policy.turbonomic.io + resources: + - slohorizontalscales + - containerverticalscales + - policybindings + verbs: + - get + - list + - watch + - apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - list + - use + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - create + - delete + - impersonate + - apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + - clusterroles + - clusterrolebindings + verbs: + - get + - create + - delete + - update diff --git a/deploy/kubeturbo_yamls/turbo-reader.yaml b/deploy/kubeturbo_yamls/turbo-reader.yaml new file mode 100644 index 0000000..2323627 --- /dev/null +++ b/deploy/kubeturbo_yamls/turbo-reader.yaml @@ -0,0 +1,67 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: turbo-cluster-reader +rules: + - apiGroups: + - "" + - apps + - app.k8s.io + - apps.openshift.io + - batch + - 
extensions + - turbonomic.com + - devops.turbonomic.io + - config.openshift.io + resources: + - nodes + - pods + - deployments + - replicasets + - replicationcontrollers + - services + - endpoints + - namespaces + - limitranges + - resourcequotas + - persistentvolumes + - persistentvolumeclaims + - applications + - jobs + - cronjobs + - statefulsets + - daemonsets + - deploymentconfigs + - operatorresourcemappings + - clusterversions + verbs: + - get + - watch + - list + - apiGroups: + - machine.openshift.io + resources: + - machines + - machinesets + verbs: + - get + - list + - apiGroups: + - "" + resources: + - nodes/spec + - nodes/stats + - nodes/metrics + - nodes/proxy + verbs: + - get + - apiGroups: + - policy.turbonomic.io + resources: + - slohorizontalscales + - containerverticalscales + - policybindings + verbs: + - get + - list + - watch diff --git a/deploy/kubeturbo_yamls/turbo_kubeturbo_operator_full.yaml b/deploy/kubeturbo_yamls/turbo_kubeturbo_operator_full.yaml new file mode 100644 index 0000000..4787bb6 --- /dev/null +++ b/deploy/kubeturbo_yamls/turbo_kubeturbo_operator_full.yaml @@ -0,0 +1,330 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: system + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: namespace + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + name: turbo +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator-sa + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + namespace: turbo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kubeturbo-operator +rules: +- apiGroups: + - "" + - apps + - 
extensions + resources: + - nodes + - pods + - configmaps + - endpoints + - events + - deployments + - persistentvolumeclaims + - replicasets + - replicationcontrollers + - services + - secrets + - serviceaccounts + verbs: + - '*' +- apiGroups: + - "" + - apps + - extensions + - policy + resources: + - daemonsets + - endpoints + - limitranges + - namespaces + - persistentvolumes + - persistentvolumeclaims + - poddisruptionbudget + - resourcequotas + - services + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes/spec + - nodes/stats + verbs: + - get +- apiGroups: + - charts.helm.k8s.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - update +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - watch + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubeturbo-operator +subjects: +- kind: ServiceAccount + name: kubeturbo-operator + namespace: turbo +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + name: kubeturbo-operator + namespace: turbo +spec: + 
replicas: 1 + selector: + matchLabels: + name: kubeturbo-operator + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: kubeturbo-operator + labels: + name: kubeturbo-operator + spec: + containers: + - args: + - --leader-elect + command: + - /manager + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: icr.io/cpopen/kubeturbo-operator:8.15.1-SNAPSHOT + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: kubeturbo-operator + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + serviceAccountName: kubeturbo-operator + terminationGracePeriodSeconds: 10 +--- +apiVersion: v1 +kind: Secret +metadata: + name: turbonomic-credentials + namespace: turbo +type: Opaque +data: + # username: + # password: + clientid: + clientsecret: +--- +apiVersion: charts.helm.k8s.io/v1 +kind: Kubeturbo +metadata: + labels: + app.kubernetes.io/name: kubeturbo + app.kubernetes.io/instance: kubeturbo-release + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: kubeturbo-deploy + name: kubeturbo-release + namespace: turbo +spec: + serverMeta: + turboServer: "https://" + + restAPIConfig: + turbonomicCredentialsSecretName: turbonomic-credentials + + # Supply a targetName for user friendly identification of the k8s cluster + targetConfig: + targetName: + + # Specify custom turbo-cluster-reader or turbo-cluster-admin role instead of the default cluster-admin role + roleName: cluster-admin + + image: + repository: icr.io/cpopen/turbonomic/kubeturbo + tag: "" + # 
imagePullSecret: "" + # Uncomment to use an image from RHCC for cpu-frequency getter job - predefined in OCP Operator Hub version + # busyboxRepository: registry.access.redhat.com/ubi9/ubi-minimal + + # Assigning Kubeturbo to node, see + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + # + # kubeturboPodScheduling: + # nodeSelector: + # kubernetes.io/hostname: worker0 + # + # Or, use affinity: + # + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/hostname + # operator: In + # values: + # - worker1 + # + # Or, use taints and tolerations + # + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "mytaint" + # effect: "NoSchedule" + + # Configurations to register probe with Turbo Server + # sdkProtocolConfig: + # registrationTimeoutSec: 300 + # restartOnRegistrationTimeout: true + + # Uncomment out lines to configure HA Node to ESX policies by node role. Default is master + # Add more roles using format "\"foo\"\,\"bar\"" + # HANodeConfig: + # nodeRoles: "\"master\"" + + # Uncomment next lines to use dynamic logging level + # Changing this value does not require restart of Kubeturbo but takes about 1 minute to take effect + # logging: + # level: 2 + # nodePoolSize: + # min: 1 + # max: 1000 + + # Uncomment out to allow execution in OCP environments + #args: + # sccsupport: "*" + + # Uncomment out to specify kubeturbo container specifications when needed (quotas set on ns) + #resources: + # limits: + # memory: 4Gi + # cpu: "2" + # requests: + # memory: 512Mi + # cpu: "1" + + # Cluster Role rules for ORM owners. + # It's required when using ORM with ClusterRole 'turbo-cluster-admin'. + # It's recommended to use ORM with ClusterRole 'cluster-admin'. 
+ ormOwners: + apiGroup: + # - redis.redis.opstreelabs.in + # - charts.helm.k8s.io + resources: + # - redis + # - xls + # Flag system workloads such as those defined in kube-system, openshift-system, etc. + # Kubeturbo will not generate actions for workloads that match the supplied patterns. + systemWorkloadDetectors: + # A list of regular expressions that match the namespace names for system workloads. + namespacePatterns: + - kube-.* + - openshift-.* + - cattle.* + # List operator-controlled workloads by name or namespace (using regular expressions) + # that should be excluded from the operator-controlled WorkloadController resize policy. + # By default, matching workloads will generate actions that are not in Recommend mode. + # exclusionDetectors: + # A list of regular expressions representing operator-controlled Workload Controllers. + # operatorControlledNamespacePatterns: + # - example-.* + # - .*-example + # A list of regular expressions representing namespaces containing operator-controlled + # Workload Controllers. 
+ # operatorControlledWorkloadsPatterns: + # - .*-example.* diff --git a/deploy/kubeturbo_yamls/turbo_kubeturbo_operator_least_admin_full.yaml b/deploy/kubeturbo_yamls/turbo_kubeturbo_operator_least_admin_full.yaml new file mode 100644 index 0000000..44f284f --- /dev/null +++ b/deploy/kubeturbo_yamls/turbo_kubeturbo_operator_least_admin_full.yaml @@ -0,0 +1,330 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: system + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: namespace + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + name: turbo +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator-sa + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + namespace: turbo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kubeturbo-operator +rules: +- apiGroups: + - "" + - apps + - extensions + resources: + - nodes + - pods + - configmaps + - endpoints + - events + - deployments + - persistentvolumeclaims + - replicasets + - replicationcontrollers + - services + - secrets + - serviceaccounts + verbs: + - '*' +- apiGroups: + - "" + - apps + - extensions + - policy + resources: + - daemonsets + - endpoints + - limitranges + - namespaces + - persistentvolumes + - persistentvolumeclaims + - poddisruptionbudget + - resourcequotas + - services + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes/spec + - nodes/stats + verbs: + - get +- apiGroups: + - charts.helm.k8s.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - 
clusterrolebindings + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - update +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - watch + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubeturbo-operator +subjects: +- kind: ServiceAccount + name: kubeturbo-operator + namespace: turbo +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + name: kubeturbo-operator + namespace: turbo +spec: + replicas: 1 + selector: + matchLabels: + name: kubeturbo-operator + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: kubeturbo-operator + labels: + name: kubeturbo-operator + spec: + containers: + - args: + - --leader-elect + command: + - /manager + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: icr.io/cpopen/kubeturbo-operator:8.15.1-SNAPSHOT + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: kubeturbo-operator + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 
500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + serviceAccountName: kubeturbo-operator + terminationGracePeriodSeconds: 10 +--- +apiVersion: v1 +kind: Secret +metadata: + name: turbonomic-credentials + namespace: turbo +type: Opaque +data: + # username: + # password: + clientid: + clientsecret: +--- +apiVersion: charts.helm.k8s.io/v1 +kind: Kubeturbo +metadata: + labels: + app.kubernetes.io/name: kubeturbo + app.kubernetes.io/instance: kubeturbo-release + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: kubeturbo-deploy + name: kubeturbo-release + namespace: turbo +spec: + serverMeta: + turboServer: "https://" + + restAPIConfig: + turbonomicCredentialsSecretName: turbonomic-credentials + + # Supply a targetName for user friendly identification of the k8s cluster + targetConfig: + targetName: + + # Specify custom turbo-cluster-reader or turbo-cluster-admin role instead of the default cluster-admin role + roleName: turbo-cluster-admin + + image: + repository: icr.io/cpopen/turbonomic/kubeturbo + tag: "" + # imagePullSecret: "" + # Uncomment to use an image from RHCC for cpu-frequency getter job - predefined in OCP Operator Hub version + # busyboxRepository: registry.access.redhat.com/ubi9/ubi-minimal + + # Assigning Kubeturbo to node, see + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + # + # kubeturboPodScheduling: + # nodeSelector: + # kubernetes.io/hostname: worker0 + # + # Or, use affinity: + # + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/hostname + # operator: In + # values: + # - worker1 + # + # Or, use taints and tolerations + # + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "mytaint" + # effect: 
"NoSchedule" + + # Configurations to register probe with Turbo Server + # sdkProtocolConfig: + # registrationTimeoutSec: 300 + # restartOnRegistrationTimeout: true + + # Uncomment out lines to configure HA Node to ESX policies by node role. Default is master + # Add more roles using format "\"foo\"\,\"bar\"" + # HANodeConfig: + # nodeRoles: "\"master\"" + + # Uncomment next lines to use dynamic logging level + # Changing this value does not require restart of Kubeturbo but takes about 1 minute to take effect + # logging: + # level: 2 + # nodePoolSize: + # min: 1 + # max: 1000 + + # Uncomment out to allow execution in OCP environments + #args: + # sccsupport: "*" + + # Uncomment out to specify kubeturbo container specifications when needed (quotas set on ns) + #resources: + # limits: + # memory: 4Gi + # cpu: "2" + # requests: + # memory: 512Mi + # cpu: "1" + + # Cluster Role rules for ORM owners. + # It's required when using ORM with ClusterRole 'turbo-cluster-admin'. + # It's recommended to use ORM with ClusterRole 'cluster-admin'. + ormOwners: + apiGroup: + # - redis.redis.opstreelabs.in + # - charts.helm.k8s.io + resources: + # - redis + # - xls + # Flag system workloads such as those defined in kube-system, openshift-system, etc. + # Kubeturbo will not generate actions for workloads that match the supplied patterns. + systemWorkloadDetectors: + # A list of regular expressions that match the namespace names for system workloads. + namespacePatterns: + - kube-.* + - openshift-.* + - cattle.* + # List operator-controlled workloads by name or namespace (using regular expressions) + # that should be excluded from the operator-controlled WorkloadController resize policy. + # By default, matching workloads will generate actions that are not in Recommend mode. + # exclusionDetectors: + # A list of regular expressions representing operator-controlled Workload Controllers. 
+ # operatorControlledNamespacePatterns: + # - example-.* + # - .*-example + # A list of regular expressions representing namespaces containing operator-controlled + # Workload Controllers. + # operatorControlledWorkloadsPatterns: + # - .*-example.* diff --git a/deploy/kubeturbo_yamls/turbo_kubeturbo_operator_reader_full.yaml b/deploy/kubeturbo_yamls/turbo_kubeturbo_operator_reader_full.yaml new file mode 100644 index 0000000..49830b8 --- /dev/null +++ b/deploy/kubeturbo_yamls/turbo_kubeturbo_operator_reader_full.yaml @@ -0,0 +1,330 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: system + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: namespace + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + name: turbo +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator-sa + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + namespace: turbo +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kubeturbo-operator +rules: +- apiGroups: + - "" + - apps + - extensions + resources: + - nodes + - pods + - configmaps + - endpoints + - events + - deployments + - persistentvolumeclaims + - replicasets + - replicationcontrollers + - services + - secrets + - serviceaccounts + verbs: + - '*' +- apiGroups: + - "" + - apps + - extensions + - policy + resources: + - daemonsets + - endpoints + - limitranges + - namespaces + - persistentvolumes + - persistentvolumeclaims + - poddisruptionbudget + - resourcequotas + - services + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes/spec + - nodes/stats + verbs: 
+ - get +- apiGroups: + - charts.helm.k8s.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - '*' +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - update +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - watch + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubeturbo-operator +subjects: +- kind: ServiceAccount + name: kubeturbo-operator + namespace: turbo +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: kubeturbo-deploy + app.kubernetes.io/instance: kubeturbo-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: kubeturbo-deploy + name: kubeturbo-operator + name: kubeturbo-operator + namespace: turbo +spec: + replicas: 1 + selector: + matchLabels: + name: kubeturbo-operator + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: kubeturbo-operator + labels: + name: kubeturbo-operator + spec: + containers: + - args: + - --leader-elect + command: + - /manager + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: icr.io/cpopen/kubeturbo-operator:8.15.1-SNAPSHOT + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 
20 + name: kubeturbo-operator + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + serviceAccountName: kubeturbo-operator + terminationGracePeriodSeconds: 10 +--- +apiVersion: v1 +kind: Secret +metadata: + name: turbonomic-credentials + namespace: turbo +type: Opaque +data: + # username: + # password: + clientid: + clientsecret: +--- +apiVersion: charts.helm.k8s.io/v1 +kind: Kubeturbo +metadata: + labels: + app.kubernetes.io/name: kubeturbo + app.kubernetes.io/instance: kubeturbo-release + app.kubernetes.io/part-of: kubeturbo-deploy + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: kubeturbo-deploy + name: kubeturbo-release + namespace: turbo +spec: + serverMeta: + turboServer: "https://" + + restAPIConfig: + turbonomicCredentialsSecretName: turbonomic-credentials + + # Supply a targetName for user friendly identification of the k8s cluster + targetConfig: + targetName: + + # Specify custom turbo-cluster-reader or turbo-cluster-admin role instead of the default cluster-admin role + roleName: turbo-cluster-reader + + image: + repository: icr.io/cpopen/turbonomic/kubeturbo + tag: "" + # imagePullSecret: "" + # Uncomment to use an image from RHCC for cpu-frequency getter job - predefined in OCP Operator Hub version + # busyboxRepository: registry.access.redhat.com/ubi9/ubi-minimal + + # Assigning Kubeturbo to node, see + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + # + # kubeturboPodScheduling: + # nodeSelector: + # kubernetes.io/hostname: worker0 + # + # Or, use affinity: + # + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/hostname + # operator: 
In + # values: + # - worker1 + # + # Or, use taints and tolerations + # + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "mytaint" + # effect: "NoSchedule" + + # Configurations to register probe with Turbo Server + # sdkProtocolConfig: + # registrationTimeoutSec: 300 + # restartOnRegistrationTimeout: true + + # Uncomment out lines to configure HA Node to ESX policies by node role. Default is master + # Add more roles using format "\"foo\"\,\"bar\"" + # HANodeConfig: + # nodeRoles: "\"master\"" + + # Uncomment next lines to use dynamic logging level + # Changing this value does not require restart of Kubeturbo but takes about 1 minute to take effect + # logging: + # level: 2 + # nodePoolSize: + # min: 1 + # max: 1000 + + # Uncomment out to allow execution in OCP environments + #args: + # sccsupport: "*" + + # Uncomment out to specify kubeturbo container specifications when needed (quotas set on ns) + #resources: + # limits: + # memory: 4Gi + # cpu: "2" + # requests: + # memory: 512Mi + # cpu: "1" + + # Cluster Role rules for ORM owners. + # It's required when using ORM with ClusterRole 'turbo-cluster-admin'. + # It's recommended to use ORM with ClusterRole 'cluster-admin'. + ormOwners: + apiGroup: + # - redis.redis.opstreelabs.in + # - charts.helm.k8s.io + resources: + # - redis + # - xls + # Flag system workloads such as those defined in kube-system, openshift-system, etc. + # Kubeturbo will not generate actions for workloads that match the supplied patterns. + systemWorkloadDetectors: + # A list of regular expressions that match the namespace names for system workloads. + namespacePatterns: + - kube-.* + - openshift-.* + - cattle.* + # List operator-controlled workloads by name or namespace (using regular expressions) + # that should be excluded from the operator-controlled WorkloadController resize policy. + # By default, matching workloads will generate actions that are not in Recommend mode. 
+ # exclusionDetectors: + # A list of regular expressions representing operator-controlled Workload Controllers. + # operatorControlledNamespacePatterns: + # - example-.* + # - .*-example + # A list of regular expressions representing namespaces containing operator-controlled + # Workload Controllers. + # operatorControlledWorkloadsPatterns: + # - .*-example.* diff --git a/deploy/kubeturbo_yamls/turbo_opsmgr_credentials_secret_sample.yaml b/deploy/kubeturbo_yamls/turbo_opsmgr_credentials_secret_sample.yaml new file mode 100644 index 0000000..1e3c655 --- /dev/null +++ b/deploy/kubeturbo_yamls/turbo_opsmgr_credentials_secret_sample.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: turbonomic-credentials + namespace: turbo +type: Opaque +data: + # username: + # password: + clientid: + clientsecret: diff --git a/deploy/tsc_operator_yamls/README b/deploy/tsc_operator_yamls/README new file mode 100644 index 0000000..507de21 --- /dev/null +++ b/deploy/tsc_operator_yamls/README @@ -0,0 +1,10 @@ +# Getting Started with the Turbonomic Secure Connect Operator + +## Installing the Turbonomic Secure Connect Operator without Operator Lifecycle Manager (OLM) +``` +export namespace=turbonomic +kubectl create ns $namespace +kubectl apply -f https://raw.githubusercontent.com/turbonomic/kubeturbo-deploy/staging/deploy/tsc_operator_yamls/operator-bundle.yaml +``` + +Note: the TSC operator yaml bundle [operator-yaml](https://raw.githubusercontent.com/turbonomic/kubeturbo-deploy/staging/deploy/tsc_operator_yamls/operator-bundle.yaml) is port from https://github.ibm.com/turbonomic/hybrid-saas-client-operator/blob/staging/deploy/operator_bundle.yaml \ No newline at end of file diff --git a/deploy/tsc_operator_yamls/operator-bundle.yaml b/deploy/tsc_operator_yamls/operator-bundle.yaml new file mode 100644 index 0000000..f16dde1 --- /dev/null +++ b/deploy/tsc_operator_yamls/operator-bundle.yaml @@ -0,0 +1,2318 @@ +### GENERATED VIA 'make operator-yaml' ### +--- 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + name: turbonomicclients.clients.turbonomic.ibm.com +spec: + group: clients.turbonomic.ibm.com + names: + kind: TurbonomicClient + listKind: TurbonomicClientList + plural: turbonomicclients + shortNames: + - tc + singular: turbonomicclient + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.global.version + name: Version + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: TurbonomicClient is the Schema for the turbonomicclients API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TurbonomicClientSpec defines the desired state of TurbonomicClient + properties: + global: + description: Global defines the global state of TurbonomicClient + properties: + imagePullPolicy: + type: string + imagePullSecrets: + items: + description: LocalObjectReference contains enough information + to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + x-kubernetes-map-type: atomic + type: array + registry: + type: string + version: + type: string + required: + - version + type: object + kubeStateMetrics: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + type: object + kubeturbo: + properties: + manage: + type: boolean + turboServer: + type: string + type: object + probes: + properties: + actionScript: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + actionStreamKafka: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + appDynamics: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + appInsights: + description: DEPRECATED - no longer supported + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + bareMetal: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string 
+ type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + compellent: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + datadog: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + 
additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + dynatrace: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + flexera: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + hds: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + 
tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + horizon: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + hpe3par: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: 
+ additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + hyperFlex: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + hyperV: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + ibmStorageFlashSystem: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + 
type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + instana: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + jBoss: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: 
object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + jvm: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + mssql: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + mysql: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + 
type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + netApp: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + newRelic: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: 
object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + nutanix: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + oneView: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + oracle: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + 
registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + powerVM: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + pure: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + 
type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + scaleIO: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + serviceNow: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + snmp: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + 
registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + tanium: + description: DEPRECATED - no longer supported + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + terraform: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + tomcat: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + ucs: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true 
+ type: object + type: object + type: object + ucsDirector: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + vCenter: + properties: + enabled: + type: boolean + mediationVCenter: + properties: + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + mediationVCenterBrowsing: + properties: + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - 
type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + type: object + vmax: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + vmm: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + vplex: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + webLogic: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + webSphere: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + 
javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + wmi: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + xen: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + xtremIO: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + javaComponentOptions: + type: string + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + type: object + properties: + properties: + remoteMediationAddress: + type: string + type: object + rsyslogCourier: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + type: object + tscSiteResources: + properties: + enabled: + type: boolean + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + type: object + tunnel: + properties: + configSync: + properties: + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + type: object + enabled: + type: boolean + router: + properties: + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + type: object + serviceController: + properties: + image: + 
properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + type: object + siteController: + properties: + image: + properties: + name: + type: string + registry: + type: string + tag: + type: string + type: object + imagePullPolicy: + type: string + type: object + type: object + required: + - global + type: object + status: + description: TurbonomicClientStatus defines the observed state of TurbonomicClient + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + name: versionmanagers.clients.turbonomic.ibm.com +spec: + group: clients.turbonomic.ibm.com + names: + kind: VersionManager + listKind: VersionManagerList + plural: versionmanagers + shortNames: + - vmgr + singular: versionmanager + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.version + name: Product Version + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: VersionManager is the Schema for the versionmanagers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: VersionManagerSpec defines the desired state of VersionManager + properties: + url: + type: string + type: object + status: + description: VersionManagerStatus defines the observed state of VersionManager + properties: + version: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: t8c-client-operator + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/part-of: t8c-client-operator + name: t8c-client-operator-controller-manager + namespace: turbonomic +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: t8c-client-operator + app.kubernetes.io/instance: leader-election-role + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: role + app.kubernetes.io/part-of: t8c-client-operator + name: t8c-client-operator-leader-election-role + namespace: turbonomic +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + name: t8c-client-operator-manager-role + namespace: turbonomic +rules: +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - pods + - pods/exec + - secrets + - serviceaccounts + - services + verbs: + - create + 
- delete + - get + - list + - update + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - apps.openshift.io + resources: + - deploymentconfigs + verbs: + - get + - list + - watch +- apiGroups: + - charts.helm.k8s.io + resources: + - kubeturbos + verbs: + - get + - list + - update + - watch +- apiGroups: + - clients.turbonomic.ibm.com + resources: + - turbonomicclients + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - clients.turbonomic.ibm.com + resources: + - turbonomicclients/finalizers + verbs: + - update +- apiGroups: + - clients.turbonomic.ibm.com + resources: + - turbonomicclients/status + verbs: + - get + - patch + - update +- apiGroups: + - clients.turbonomic.ibm.com + resources: + - versionmanagers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - clients.turbonomic.ibm.com + resources: + - versionmanagers/finalizers + verbs: + - update +- apiGroups: + - clients.turbonomic.ibm.com + resources: + - versionmanagers/status + verbs: + - get + - patch + - update +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - route.openshift.io + resources: + - routes + verbs: + - create + - delete + - get + - list + - watch +- apiGroups: + - security.openshift.io + resourceNames: + - privileged + resources: + - securitycontextconstraints + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: t8c-client-operator-manager-role +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - 
customresourcedefinitions + verbs: + - get + - list + - watch +- apiGroups: + - charts.helm.k8s.io + resources: + - kubeturbos + verbs: + - get + - list + - watch +- apiGroups: + - clients.turbonomic.ibm.com + resources: + - turbonomicclients + - versionmanagers + verbs: + - get + - list + - watch +- apiGroups: + - operators.coreos.com + resources: + - clusterserviceversions + verbs: + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + verbs: + - create + - delete + - get + - list + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: t8c-client-operator + app.kubernetes.io/instance: leader-election-rolebinding + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: rolebinding + app.kubernetes.io/part-of: t8c-client-operator + name: t8c-client-operator-leader-election-rolebinding + namespace: turbonomic +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: t8c-client-operator-leader-election-role +subjects: +- kind: ServiceAccount + name: t8c-client-operator-controller-manager + namespace: turbonomic +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: t8c-client-operator + app.kubernetes.io/instance: manager-rolebinding + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: rolebinding + app.kubernetes.io/part-of: t8c-client-operator + name: t8c-client-operator-manager-rolebinding + namespace: turbonomic +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: t8c-client-operator-manager-role +subjects: +- kind: ServiceAccount + name: t8c-client-operator-controller-manager + namespace: turbonomic +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: 
rbac + app.kubernetes.io/created-by: t8c-client-operator + app.kubernetes.io/instance: manager-rolebinding + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/part-of: t8c-client-operator + name: t8c-client-operator-manager-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: t8c-client-operator-manager-role +subjects: +- kind: ServiceAccount + name: t8c-client-operator-controller-manager + namespace: turbonomic +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: t8c-client-operator + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: t8c-client-operator + control-plane: controller-manager + name: t8c-client-operator-controller-manager + namespace: turbonomic +spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + containers: + - args: + - --leader-elect + command: + - /manager + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: icr.io/cpopen/t8c-client-operator:1.2.8 + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + serviceAccountName: t8c-client-operator-controller-manager + terminationGracePeriodSeconds: 10 diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..bc6a823 --- 
/dev/null +++ b/go.mod @@ -0,0 +1,73 @@ +module github.ibm.com/turbonomic/kubeturbo-deploy + +go 1.21 + +require ( + github.com/onsi/ginkgo/v2 v2.17.2 + github.com/onsi/gomega v1.33.0 + k8s.io/api v0.28.3 + k8s.io/apimachinery v0.28.3 + k8s.io/client-go v0.28.3 + sigs.k8s.io/controller-runtime v0.16.3 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/zapr v1.2.4 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + 
github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.25.0 // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/oauth2 v0.8.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.20.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.28.3 // indirect + k8s.io/component-base v0.28.3 // indirect + k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..57f0121 --- /dev/null +++ b/go.sum @@ -0,0 +1,226 @@ +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= +github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= 
+github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 
h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/matttproud/golang_protobuf_extensions v1.0.4 
h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= +github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= +github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE= +github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= 
+github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.uber.org/atomic v1.7.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= +go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= 
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= +golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 
v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= +k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc= +k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08= +k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc= +k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= +k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= +k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= +k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= +k8s.io/component-base v0.28.3 h1:rDy68eHKxq/80RiMb2Ld/tbH8uAE75JdCqJyi6lXMzI= +k8s.io/component-base v0.28.3/go.mod h1:fDJ6vpVNSk6cRo5wmDa6eKIG7UlIQkaFmZN2fYgIUD8= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= +k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= +sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod 
h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 0000000..ff72ff2 --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ \ No newline at end of file diff --git a/internal/api/kubeturbo/reconciler.go b/internal/api/kubeturbo/reconciler.go new file mode 100644 index 0000000..aefa9d9 --- /dev/null +++ b/internal/api/kubeturbo/reconciler.go @@ -0,0 +1,753 @@ +package kubeturbo + +import ( + "context" + "encoding/json" + "fmt" + "hash/fnv" + "reflect" + "strings" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + kubeturbosv1 "github.ibm.com/turbonomic/kubeturbo-deploy/api/v1" + "github.ibm.com/turbonomic/kubeturbo-deploy/internal/constants" + "github.ibm.com/turbonomic/kubeturbo-deploy/internal/utils" +) + +const ( + serviceAccountFinalizer = "helm.k8s.io/finalizer" +) + +type kubeturbo struct { + *KubeturboRequest + spec kubeturbosv1.KubeturboSpec + logger 
logr.Logger +} + +type block = utils.Block + +func Reconcile(ctx context.Context, client client.Client, scheme *runtime.Scheme, ktV1 *kubeturbosv1.Kubeturbo) error { + logger := log.FromContext(ctx).WithName("TearUp-cycle") + kr := NewKubeturboRequest(client, ctx, scheme, ktV1) + kt := kubeturbo{KubeturboRequest: kr, spec: kr.Cr.Spec, logger: logger} + return kt.reconcileKubeTurbo() +} + +func Teardown(ctx context.Context, client client.Client, scheme *runtime.Scheme, ktV1 *kubeturbosv1.Kubeturbo) error { + logger := log.FromContext(ctx).WithName("TearDown-cycle") + kr := NewKubeturboRequest(client, ctx, scheme, ktV1) + kt := kubeturbo{KubeturboRequest: kr, spec: kr.Cr.Spec, logger: logger} + return kt.cleanUpClusterResources() +} + +func (kt *kubeturbo) reconcileKubeTurbo() error { + return utils.ReturnOnError( + kt.createOrUpdateConfigMap, + kt.createOrUpdateServiceAccount, + kt.createOrUpdateClusterRole, + kt.createOrUpdateClusterRoleBinding, + kt.createOrUpdateDeployment, + kt.updateClusterResource, + ) +} + +func (kt *kubeturbo) deployment() *appsv1.Deployment { + return &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: kt.Name(), Namespace: kt.Namespace()}} +} + +func (kt *kubeturbo) createOrUpdateDeployment() error { + dep := kt.deployment() + kt.SetControllerReference(dep) + + // The Kubeturbo pod need to restart to loop in config updates + oldConfigMapHash := kt.Cr.Status.ConfigHash + newConfigMapHash, hashErr := kt.getKubeturboConfigHash() + if hashErr != nil { + return hashErr + } + + // race condition: When deploy get deleted by the previous request while + // the second reconcile cycle arrived between the deployment get deleted + // and CR status updates + if oldConfigMapHash != "" && oldConfigMapHash != newConfigMapHash { + // update CR hash to prevent infinity loop + kt.updateClusterResource() + + kt.logger.Info("Kubeturbo deploy needs to restart to pick up changes") + if err := kt.DeleteIfExists(kt.deployment()); err != nil { + return err + } + 
// mutateDeployment fills in the desired Deployment spec from the CR. It is
// called from within CreateOrUpdate, so `dep` may already hold the live
// object's state; only the fields set here are reconciled.
func (kt *kubeturbo) mutateDeployment(dep *appsv1.Deployment) error {
	labels := kt.labels()

	// If customer upgrade from helm operator to go-based operator, the labels under selector will be different.
	// Since selector in a deployment is immutable, we will need to delete the deployment and recreate it.
	if dep.Spec.Selector != nil && !reflect.DeepEqual(labels, dep.Spec.Selector.MatchLabels) {
		if err := kt.DeleteIfExists(kt.deployment()); err != nil {
			return err
		}
		// Sentinel error: tells the caller to requeue and recreate on the
		// next reconcile pass.
		return constants.ErrRequeueOnDeletion
	}

	metadata := &dep.ObjectMeta
	metadata.Labels = labels

	// At most one pull secret is supported, taken from spec.Image.
	imagePullSecrets := make([]corev1.LocalObjectReference, 0, 1)
	if kt.spec.Image.ImagePullSecret != nil {
		imagePullSecrets = append(imagePullSecrets, corev1.LocalObjectReference{
			Name: *kt.spec.Image.ImagePullSecret,
		})
	}

	// Default to IfNotPresent unless the CR overrides the pull policy.
	imagePullPolicy := corev1.PullIfNotPresent
	if kt.spec.Image.PullPolicy != nil {
		imagePullPolicy = *kt.spec.Image.PullPolicy
	}

	var resourceRequirements corev1.ResourceRequirements
	if kt.spec.Resources != nil {
		resourceRequirements = kt.spec.Resources.Internalize()
	}

	// Recreate (not RollingUpdate) so two kubeturbo pods never run at once.
	dep.Spec = appsv1.DeploymentSpec{
		Replicas: kt.spec.ReplicaCount,
		Strategy: appsv1.DeploymentStrategy{
			Type: appsv1.RecreateDeploymentStrategyType,
		},
		Selector: &metav1.LabelSelector{
			MatchLabels: labels,
		},
		Template: corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{
				Annotations: kt.spec.Annotations,
				Labels:      labels,
			},
			Spec: corev1.PodSpec{
				ServiceAccountName: kt.serviceAccountName(),
				ImagePullSecrets:   imagePullSecrets,
				RestartPolicy:      corev1.RestartPolicyAlways,
				NodeSelector:       kt.spec.KubeturboPodScheduling.NodeSelector,
				Affinity:           kt.spec.KubeturboPodScheduling.Affinity,
				Tolerations:        kt.spec.KubeturboPodScheduling.Tolerations,
				Containers: []corev1.Container{
					{
						Name: constants.KubeturboContainerName,
						Env: []corev1.EnvVar{
							{
								// Expose the pod's own namespace via the downward API.
								Name: "KUBETURBO_NAMESPACE",
								ValueFrom: &corev1.EnvVarSource{
									FieldRef: &corev1.ObjectFieldSelector{
										FieldPath: "metadata.namespace",
									},
								},
							},
						},
						// NOTE(review): assumes Image.Tag is non-nil (CRD
						// default) — a CR without a tag would panic here; confirm.
						Image:           fmt.Sprint(kt.spec.Image.Repository, ":", *kt.spec.Image.Tag),
						ImagePullPolicy: imagePullPolicy,
						Args:            kt.containerArgs(),
						Resources:       resourceRequirements,
						VolumeMounts: []corev1.VolumeMount{
							{
								// Static turbo.config rendered into the config map.
								Name:      "turbo-volume",
								MountPath: "/etc/kubeturbo",
								ReadOnly:  true,
							},
							{
								// Optional secret with server credentials.
								Name:      "turbonomic-credentials-volume",
								MountPath: "/etc/turbonomic-credentials",
								ReadOnly:  true,
							},
							{
								Name:      "varlog",
								MountPath: "/var/log",
							},
						},
					},
				},
				Volumes: []corev1.Volume{
					{
						Name: "turbo-volume",
						VolumeSource: corev1.VolumeSource{
							ConfigMap: &corev1.ConfigMapVolumeSource{
								LocalObjectReference: corev1.LocalObjectReference{
									Name: kt.configMap().Name,
								},
							},
						},
					},
					{
						Name: "turbonomic-credentials-volume",
						VolumeSource: corev1.VolumeSource{
							Secret: &corev1.SecretVolumeSource{
								SecretName: kt.spec.RestAPIConfig.TurbonomicCredentialsSecretName,
								// Optional: the pod still starts when the secret is absent.
								Optional:    utils.AsPtr(true),
								DefaultMode: utils.AsPtr(int32(420)), // 0644
							},
						},
					},
					{
						Name: "varlog",
						VolumeSource: corev1.VolumeSource{
							EmptyDir: &corev1.EmptyDirVolumeSource{},
						},
					},
				},
			},
		},
	}

	return nil
}
constants.KubeturboContainerName, + Env: []corev1.EnvVar{ + { + Name: "KUBETURBO_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + }, + Image: fmt.Sprint(kt.spec.Image.Repository, ":", *kt.spec.Image.Tag), + ImagePullPolicy: imagePullPolicy, + Args: kt.containerArgs(), + Resources: resourceRequirements, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "turbo-volume", + MountPath: "/etc/kubeturbo", + ReadOnly: true, + }, + { + Name: "turbonomic-credentials-volume", + MountPath: "/etc/turbonomic-credentials", + ReadOnly: true, + }, + { + Name: "varlog", + MountPath: "/var/log", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "turbo-volume", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: kt.configMap().Name, + }, + }, + }, + }, + { + Name: "turbonomic-credentials-volume", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: kt.spec.RestAPIConfig.TurbonomicCredentialsSecretName, + Optional: utils.AsPtr(true), + DefaultMode: utils.AsPtr(int32(420)), + }, + }, + }, + { + Name: "varlog", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + }, + } + + return nil +} + +func (kt *kubeturbo) containerArgs() []string { + args := make([]string, 0, 25) + + ktArgs := kt.spec.Args + + args = append(args, "--turboconfig=/etc/kubeturbo/turbo.config") + if ktArgs.Logginglevel != nil { + args = append(args, fmt.Sprintf("--v=%d", *ktArgs.Logginglevel)) + } + if ktArgs.Kubelethttps != nil { + args = append(args, fmt.Sprintf("--kubelet-https=%t", *ktArgs.Kubelethttps)) + } + if ktArgs.Kubeletport != nil { + args = append(args, fmt.Sprintf("--kubelet-port=%d", *ktArgs.Kubeletport)) + } + if ktArgs.Sccsupport != nil { + args = append(args, fmt.Sprint("--scc-support=", *ktArgs.Sccsupport)) + } + if 
ktArgs.ReadinessRetryThreshold != nil { + args = append(args, fmt.Sprint("--readiness-retry-threshold=", *ktArgs.ReadinessRetryThreshold)) + } + if ktArgs.FailVolumePodMoves != nil { + args = append(args, fmt.Sprint("--fail-volume-pod-moves=", *ktArgs.FailVolumePodMoves)) + } + if kt.spec.Image.BusyboxRepository != nil { + args = append(args, fmt.Sprint("--busybox-image=", *kt.spec.Image.BusyboxRepository)) + } + if kt.spec.Image.ImagePullSecret != nil { + args = append(args, fmt.Sprint("--busybox-image-pull-secret=", *kt.spec.Image.ImagePullSecret)) + args = append(args, fmt.Sprint("--cpufreqgetter-image-pull-secret=", *kt.spec.Image.ImagePullSecret)) + } + if kt.spec.Image.CpufreqgetterRepository != nil { + args = append(args, fmt.Sprint("--cpufreqgetter-image=", *kt.spec.Image.CpufreqgetterRepository)) + } + if ktArgs.BusyboxExcludeNodeLabels != nil { + args = append(args, fmt.Sprint("--cpufreq-job-exclude-node-labels=", *ktArgs.BusyboxExcludeNodeLabels)) + } + if ktArgs.Stitchuuid != nil { + args = append(args, fmt.Sprintf("--stitch-uuid=%t", *ktArgs.Stitchuuid)) + } + if ktArgs.Pre16K8sVersion != nil && *ktArgs.Pre16K8sVersion { + args = append(args, "--k8sVersion=1.5") + } + if ktArgs.CleanupSccImpersonationResources != nil { + args = append(args, fmt.Sprintf("--cleanup-scc-impersonation-resources=%t", *ktArgs.CleanupSccImpersonationResources)) + } + if ktArgs.SkipCreatingSccImpersonationResources != nil { + args = append(args, fmt.Sprintf("--skip-creating-scc-impersonation-resources=%t", *ktArgs.SkipCreatingSccImpersonationResources)) + } + if ktArgs.GitEmail != nil { + args = append(args, fmt.Sprintf("--git-email=%s", *ktArgs.GitEmail)) + } + if ktArgs.GitUsername != nil { + args = append(args, fmt.Sprintf("--git-username=%s", *ktArgs.GitUsername)) + } + if ktArgs.GitSecretName != nil { + args = append(args, fmt.Sprintf("--git-secret-name=%s", *ktArgs.GitSecretName)) + } + if ktArgs.GitSecretNamespace != nil { + args = append(args, 
fmt.Sprintf("--git-secret-namespace=%s", *ktArgs.GitSecretNamespace)) + } + if ktArgs.GitCommitMode != nil { + args = append(args, fmt.Sprintf("--git-commit-mode=%s", *ktArgs.GitCommitMode)) + } + if ktArgs.SatelliteLocationProvider != nil { + args = append(args, fmt.Sprintf("--satellite-location-provider=%s", *ktArgs.SatelliteLocationProvider)) + } + if ktArgs.DiscoveryIntervalSec != nil { + args = append(args, fmt.Sprintf("--discovery-interval-sec=%d", *ktArgs.DiscoveryIntervalSec)) + } + if ktArgs.DiscoverySampleIntervalSec != nil { + args = append(args, fmt.Sprintf("--discovery-sample-interval=%d", *ktArgs.DiscoverySampleIntervalSec)) + } + if ktArgs.DiscoverySamples != nil { + args = append(args, fmt.Sprintf("--discovery-samples=%d", *ktArgs.DiscoverySamples)) + } + if ktArgs.DiscoveryTimeoutSec != nil { + args = append(args, fmt.Sprintf("--discovery-timeout-sec=%d", *ktArgs.DiscoveryTimeoutSec)) + } + if ktArgs.GarbageCollectionIntervalMin != nil { + args = append(args, fmt.Sprintf("--garbage-collection-interval=%d", *ktArgs.GarbageCollectionIntervalMin)) + } + if ktArgs.DiscoveryWorkers != nil { + args = append(args, fmt.Sprintf("--discovery-workers=%d", *ktArgs.DiscoveryWorkers)) + } + return args +} + +func (kt *kubeturbo) configMapName() string { + return fmt.Sprint("turbo-config", "-", kt.Name()) +} + +func (kt *kubeturbo) configMap() *corev1.ConfigMap { + return &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: kt.configMapName(), Namespace: kt.Namespace()}} +} + +func (kt *kubeturbo) createOrUpdateConfigMap() error { + cm := kt.configMap() + kt.SetControllerReference(cm) + _, err := kt.CreateOrUpdate(cm, func() error { + return kt.mutateConfigMap(cm) + }) + return err +} + +func (kt *kubeturbo) mutateConfigMap(cm *corev1.ConfigMap) error { + // kubeturbo config + cByteString, cError := kt.buildKubeturboConfig() + if cError != nil { + return cError + } + + // dynamic config + dcByteString, dcError := kt.buildKubeturboDynamicConfig() + if dcError != 
nil { + return dcError + } + + labels := kt.labels() + cm.ObjectMeta.Labels = labels + cm.Data = map[string]string{ + "turbo.config": string(cByteString), + "turbo-autoreload.config": string(dcByteString), + } + + return nil +} + +func (kt *kubeturbo) getKubeturboConfigHash() (string, error) { + cByteString, err := kt.buildKubeturboConfig() + if err != nil { + return "", err + } + + hash := fnv.New64() + if _, err = hash.Write(cByteString); err != nil { + return "", err + } + + return fmt.Sprint(hash.Sum64()), nil +} + +func (kt *kubeturbo) buildKubeturboConfig() ([]byte, error) { + shortVersion := *kt.spec.ServerMeta.Version + + serverMeta := block{ + "version": shortVersion, + } + if kt.spec.ServerMeta.TurboServer != "" { + serverMeta["turboServer"] = kt.spec.ServerMeta.TurboServer + } + if kt.spec.ServerMeta.Proxy != nil { + serverMeta["proxy"] = *kt.spec.ServerMeta.Proxy + } + + commConfig := block{"serverMeta": serverMeta} + + restApiConfig := block{} + if kt.spec.RestAPIConfig.OpsManagerUserName != nil && kt.spec.RestAPIConfig.OpsManagerPassword != nil { + if kt.spec.RestAPIConfig.OpsManagerUserName != nil { + restApiConfig["opsManagerUserName"] = *kt.spec.RestAPIConfig.OpsManagerUserName + } + if kt.spec.RestAPIConfig.OpsManagerPassword != nil { + restApiConfig["opsManagerPassword"] = *kt.spec.RestAPIConfig.OpsManagerPassword + } + commConfig["restAPIConfig"] = restApiConfig + } + + sdkProtocolConfig := block{} + if kt.spec.SdkProtocolConfig.RegistrationTimeoutSec != nil || kt.spec.SdkProtocolConfig.RestartOnRegistrationTimeout != nil { + if kt.spec.SdkProtocolConfig.RegistrationTimeoutSec != nil { + sdkProtocolConfig["registrationTimeoutSec"] = *kt.spec.SdkProtocolConfig.RegistrationTimeoutSec + } + if kt.spec.SdkProtocolConfig.RestartOnRegistrationTimeout != nil { + sdkProtocolConfig["restartOnRegistrationTimeout"] = *kt.spec.SdkProtocolConfig.RestartOnRegistrationTimeout + } + commConfig["sdkProtocolConfig"] = sdkProtocolConfig + } + + // convert 
HANodeConfig from string to slice to strip the quotation marks + nodeRoles := strings.Split(kt.spec.HANodeConfig.NodeRoles, ",") + for i, nr := range nodeRoles { + if nr[0] == '"' && nr[len(nr)-1] == '"' { + nodeRoles[i] = nr[1 : len(nr)-1] + } + } + config := block{ + "communicationConfig": commConfig, + "HANodeConfig": block{ + "roles": nodeRoles, + }, + } + + if kt.spec.FeatureGates != nil && len(kt.spec.FeatureGates) > 0 { + config["featureGates"] = kt.spec.FeatureGates + } + + targetConfig := block{} + if kt.spec.TargetConfig.TargetName != nil { + targetConfig["targetName"] = *kt.spec.TargetConfig.TargetName + config["targetConfig"] = targetConfig + } + + hasAnnotationWhiteList := false + annotationWhiteList := block{} + if kt.spec.AnnotationWhitelist.ContainerSpec != nil { + hasAnnotationWhiteList = true + annotationWhiteList["containerSpec"] = *kt.spec.AnnotationWhitelist.ContainerSpec + } + if kt.spec.AnnotationWhitelist.Namespace != nil { + hasAnnotationWhiteList = true + annotationWhiteList["namespace"] = *kt.spec.AnnotationWhitelist.Namespace + } + if kt.spec.AnnotationWhitelist.WorkloadController != nil { + hasAnnotationWhiteList = true + annotationWhiteList["workloadController"] = *kt.spec.AnnotationWhitelist.WorkloadController + } + if hasAnnotationWhiteList { + config["annotationWhiteList"] = annotationWhiteList + } + + return json.MarshalIndent(config, "", " ") +} + +func (kt *kubeturbo) buildKubeturboDynamicConfig() ([]byte, error) { + config := block{} + + if kt.spec.Logging.Level != nil { + config["logging"] = *kt.spec.Logging.Level + } + + nodePoolSizeConfig := block{} + if kt.spec.NodePoolSize.Min != nil || kt.spec.NodePoolSize.Max != nil { + if kt.spec.NodePoolSize.Min != nil { + nodePoolSizeConfig["min"] = *kt.spec.NodePoolSize.Min + } + if kt.spec.NodePoolSize.Max != nil { + nodePoolSizeConfig["mac"] = *kt.spec.NodePoolSize.Max + } + config["nodePoolSize"] = nodePoolSizeConfig + } + + if kt.spec.SystemWorkloadDetectors.NamespacePatterns != 
nil { + config["systemWorkloadDetectors"] = block{ + "namespacePatterns": kt.spec.SystemWorkloadDetectors.NamespacePatterns, + } + } + + exclusionDetectorsConfigs := block{} + if kt.spec.ExclusionDetectors.OperatorControlledWorkloadsPatterns != nil || kt.spec.ExclusionDetectors.OperatorControlledNamespacePatterns != nil { + if kt.spec.ExclusionDetectors.OperatorControlledWorkloadsPatterns != nil { + exclusionDetectorsConfigs["operatorControlledWorkloadsPatterns"] = kt.spec.ExclusionDetectors.OperatorControlledWorkloadsPatterns + } + if kt.spec.ExclusionDetectors.OperatorControlledNamespacePatterns != nil { + exclusionDetectorsConfigs["operatorControlledNamespacePatterns"] = kt.spec.ExclusionDetectors.OperatorControlledNamespacePatterns + } + config["exclusionDetectors"] = exclusionDetectorsConfigs + } + + daemonPodDetectorsConfig := block{} + if kt.spec.DaemonPodDetectors.NamespacePatterns != nil || kt.spec.DaemonPodDetectors.PodNamePatterns != nil { + if kt.spec.DaemonPodDetectors.NamespacePatterns != nil { + daemonPodDetectorsConfig["operatorControlledWorkloadsPatterns"] = kt.spec.DaemonPodDetectors.NamespacePatterns + } + if kt.spec.DaemonPodDetectors.PodNamePatterns != nil { + daemonPodDetectorsConfig["operatorControlledNamespacePatterns"] = kt.spec.DaemonPodDetectors.PodNamePatterns + } + config["daemonPodDetectors"] = daemonPodDetectorsConfig + } + + discoveryConfig := block{} + if kt.spec.Discovery.ChunkSendDelayMillis != nil || kt.spec.Discovery.NumObjectsPerChunk != nil { + if kt.spec.Discovery.ChunkSendDelayMillis != nil { + discoveryConfig["chunkSendDelayMillis"] = *kt.spec.Discovery.ChunkSendDelayMillis + } + if kt.spec.Discovery.NumObjectsPerChunk != nil { + discoveryConfig["numObjectsPerChunk"] = *kt.spec.Discovery.NumObjectsPerChunk + } + config["discovery"] = discoveryConfig + } + + if kt.spec.Wiremock.Enabled != nil && kt.spec.Wiremock.URL != nil && *kt.spec.Wiremock.Enabled { + config["wiremock"] = block{ + "enabled": *kt.spec.Wiremock.Enabled, + 
"url": *kt.spec.Wiremock.URL, + } + } + + return json.MarshalIndent(config, "", " ") +} + +func (kt *kubeturbo) serviceAccountName() string { + return kt.spec.ServiceAccountName +} + +func (kt *kubeturbo) serviceAccount() *corev1.ServiceAccount { + return &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: kt.serviceAccountName(), Namespace: kt.Namespace()}} +} + +func (kt *kubeturbo) createOrUpdateServiceAccount() error { + sa := kt.serviceAccount() + kt.SetControllerReference(sa) + _, err := kt.CreateOrUpdate(sa, func() error { + return kt.mutateServiceAccount(sa, true) + }) + return err +} + +func (kt *kubeturbo) mutateServiceAccount(sa *corev1.ServiceAccount, addFinalizer bool) error { + sa.ObjectMeta.Labels = kt.labels() + if !addFinalizer { + kt.logger.Info("Remove finalizer from service account.") + sa.ObjectMeta.Finalizers = []string{} + } else if !controllerutil.ContainsFinalizer(sa, serviceAccountFinalizer) { + kt.logger.Info("Add finalizer to service account.") + sa.ObjectMeta.Finalizers = []string{serviceAccountFinalizer} + } + return nil +} + +func (kt *kubeturbo) clusterRoleName() string { + roleName := kt.spec.RoleName + if kt.spec.RoleName == kubeturbosv1.RoleTypeAdmin || kt.spec.RoleName == kubeturbosv1.RoleTypeReadOnly { + roleName = kt.spec.RoleName + "-" + kt.Name() + "-" + kt.Namespace() + } + return roleName +} + +func (kt *kubeturbo) clusterRole() *rbacv1.ClusterRole { + return &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: kt.clusterRoleName()}} +} + +func (kt *kubeturbo) createOrUpdateClusterRole() error { + + // if roleName is cluster-admin or any custom names other than "turbo-cluster-admin" or "turbo-cluster-reader", don't override it + if kt.spec.RoleName == kubeturbosv1.RoleTypeClusterAdmin || (kt.spec.RoleName != kubeturbosv1.RoleTypeAdmin && kt.spec.RoleName != kubeturbosv1.RoleTypeReadOnly) { + return nil + } + + cr := kt.clusterRole() + + _, err := kt.CreateOrUpdate(cr, func() error { + return 
// mutateClusterRole fills in the policy rules for the managed cluster role.
// The rule set depends on the CR's role type: read-only gets list/watch
// access, admin additionally gets mutation, SCC and RBAC-management verbs.
// Only called for RoleTypeReadOnly/RoleTypeAdmin (see createOrUpdateClusterRole).
func (kt *kubeturbo) mutateClusterRole(cr *rbacv1.ClusterRole) error {

	cr.Labels = kt.labels()

	// turbo-cluster-reader
	if kt.spec.RoleName == kubeturbosv1.RoleTypeReadOnly {
		cr.Rules = []rbacv1.PolicyRule{
			{
				APIGroups: []string{"", "apps", "app.k8s.io", "apps.openshift.io", "batch", "extensions", "turbonomic.com", "devops.turbonomic.io", "config.openshift.io"},
				Resources: []string{
					// ""
					"endpoints", "limitranges", "namespaces", "nodes", "persistentvolumeclaims", "persistentvolumes", "pods", "replicationcontrollers", "resourcequotas", "services",
					// "apps"
					"daemonsets", "deployments", "replicasets", "statefulsets",
					// "app.k8s.io"
					"applications",
					// "apps.openshift.io"
					"deploymentconfigs",
					// "batch"
					"jobs", "cronjobs",
					// "turbonomic.com"
					"operatorresourcemappings", "clusterversions",
				},
				Verbs: []string{"get", "list", "watch"},
			},
			{
				// OpenShift machine API objects (node provisioning info).
				APIGroups: []string{"machine.openshift.io"},
				Resources: []string{"machines", "machinesets"},
				Verbs:     []string{"get", "list"},
			},
			{
				// Kubelet node sub-resources used for metrics collection.
				APIGroups: []string{""},
				Resources: []string{"nodes/spec", "nodes/stats", "nodes/metrics", "nodes/proxy"},
				Verbs:     []string{"get"},
			},
			{
				// Turbonomic policy CRs.
				APIGroups: []string{"policy.turbonomic.io"},
				Resources: []string{"slohorizontalscales", "containerverticalscales", "policybindings"},
				Verbs:     []string{"get", "list", "watch"},
			},
		}
	} else if kt.spec.RoleName == kubeturbosv1.RoleTypeAdmin {
		// turbo-cluster-admin
		cr.Rules = []rbacv1.PolicyRule{
			{
				// Full control of pods and jobs (move/resize actions).
				APIGroups: []string{"", "batch"},
				Resources: []string{"pods", "jobs"},
				Verbs:     []string{"*"},
			},
			{
				// Workload controllers kubeturbo can patch for actions.
				APIGroups: []string{"", "apps", "apps.openshift.io", "extensions", "turbonomic.com", "devops.turbonomic.io", "redis.redis.opstreelabs.in", "charts.helm.k8s.io"},
				Resources: []string{"deployments", "replicasets", "replicationcontrollers", "statefulsets", "daemonsets", "deploymentconfigs", "resourcequotas", "operatorresourcemappings", "operatorresourcemappings/status", "redis", "xls"},
				Verbs:     []string{"get", "list", "patch", "update", "watch"},
			},
			{
				// Read-only discovery of cluster topology and quotas.
				APIGroups: []string{"", "apps", "batch", "extensions", "policy", "app.k8s.io", "argoproj.io", "apiextensions.k8s.io", "config.openshift.io"},
				Resources: []string{"nodes", "services", "endpoints", "namespaces", "limitranges", "persistentvolumes", "persistentvolumeclaims", "poddisruptionbudget", "cronjobs", "applications", "customresourcedefinitions", "clusterversions"},
				Verbs:     []string{"get", "list", "watch"},
			},
			{
				// Machine API update allows node provision/suspend actions.
				APIGroups: []string{"machine.openshift.io"},
				Resources: []string{"machines", "machinesets"},
				Verbs:     []string{"get", "list", "update"},
			},
			{
				// Kubelet sub-resources plus pod logs.
				APIGroups: []string{""},
				Resources: []string{"nodes/spec", "nodes/stats", "nodes/metrics", "nodes/proxy", "pods/log"},
				Verbs:     []string{"get"},
			},
			{
				APIGroups: []string{"policy.turbonomic.io"},
				Resources: []string{"slohorizontalscales", "containerverticalscales", "policybindings"},
				Verbs:     []string{"get", "list", "watch"},
			},
			{
				// OpenShift SCC usage for impersonation-based moves.
				APIGroups: []string{"security.openshift.io"},
				Resources: []string{"securitycontextconstraints"},
				Verbs:     []string{"list", "use"},
			},
			{
				// Needed for kubeturbo's SCC impersonation workflow.
				APIGroups: []string{""},
				Resources: []string{"serviceaccounts"},
				Verbs:     []string{"get", "create", "delete", "impersonate"},
			},
			{
				APIGroups: []string{"rbac.authorization.k8s.io"},
				Resources: []string{"roles", "rolebindings", "clusterroles", "clusterrolebindings"},
				Verbs:     []string{"get", "create", "delete", "update"},
			},
		}
	}

	// Optional extra rule so kubeturbo can manage ORM owner resources
	// declared on the CR.
	if kt.spec.OrmOwners.ApiGroup != nil && kt.spec.OrmOwners.Resources != nil {
		cr.Rules = append(cr.Rules, rbacv1.PolicyRule{
			APIGroups: kt.spec.OrmOwners.ApiGroup,
			Resources: kt.spec.OrmOwners.Resources,
			Verbs:     []string{"get", "list", "patch", "update", "watch"},
		})
	}

	return nil
}

// clusterRoleBinding returns a skeleton ClusterRoleBinding whose name
// combines the CR's RoleBinding setting with the CR name and namespace so
// multiple CRs can coexist in one cluster.
func (kt *kubeturbo) clusterRoleBinding() *rbacv1.ClusterRoleBinding {
	return &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: kt.spec.RoleBinding + "-" + kt.Name() + "-" + kt.Namespace()}}
}
kt.spec.RoleBinding + "-" + kt.Name() + "-" + kt.Namespace()}} +} + +func (kt *kubeturbo) createOrUpdateClusterRoleBinding() error { + crb := kt.clusterRoleBinding() + // TODO - this doesn't work on cluster-level resources + // kt.SetControllerReference(crb) + _, err := kt.CreateOrUpdate(crb, func() error { + return kt.mutateClusterRoleBinding(crb) + }) + return err +} + +func (kt *kubeturbo) mutateClusterRoleBinding(crb *rbacv1.ClusterRoleBinding) error { + // role ref cannot be updated in an existing role binding. Therefore, + // if role name is updated in the CR, delete the existing role binding before creating it + if crb.RoleRef.Name != "" && crb.RoleRef.Name != kt.clusterRoleName() { + if err := kt.DeleteIfExists(kt.clusterRoleBinding()); err != nil { + return err + } + return constants.ErrRequeueOnDeletion + } + crb.Labels = kt.labels() + + crb.Subjects = []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: kt.serviceAccount().Name, + Namespace: kt.Namespace(), + }, + } + + crb.RoleRef = rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: kt.clusterRoleName(), + APIGroup: rbacv1.GroupName, + } + + return nil +} + +func (kt *kubeturbo) updateClusterResource() error { + newConfigMapHash, hashErr := kt.getKubeturboConfigHash() + if hashErr != nil { + return hashErr + } + // update hash once the hash got changed + oldConfigHash := kt.Cr.Status.ConfigHash + if newConfigMapHash != oldConfigHash { + kt.Cr.Status.LastUpdatedTimestamp = time.Now().Format(time.RFC3339) + kt.Cr.Status.ConfigHash = newConfigMapHash + return kt.UpdateStatus() + } + return nil +} + +func (kt *kubeturbo) labels() map[string]string { + return utils.NewMapBuilder[string, string](). + PutAll(kt.ReleaseLabels()). + Put(constants.NameLabelKey, kt.Name()). + Put(constants.ComponentLabelKey, constants.KubeturboComponentType). 
+ Build() +} + +func (kt *kubeturbo) cleanUpClusterResources() error { + //remove finalizer from service account + sa := kt.serviceAccount() + _, err := kt.CreateOrUpdate(sa, func() error { + return kt.mutateServiceAccount(sa, false) + }) + if err != nil { + return err + } + + // Only delete cluster role if it's turbo-cluster-admin or turbo-cluster-reader + if kt.spec.RoleName == kubeturbosv1.RoleTypeAdmin || kt.spec.RoleName == kubeturbosv1.RoleTypeReadOnly { + return kt.DeleteIfExists(kt.clusterRole(), kt.clusterRoleBinding()) + } else { + return kt.DeleteIfExists(kt.clusterRoleBinding()) + } +} diff --git a/internal/api/kubeturbo/request.go b/internal/api/kubeturbo/request.go new file mode 100644 index 0000000..9179934 --- /dev/null +++ b/internal/api/kubeturbo/request.go @@ -0,0 +1,59 @@ +package kubeturbo + +import ( + "context" + "time" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + kubeturbosv1 "github.ibm.com/turbonomic/kubeturbo-deploy/api/v1" + consts "github.ibm.com/turbonomic/kubeturbo-deploy/internal/constants" + "github.ibm.com/turbonomic/kubeturbo-deploy/internal/request" +) + +type KubeturboRequest struct { + request.BaseRequest[*kubeturbosv1.Kubeturbo] +} + +func NewKubeturboRequest( + client client.Client, + ctx context.Context, + scheme *runtime.Scheme, + kt *kubeturbosv1.Kubeturbo, +) *KubeturboRequest { + + return &KubeturboRequest{ + BaseRequest: request.BaseRequest[*kubeturbosv1.Kubeturbo]{ + Cr: kt, + Client: client, + Context: ctx, + Scheme: scheme, + }, + } +} + +func (kr *KubeturboRequest) ReleaseLabels() map[string]string { + return map[string]string{ + consts.InstanceLabelKey: kr.Instance(), + consts.PartOfLabelKey: kr.Name(), + consts.ManagedByLabelKey: consts.OperatorName, + consts.CreatedByLabelKey: consts.OperatorName, + } +} + +func (kr *KubeturboRequest) RestartDeployment(dep *appsv1.Deployment) (err error) { + deployment := &appsv1.Deployment{} + err = 
kr.Client.Get(kr.Context, client.ObjectKeyFromObject(dep), deployment) + if err != nil { + return + } + if deployment.Spec.Template.ObjectMeta.Annotations == nil { + deployment.Spec.Template.ObjectMeta.Annotations = map[string]string{} + } + podAnnotations := deployment.Spec.Template.ObjectMeta.Annotations + podAnnotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + err = kr.Update(deployment) + return +} diff --git a/internal/constants/constants.go b/internal/constants/constants.go new file mode 100644 index 0000000..fea75a5 --- /dev/null +++ b/internal/constants/constants.go @@ -0,0 +1,28 @@ +package constants + +import "errors" + +const ( + OperatorName = "kubeturbo-operator" + + // recommended Kubernetes labels + NameLabelKey = "app.kubernetes.io/name" + InstanceLabelKey = "app.kubernetes.io/instance" + PartOfLabelKey = "app.kubernetes.io/part-of" + ComponentLabelKey = "app.kubernetes.io/component" + ManagedByLabelKey = "app.kubernetes.io/managed-by" + CreatedByLabelKey = "app.kubernetes.io/created-by" + + KubeturboCRDName = "kubeturbos.charts.helm.k8s.io" + KubeturboContainerName = "kubeturbo" + KubeturboComponentType = "kubeturbo" + KubeturboAnnotation = "charts.helm.k8s.io/kubeturbo" + ControlGenAnnotation = "controller-gen.kubebuilder.io/version" + + KubeturboFinalizer = "helm.k8s.io/finalizer" + + RequeueDelaySeconds = 1 + TimeoutInSeconds = 5 +) + +var ErrRequeueOnDeletion = errors.New("resource deletion detected") diff --git a/internal/controller/kubeturbo_controller.go b/internal/controller/kubeturbo_controller.go new file mode 100644 index 0000000..1a529e9 --- /dev/null +++ b/internal/controller/kubeturbo_controller.go @@ -0,0 +1,207 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "encoding/json" + "fmt" + "time" + + kubeturbosv1 "github.ibm.com/turbonomic/kubeturbo-deploy/api/v1" + "github.ibm.com/turbonomic/kubeturbo-deploy/internal/api/kubeturbo" + "github.ibm.com/turbonomic/kubeturbo-deploy/internal/constants" + "github.ibm.com/turbonomic/kubeturbo-deploy/internal/reconcile" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// KubeturboReconciler reconciles a Kubeturbo object +type KubeturboReconciler struct { + client.Client + Scheme *runtime.Scheme + PostCheckDone *chan interface{} +} + +//+kubebuilder:rbac:groups=charts.helm.k8s.io,resources=kubeturbos,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=charts.helm.k8s.io,resources=kubeturbos/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=charts.helm.k8s.io,resources=kubeturbos/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// TODO(user): Modify the Reconcile function to compare the state specified by +// the Kubeturbo object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.0/pkg/reconcile +func (r *KubeturboReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + // Block processing CR until all pre-checks completed + if r.PostCheckDone != nil { + <-*r.PostCheckDone + } + + logger := log.FromContext(ctx) + + logger.Info("Reconciling...") + defer logger.Info("Reconcile complete") + + var kt kubeturbosv1.Kubeturbo + err := r.Get(ctx, req.NamespacedName, &kt) + + // if failed to load to the Kubeturbo object + if err != nil { + if errors.IsNotFound(err) { + return reconcile.DoNotRequeue().Get() // Custom resource has been deleted + } + return reconcile.RequeueOnError(err).Get() + } + + // When the operator accidentally hit CR that is processed by the old CRD, + // there might be some fields missed default values, so we need to scan and + // patch fields default values that aren't bring up by the CR creation. + err = kt.SetSpecDefault() + if err != nil { + logger.Error(err, "") + return reconcile.DoNotRequeue().Get() + } + + // Ensures only the finalizer added by this operator is used by the CR + needToUpdateCR := false + for _, f := range kt.GetFinalizers() { + if f != constants.KubeturboFinalizer { + controllerutil.RemoveFinalizer(&kt, f) + needToUpdateCR = true + } + } + if !controllerutil.ContainsFinalizer(&kt, constants.KubeturboFinalizer) { + logger.Info("Patching finalizer to CR") + controllerutil.AddFinalizer(&kt, constants.KubeturboFinalizer) + needToUpdateCR = true + } + + // Check if the Kubeturbo instance is marked to be deleted, which is + // indicated by the deletion timestamp being set. 
+ isKubeturboMarkedToBeDeleted := kt.GetDeletionTimestamp() != nil + if isKubeturboMarkedToBeDeleted { + if controllerutil.ContainsFinalizer(&kt, constants.KubeturboFinalizer) { + // Remove kubeturboFinalizer. Once all finalizers have been + // removed, the object will be deleted. + controllerutil.RemoveFinalizer(&kt, constants.KubeturboFinalizer) + err := r.Update(ctx, &kt) + if err != nil { + return reconcile.RequeueOnError(err).Get() + } + + if kt.Spec.Args.CleanupSccImpersonationResources != nil && *kt.Spec.Args.CleanupSccImpersonationResources { + //If cleanup scc flag is true, we need to wait for scc resources, pod to delete before removing + //service account. + r.waitForPodDeletion(ctx, req.NamespacedName, &kt) + } + //clean up ClusterResources, serviceaccount + if err = kubeturbo.Teardown(ctx, r.Client, r.Scheme, &kt); err != nil { + return reconcile.RequeueOnError(err).Get() + } + } + return reconcile.DoNotRequeue().Get() + } + + // Patch the existing CR with a specific finalizer, if needed + if needToUpdateCR { + logger.Info("Updating CR with finalizer") + jsonPatch, err := json.Marshal([]map[string]interface{}{ + { + "op": "replace", + "path": "/metadata/finalizers", + "value": kt.GetFinalizers(), + }, + }) + + // Patch only necessary field to minimize impact to reconcile loop + if err != nil { + return reconcile.RequeueOnError(err).Get() + } else if err := r.Patch(ctx, &kt, client.RawPatch(types.JSONPatchType, jsonPatch)); err != nil { + return reconcile.RequeueOnError(err).Get() + } + + // Do not requeue, since patching CR will trigger another reconcile cycle + return reconcile.DoNotRequeue().Get() + } + + // Only CR that patches with correct finalizer will reach to the reconcile cycle + if err := kubeturbo.Reconcile(ctx, r.Client, r.Scheme, &kt); err != nil { + // if race condition happened or on resource deletion, delay the requeue + if errors.IsConflict(err) || err == constants.ErrRequeueOnDeletion { + logger.Info(fmt.Sprintf("Warning: To avoid 
race condition, retry reconciliation process in %ds", constants.RequeueDelaySeconds)) + return reconcile.RequeueAfter(time.Duration(constants.RequeueDelaySeconds * time.Second)).Get() + } + return reconcile.RequeueOnError(err).Get() + } + + // if not error, terminate the current reconcile cycle + return reconcile.DoNotRequeue().Get() +} + +func (r *KubeturboReconciler) waitForPodDeletion(ctx context.Context, namespace types.NamespacedName, kt client.Object) { + logger := log.FromContext(ctx) + if err := wait.PollUntilContextTimeout(ctx, time.Second, constants.TimeoutInSeconds*time.Second, false, func(ctx context.Context) (bool, error) { + podList := r.GetPodByDeployment(ctx, kt.GetName(), namespace.Namespace) + if len(podList.Items) == 0 { + return true, nil + } + return false, nil + }); err != nil { + logger.Error(err, fmt.Sprintf("pod for cr: %s not deleted within timeout %d seconds", kt.GetName(), constants.TimeoutInSeconds)) + } + +} + +// SetupWithManager sets up the controller with the Manager. +func (r *KubeturboReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&kubeturbosv1.Kubeturbo{}). + Owns(&appsv1.Deployment{}). + Owns(&corev1.ConfigMap{}). + Owns(&corev1.ServiceAccount{}). 
+ // won't work for cluster-level resources + Complete(r) +} + +func (r *KubeturboReconciler) GetPodByDeployment(ctx context.Context, deployName string, namespace string) corev1.PodList { + logger := log.FromContext(ctx) + podList := corev1.PodList{} + err := r.List(ctx, &podList, &client.ListOptions{ + Namespace: namespace, + LabelSelector: labels.SelectorFromSet(map[string]string{ + "app.kubernetes.io/name": deployName, + }), + }) + if err != nil { + logger.Error(err, "") + } + return podList +} diff --git a/internal/controller/kubeturbo_controller_test.go b/internal/controller/kubeturbo_controller_test.go new file mode 100644 index 0000000..dcdbece --- /dev/null +++ b/internal/controller/kubeturbo_controller_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + chartsv1 "github.ibm.com/turbonomic/kubeturbo-deploy/api/v1" +) + +var _ = Describe("Kubeturbo Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + kubeturbo := &chartsv1.Kubeturbo{} + + BeforeEach(func() { + By("creating the custom resource for the Kind Kubeturbo") + err := k8sClient.Get(ctx, typeNamespacedName, kubeturbo) + if err != nil && errors.IsNotFound(err) { + resource := &chartsv1.Kubeturbo{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &chartsv1.Kubeturbo{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance Kubeturbo") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &KubeturboReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. 
+ }) + }) +}) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go new file mode 100644 index 0000000..00c5d4b --- /dev/null +++ b/internal/controller/suite_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "fmt" + "path/filepath" + "runtime" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + chartsv1 "github.ibm.com/turbonomic/kubeturbo-deploy/api/v1" + //+kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestControllers(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + + // The BinaryAssetsDirectory is only required if you want to run the tests directly + // without call the makefile target test. If not informed it will look for the + // default path defined in controller-runtime which is /usr/local/kubebuilder/. + // Note that you must have the required binaries setup under the bin directory to perform + // the tests directly. When we run make test it will be setup and used automatically. + BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", + fmt.Sprintf("1.28.3-%s-%s", runtime.GOOS, runtime.GOARCH)), + } + + var err error + // cfg is defined in this file globally. 
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = chartsv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/internal/kubeturbo/annotator/annotator.go b/internal/kubeturbo/annotator/annotator.go new file mode 100644 index 0000000..fd225df --- /dev/null +++ b/internal/kubeturbo/annotator/annotator.go @@ -0,0 +1,70 @@ +package informer + +import ( + "context" + + "github.ibm.com/turbonomic/kubeturbo-deploy/internal/constants" + "github.ibm.com/turbonomic/kubeturbo-deploy/internal/kubeturbo" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + logger = ctrl.Log.WithName("kubeturbo-annotator") +) + +type kubeturboAnnotator struct { + k8sclient client.Client + events chan interface{} +} + +func NewKubeturboAnnotator(k8sclient client.Client) *kubeturboAnnotator { + return &kubeturboAnnotator{ + k8sclient: k8sclient, + events: make(chan interface{}), + } +} + +func (k *kubeturboAnnotator) Submit(obj interface{}) { + k.events <- obj +} + +func (k *kubeturboAnnotator) Run(ctx context.Context) { + logger.Info("Kubeturbo annotator is running") + defer logger.Info("Kubeturbo annotator stopped") + for { + select { + case <-ctx.Done(): + close(k.events) + return + case event := <-k.events: + k.processEvent(ctx, event) + } + } +} + +func (k *kubeturboAnnotator) processEvent(ctx context.Context, event interface{}) { + logger.Info("Processing event") + defer logger.Info("Event processing complete") + + kt, ok := event.(*kubeturbo.Kubeturbo) + if !ok { + logger.Info("Event is not a Kubeturbo instance") + return + } + + logger := 
logger.WithValues("name", kt.Name, "namespace", kt.Namespace) + + if kt.Annotations == nil { + kt.Annotations = map[string]string{} + } + + kt.Annotations[constants.KubeturboAnnotation] = kt.GetName() + + if err := k.k8sclient.Update(ctx, kt); err != nil { + logger.Error(err, "an error occurred when updating the Kubeturbo resource") + return + } + + logger.Info("Kubeturbo resource successfully annotated") +} diff --git a/internal/kubeturbo/informer/informer.go b/internal/kubeturbo/informer/informer.go new file mode 100644 index 0000000..9a29a1c --- /dev/null +++ b/internal/kubeturbo/informer/informer.go @@ -0,0 +1,62 @@ +package informer + +import ( + "context" + "time" + + "github.ibm.com/turbonomic/kubeturbo-deploy/internal/kubeturbo" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + ctrl "sigs.k8s.io/controller-runtime" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + logger = ctrl.Log.WithName("kubeturbo-informer") +) + +func NewKubeturboInformer( + client k8sclient.WithWatch, + namespace string, + ctx context.Context, + resyncPeriod time.Duration, + indexers cache.Indexers, +) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + logger := logger.WithValues("function", "ListFunc") + kts := &kubeturbo.KubeturboList{} + err := client.List(ctx, kts, k8sclient.InNamespace(namespace)) + + if meta.IsNoMatchError(err) { + logger.Info("Could not find Kubeturbo CRD. 
Will not watch Kubeturbo resources") + return kts, nil + } + + return kts, err + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + logger := logger.WithValues("function", "WatchFunc") + w, err := client.Watch(ctx, &kubeturbo.KubeturboList{}, k8sclient.InNamespace(namespace)) + + if meta.IsNoMatchError(err) { + logger.Info("Could not find Kubeturbo CRD. Will not watch Kubeturbo resources") + return noopWatcher(), nil + } + + return w, err + }, + }, + &kubeturbo.Kubeturbo{}, + resyncPeriod, + indexers, + ) +} + +func noopWatcher() watch.Interface { + return watch.NewProxyWatcher(make(chan watch.Event)) +} diff --git a/internal/kubeturbo/types.go b/internal/kubeturbo/types.go new file mode 100644 index 0000000..f8e67a4 --- /dev/null +++ b/internal/kubeturbo/types.go @@ -0,0 +1,72 @@ +// +kubebuilder:skip +package kubeturbo + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + Kind = "Kubeturbo" + GroupVersion = schema.GroupVersion{Group: "charts.helm.k8s.io", Version: "v1"} + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + AddToScheme = SchemeBuilder.AddToScheme +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true +type Kubeturbo struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec `json:",omitempty"` +} + +type Spec struct { + Object map[string]any `json:"spec,inline"` +} + +func (in *Spec) DeepCopy() *Spec { + if in == nil { + return nil + } + out := new(Spec) + in.DeepCopyInto(out) + return out +} + +func (in *Spec) DeepCopyInto(out *Spec) { + *out = *in + out.Object = runtime.DeepCopyJSON(in.Object) +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true +type KubeturboList struct { + 
metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Kubeturbo `json:"items"` +} + +func (k *Spec) SetNestedField(value any, fields ...string) error { + k.initMapIfNil() + return unstructured.SetNestedField(k.Object, value, fields...) +} + +func (k *Spec) GetNestedString(fields ...string) (value string, found bool, err error) { + k.initMapIfNil() + value, found, err = unstructured.NestedString(k.Object, fields...) + return +} + +func (k *Spec) initMapIfNil() { + if k.Object == nil { + k.Object = make(map[string]any) + } +} + +func init() { + SchemeBuilder.Register(&Kubeturbo{}, &KubeturboList{}) +} diff --git a/internal/kubeturbo/zz_generated.deepcopy.go b/internal/kubeturbo/zz_generated.deepcopy.go new file mode 100644 index 0000000..fdc475a --- /dev/null +++ b/internal/kubeturbo/zz_generated.deepcopy.go @@ -0,0 +1,83 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package kubeturbo + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Kubeturbo) DeepCopyInto(out *Kubeturbo) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kubeturbo. 
+func (in *Kubeturbo) DeepCopy() *Kubeturbo { + if in == nil { + return nil + } + out := new(Kubeturbo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Kubeturbo) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeturboList) DeepCopyInto(out *KubeturboList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Kubeturbo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeturboList. +func (in *KubeturboList) DeepCopy() *KubeturboList { + if in == nil { + return nil + } + out := new(KubeturboList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *KubeturboList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/internal/reconcile/reconcile.go b/internal/reconcile/reconcile.go new file mode 100644 index 0000000..fcc9309 --- /dev/null +++ b/internal/reconcile/reconcile.go @@ -0,0 +1,45 @@ +package reconcile + +import ( + "time" + + ctrl "sigs.k8s.io/controller-runtime" +) + +var ( + emptyResult = ctrl.Result{} +) + +type ReconcileResult struct { + result ctrl.Result + err error + done bool +} + +func reconcileResult(result ctrl.Result, err error) ReconcileResult { + return ReconcileResult{ + result: result, + err: err, + done: true, + } +} + +func DoNotRequeue() ReconcileResult { + return reconcileResult(emptyResult, nil) +} + +func RequeueOnError(err error) ReconcileResult { + return reconcileResult(emptyResult, err) +} + +func RequeueAfter(duration time.Duration) ReconcileResult { + return reconcileResult(ctrl.Result{RequeueAfter: duration}, nil) +} + +func (r ReconcileResult) IsDone() bool { + return r.done +} + +func (r ReconcileResult) Get() (ctrl.Result, error) { + return r.result, r.err +} diff --git a/internal/request/request.go b/internal/request/request.go new file mode 100644 index 0000000..33d5465 --- /dev/null +++ b/internal/request/request.go @@ -0,0 +1,84 @@ +package request + +import ( + "context" + "fmt" + "strings" + + "github.ibm.com/turbonomic/kubeturbo-deploy/internal/kubeturbo" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +type BaseRequest[T client.Object] struct { + Cr T + Client client.Client + Context context.Context + Scheme *runtime.Scheme +} + +func (r *BaseRequest[T]) Namespace() string { + return r.Cr.GetNamespace() +} + +func (r *BaseRequest[T]) Name() string { + return 
r.Cr.GetName() +} + +func (r *BaseRequest[T]) Instance() string { + maxNameLength := validation.LabelValueMaxLength - len(r.Cr.GetUID()) - len("-") + crName := strings.TrimRight(r.Name()[:min(len(r.Name()), maxNameLength)], "-") + return fmt.Sprintf("%s-%s", crName, r.Cr.GetUID()) +} + +// sets the CR as the owner of the object +// object will be garbage collected when CR is deleted +// also, if the object type is being watched by the CR controller (see SetupWithManager function), +// the reconciliation loop will be triggered if the object is updated or deleted +func (r *BaseRequest[T]) SetControllerReference(obj metav1.Object) { + ctrl.SetControllerReference(r.Cr, obj, r.Scheme) +} + +func (r *BaseRequest[T]) CreateOrUpdate(obj client.Object, fn controllerutil.MutateFn) (controllerutil.OperationResult, error) { + return controllerutil.CreateOrUpdate(r.Context, r.Client, obj, fn) +} + +func (r *BaseRequest[T]) UpdateStatus() error { + return r.Client.Status().Update(r.Context, r.Cr) +} + +func (r *BaseRequest[T]) Update(obj client.Object) error { + return r.Client.Update(r.Context, obj) +} + +func (r *BaseRequest[T]) Patch(obj client.Object, fn controllerutil.MutateFn) error { + before := obj.DeepCopyObject() + patch := client.MergeFrom(before.(client.Object)) + if err := fn(); err != nil { + return err + } + + return r.Client.Patch(r.Context, obj, patch) +} + +func (r *BaseRequest[T]) GetKubeturbos() (kts kubeturbo.KubeturboList, err error) { + err = r.list(&kts) + return +} + +func (r *BaseRequest[T]) list(list client.ObjectList) error { + return r.Client.List(r.Context, list, client.InNamespace(r.Namespace())) +} + +func (r *BaseRequest[T]) DeleteIfExists(objs ...client.Object) error { + for _, obj := range objs { + if err := client.IgnoreNotFound(r.Client.Delete(r.Context, obj)); err != nil { + return err + } + } + return nil +} diff --git a/internal/runnable/CRDCheck.go b/internal/runnable/CRDCheck.go new file mode 100644 index 0000000..08a5ff5 --- /dev/null 
+++ b/internal/runnable/CRDCheck.go @@ -0,0 +1,205 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runnable + +import ( + "context" + "fmt" + "os" + + "github.ibm.com/turbonomic/kubeturbo-deploy/internal/constants" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + ctrl "sigs.k8s.io/controller-runtime" +) + +// CRDCheck is a custom Runnable for post-start checks +type CRDCheck struct { + client.Client + CRDName string + Recorder record.EventRecorder + CRDCheckDone *chan interface{} +} + +var ( + logger = ctrl.Log.WithName("Post-start-check") +) + +// Start is the method called after mgr.Start +func (r *CRDCheck) Start(ctx context.Context) error { + logger.Info(fmt.Sprintf("Validation CRD %s...", r.CRDName)) + + deployment := GetOperatorDeployment(ctx, r) + if deployment == nil { + logger.Info("The operator is not running in the pod mode") + } + + // Check if the crd exists in the target cluster + // Populate the error to cause the operator exit with an expectation + if err := r.IsCrdExists(ctx); err != nil { + if deployment != nil { + r.Recorder.Event(deployment, "Warning", "CRDIssue", err.Error()) + } + return err + } + + // Check the existed crd satisfy the minimum requirements or not + 
// Exit the operator with a message to ask the client to update their existing CRD + if err := r.IsCrdUpToDate(ctx); err != nil { + if deployment != nil { + r.Recorder.Event(deployment, "Warning", "CRDIssue", err.Error()) + } + return err + } + + logger.Info(fmt.Sprintf("Validation CRD %s passed", r.CRDName)) + + if r.CRDCheckDone != nil { + close(*r.CRDCheckDone) + } + + return nil +} + +// Get the deployment object for the current pod container +func GetOperatorDeployment(ctx context.Context, c client.Client) *appsv1.Deployment { + operator_pod_name := getOsEnv("POD_NAME") + operator_pod_namespace := getOsEnv("WATCH_NAMESPACE") + + // if the pod name and the namespace is not set, then no need to find the deployment parent + if operator_pod_name == nil || operator_pod_namespace == nil { + return nil + } + + // Get the running pod instance of the current program + pod := &corev1.Pod{} + if err := c.Get(ctx, client.ObjectKey{ + Name: *operator_pod_name, + Namespace: *operator_pod_namespace, + }, pod); err != nil { + // If the pod of the running program is not found, + // it means the program is running as a local program. 
+ // There's no need to get the parent deployment in this case + return nil + } + + // Get the owner reference + deployment_name := "" + for _, owner := range pod.OwnerReferences { + if owner.Kind == "ReplicaSet" { + // Fetch the ReplicaSet to get the Deployment name + rsName := owner.Name + rs := &appsv1.ReplicaSet{} + if err := c.Get(ctx, client.ObjectKey{ + Name: rsName, + Namespace: *operator_pod_namespace, + }, rs); err != nil { + logger.Error(err, "") + return nil + } + for _, owner := range rs.OwnerReferences { + if owner.Kind == "Deployment" { + fmt.Printf("Running in deployment: %s\n", owner.Name) + deployment_name = owner.Name + break + } + } + } + if deployment_name != "" { + break + } + } + + if deployment_name == "" { + return nil + } + + // Get the parent deployment object + deployment := &appsv1.Deployment{} + if err := c.Get(ctx, client.ObjectKey{ + Name: deployment_name, + Namespace: *operator_pod_namespace, + }, deployment); err != nil { + return nil + } + + return deployment +} + +// Using the unconstructed object to detect if the CRD exists or not +func (r *CRDCheck) IsCrdExists(ctx context.Context) error { + unstructuredObj := &unstructured.Unstructured{} + unstructuredObj.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apiextensions.k8s.io", + Version: "v1", + Kind: "CustomResourceDefinition", + }) + + // Fetch the target CRD as an unstructured object + if err := r.Get(ctx, client.ObjectKey{Name: r.CRDName}, unstructuredObj); err != nil { + return err + } + + logger.Info(fmt.Sprintf("CRD %s exists", r.CRDName)) + return nil +} + +// Check if the crd is not in the old version +func (r *CRDCheck) IsCrdUpToDate(ctx context.Context) error { + // Fetch the target as an CRD object + crd := &apiextensionsv1.CustomResourceDefinition{} + if err := r.Get(ctx, client.ObjectKey{Name: r.CRDName}, crd); err != nil { + return err + } + + // Check if the fetched CRD contains necessary fields to proceed + if r.CRDName == constants.KubeturboCRDName { + 
// Implementation of the Go-based operator using Operator SDK. + // Enhanced CRD definitions to improve overall readability and usability. + // 'controller-gen.kubebuilder.io/version' annotation in the CRD is the key + // factor to distinguish the older CRD verses the new one + // The following section is to verify if the existing Kubeturbo CRD satisfy the requirements. + foundAnnotation := false + for k := range crd.Annotations { + if k == constants.ControlGenAnnotation { + foundAnnotation = true + break + } + } + if !foundAnnotation { + return fmt.Errorf("since 8.14.3, kubeturbo operator has moved from helm operator to go based operator. Please refer to https://ibm.biz/KubeturboCRD to install and upgrade to the latest CRD") + } + } + + logger.Info(fmt.Sprintf("CRD %s meets the minimum version requirement", r.CRDName)) + return nil +} + +// Get global variable defined in OS +func getOsEnv(field string) *string { + val, found := os.LookupEnv(field) + if !found { + return nil + } + return &val +} diff --git a/internal/utils/errors.go b/internal/utils/errors.go new file mode 100644 index 0000000..5b8c8f1 --- /dev/null +++ b/internal/utils/errors.go @@ -0,0 +1,16 @@ +package utils + +// a no-arg function which returns an error +type ErrorFn func() error + +// This is a convenience function to facilitate error handling +// Use this when you need to execute a series of failable functions +// and return as soon as the first error occurs +func ReturnOnError(fns ...ErrorFn) error { + for _, f := range fns { + if err := f(); err != nil { + return err + } + } + return nil +} diff --git a/internal/utils/errors_test.go b/internal/utils/errors_test.go new file mode 100644 index 0000000..30f8d2e --- /dev/null +++ b/internal/utils/errors_test.go @@ -0,0 +1,68 @@ +package utils_test + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.ibm.com/turbonomic/kubeturbo-deploy/internal/utils" +) + +var _ = Describe("Errors", func() { + Describe("ReturnOnError", func() { + When("All functions return nil", func() { + It("Returns nil", func() { + + f1_invoked := false + f2_invoked := false + + f1 := func() error { + f1_invoked = true + return nil + } + + f2 := func() error { + f2_invoked = true + return nil + } + + result := utils.ReturnOnError(f1, f2) + + Expect(result).To(BeNil()) + Expect(f1_invoked).To(BeTrue()) + Expect(f2_invoked).To(BeTrue()) + }) + }) + + When("A function returns an error", func() { + It("Returns the error immediately", func() { + + f2_invoked := false + err := fmt.Errorf("An error occurred") + + f1 := func() error { + return err + } + + f2 := func() error { + f2_invoked = true + return nil + } + + result := utils.ReturnOnError(f1, f2) + + Expect(result).To(Equal(err)) + Expect(f2_invoked).To(BeFalse()) + }) + }) + + When("No functions are passed in", func() { + It("Returns nil", func() { + result := utils.ReturnOnError() + + Expect(result).To(BeNil()) + }) + }) + }) +}) diff --git a/internal/utils/json.go b/internal/utils/json.go new file mode 100644 index 0000000..b253ad0 --- /dev/null +++ b/internal/utils/json.go @@ -0,0 +1,3 @@ +package utils + +type Block = map[string]any diff --git a/internal/utils/mapbuilder.go b/internal/utils/mapbuilder.go new file mode 100644 index 0000000..b7e8d22 --- /dev/null +++ b/internal/utils/mapbuilder.go @@ -0,0 +1,33 @@ +package utils + +type MapBuilder[K comparable, V any] interface { + Build() map[K]V + PutAll(map[K]V) MapBuilder[K, V] + Put(K, V) MapBuilder[K, V] +} + +type mapBuilder[K comparable, V any] struct { + m map[K]V +} + +func NewMapBuilder[K comparable, V any]() MapBuilder[K, V] { + return &mapBuilder[K, V]{ + m: make(map[K]V), + } +} + +func (mb *mapBuilder[K, V]) Put(key K, value V) MapBuilder[K, V] { + mb.m[key] = value + return mb +} + +func (mb *mapBuilder[K, V]) PutAll(m map[K]V) 
// MapBuilder assembles a map[K]V through a fluent interface: values are
// accumulated with Put/PutAll and the finished map is obtained with Build.
type MapBuilder[K comparable, V any] interface {
	// Build returns the accumulated map.
	Build() map[K]V
	// PutAll copies every entry of the argument into the builder.
	PutAll(map[K]V) MapBuilder[K, V]
	// Put records a single key/value pair, overwriting any prior value.
	Put(K, V) MapBuilder[K, V]
}

// mapBuilder is the default MapBuilder implementation, backed by a plain map.
type mapBuilder[K comparable, V any] struct {
	entries map[K]V
}

// NewMapBuilder returns an empty builder for a map[K]V.
func NewMapBuilder[K comparable, V any]() MapBuilder[K, V] {
	return &mapBuilder[K, V]{entries: map[K]V{}}
}

// Put stores value under key and returns the builder for chaining. A repeated
// key keeps only the most recent value.
func (b *mapBuilder[K, V]) Put(key K, value V) MapBuilder[K, V] {
	b.entries[key] = value
	return b
}

// PutAll stores every entry of src in the builder and returns it for chaining.
func (b *mapBuilder[K, V]) PutAll(src map[K]V) MapBuilder[K, V] {
	for k, v := range src {
		b.Put(k, v)
	}
	return b
}

// Build returns the map accumulated so far.
func (b *mapBuilder[K, V]) Build() map[K]V {
	return b.entries
}
// AsPtr returns a pointer to a copy of its argument. It is useful for
// creating in-line pointers to primitives, e.g. AsPtr(123), AsPtr(true).
func AsPtr[T any](t T) *T {
	// t is already a by-value copy of the caller's argument; taking its
	// address yields a fresh pointer that does not alias caller state.
	v := t
	return &v
}
+package kubernetesclientfakes + +import ( + "context" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type FakeClient struct { + CreateStub func(context.Context, client.Object, ...client.CreateOption) error + createMutex sync.RWMutex + createArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.CreateOption + } + createReturns struct { + result1 error + } + createReturnsOnCall map[int]struct { + result1 error + } + DeleteStub func(context.Context, client.Object, ...client.DeleteOption) error + deleteMutex sync.RWMutex + deleteArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + } + deleteReturns struct { + result1 error + } + deleteReturnsOnCall map[int]struct { + result1 error + } + DeleteAllOfStub func(context.Context, client.Object, ...client.DeleteAllOfOption) error + deleteAllOfMutex sync.RWMutex + deleteAllOfArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteAllOfOption + } + deleteAllOfReturns struct { + result1 error + } + deleteAllOfReturnsOnCall map[int]struct { + result1 error + } + GetStub func(context.Context, types.NamespacedName, client.Object, ...client.GetOption) error + getMutex sync.RWMutex + getArgsForCall []struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + arg4 []client.GetOption + } + getReturns struct { + result1 error + } + getReturnsOnCall map[int]struct { + result1 error + } + GroupVersionKindForStub func(runtime.Object) (schema.GroupVersionKind, error) + groupVersionKindForMutex sync.RWMutex + groupVersionKindForArgsForCall []struct { + arg1 runtime.Object + } + groupVersionKindForReturns struct { + result1 schema.GroupVersionKind + result2 error + } + groupVersionKindForReturnsOnCall map[int]struct { + result1 
schema.GroupVersionKind + result2 error + } + IsObjectNamespacedStub func(runtime.Object) (bool, error) + isObjectNamespacedMutex sync.RWMutex + isObjectNamespacedArgsForCall []struct { + arg1 runtime.Object + } + isObjectNamespacedReturns struct { + result1 bool + result2 error + } + isObjectNamespacedReturnsOnCall map[int]struct { + result1 bool + result2 error + } + ListStub func(context.Context, client.ObjectList, ...client.ListOption) error + listMutex sync.RWMutex + listArgsForCall []struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + } + listReturns struct { + result1 error + } + listReturnsOnCall map[int]struct { + result1 error + } + PatchStub func(context.Context, client.Object, client.Patch, ...client.PatchOption) error + patchMutex sync.RWMutex + patchArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + arg4 []client.PatchOption + } + patchReturns struct { + result1 error + } + patchReturnsOnCall map[int]struct { + result1 error + } + RESTMapperStub func() meta.RESTMapper + rESTMapperMutex sync.RWMutex + rESTMapperArgsForCall []struct { + } + rESTMapperReturns struct { + result1 meta.RESTMapper + } + rESTMapperReturnsOnCall map[int]struct { + result1 meta.RESTMapper + } + SchemeStub func() *runtime.Scheme + schemeMutex sync.RWMutex + schemeArgsForCall []struct { + } + schemeReturns struct { + result1 *runtime.Scheme + } + schemeReturnsOnCall map[int]struct { + result1 *runtime.Scheme + } + StatusStub func() client.SubResourceWriter + statusMutex sync.RWMutex + statusArgsForCall []struct { + } + statusReturns struct { + result1 client.SubResourceWriter + } + statusReturnsOnCall map[int]struct { + result1 client.SubResourceWriter + } + SubResourceStub func(string) client.SubResourceClient + subResourceMutex sync.RWMutex + subResourceArgsForCall []struct { + arg1 string + } + subResourceReturns struct { + result1 client.SubResourceClient + } + subResourceReturnsOnCall map[int]struct { + 
result1 client.SubResourceClient + } + UpdateStub func(context.Context, client.Object, ...client.UpdateOption) error + updateMutex sync.RWMutex + updateArgsForCall []struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + } + updateReturns struct { + result1 error + } + updateReturnsOnCall map[int]struct { + result1 error + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeClient) Create(arg1 context.Context, arg2 client.Object, arg3 ...client.CreateOption) error { + fake.createMutex.Lock() + ret, specificReturn := fake.createReturnsOnCall[len(fake.createArgsForCall)] + fake.createArgsForCall = append(fake.createArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.CreateOption + }{arg1, arg2, arg3}) + stub := fake.CreateStub + fakeReturns := fake.createReturns + fake.recordInvocation("Create", []interface{}{arg1, arg2, arg3}) + fake.createMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) CreateCallCount() int { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + return len(fake.createArgsForCall) +} + +func (fake *FakeClient) CreateCalls(stub func(context.Context, client.Object, ...client.CreateOption) error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = stub +} + +func (fake *FakeClient) CreateArgsForCall(i int) (context.Context, client.Object, []client.CreateOption) { + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + argsForCall := fake.createArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeClient) CreateReturns(result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + fake.createReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) CreateReturnsOnCall(i int, result1 error) { + fake.createMutex.Lock() + defer fake.createMutex.Unlock() + fake.CreateStub = nil + if fake.createReturnsOnCall == nil { + fake.createReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.createReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) Delete(arg1 context.Context, arg2 client.Object, arg3 ...client.DeleteOption) error { + fake.deleteMutex.Lock() + ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] + fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteOption + }{arg1, arg2, arg3}) + stub := fake.DeleteStub + fakeReturns := fake.deleteReturns + fake.recordInvocation("Delete", []interface{}{arg1, arg2, arg3}) + fake.deleteMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) DeleteCallCount() int { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + return len(fake.deleteArgsForCall) +} + +func (fake *FakeClient) DeleteCalls(stub func(context.Context, client.Object, ...client.DeleteOption) error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = stub +} + +func (fake *FakeClient) DeleteArgsForCall(i int) (context.Context, client.Object, []client.DeleteOption) { + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + argsForCall := fake.deleteArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeClient) DeleteReturns(result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + fake.deleteReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) DeleteReturnsOnCall(i int, result1 error) { + fake.deleteMutex.Lock() + defer fake.deleteMutex.Unlock() + fake.DeleteStub = nil + if fake.deleteReturnsOnCall == nil { + fake.deleteReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) DeleteAllOf(arg1 context.Context, arg2 client.Object, arg3 ...client.DeleteAllOfOption) error { + fake.deleteAllOfMutex.Lock() + ret, specificReturn := fake.deleteAllOfReturnsOnCall[len(fake.deleteAllOfArgsForCall)] + fake.deleteAllOfArgsForCall = append(fake.deleteAllOfArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.DeleteAllOfOption + }{arg1, arg2, arg3}) + stub := fake.DeleteAllOfStub + fakeReturns := fake.deleteAllOfReturns + fake.recordInvocation("DeleteAllOf", []interface{}{arg1, arg2, arg3}) + fake.deleteAllOfMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) 
+ } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) DeleteAllOfCallCount() int { + fake.deleteAllOfMutex.RLock() + defer fake.deleteAllOfMutex.RUnlock() + return len(fake.deleteAllOfArgsForCall) +} + +func (fake *FakeClient) DeleteAllOfCalls(stub func(context.Context, client.Object, ...client.DeleteAllOfOption) error) { + fake.deleteAllOfMutex.Lock() + defer fake.deleteAllOfMutex.Unlock() + fake.DeleteAllOfStub = stub +} + +func (fake *FakeClient) DeleteAllOfArgsForCall(i int) (context.Context, client.Object, []client.DeleteAllOfOption) { + fake.deleteAllOfMutex.RLock() + defer fake.deleteAllOfMutex.RUnlock() + argsForCall := fake.deleteAllOfArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeClient) DeleteAllOfReturns(result1 error) { + fake.deleteAllOfMutex.Lock() + defer fake.deleteAllOfMutex.Unlock() + fake.DeleteAllOfStub = nil + fake.deleteAllOfReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) DeleteAllOfReturnsOnCall(i int, result1 error) { + fake.deleteAllOfMutex.Lock() + defer fake.deleteAllOfMutex.Unlock() + fake.DeleteAllOfStub = nil + if fake.deleteAllOfReturnsOnCall == nil { + fake.deleteAllOfReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.deleteAllOfReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) Get(arg1 context.Context, arg2 types.NamespacedName, arg3 client.Object, arg4 ...client.GetOption) error { + fake.getMutex.Lock() + ret, specificReturn := fake.getReturnsOnCall[len(fake.getArgsForCall)] + fake.getArgsForCall = append(fake.getArgsForCall, struct { + arg1 context.Context + arg2 types.NamespacedName + arg3 client.Object + arg4 []client.GetOption + }{arg1, arg2, arg3, arg4}) + stub := fake.GetStub + fakeReturns := fake.getReturns + fake.recordInvocation("Get", []interface{}{arg1, arg2, arg3, arg4}) + fake.getMutex.Unlock() + if stub != nil { + return 
stub(arg1, arg2, arg3, arg4...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) GetCallCount() int { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + return len(fake.getArgsForCall) +} + +func (fake *FakeClient) GetCalls(stub func(context.Context, types.NamespacedName, client.Object, ...client.GetOption) error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = stub +} + +func (fake *FakeClient) GetArgsForCall(i int) (context.Context, types.NamespacedName, client.Object, []client.GetOption) { + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + argsForCall := fake.getArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *FakeClient) GetReturns(result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + fake.getReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) GetReturnsOnCall(i int, result1 error) { + fake.getMutex.Lock() + defer fake.getMutex.Unlock() + fake.GetStub = nil + if fake.getReturnsOnCall == nil { + fake.getReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.getReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) GroupVersionKindFor(arg1 runtime.Object) (schema.GroupVersionKind, error) { + fake.groupVersionKindForMutex.Lock() + ret, specificReturn := fake.groupVersionKindForReturnsOnCall[len(fake.groupVersionKindForArgsForCall)] + fake.groupVersionKindForArgsForCall = append(fake.groupVersionKindForArgsForCall, struct { + arg1 runtime.Object + }{arg1}) + stub := fake.GroupVersionKindForStub + fakeReturns := fake.groupVersionKindForReturns + fake.recordInvocation("GroupVersionKindFor", []interface{}{arg1}) + fake.groupVersionKindForMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, 
fakeReturns.result2 +} + +func (fake *FakeClient) GroupVersionKindForCallCount() int { + fake.groupVersionKindForMutex.RLock() + defer fake.groupVersionKindForMutex.RUnlock() + return len(fake.groupVersionKindForArgsForCall) +} + +func (fake *FakeClient) GroupVersionKindForCalls(stub func(runtime.Object) (schema.GroupVersionKind, error)) { + fake.groupVersionKindForMutex.Lock() + defer fake.groupVersionKindForMutex.Unlock() + fake.GroupVersionKindForStub = stub +} + +func (fake *FakeClient) GroupVersionKindForArgsForCall(i int) runtime.Object { + fake.groupVersionKindForMutex.RLock() + defer fake.groupVersionKindForMutex.RUnlock() + argsForCall := fake.groupVersionKindForArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeClient) GroupVersionKindForReturns(result1 schema.GroupVersionKind, result2 error) { + fake.groupVersionKindForMutex.Lock() + defer fake.groupVersionKindForMutex.Unlock() + fake.GroupVersionKindForStub = nil + fake.groupVersionKindForReturns = struct { + result1 schema.GroupVersionKind + result2 error + }{result1, result2} +} + +func (fake *FakeClient) GroupVersionKindForReturnsOnCall(i int, result1 schema.GroupVersionKind, result2 error) { + fake.groupVersionKindForMutex.Lock() + defer fake.groupVersionKindForMutex.Unlock() + fake.GroupVersionKindForStub = nil + if fake.groupVersionKindForReturnsOnCall == nil { + fake.groupVersionKindForReturnsOnCall = make(map[int]struct { + result1 schema.GroupVersionKind + result2 error + }) + } + fake.groupVersionKindForReturnsOnCall[i] = struct { + result1 schema.GroupVersionKind + result2 error + }{result1, result2} +} + +func (fake *FakeClient) IsObjectNamespaced(arg1 runtime.Object) (bool, error) { + fake.isObjectNamespacedMutex.Lock() + ret, specificReturn := fake.isObjectNamespacedReturnsOnCall[len(fake.isObjectNamespacedArgsForCall)] + fake.isObjectNamespacedArgsForCall = append(fake.isObjectNamespacedArgsForCall, struct { + arg1 runtime.Object + }{arg1}) + stub := 
fake.IsObjectNamespacedStub + fakeReturns := fake.isObjectNamespacedReturns + fake.recordInvocation("IsObjectNamespaced", []interface{}{arg1}) + fake.isObjectNamespacedMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeClient) IsObjectNamespacedCallCount() int { + fake.isObjectNamespacedMutex.RLock() + defer fake.isObjectNamespacedMutex.RUnlock() + return len(fake.isObjectNamespacedArgsForCall) +} + +func (fake *FakeClient) IsObjectNamespacedCalls(stub func(runtime.Object) (bool, error)) { + fake.isObjectNamespacedMutex.Lock() + defer fake.isObjectNamespacedMutex.Unlock() + fake.IsObjectNamespacedStub = stub +} + +func (fake *FakeClient) IsObjectNamespacedArgsForCall(i int) runtime.Object { + fake.isObjectNamespacedMutex.RLock() + defer fake.isObjectNamespacedMutex.RUnlock() + argsForCall := fake.isObjectNamespacedArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeClient) IsObjectNamespacedReturns(result1 bool, result2 error) { + fake.isObjectNamespacedMutex.Lock() + defer fake.isObjectNamespacedMutex.Unlock() + fake.IsObjectNamespacedStub = nil + fake.isObjectNamespacedReturns = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *FakeClient) IsObjectNamespacedReturnsOnCall(i int, result1 bool, result2 error) { + fake.isObjectNamespacedMutex.Lock() + defer fake.isObjectNamespacedMutex.Unlock() + fake.IsObjectNamespacedStub = nil + if fake.isObjectNamespacedReturnsOnCall == nil { + fake.isObjectNamespacedReturnsOnCall = make(map[int]struct { + result1 bool + result2 error + }) + } + fake.isObjectNamespacedReturnsOnCall[i] = struct { + result1 bool + result2 error + }{result1, result2} +} + +func (fake *FakeClient) List(arg1 context.Context, arg2 client.ObjectList, arg3 ...client.ListOption) error { + fake.listMutex.Lock() + ret, specificReturn := 
fake.listReturnsOnCall[len(fake.listArgsForCall)] + fake.listArgsForCall = append(fake.listArgsForCall, struct { + arg1 context.Context + arg2 client.ObjectList + arg3 []client.ListOption + }{arg1, arg2, arg3}) + stub := fake.ListStub + fakeReturns := fake.listReturns + fake.recordInvocation("List", []interface{}{arg1, arg2, arg3}) + fake.listMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) ListCallCount() int { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + return len(fake.listArgsForCall) +} + +func (fake *FakeClient) ListCalls(stub func(context.Context, client.ObjectList, ...client.ListOption) error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = stub +} + +func (fake *FakeClient) ListArgsForCall(i int) (context.Context, client.ObjectList, []client.ListOption) { + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + argsForCall := fake.listArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeClient) ListReturns(result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + fake.listReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) ListReturnsOnCall(i int, result1 error) { + fake.listMutex.Lock() + defer fake.listMutex.Unlock() + fake.ListStub = nil + if fake.listReturnsOnCall == nil { + fake.listReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.listReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) Patch(arg1 context.Context, arg2 client.Object, arg3 client.Patch, arg4 ...client.PatchOption) error { + fake.patchMutex.Lock() + ret, specificReturn := fake.patchReturnsOnCall[len(fake.patchArgsForCall)] + fake.patchArgsForCall = append(fake.patchArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 client.Patch + 
arg4 []client.PatchOption + }{arg1, arg2, arg3, arg4}) + stub := fake.PatchStub + fakeReturns := fake.patchReturns + fake.recordInvocation("Patch", []interface{}{arg1, arg2, arg3, arg4}) + fake.patchMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3, arg4...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) PatchCallCount() int { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + return len(fake.patchArgsForCall) +} + +func (fake *FakeClient) PatchCalls(stub func(context.Context, client.Object, client.Patch, ...client.PatchOption) error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = stub +} + +func (fake *FakeClient) PatchArgsForCall(i int) (context.Context, client.Object, client.Patch, []client.PatchOption) { + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + argsForCall := fake.patchArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 +} + +func (fake *FakeClient) PatchReturns(result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + fake.patchReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) PatchReturnsOnCall(i int, result1 error) { + fake.patchMutex.Lock() + defer fake.patchMutex.Unlock() + fake.PatchStub = nil + if fake.patchReturnsOnCall == nil { + fake.patchReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.patchReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) RESTMapper() meta.RESTMapper { + fake.rESTMapperMutex.Lock() + ret, specificReturn := fake.rESTMapperReturnsOnCall[len(fake.rESTMapperArgsForCall)] + fake.rESTMapperArgsForCall = append(fake.rESTMapperArgsForCall, struct { + }{}) + stub := fake.RESTMapperStub + fakeReturns := fake.rESTMapperReturns + fake.recordInvocation("RESTMapper", []interface{}{}) + fake.rESTMapperMutex.Unlock() + if stub != nil { + 
return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) RESTMapperCallCount() int { + fake.rESTMapperMutex.RLock() + defer fake.rESTMapperMutex.RUnlock() + return len(fake.rESTMapperArgsForCall) +} + +func (fake *FakeClient) RESTMapperCalls(stub func() meta.RESTMapper) { + fake.rESTMapperMutex.Lock() + defer fake.rESTMapperMutex.Unlock() + fake.RESTMapperStub = stub +} + +func (fake *FakeClient) RESTMapperReturns(result1 meta.RESTMapper) { + fake.rESTMapperMutex.Lock() + defer fake.rESTMapperMutex.Unlock() + fake.RESTMapperStub = nil + fake.rESTMapperReturns = struct { + result1 meta.RESTMapper + }{result1} +} + +func (fake *FakeClient) RESTMapperReturnsOnCall(i int, result1 meta.RESTMapper) { + fake.rESTMapperMutex.Lock() + defer fake.rESTMapperMutex.Unlock() + fake.RESTMapperStub = nil + if fake.rESTMapperReturnsOnCall == nil { + fake.rESTMapperReturnsOnCall = make(map[int]struct { + result1 meta.RESTMapper + }) + } + fake.rESTMapperReturnsOnCall[i] = struct { + result1 meta.RESTMapper + }{result1} +} + +func (fake *FakeClient) Scheme() *runtime.Scheme { + fake.schemeMutex.Lock() + ret, specificReturn := fake.schemeReturnsOnCall[len(fake.schemeArgsForCall)] + fake.schemeArgsForCall = append(fake.schemeArgsForCall, struct { + }{}) + stub := fake.SchemeStub + fakeReturns := fake.schemeReturns + fake.recordInvocation("Scheme", []interface{}{}) + fake.schemeMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) SchemeCallCount() int { + fake.schemeMutex.RLock() + defer fake.schemeMutex.RUnlock() + return len(fake.schemeArgsForCall) +} + +func (fake *FakeClient) SchemeCalls(stub func() *runtime.Scheme) { + fake.schemeMutex.Lock() + defer fake.schemeMutex.Unlock() + fake.SchemeStub = stub +} + +func (fake *FakeClient) SchemeReturns(result1 *runtime.Scheme) { + fake.schemeMutex.Lock() + defer 
fake.schemeMutex.Unlock() + fake.SchemeStub = nil + fake.schemeReturns = struct { + result1 *runtime.Scheme + }{result1} +} + +func (fake *FakeClient) SchemeReturnsOnCall(i int, result1 *runtime.Scheme) { + fake.schemeMutex.Lock() + defer fake.schemeMutex.Unlock() + fake.SchemeStub = nil + if fake.schemeReturnsOnCall == nil { + fake.schemeReturnsOnCall = make(map[int]struct { + result1 *runtime.Scheme + }) + } + fake.schemeReturnsOnCall[i] = struct { + result1 *runtime.Scheme + }{result1} +} + +func (fake *FakeClient) Status() client.SubResourceWriter { + fake.statusMutex.Lock() + ret, specificReturn := fake.statusReturnsOnCall[len(fake.statusArgsForCall)] + fake.statusArgsForCall = append(fake.statusArgsForCall, struct { + }{}) + stub := fake.StatusStub + fakeReturns := fake.statusReturns + fake.recordInvocation("Status", []interface{}{}) + fake.statusMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) StatusCallCount() int { + fake.statusMutex.RLock() + defer fake.statusMutex.RUnlock() + return len(fake.statusArgsForCall) +} + +func (fake *FakeClient) StatusCalls(stub func() client.SubResourceWriter) { + fake.statusMutex.Lock() + defer fake.statusMutex.Unlock() + fake.StatusStub = stub +} + +func (fake *FakeClient) StatusReturns(result1 client.SubResourceWriter) { + fake.statusMutex.Lock() + defer fake.statusMutex.Unlock() + fake.StatusStub = nil + fake.statusReturns = struct { + result1 client.SubResourceWriter + }{result1} +} + +func (fake *FakeClient) StatusReturnsOnCall(i int, result1 client.SubResourceWriter) { + fake.statusMutex.Lock() + defer fake.statusMutex.Unlock() + fake.StatusStub = nil + if fake.statusReturnsOnCall == nil { + fake.statusReturnsOnCall = make(map[int]struct { + result1 client.SubResourceWriter + }) + } + fake.statusReturnsOnCall[i] = struct { + result1 client.SubResourceWriter + }{result1} +} + +func (fake *FakeClient) 
SubResource(arg1 string) client.SubResourceClient { + fake.subResourceMutex.Lock() + ret, specificReturn := fake.subResourceReturnsOnCall[len(fake.subResourceArgsForCall)] + fake.subResourceArgsForCall = append(fake.subResourceArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.SubResourceStub + fakeReturns := fake.subResourceReturns + fake.recordInvocation("SubResource", []interface{}{arg1}) + fake.subResourceMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) SubResourceCallCount() int { + fake.subResourceMutex.RLock() + defer fake.subResourceMutex.RUnlock() + return len(fake.subResourceArgsForCall) +} + +func (fake *FakeClient) SubResourceCalls(stub func(string) client.SubResourceClient) { + fake.subResourceMutex.Lock() + defer fake.subResourceMutex.Unlock() + fake.SubResourceStub = stub +} + +func (fake *FakeClient) SubResourceArgsForCall(i int) string { + fake.subResourceMutex.RLock() + defer fake.subResourceMutex.RUnlock() + argsForCall := fake.subResourceArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeClient) SubResourceReturns(result1 client.SubResourceClient) { + fake.subResourceMutex.Lock() + defer fake.subResourceMutex.Unlock() + fake.SubResourceStub = nil + fake.subResourceReturns = struct { + result1 client.SubResourceClient + }{result1} +} + +func (fake *FakeClient) SubResourceReturnsOnCall(i int, result1 client.SubResourceClient) { + fake.subResourceMutex.Lock() + defer fake.subResourceMutex.Unlock() + fake.SubResourceStub = nil + if fake.subResourceReturnsOnCall == nil { + fake.subResourceReturnsOnCall = make(map[int]struct { + result1 client.SubResourceClient + }) + } + fake.subResourceReturnsOnCall[i] = struct { + result1 client.SubResourceClient + }{result1} +} + +func (fake *FakeClient) Update(arg1 context.Context, arg2 client.Object, arg3 ...client.UpdateOption) error { + fake.updateMutex.Lock() + ret, 
specificReturn := fake.updateReturnsOnCall[len(fake.updateArgsForCall)] + fake.updateArgsForCall = append(fake.updateArgsForCall, struct { + arg1 context.Context + arg2 client.Object + arg3 []client.UpdateOption + }{arg1, arg2, arg3}) + stub := fake.UpdateStub + fakeReturns := fake.updateReturns + fake.recordInvocation("Update", []interface{}{arg1, arg2, arg3}) + fake.updateMutex.Unlock() + if stub != nil { + return stub(arg1, arg2, arg3...) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *FakeClient) UpdateCallCount() int { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + return len(fake.updateArgsForCall) +} + +func (fake *FakeClient) UpdateCalls(stub func(context.Context, client.Object, ...client.UpdateOption) error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = stub +} + +func (fake *FakeClient) UpdateArgsForCall(i int) (context.Context, client.Object, []client.UpdateOption) { + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + argsForCall := fake.updateArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeClient) UpdateReturns(result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + fake.updateReturns = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) UpdateReturnsOnCall(i int, result1 error) { + fake.updateMutex.Lock() + defer fake.updateMutex.Unlock() + fake.UpdateStub = nil + if fake.updateReturnsOnCall == nil { + fake.updateReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.updateReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakeClient) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.createMutex.RLock() + defer fake.createMutex.RUnlock() + fake.deleteMutex.RLock() + defer fake.deleteMutex.RUnlock() + 
fake.deleteAllOfMutex.RLock() + defer fake.deleteAllOfMutex.RUnlock() + fake.getMutex.RLock() + defer fake.getMutex.RUnlock() + fake.groupVersionKindForMutex.RLock() + defer fake.groupVersionKindForMutex.RUnlock() + fake.isObjectNamespacedMutex.RLock() + defer fake.isObjectNamespacedMutex.RUnlock() + fake.listMutex.RLock() + defer fake.listMutex.RUnlock() + fake.patchMutex.RLock() + defer fake.patchMutex.RUnlock() + fake.rESTMapperMutex.RLock() + defer fake.rESTMapperMutex.RUnlock() + fake.schemeMutex.RLock() + defer fake.schemeMutex.RUnlock() + fake.statusMutex.RLock() + defer fake.statusMutex.RUnlock() + fake.subResourceMutex.RLock() + defer fake.subResourceMutex.RUnlock() + fake.updateMutex.RLock() + defer fake.updateMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeClient) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ client.Client = new(FakeClient) diff --git a/internal/utils/utils.go b/internal/utils/utils.go new file mode 100644 index 0000000..127c3c1 --- /dev/null +++ b/internal/utils/utils.go @@ -0,0 +1,37 @@ +package utils + +import ( + "fmt" + "os" + + ctrl "sigs.k8s.io/controller-runtime" +) + +const ( + DefaultKubeturboVersionEnvVar = "DEFAULT_KUBETURBO_VERSION" +) + +var ( + staticLogger = ctrl.Log.WithName("Operator-static") +) + +func GetDefaultKubeturboVersion() (string, error) { + // Env var DEFAULT_KUBETURBO_VERSION specifies the default Kubeturbo version that + // the operator should use when the client doesn't specify the version + version, found := 
os.LookupEnv(DefaultKubeturboVersionEnvVar) + if !found { + return "", fmt.Errorf("%s must be set", DefaultKubeturboVersionEnvVar) + } + staticLogger.Info(fmt.Sprintf("DEFAULT_KUBETURBO_VERSION=%s", version)) + return version, nil +} + +// StringInSlice checks if a string is in a slice of strings +func StringInSlice(str string, list []string) bool { + for _, item := range list { + if item == str { + return true + } + } + return false +} diff --git a/internal/utils/utils_suite_test.go b/internal/utils/utils_suite_test.go new file mode 100644 index 0000000..9ca82ff --- /dev/null +++ b/internal/utils/utils_suite_test.go @@ -0,0 +1,13 @@ +package utils_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestUtils(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Utils Suite") +} diff --git a/internal/utils/utils_test.go b/internal/utils/utils_test.go new file mode 100644 index 0000000..ec59ad8 --- /dev/null +++ b/internal/utils/utils_test.go @@ -0,0 +1,28 @@ +package utils_test + +import ( + "os" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.ibm.com/turbonomic/kubeturbo-deploy/internal/utils" +) + +const ( + TestVersion = "0.0.0-SNAPSHOT" +) + +var _ = Describe("Utils", func() { + Describe("GetDefaultVersion", func() { + When("Argument is a struct", func() { + It("Get global version", func() { + err := os.Setenv(utils.DefaultKubeturboVersionEnvVar, TestVersion) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + kubeturboVersion, err := utils.GetDefaultKubeturboVersion() + ExpectWithOffset(1, kubeturboVersion).To(Equal(TestVersion)) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + }) + }) + }) +}) diff --git a/scripts/download_tools.sh b/scripts/download_tools.sh new file mode 100644 index 0000000..4420622 --- /dev/null +++ b/scripts/download_tools.sh @@ -0,0 +1,51 @@ +#!/bin/sh + +# download tools to local bin +LOCALBIN=${LOCALBIN-"bin"} +mkdir -p "${LOCALBIN}" + +# Function to generate the download URL for shellcheck +download_shellcheck() { + version="$1" + + # Detect the operating system for shellcheck + unset os + case "$(uname)" in + Darwin) os="darwin";; + Linux) os="linux";; + *) echo "Unsupported OS: $(uname)" && exit 1;; + esac + + # Detect the architecture for shellcheck + unset arch + case "$(uname -m)" in + x86_64) arch="x86_64";; + arm64) arch="aarch64";; # Handle Apple Silicon M1 and M2 architecture + aarch64) arch="aarch64";; + armv6l) arch="armv6hf";; + riscv64) arch="riscv64";; + *) echo "Unsupported architecture: $(uname -m)" && exit 1;; + esac + + # Construct the URL + base_url="https://github.com/koalaman/shellcheck/releases/download/${version}" + file_name="shellcheck-${version}.${os}.${arch}.tar.xz" + download_url="${base_url}/${file_name}" + + # Download the shellcheck + curl -L "$download_url" | tar -xJf - -C "${LOCALBIN}" + + # Extract the tool to local bin + cp "${LOCALBIN}/shellcheck-${version}/shellcheck" "${LOCALBIN}" + + # Clean up + rm -rf "${LOCALBIN}/shellcheck-${version}" + + "${LOCALBIN}"/shellcheck --version +} + +# Download 
shellcheck +SPELLCHECK_VERSION="v0.10.0" +if [ ! -f "${LOCALBIN}"/"shellcheck" ]; then + download_shellcheck "${SPELLCHECK_VERSION}" +fi diff --git a/scripts/export_yamls.sh b/scripts/export_yamls.sh new file mode 100755 index 0000000..3a70e72 --- /dev/null +++ b/scripts/export_yamls.sh @@ -0,0 +1,111 @@ +#!/usr/bin/env sh + +SCRIPT_DIR="$(cd "$(dirname $0)" && pwd)" +ROOT_DIR="${SCRIPT_DIR}/.." +OPERATOR_YAML_DIR="${ROOT_DIR}/deploy/kubeturbo_operator_yamls" +KUBETURBO_YAML_DIR="${ROOT_DIR}/deploy/kubeturbo_yamls" +TEMP_DIR=$(mktemp -d) + +# Some files that assume to be existed before execute the script +CRD_SOURCE="${ROOT_DIR}/config/crd/bases/charts.helm.k8s.io_kubeturbos.yaml" +CR_SOURCE="${ROOT_DIR}/config/samples/charts_v1_kubeturbo.yaml" +CRED_SOURCE="${KUBETURBO_YAML_DIR}/turbo_opsmgr_credentials_secret_sample.yaml" + +# Inherited from environment, anticipate to work from the Makefile +NAMESPACE=${NAMESPACE:-"turbo"} +LOCALBIN=${LOCALBIN:-"${ROOT_DIR}/bin"} +KUSTOMIZE=${KUSTOMIZE:-"${LOCALBIN}/kustomize"} + +# Function to compose operator full yaml samples using master files +# +# Listing of all the master files: +# - config/crd/bases/charts.helm.k8s.io_kubeturbos.yaml +# - config/samples/charts_v1_kubeturbo.yaml +# - deploy/kubeturbo_yamls/turbo_opsmgr_credentials_secret_sample.yaml +# +# Listing of all generated files: +# - deploy/kubeturbo_operator_yamls/kubeturbo_operator_full.yaml +# - deploy/kubeturbo_operator_yamls/kubeturbo_operator_least_admin_full.yaml +# - deploy/kubeturbo_operator_yamls/kubeturbo_operator_reader_full.yaml +# - (deprecating) deploy/kubeturbo_yamls/turbo_kubeturbo_operator_full.yaml +# - (deprecating) deploy/kubeturbo_yamls/turbo_kubeturbo_operator_least_admin_full.yaml +# - (deprecating) deploy/kubeturbo_yamls/turbo_kubeturbo_operator_reader_full.yaml +main() { + # To ensure all the generated files are up-to-date + update_operator_bundle + + # Check if prerequisite files exists + file_check + + # Copy over CRD and Sample CR to 
the exposing directory + cp "${CRD_SOURCE}" "${OPERATOR_YAML_DIR}/kubeturbo_crd.yaml" + cp "${CR_SOURCE}" "${OPERATOR_YAML_DIR}/kubeturbo_sample_cr.yaml" + + # Generate operator full yaml at the temp dir + operator_yaml_path="${TEMP_DIR}/operator.yaml" + generate_operator_yaml "${operator_yaml_path}" + + # Concat operator file, CR and turbonomic-credentials to build operator_full yaml files + echo "Start generate operator full files..." + cat <<-EOF | sed 's/namespace: .*/namespace: '${NAMESPACE}'/g' > "${OPERATOR_YAML_DIR}/kubeturbo_operator_full.yaml" + $(cat "${operator_yaml_path}") + --- + $(cat "${CRED_SOURCE}") + --- + $(cat "${OPERATOR_YAML_DIR}/kubeturbo_sample_cr.yaml") + EOF + sed 's|roleName: cluster-admin|roleName: turbo-cluster-admin|' "${OPERATOR_YAML_DIR}/kubeturbo_operator_full.yaml" > "${OPERATOR_YAML_DIR}/kubeturbo_operator_least_admin_full.yaml" + sed 's|roleName: cluster-admin|roleName: turbo-cluster-reader|' "${OPERATOR_YAML_DIR}/kubeturbo_operator_full.yaml" > "${OPERATOR_YAML_DIR}/kubeturbo_operator_reader_full.yaml" + + # Keep old files for file migration, will deprecate operator full yaml files under deploy/kubeturbo_yamls folder once done (deprecating) + cp "${OPERATOR_YAML_DIR}/kubeturbo_operator_full.yaml" "${KUBETURBO_YAML_DIR}/turbo_kubeturbo_operator_full.yaml" + cp "${OPERATOR_YAML_DIR}/kubeturbo_operator_least_admin_full.yaml" "${KUBETURBO_YAML_DIR}/turbo_kubeturbo_operator_least_admin_full.yaml" + cp "${OPERATOR_YAML_DIR}/kubeturbo_operator_reader_full.yaml" "${KUBETURBO_YAML_DIR}/turbo_kubeturbo_operator_reader_full.yaml" + + echo "Done" +} + +# Function to ensure if all prerequisite files exists +file_check() { + files="${CRD_SOURCE} ${CR_SOURCE} ${CRED_SOURCE}" + for f in ${files}; do + if [ ! 
-f "${f}" ]; then + echo "File not found: ${f}" + exit 1 + fi + done +} + +# Function to ensure the basic yamls (CRD and operator bundle yaml) are synced up +update_operator_bundle() { + # Move to root dir + cd "${ROOT_DIR}" || exit 1 + + # Use make command to ensure the bundle and crd file are up-to-date + make export_operator_yaml_bundle +} + +# Function to generate a yaml with operator deployment and RBAC only +generate_operator_yaml() { + output_file=${1:-"${TEMP_DIR}/operator_full.yaml"} + temp_operator_full_dir="${ROOT_DIR}/config/operator_full" + mkdir -p "${temp_operator_full_dir}" + + # Build temporary yaml with only operator deployment and its RBAC + cat <<-EOF > "${temp_operator_full_dir}/kustomization.yaml" + --- + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + namespace: "${NAMESPACE}" + resources: + - ../rbac + - ../manager + EOF + ${KUSTOMIZE} build "${temp_operator_full_dir}" > ${output_file} + echo "Generated the operator full yaml at: ${output_file}" + + # Cleanup + rm -rf "${temp_operator_full_dir}" +} + +main diff --git a/scripts/install_kubeturbo_via_operator.sh b/scripts/install_kubeturbo_via_operator.sh new file mode 100755 index 0000000..355ae55 --- /dev/null +++ b/scripts/install_kubeturbo_via_operator.sh @@ -0,0 +1,868 @@ +#!/usr/bin/env sh + +################## PRE-CONFIGURATION ################## +## Place your hardcoded ARGS variable assignments here ## +# TARGET_HOST="" +# OAUTH_CLIENT_ID="" +# OAUTH_CLIENT_SECRET="" +# TSC_TOKEN="" + +################## CMD ALIAS ################## +DEPENDENCY_LIST="grep cat sleep wc awk sed base64 mktemp curl" +KUBECTL=$(command -v oc) +KUBECTL=${KUBECTL:-$(command -v kubectl)} +if ! [ -x "${KUBECTL}" ]; then + echo "ERROR: Command 'oc' and 'kubectl' are not found, please install either of them first!" 
>&2 && exit 1 +fi + +################## CONSTANT ################## +export KUBECTL_WARNINGS="false" + +K8S_TYPE="Kubernetes" +OCP_TYPE="RedHatOpenShift" + +CARALOG_SOURCE="certified-operators" +CARALOG_SOURCE_NS="openshift-marketplace" + +TSC_TOKEN_FILE="" +DEFAULT_RELEASE="stable" +DEFAULT_NS="turbo" +DEFAULT_TARGET_NAME="Customer-cluster" +DEFAULT_ROLE="cluster-admin" +DEFAULT_ENABLE_TSC="optional" +DEFAULT_PROXY_SERVER="" +DEFAULT_KUBETURBO_NAME="kubeturbo-release" +DEFAULT_KUBETURBO_VERSION="8.13.1" +DEFAULT_KUBETURBO_REGISTRY="icr.io/cpopen/turbonomic/kubeturbo" +DEFAULT_LOGGING_LEVEL=0 + +RETRY_INTERVAL=10 # in seconds +MAX_RETRY=10 + +################## ARGS ################## +OSTYPE=${OSTYPE:-""} +KUBECONFIG=${KUBECONFIG:-""} + +ACTION=${ACTION:-"apply"} +KT_TARGET_RELEASE=${KT_TARGET_RELEASE:-${DEFAULT_RELEASE}} +TSC_TARGET_RELEASE=${TSC_TARGET_RELEASE:-${DEFAULT_RELEASE}} +OPERATOR_RELEASE=${OPERATOR_RELEASE:-"staging"} +PWD_SECRET_ENCODED=${PWD_SECRET_ENCODED:-"true"} + +TARGET_HOST=${TARGET_HOST:-""} +OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID:-""} +OAUTH_CLIENT_SECRET=${OAUTH_CLIENT_SECRET:-""} +TSC_TOKEN=${TSC_TOKEN:-""} + +OPERATOR_NS=${OPERATOR_NS:-${DEFAULT_NS}} +TARGET_NAME=${TARGET_NAME:-${DEFAULT_TARGET_NAME}} +KUBETURBO_ROLE=${KUBETURBO_ROLE:-${DEFAULT_ROLE}} +ENABLE_TSC=${ENABLE_TSC:-${DEFAULT_ENABLE_TSC}} +PROXY_SERVER=${PROXY_SERVER:-${DEFAULT_PROXY_SERVER}} +KUBETURBO_NAME=${KUBETURBO_NAME:-${DEFAULT_KUBETURBO_NAME}} +KUBETURBO_VERSION=${KUBETURBO_VERSION:-${DEFAULT_KUBETURBO_VERSION}} +KUBETURBO_REGISTRY=${KUBETURBO_REGISTRY:-${DEFAULT_KUBETURBO_REGISTRY}} +KUBETURBO_REGISTRY_USRNAME=${KUBETURBO_REGISTRY_USRNAME:-""} +KUBETURBO_REGISTRY_PASSWRD=${KUBETURBO_REGISTRY_PASSWRD:-""} +TARGET_SUBTYPE=${TARGET_SUBTYPE:-""} + +LOGGING_LEVEL=${LOGGING_LEVEL:-${DEFAULT_LOGGING_LEVEL}} + +################## DYNAMIC VARS ################## +CERT_OP_NAME="" +CERT_OP_RELEASE="" +CERT_OP_VERSION="" +CERT_KUBETURBO_OP_NAME="" +CERT_KUBETURBO_OP_RELEASE="" 
+CERT_KUBETURBO_OP_VERSION="" +CERT_TSC_OP_NAME="" +CERT_TSC_OP_RELEASE="" +CERT_TSC_OP_VERSION="" +K8S_CLUSTER_KINDS="" + +################## FUNCTIONS ################## +# check if the current system supports all the commands needed to run the script +dependencies_check() { + missing_dependencies="" + for dependency in ${DEPENDENCY_LIST}; do + dependency_path=$(command -v "${dependency}") + if ! [ -x "${dependency_path}" ]; then + missing_dependencies="${missing_dependencies} ${dependency}" + fi + done + + if [ -n "${missing_dependencies}" ]; then + echo "ERROR: Missing the required command: $(echo "${missing_dependencies}" | sed 's/ /, /g')" + echo "Please refer to the official documentation or use your package manager to install to continue." + exit 1 + fi +} + +validate_args() { + while [ $# -gt 0 ]; do + case $1 in + --host) shift; TARGET_HOST="$1"; [ -n "${TARGET_HOST}" ] && shift;; + --kubeconfig) shift; KUBECONFIG="$1"; [ -n "${KUBECONFIG}" ] && shift;; + -*) echo "ERROR: Unknown option $1" >&2; usage; exit 1;; + *) shift;; + esac + done + + if [ -z "${TARGET_HOST}" ]; then + echo "ERROR: Missing target host" >&2; usage; exit 1 + fi + + if [ -n "${KUBECONFIG}" ]; then + KUBECTL="${KUBECTL} --kubeconfig=${KUBECONFIG}" + fi + + # Prioritize the TSC deployment approach once the value is set + if [ "${ENABLE_TSC}" = "optional" ]; then + if [ -n "${TSC_TOKEN}" ] || [ -n "${TSC_TOKEN_FILE}" ]; then ENABLE_TSC="true"; fi + fi + + # Pre-process the encoded secrets and passwords + if [ "${PWD_SECRET_ENCODED}" = "true" ]; then + KUBETURBO_REGISTRY_PASSWRD=$(password_secret_handler "${KUBETURBO_REGISTRY_PASSWRD}") + OAUTH_CLIENT_ID=$(password_secret_handler "${OAUTH_CLIENT_ID}") + OAUTH_CLIENT_SECRET=$(password_secret_handler "${OAUTH_CLIENT_SECRET}") + PROXY_SERVER=$(password_secret_handler "${PROXY_SERVER}") + fi +} + +usage() { + echo "This program helps to install Kubeturbo to the Kubernetes cluster" + echo "Syntax: ./$0 --host --kubeconfig " + echo + echo 
"options:" + echo "--host host of the Turbonomic instance (required)" + echo "--kubeconfig Path to the kubeconfig file to use for CLI requests" + echo +} + +# confirm args that are passed into the script and get user's consent +confirm_installation() { + proxy_server_enabled=$([ -n "${PROXY_SERVER}" ] && echo 'true' || echo 'false') + if [ -n "${KUBETURBO_REGISTRY_USRNAME}" ] && [ -n "${KUBETURBO_REGISTRY_PASSWRD}" ]; then + private_registry_enabled="true" + else + private_registry_enabled="false" + fi + echo "Here is the summary for the current installation:" + echo "" + printf "%-20s %-20s\n" "---------" "---------" + printf "%-20s %-20s\n" "Parameter" "Value" + printf "%-20s %-20s\n" "---------" "---------" + printf "%-20s %-20s\n" "Mode" "$([ "${ACTION}" = 'delete' ] && echo 'Delete' || echo 'Create/Update')" + printf "%-20s %-20s\n" "Kubeconfig" "${KUBECONFIG:-default}" + printf "%-20s %-20s\n" "Host" "${TARGET_HOST}" + printf "%-20s %-20s\n" "Namespace" "${OPERATOR_NS}" + printf "%-20s %-20s\n" "Target Name" "${TARGET_NAME}" + printf "%-20s %-20s\n" "Target Subtype" "${TARGET_SUBTYPE}" + printf "%-20s %-20s\n" "Role" "${KUBETURBO_ROLE}" + printf "%-20s %-20s\n" "Version" "${KUBETURBO_VERSION}" + printf "%-20s %-20s\n" "Auto-Update" "${ENABLE_TSC}" + printf "%-20s %-20s\n" "Auto-Logging" "${ENABLE_TSC}" + printf "%-20s %-20s\n" "Proxy Server" "${proxy_server_enabled}" + printf "%-20s %-20s\n" "Private Registry" "${private_registry_enabled}" + echo "" + echo "Please confirm the above settings [Y/n]: " && read -r continueInstallation + [ "${continueInstallation}" = "n" ] || [ "${continueInstallation}" = "N" ] && echo "Please retry the script with correct settings!" 
&& exit 1 + cluster_type_check +} + +# To determine whether the current kubectl context is an Openshift cluster +cluster_type_check() { + confirm_installation_cluster + + if [ -z "${TARGET_SUBTYPE}" ]; then + TARGET_SUBTYPE=$(auto_detect_cluster_type) + return + fi + + current_context=$(${KUBECTL} config current-context) + current_oc_cluster_type=$(auto_detect_cluster_type) + TARGET_SUBTYPE=$(normalize_target_cluster_type "${TARGET_SUBTYPE}") + if [ "${current_oc_cluster_type}" != "${TARGET_SUBTYPE}" ]; then + echo "Your current cluster type [${current_oc_cluster_type}] mismatches with the target type [${TARGET_SUBTYPE}] you specified from the UI!" + echo "Do you want to continue the installation as the [${current_oc_cluster_type}] target? [y/N]: " && read -r allowMismatch + if [ "${allowMismatch}" = "y" ] || [ "${allowMismatch}" = "Y" ]; then + TARGET_SUBTYPE="${current_oc_cluster_type}" + else + echo "Please double check your current Kubernetes context before the other try!" && exit 1 + fi + fi +} + +# get the client's consent to install to the current cluster +confirm_installation_cluster() { + echo "Info: Your current Kubernetes context is set to the following:" + show_current_kube_context + echo "Please confirm if the script should work in the above cluster [Y/n]: " && read -r continueInstallation + [ "${continueInstallation}" = "n" ] || [ "${continueInstallation}" = "N" ] && echo "Please double check your current Kubernetes context before the other try!" && exit 1 +} + +# display current kubeconfig context in a table format +show_current_kube_context() { + # exit if the current context is not set + if ! current_context=$(${KUBECTL} config current-context); then + echo "ERROR: Current context is not set in your cluster!" 
+ exit 1 + fi + + # get details from the raw object + cluster=$(${KUBECTL} config view -o jsonpath='{.contexts[?(@.name == "'"${current_context}"'")].context.cluster}') + user=$(${KUBECTL} config view -o jsonpath='{.contexts[?(@.name == "'"${current_context}"'")].context.user}') + namespace=$(${KUBECTL} config view -o jsonpath='{.contexts[?(@.name == "'"${current_context}"'")].context.namespace}') + + # print out in the table + spacing=5 + name_width=$((${#current_context} + spacing)) + cluster_width=$((${#cluster} + spacing)) + user_width=$((${#user} + spacing)) + namespace_width=$((${#namespace} + spacing)) + + # Construct the dynamic format string + format="%-${name_width}s %-${cluster_width}s %-${user_width}s %-${namespace_width}s\n" + + # Use printf with the format string as an argument + eval "printf '${format}' 'NAME' 'CLUSTER' 'AUTHINFO' 'NAMESPACE'" + eval "printf '${format}' \"${current_context}\" \"${cluster}\" \"${user}\" \"${namespace}\"" + + # exit if the current cluster is not reachable + if ! ${KUBECTL} get nodes > /dev/null 2>&1; then + echo "ERROR: Context used by the current cluster is not reachable!" 
+ exit 1 + fi +} + +# detect the cluster type +auto_detect_cluster_type() { + is_current_oc_cluster=$(${KUBECTL} api-resources --api-group=route.openshift.io -o name) + normalize_target_cluster_type "$([ -n "${is_current_oc_cluster}" ] && echo "${OCP_TYPE}")" +} + +# normalize cluster type to either Openshift or Kubernetes +normalize_target_cluster_type() { + cluster_type=$1 + is_target_oc_cluster=$(echo "${cluster_type}" | grep -i "${OCP_TYPE}") + [ -n "${is_target_oc_cluster}" ] && echo "${OCP_TYPE}" || echo "${K8S_TYPE}" +} + +main() { + # gather all cluster level resource kinds + K8S_CLUSTER_KINDS=$(${KUBECTL} api-resources --namespaced=false --no-headers | awk '{print $NF}') + + NS_EXISTS=$(${KUBECTL} get ns --field-selector=metadata.name="${OPERATOR_NS}" -o name) + if [ -z "${NS_EXISTS}" ]; then + echo "Creating ${OPERATOR_NS} namespace to deploy Kubeturbo operator" + ${KUBECTL} create ns "${OPERATOR_NS}" --dry-run=client -o yaml | ${KUBECTL} apply -f - + fi + + if [ "${ACTION}" != "delete" ] && [ "${ENABLE_TSC}" = "optional" ]; then + echo "Do you want to install with the auto logging and auto version updates? [Y/n]: " && read -r enableTSC + if [ "${enableTSC}" = "n" ] || [ "${enableTSC}" = "N" ]; then + ENABLE_TSC="false" + else + ENABLE_TSC="true" + fi + fi + + apply_operator_group + setup_kubeturbo + + # applicable scenario: the user switches from the TSC approach to the OAuth2 approach + is_tsc_launched=$(${KUBECTL} -n "${OPERATOR_NS}" get deploy --field-selector=metadata.name=t8c-client-operator-controller-manager -o name) + if [ -n "${is_tsc_launched}" ] && [ "${ENABLE_TSC}" = "false" ]; then + echo "Info: Dismounting Auto-logging & Auto-updating feature as no longer required ..." + ACTION="delete" + fi + + setup_tsc + + echo "Done!" 
+ exit 0 +} + +apply_operator_group() { + if [ "${TARGET_SUBTYPE}" != "${OCP_TYPE}" ]; then return; fi + op_gp_count=$(${KUBECTL} -n "${OPERATOR_NS}" get OperatorGroup -o name | wc -l) + if [ "${op_gp_count}" -eq 1 ]; then + return + elif [ "${op_gp_count}" -gt 1 ]; then + echo "ERROR: Found multiple Operator Groups in the namespace ${OPERATOR_NS}" >&2 && exit 1 + fi + + action="${ACTION}" + unset config + if [ "${ACTION}" = "delete" ]; then + action="${ACTION}" + config="--ignore-not-found" + fi + + cat <<-EOF | ${KUBECTL} "${action}" -f - ${config} + --- + apiVersion: operators.coreos.com/v1 + kind: OperatorGroup + metadata: + name: kubeturbo-opeartorgroup + namespace: "${OPERATOR_NS}" + spec: + targetNamespaces: + - "${OPERATOR_NS}" + --- + EOF +} + +setup_kubeturbo() { + if [ "${ENABLE_TSC}" != "true" ]; then + apply_oauth2_token + fi + + if [ "${ACTION}" = "delete" ]; then + apply_kubeturbo_cr + apply_kubeturbo_op + else + apply_kubeturbo_op + apply_kubeturbo_cr + fi + + echo "Successfully ${ACTION} Kubeturbo in ${OPERATOR_NS} namespace!" + ${KUBECTL} -n "${OPERATOR_NS}" get role,rolebinding,sa,pod,deploy,cm -l 'app.kubernetes.io/created-by in (kubeturbo-deploy, kubeturbo-operator)' +} + +apply_kubeturbo_op() { + if [ "${TARGET_SUBTYPE}" = "${OCP_TYPE}" ]; then + apply_kubeturbo_op_subscription + else + apply_kubeturbo_op_yaml + fi +} + +apply_kubeturbo_op_subscription() { + select_cert_op_from_operatorhub "kubeturbo" + CERT_KUBETURBO_OP_NAME="${CERT_OP_NAME}" + + select_cert_op_channel_from_operatorhub "${CERT_KUBETURBO_OP_NAME}" "${KT_TARGET_RELEASE}" + CERT_KUBETURBO_OP_RELEASE="${CERT_OP_RELEASE}" + CERT_KUBETURBO_OP_VERSION="${CERT_OP_VERSION}" + + action="${ACTION}" + unset config + if [ "${ACTION}" = "delete" ]; then + action="${ACTION}" + config="--ignore-not-found" + fi + + echo "${ACTION} Certified Kubeturbo operator subscription ..." 
+ if [ "${ACTION}" = "delete" ]; then + ${KUBECTL} -n "${OPERATOR_NS}" "${action}" Subscription "${CERT_KUBETURBO_OP_NAME}" ${config} + ${KUBECTL} -n "${OPERATOR_NS}" "${action}" csv "${CERT_KUBETURBO_OP_VERSION}" ${config} + return + fi + cat <<-EOF | ${KUBECTL} "${action}" -f - ${config} + --- + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: "${CERT_KUBETURBO_OP_NAME}" + namespace: "${OPERATOR_NS}" + spec: + channel: "${CERT_KUBETURBO_OP_RELEASE}" + installPlanApproval: "Automatic" + name: "${CERT_KUBETURBO_OP_NAME}" + source: "${CARALOG_SOURCE}" + sourceNamespace: "${CARALOG_SOURCE_NS}" + startingCSV: "${CERT_KUBETURBO_OP_VERSION}" + --- + EOF + wait_for_deployment "${OPERATOR_NS}" "kubeturbo-operator" +} + +apply_kubeturbo_op_yaml() { + operator_deploy_name="kubeturbo-operator" + operator_service_account="kubeturbo-operator" + + source_github_repo="https://raw.githubusercontent.com/turbonomic/kubeturbo-deploy" + operator_yaml_path="deploy/kubeturbo_operator_yamls/operator-bundle.yaml" + operator_yaml_bundle=$(curl "${source_github_repo}/${OPERATOR_RELEASE}/${operator_yaml_path}" | sed "s/: turbo$/: ${OPERATOR_NS}/g" | sed '/^\s*#/d' | sed "s/image: icr.io\/cpopen\/kubeturbo-operator.*$/image: icr.io\/cpopen\/kubeturbo-operator:${KUBETURBO_VERSION}/") + + apply_operator_bundle "${operator_service_account}" "${operator_deploy_name}" "${operator_yaml_bundle}" +} + +apply_kubeturbo_cr() { + action="${ACTION}" + unset config + if [ "${ACTION}" = "delete" ]; then + action="${ACTION}" + config="--ignore-not-found" + fi + + echo "${ACTION} Kubeturbo CR ..." 
+ private_docker_registry="" + if [ -n "${KUBETURBO_REGISTRY_USRNAME}" ] && [ -n "${KUBETURBO_REGISTRY_PASSWRD}" ]; then + private_docker_registry="private-docker-registry" + ${KUBECTL} "${action}" secret docker-registry "${private_docker_registry}" ${config} + ${KUBECTL} create secret docker-registry "${private_docker_registry}" \ + --docker-username="${KUBETURBO_REGISTRY_USRNAME}" \ + --docker-password="${KUBETURBO_REGISTRY_PASSWRD}" \ + --docker-server="${KUBETURBO_REGISTRY}" \ + --namespace="${OPERATOR_NS}" \ + --dry-run="client" -o yaml | ${KUBECTL} "${action}" -f - + fi + + if [ "${ACTION}" = "delete" ]; then + # skip deletion if the CRD is not found + if ! ${KUBECTL} api-resources | grep -qE "Kubeturbo"; then + return + fi + fi + + # get user's consent to overwrite the current Kubeturbo CR in the target namespace + is_cr_exists=$(${KUBECTL} -n "${OPERATOR_NS}" get Kubeturbo --field-selector=metadata.name="${KUBETURBO_NAME}" -o name) + if [ -n "${is_cr_exists}" ] && [ "${ACTION}" != "delete" ]; then + echo "Warning: Kubeturbo CR(${KUBETURBO_NAME}) detected in the namespace(${OPERATOR_NS})!" + echo "Please confirm to overwrite the current Kubeturbo CR [Y/n]: " && read -r overwriteCR + [ "${overwriteCR}" = "n" ] || [ "${overwriteCR}" = "N" ] && echo "Installation aborted..." 
&& exit 1 + fi + + cat <<-EOF | ${KUBECTL} "${action}" -f - ${config} + --- + kind: Kubeturbo + apiVersion: charts.helm.k8s.io/v1 + metadata: + name: "${KUBETURBO_NAME}" + namespace: "${OPERATOR_NS}" + spec: + serverMeta: + turboServer: "${TARGET_HOST}" + version: "${KUBETURBO_VERSION}" + proxy: "${PROXY_SERVER}" + targetConfig: + targetName: "${TARGET_NAME}" + image: + repository: "${KUBETURBO_REGISTRY}" + tag: "${KUBETURBO_VERSION}" + imagePullSecret: "${private_docker_registry}" + roleName: "${KUBETURBO_ROLE}" + --- + EOF + wait_for_deployment "${OPERATOR_NS}" "${KUBETURBO_NAME}" +} + +apply_oauth2_token() { + if [ "${ACTION}" != "delete" ]; then + if [ -z "${OAUTH_CLIENT_ID}" ] || [ -z "${OAUTH_CLIENT_SECRET}" ]; then + echo "Missing OAuth2 client settings, please gather values following the instruction: " + echo "https://www.ibm.com/docs/en/tarm/latest?topic=cookbook-authenticating-oauth-20-clients-api" + echo "Enable enter your OAuth2 client id: " && read -r OAUTH_CLIENT_ID + echo "Enable enter your OAuth2 client secret: " && read -r OAUTH_CLIENT_SECRET + apply_oauth2_token && return + fi + fi + + action="${ACTION}" + unset config + if [ "${ACTION}" = "delete" ]; then + action="${ACTION}" + config="--ignore-not-found" + fi + + cat <<-EOF | ${KUBECTL} "${action}" -f - ${config} + apiVersion: v1 + kind: Secret + metadata: + name: turbonomic-credentials + namespace: "${OPERATOR_NS}" + type: Opaque + data: + clientid: $(encode_inline "${OAUTH_CLIENT_ID}") + clientsecret: $(encode_inline "${OAUTH_CLIENT_SECRET}") + --- + EOF +} + +setup_tsc() { + if [ "${ACTION}" = "delete" ]; then + apply_skupper_tunnel + apply_tsc_cr + apply_tsc_op + else + if [ "${ENABLE_TSC}" != "true" ]; then return; fi + ${KUBECTL} -n "${OPERATOR_NS}" delete secret turbonomic-credentials --ignore-not-found + apply_tsc_op + apply_tsc_cr + apply_skupper_tunnel + wait_for_tsc_sync_up + fi + echo "Successfully ${ACTION} TSC operator in the ${OPERATOR_NS} namespace!" 
+ ${KUBECTL} -n "${OPERATOR_NS}" get role,rolebinding,sa,pod,deploy -l 'app.kubernetes.io/created-by in (t8c-client-operator, turbonomic-t8c-client-operator)' +} + +apply_tsc_op() { + if [ "${TARGET_SUBTYPE}" = "${OCP_TYPE}" ]; then + apply_tsc_op_subscription + else + apply_tsc_op_yaml + fi +} + +apply_tsc_op_subscription() { + select_cert_op_from_operatorhub "t8c-tsc" + CERT_TSC_OP_NAME="${CERT_OP_NAME}" + + select_cert_op_channel_from_operatorhub "${CERT_TSC_OP_NAME}" "${TSC_TARGET_RELEASE}" + CERT_TSC_OP_RELEASE="${CERT_OP_RELEASE}" + CERT_TSC_OP_VERSION="${CERT_OP_VERSION}" + + action="${ACTION}" + unset config + if [ "${ACTION}" = "delete" ]; then + action="${ACTION}" + config="--ignore-not-found" + fi + + echo "${ACTION} Certified t8c-tsc operator subscription ..." + if [ "${ACTION}" = "delete" ]; then + ${KUBECTL} -n "${OPERATOR_NS}" "${action}" Subscription "${CERT_TSC_OP_NAME}" ${config} + ${KUBECTL} -n "${OPERATOR_NS}" "${action}" csv "${CERT_TSC_OP_VERSION}" ${config} + return + fi + cat <<-EOF | ${KUBECTL} "${action}" -f - ${config} + --- + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: "${CERT_TSC_OP_NAME}" + namespace: "${OPERATOR_NS}" + spec: + channel: "${CERT_TSC_OP_RELEASE}" + installPlanApproval: Automatic + name: "${CERT_TSC_OP_NAME}" + source: "${CARALOG_SOURCE}" + sourceNamespace: "${CARALOG_SOURCE_NS}" + startingCSV: "${CERT_TSC_OP_VERSION}" + --- + EOF + wait_for_deployment "${OPERATOR_NS}" "t8c-client-operator-controller-manager" +} + +apply_tsc_op_yaml() { + operator_deploy_name="t8c-client-operator-controller-manager" + operator_service_account="t8c-client-operator-controller-manager" + + source_github_repo="https://raw.githubusercontent.com/turbonomic/kubeturbo-deploy" + operator_yaml_path="deploy/tsc_operator_yamls/operator-bundle.yaml" + operator_yaml_bundle=$(curl "${source_github_repo}/${OPERATOR_RELEASE}/${operator_yaml_path}" | sed "s/: turbonomic$/: ${OPERATOR_NS}/g" | sed '/^\s*#/d') + + 
apply_operator_bundle "${operator_service_account}" "${operator_deploy_name}" "${operator_yaml_bundle}" +} + +apply_tsc_cr() { + action="${ACTION}" + unset config + if [ "${ACTION}" = "delete" ]; then + action="${ACTION}" + config="--ignore-not-found" + fi + + echo "${ACTION} TSC CR ..." + if [ "${ACTION}" = "delete" ]; then + # skip deletion if the CRD is not found + if ! ${KUBECTL} api-resources | grep -qE "TurbonomicClient|VersionManager"; then + return + fi + fi + + tsc_client_name="turbonomicclient-release" + cat <<-EOF | ${KUBECTL} "${action}" -f - ${config} + --- + kind: TurbonomicClient + apiVersion: clients.turbonomic.ibm.com/v1alpha1 + metadata: + name: "${tsc_client_name}" + namespace: "${OPERATOR_NS}" + spec: + global: + version: "${KUBETURBO_VERSION}" + --- + apiVersion: clients.turbonomic.ibm.com/v1alpha1 + kind: VersionManager + metadata: + name: versionmanager-release + namespace: "${OPERATOR_NS}" + spec: + url: 'http://remote-nginx-tunnel:9080/cluster-manager/clusterConfiguration' + --- + EOF + if [ "${ACTION}" != "delete" ]; then + echo "Waiting for TSC client to be ready ..." + wait_for_deployment "${OPERATOR_NS}" "tsc-site-resources" + sleep 20 && ${KUBECTL} wait pod \ + -n "${OPERATOR_NS}" \ + -l "app.kubernetes.io/part-of=${tsc_client_name}" \ + --for=condition=Ready \ + --timeout=60s + fi +} + +apply_skupper_tunnel() { + action="${ACTION}" + unset config + if [ "${ACTION}" = "delete" ]; then + action="${ACTION}" + config="--ignore-not-found" + fi + + if [ "${ACTION}" = "delete" ]; then + echo "${ACTION} secrets for TSC connection ..." 
+ for it in $(${KUBECTL} get secret -n "${OPERATOR_NS}" -l "skupper.io/type" -o name); do + ${KUBECTL} "${action}" -n "${OPERATOR_NS}" "${it}" ${config} + done + return + fi + + if [ -z "${TSC_TOKEN}" ]; then + if [ -f "${TSC_TOKEN_FILE}" ]; then + TSC_TOKEN=$(getJsonField "$(cat "${TSC_TOKEN_FILE}")" "tokenData") + else + echo "Please follow the wiki to get the TSC token file: " + echo "https://www.ibm.com/docs/en/tarm/latest?topic=client-secure-deployment-red-hat-openshift-operatorhub#SaaS_OpenShift__OVA_connect__title__1" + echo "Warning: cannot find TSC token file under: ${TSC_TOKEN_FILE}" + echo "Please enter the absolute path for your TSC token: " && read -r TSC_TOKEN_FILE + fi + apply_skupper_tunnel && return + fi + + skupper_connection_secret=$(echo "${TSC_TOKEN}" | base64 -d) + echo "${skupper_connection_secret}" | ${KUBECTL} "${action}" -n "${OPERATOR_NS}" -f - ${config} + + echo "Waiting for setting up TSC connection..." + retry_count=0 + while true; do + tunnel_svc=$(${KUBECTL} -n "${OPERATOR_NS}" get service --field-selector=metadata.name=remote-nginx-tunnel -o name) + if [ -n "${tunnel_svc}" ];then break; fi + retry_count=$((retry_count + 1)) + if message=$(retry "${retry_count}"); then + echo "${message}" + else + echo "Failed to setup the TSC connection, please request another one from the endpoint or regenerate the script from the UI!" + exit 1 + fi + done + echo "Skupper connection established!" +} + +wait_for_tsc_sync_up() { + # Wait for CR updates (watch on target server url updates) + echo "Waiting for Kubeturbo CR updates..." 
+ retry_count=0 + while true; do + turbo_server=$(${KUBECTL} -n "${OPERATOR_NS}" get Kubeturbos "${KUBETURBO_NAME}" -o=jsonpath='{.spec.serverMeta.turboServer}' | grep remote-nginx-tunnel) + if [ -n "${turbo_server}" ];then break; fi + retry_count=$((retry_count + 1)) + if message=$(retry "${retry_count}"); then + echo "${message}" + else + echo "There's no updates from the TSC client, please double-check if the Turbo server can reach out your current cluster!" + exit 1 + fi + done + + # Restart the kubeturbo pod to secure the updates if the operator hasn't restart the pod yet + kubeturbo_pod=$(${KUBECTL} -n "${OPERATOR_NS}" get pods --field-selector=status.phase=Running -o name | grep "${KUBETURBO_NAME}") + if [ -n "${kubeturbo_pod}" ]; then + ${KUBECTL} -n "${OPERATOR_NS}" delete "${kubeturbo_pod}" --ignore-not-found + wait_for_deployment "${OPERATOR_NS}" "${KUBETURBO_NAME}" + fi +} + +select_cert_op_from_operatorhub() { + target=$1 + echo "Fetching Openshift certified ${target} operator from OperatorHub ..." + cert_ops=$(${KUBECTL} get packagemanifests -o jsonpath="{range .items[*]}{.metadata.name} {.status.catalogSource} {.status.catalogSourceNamespace}{'\n'}{end}" | grep -e "${target}" | grep -e "${CARALOG_SOURCE}.*${CARALOG_SOURCE_NS}" | awk '{print $1}') + cert_ops_count=$(echo "${cert_ops}" | wc -l | awk '{print $1}') + if [ -z "${cert_ops}" ] || [ "${cert_ops_count}" -lt 1 ]; then + echo "There aren't any certified ${target} operator in the Operatorhub, please contact administrator for more information!" 
&& exit 1 + elif [ "${cert_ops_count}" -gt 1 ]; then + PS3="Fetched mutiple certified ${target} operators in the Operatorhub, please select a number to proceed OR type 'exit' to exit: " + while true; do + echo "Available options:" + i=1; echo "${cert_ops}" | while IFS= read -r cert_op; do + echo "$i) $cert_op" + i=$((i + 1)) + done + echo "${PS3}" && read -r REPLY + if validate_select_input "${cert_ops_count}" "${REPLY}"; then + [ "${REPLY}" = 'exit' ] && exit 0 + cert_ops=$(echo "$cert_ops" | awk "NR==$((REPLY))") + break + fi + done + fi + CERT_OP_NAME=${cert_ops} + echo "Using Openshift certified ${target} operator: ${CERT_OP_NAME}" +} + +select_cert_op_channel_from_operatorhub() { + cert_op_name=${1-${CERT_OP_NAME}} + target_release=${2-${DEFAULT_RELEASE}} + echo "Fetching Openshift ${cert_op_name} channels from OperatorHub ..." + channels=$(${KUBECTL} get packagemanifests "${cert_op_name}" -o jsonpath="{range .status.channels[*]}{.name}:{.currentCSV}{'\n'}{end}" | grep "${target_release}") + channel_count=$(echo "${channels}" | wc -l | awk '{print $1}') + if [ -z "${channels}" ] || [ "${channel_count}" -lt 1 ]; then + echo "There aren't any channel created for ${cert_op_name}, please contact administrator for more information!" 
&& exit 1 + elif [ "${channel_count}" -gt 1 ]; then + PS3="Fetched mutiple releases, please select a number to proceed OR type 'exit' to exit: " + while true; do + echo "Available options:" + i=1; echo "${channels}" | while IFS= read -r channel; do + echo "$i) $channel" + i=$((i + 1)) + done + echo "${PS3}" && read -r REPLY + if validate_select_input "${channel_count}" "${REPLY}"; then + [ "${REPLY}" = 'exit' ] && exit 0 + channels=$(echo "$channels" | awk "NR==$((REPLY))") + break + fi + done + fi + CERT_OP_RELEASE=$(echo "${channels}" | awk -F':' '{print $1}') + CERT_OP_VERSION=$(echo "${channels}" | awk -F':' '{print $2}') + echo "Using Openshift certified ${cert_op_name} ${CERT_OP_RELEASE} channel, version ${CERT_OP_VERSION}" +} + +getJsonField() { + jsonData=$1 && field=$2 + if ! echo "${jsonData}" | grep -q "${field}"; then + echo "Unable to get field ${field} due to:" + echo "${jsonData}" + exit 1 + fi + echo "${jsonData}" | sed -e "s/^{//g" -e "s/}$//g" -e "s/,/\n/g" -e "s/\"//g" | grep "${field}" | sed -e "s/[\" ]//g" | awk -F ':' '{print $2}' +} + +validate_select_input() { + opts_count=$1 && opt=$2 + if [ "${opt}" = "exit" ]; then + echo "Exiting the program ..." >&2 && exit 0 + elif ! echo "${opt}" | grep -qE '^[1-9][0-9]*$'; then + echo "ERROR: Input not a number: ${opt}" >&2 && exit 1 + elif [ "${opt}" -le 0 ] || [ "${opt}" -gt "${opts_count}" ]; then + echo "ERROR: Input out of range [1 - ${opts_count}]: ${opt}" >&2 && exit 1 + fi +} + +wait_for_deployment() { + if [ "${ACTION}" = "delete" ]; then return; fi + namespace=$1 && deploy_name=$2 + + echo "Waiting for deployment '${deploy_name}' to start..." 
+ retry_count=0 + while true; do + full_deploy_name=$(${KUBECTL} -n "${namespace}" get deploy -o name | grep -E "^deployment.apps/${deploy_name}$") + if [ -n "${full_deploy_name}" ]; then + deploy_status=$(${KUBECTL} -n "${namespace}" rollout status "${full_deploy_name}" --timeout=5s 2>&1 | grep "successfully") + if [ -n "${deploy_status}" ]; then + deploy_name=$(echo "${full_deploy_name}" | awk -F '/' '{print $2}') + for pod in $(${KUBECTL} -n "${namespace}" get pods -o name | grep "${deploy_name}"); do + ${KUBECTL} -n "${namespace}" wait --for=condition=Ready "${pod}" + done + break + fi + fi + retry_count=$((retry_count + 1)) + if message=$(retry "${retry_count}"); then + echo "${message}" + else + echo "Please check following events for more information:" + ${KUBECTL} -n "${namespace}" get events --sort-by='.lastTimestamp' | grep "${deploy_name}" + exit 1 + fi + done +} + +retry() { + attempts=${1:--999} + if [ "${attempts}" -ge ${MAX_RETRY} ]; then + echo "ERROR: Resource is not ready in ${MAX_RETRY} attempts." 
>&2 && exit 1 + else + attempt_str=$([ "${attempts}" -ge 0 ] && echo " (${attempts}/${MAX_RETRY})") + echo "Resource is not ready, re-attempt after ${RETRY_INTERVAL}s ...${attempt_str}" + sleep ${RETRY_INTERVAL} + fi +} + +encode_inline() { + input=$1 + case "${OSTYPE}" in + darwin*) + echo "${input}" | base64 -b 0 + ;; + *) + echo "${input}" | base64 -w 0 + ;; + esac +} + +password_secret_handler() { + if [ "${PWD_SECRET_ENCODED}" = "true" ]; then + echo "$1" | base64 -d + else + echo "$1" + fi +} + +apply_operator_bundle() { + sa_name="$1" + deploy_name="$2" + operator_yaml_str="$3" + + tmp_dir=$(mktemp -d) + # split out yaml from the yaml bundle + echo "${operator_yaml_str}" | awk '/^---/{i++} {file = "'"${tmp_dir}"'/yaml_part_" i ".yaml"; print > file}' + for yaml_part in "${tmp_dir}"/*.yaml; do + yaml_abs_path="${yaml_part}" + kind_name_str=$(${KUBECTL} create -f "${yaml_abs_path}" --dry-run=client -o=jsonpath="{.kind} {.metadata.name}") + obj_kind=$(echo "${kind_name_str}" | awk '{print $1}') + obj_name=$(echo "${kind_name_str}" | awk '{print $2}') + + is_object_exists=$(${KUBECTL} -n "${OPERATOR_NS}" get "${obj_kind}" --field-selector=metadata.name"=${obj_name}" -o name) + if [ "${ACTION}" = "delete" ]; then + # delete k8s resources if exists (avoid cluster resources) + skip_target=$(should_skip_delete_k8s_object "${obj_kind}") + [ -n "${is_object_exists}" ] && [ "${skip_target}" = "false" ] && ${KUBECTL} "${ACTION}" -f "${yaml_abs_path}" + elif [ -n "${is_object_exists}" ] && [ "${obj_kind}" = "ClusterRoleBinding" ]; then + # if cluster role binding exists, patch it with target services account + isClusterRoleBinded=$(${KUBECTL} get "${obj_kind}" "${obj_name}" -o=jsonpath='{range .subjects[*]}{.namespace}{"\n"}{end}' | grep -E "^${OPERATOR_NS}$") + if [ -z "${isClusterRoleBinded}" ]; then + ${KUBECTL} patch "${obj_kind}" "${obj_name}" --type='json' -p='[{"op": "add", "path": "/subjects/-", "value": {"kind": "ServiceAccount", "name": "'"${sa_name}"'", 
"namespace": "'"${OPERATOR_NS}"'"}}]' + else + echo "Skip patching ${obj_kind} ${obj_name} as the clusterRole has bound to the operator service account already." + fi + elif [ -z "${is_object_exists}" ]; then + # create the k8s object if not exists + ${KUBECTL} "create" -f "${yaml_abs_path}" --save-config + else + # update the k8s object if exists + ${KUBECTL} apply -f "${yaml_abs_path}" + fi + done + rm -rf "${tmp_dir}" + + # check if the operator is ready + [ "${ACTION}" != "delete" ] && wait_for_deployment "${OPERATOR_NS}" "${deploy_name}" +} + +should_skip_delete_k8s_object() { + k8s_kind=$1 + [ "${k8s_kind}" = "Namespace" ] && echo "true" && return + for it in ${K8S_CLUSTER_KINDS}; do + [ "${it}" = "${k8s_kind}" ] && echo "true" && return + done + echo "false" +} + +################## MAIN ################## +dependencies_check && validate_args "$@" && confirm_installation && main diff --git a/scripts/kubeturbo_deployment_helm_test.sh b/scripts/kubeturbo_deployment_helm_test.sh new file mode 100755 index 0000000..58228c7 --- /dev/null +++ b/scripts/kubeturbo_deployment_helm_test.sh @@ -0,0 +1,100 @@ +#!/bin/bash + +# To run with make: +# make helm-test +# To run locally outside of make: +# KUBECONFIG= VERSION= scripts/kubeturbo_deployment_helm_test.sh +# If kubectl or helm are not installed within $PATH, then need to further add HELM= KUBECTL= + + +check_resources() { + RESOURCE_TYPE=$1 + RESOURCE_NAME=$2 + echo "Checking if ${RESOURCE_TYPE} is created..." 
+ if [[ -z $(${KUBECTL} -n ${NAMESPACE} get ${RESOURCE_TYPE} | grep ${RESOURCE_NAME}) ]]; then + echo "Error: failed to create ${RESOURCE_TYPE} ${RESOURCE_NAME}" + exit 1 + fi +} + +VERSION="${VERSION:-''}" +KUBECONFIG="${KUBECONFIG:-''}" +KUBECTL="${KUBECTL:-$(command -v kubectl)}" +KUBECTL="${KUBECTL} --kubeconfig=${KUBECONFIG}" ## Empty KUBECONFIG will use default to use./kube/config +HELM="${HELM:-$(command -v helm)}" +HELM="${HELM} --kubeconfig=${KUBECONFIG}" ## Empty KUBECONFIG will use default to use./kube/config + +HELM_INSTALL_DIR="./deploy/kubeturbo/" + +SERVER_VERSION="${VERSION}" +KUBETURBO_VERSION="${SERVER_VERSION}" + +ROLE_NAME="turbo-cluster-admin" +RB_NAME="turbo-all-binding-helm-test" +SA_NAME="turbo-user-helm-test" + +NAMESPACE="turbonomic-helm-test" + +TARGET_NAME="Kind-helm-test" +TURBO_SERVER_URL="https:\/\/dummy-server" + + +echo "Using kubeturbo version: ${KUBETURBO_VERSION}" +echo "Using kubeconfig file: ${KUBECONFIG}" + +#construct and save to new values.yaml +sed -e "s/tag:.*/tag: $KUBETURBO_VERSION/g" \ + -e "s/roleName:.*/roleName: $ROLE_NAME/g" \ + -e "s/roleBinding:.*/roleBinding: $RB_NAME/g" \ + -e "s/serviceAccountName:.*/serviceAccountName: $SA_NAME/g" \ + -e "s/version:.*/version: $SERVER_VERSION/g" \ + -e "s/turboServer:.*/turboServer: $TURBO_SERVER_URL/g" \ + -e "s/targetName:.*/targetName: $TARGET_NAME/g" \ + -e "s/opsManagerUserName:.*//g" \ + -e "s/opsManagerPassword:.*//g" \ + "${HELM_INSTALL_DIR}/values.yaml" > "${HELM_INSTALL_DIR}/values-$TARGET_NAME.yaml" + +# create test namespace +${KUBECTL} create ns ${NAMESPACE} + +# install chart +RELEASE_NAME=kubeturbo-helm-test +echo "Uninstalling release if it exists already" +${HELM} uninstall ${RELEASE_NAME} -n ${NAMESPACE} + +echo "Installing kubeturbo through helm" +${HELM} install ${RELEASE_NAME} ${HELM_INSTALL_DIR} --values ${HELM_INSTALL_DIR}/values-$TARGET_NAME.yaml -n ${NAMESPACE} + +COUNTER=1 +while [[ -z $(${KUBECTL} -n ${NAMESPACE} get pod | grep ${RELEASE_NAME} | grep 
Running) ]] +do + if [ $COUNTER -eq 10 ]; then + echo "Time out waiting for pod to start" + echo "----------------------------------------------------------------------------------------" + echo "Generated values.yaml:" + cat ${HELM_INSTALL_DIR}/values-$TARGET_NAME.yaml + echo "----------------------------------------------------------------------------------------" + echo "Pod status and events:" + ${KUBECTL} -n ${NAMESPACE} get pod | grep ${RELEASE_NAME} + ${KUBECTL} -n ${NAMESPACE} get events --sort-by='.lastTimestamp' | grep ${RELEASE_NAME} + exit 1 + fi + echo "Waiting for kubeturbo pod to start" + sleep 5 + COUNTER=$[$COUNTER +1] +done + +check_resources serviceaccount ${SA_NAME} +check_resources clusterrole ${ROLE_NAME} +check_resources clusterrolebinding ${RB_NAME} +check_resources configmap turbo-config-${RELEASE_NAME} +echo "Test passed!" + +echo "Uninstalling kubeturbo" +${HELM} uninstall ${RELEASE_NAME} -n ${NAMESPACE} + +echo "Deleting test values.yaml" +rm -f ${HELM_INSTALL_DIR}/values-$TARGET_NAME.yaml + +echo "Deleting test namespace" +${KUBECTL} delete ns ${NAMESPACE} diff --git a/scripts/kubeturbo_deployment_yaml_test.sh b/scripts/kubeturbo_deployment_yaml_test.sh new file mode 100755 index 0000000..e850be6 --- /dev/null +++ b/scripts/kubeturbo_deployment_yaml_test.sh @@ -0,0 +1,117 @@ +#!/bin/bash + +# To run with make: +# make yaml-test +# To run locally outside of make: +# KUBECONFIG= VERSION= scripts/kubeturbo_deployment_helm_test.sh +# If kubectl is not installed within $PATH, then need to further add KUBECTL= + + +check_resources() { + RESOURCE_TYPE=$1 + RESOURCE_NAME=$2 + echo "Checking if ${RESOURCE_TYPE} is created..." 
+ if [[ -z $(${KUBECTL} -n ${NAMESPACE} get ${RESOURCE_TYPE} | grep ${RESOURCE_NAME}) ]]; then + echo "Error: failed to create ${RESOURCE_TYPE} ${RESOURCE_NAME}" + SUCCESS_FLAG=0 + fi +} + +run_yaml_test() { + YAML_FILE=$1 + ROLE_NAME=$2 + NAMESPACE="yaml-test-${ROLE_NAME}" + + echo "----------------------------------------------------------------------------------------" + echo "Starting test for ${YAML_FILE}" + + # remove ns (by splitting after first occurrence of "name: turbo") then substitute with desired values + sed -e '1,/^ name\: turbo$/d' \ + -e "s//${KUBETURBO_VERSION}/g" \ + -e "s/\"turboServer\": \"\"/\"turboServer\": \"$TURBO_SERVER_URL\"/g" \ + -e "s/username: BASE64encodedValue/username: ${SAMPLE_USERNAME}/g" \ + -e "s/password: BASE64encodedValue/password: ${SAMPLE_PASSWORD}/g" \ + -e "s/namespace: turbo/namespace: ${NAMESPACE}/g" \ + ${YAML_DIR}/${YAML_FILE} > ${YAML_DIR}/test-${YAML_FILE} + + # Create test ns + ${KUBECTL} create ns ${NAMESPACE} + # Apply yaml to install kubeturbo + ${KUBECTL} apply -f ${YAML_DIR}/test-${YAML_FILE} + + COUNTER=1 + while [[ -z $(${KUBECTL} -n ${NAMESPACE} get pod | grep kubeturbo | grep Running) ]] + do + if [ $COUNTER -eq 10 ]; then + echo "Time out waiting for pod to start" + echo "----------------------------------------------------------------------------------------" + echo "Generated yaml file:" + cat ${YAML_DIR}/test-${YAML_FILE} + echo "----------------------------------------------------------------------------------------" + echo "Pod status and events:" + ${KUBECTL} -n ${NAMESPACE} get pod | grep kubeturbo + ${KUBECTL} -n ${NAMESPACE} get events --sort-by='.lastTimestamp' | grep "kubeturbo" + echo "Test failed!" 
+ SUCCESS_FLAG=0 + return + fi + echo "Waiting for kubeturbo pod to start" + sleep 5 + COUNTER=$[$COUNTER +1] + done + + check_resources serviceaccount ${SA_NAME} + check_resources clusterrole ${ROLE_NAME} + check_resources clusterrolebinding ${RB_NAME} + check_resources configmap ${CONFIGMAP_NAME} + echo "Test passed!" + + echo "Uninstalling kubeturbo" + ${KUBECTL} delete -f ${YAML_DIR}/test-${YAML_FILE} + + echo "Deleting test values.yaml" + rm -f ${YAML_DIR}/test-${YAML_FILE} + + echo "Deleting namespace ${NAMESPACE}" + ${KUBECTL} delete ns ${NAMESPACE} +} + +VERSION="${VERSION:-''}" +KUBECONFIG="${KUBECONFIG:-''}" +KUBECTL="${KUBECTL:-$(command -v kubectl)}" +KUBECTL="${KUBECTL} --kubeconfig=${KUBECONFIG}" ## Empty KUBECONFIG will use default to use./kube/config + +KUBETURBO_VERSION="${VERSION}" + +SA_NAME="turbo-user" +RB_NAME="turbo-all-binding-kubeturbo-turbo" +CONFIGMAP_NAME="turbo-config" + +TURBO_SERVER_URL="https:\/\/dummy-server" +SAMPLE_USERNAME=$(echo "user1" | base64) +SAMPLE_PASSWORD=$(echo "password1" | base64) + +YAML_DIR="deploy/kubeturbo_yamls" +LEAST_ADMIN_FILE="kubeturbo_least_admin_full.yaml" +LEAST_ADMIN_ROLE_NAME="turbo-cluster-admin" +READER_FILE="kubeturbo_reader_full.yaml" +READER_ROLE_NAME="turbo-cluster-reader" +CLUSTER_ADMIN_FILE="kubeturbo_full.yaml" +CLUSTER_ADMIN_ROLE_NAME="cluster-admin" + +SUCCESS_FLAG=1 + +echo "Using kubeturbo version: ${KUBETURBO_VERSION}" +echo "Using kubeconfig file: ${KUBECONFIG}" + +run_yaml_test ${LEAST_ADMIN_FILE} ${LEAST_ADMIN_ROLE_NAME} +run_yaml_test ${READER_FILE} ${READER_ROLE_NAME} +run_yaml_test ${CLUSTER_ADMIN_FILE} ${CLUSTER_ADMIN_ROLE_NAME} + +if [ ${SUCCESS_FLAG} -eq 1 ]; then + echo "Summmary: All tests passed!" + exit 0 +else + echo "Summary: One or more tests failed!" 
+ exit 1 +fi \ No newline at end of file diff --git a/scripts/multi-node-kind-cluster.yaml b/scripts/multi-node-kind-cluster.yaml new file mode 100644 index 0000000..d5fd0b4 --- /dev/null +++ b/scripts/multi-node-kind-cluster.yaml @@ -0,0 +1,7 @@ +# three node (two workers) cluster config +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane +- role: worker +- role: worker \ No newline at end of file diff --git a/scripts/public_repo_update.sh b/scripts/public_repo_update.sh new file mode 100755 index 0000000..8e0c724 --- /dev/null +++ b/scripts/public_repo_update.sh @@ -0,0 +1,105 @@ +#!/usr/bin/env bash + +set -e + +if [ -z "$1" ]; then + echo "Error: script argument VERSION is not specified." + exit 1 +fi +VERSION=$1 + +if [ -z "${PUBLIC_GITHUB_TOKEN}" ]; then + echo "Error: PUBLIC_GITHUB_TOKEN environment variable is not set" + exit 1 +fi +TC_PUBLIC_REPO=turbonomic-container-platform + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +SRC_DIR=${SCRIPT_DIR}/../deploy +OUTPUT_DIR=${SCRIPT_DIR}/../_output +HELM=${SCRIPT_DIR}/../bin/helm + +if ! command -v ${HELM} > /dev/null 2>&1; then + HELM=helm + if ! command -v helm > /dev/null 2>&1; then + echo "Error: helm could not be found." + exit 1 + fi +fi + +if ! command -v git > /dev/null 2>&1; then + echo "Error: git could not be found." + exit 1 +fi + +echo "===> Cloning public repo..."; +mkdir ${OUTPUT_DIR} +cd ${OUTPUT_DIR} +git clone https://${PUBLIC_GITHUB_TOKEN}@github.com/IBM/${TC_PUBLIC_REPO}.git +cd ${TC_PUBLIC_REPO} + +echo "===> Cleanup existing files" +rm -rf kubeturbo +mkdir -p kubeturbo/operator +mkdir -p kubeturbo/yamls +cd kubeturbo + +# copy helm chart +echo "===> Copy helm chart files" +cp -r ${SRC_DIR}/kubeturbo helm_chart + +# copy operator files +echo "===> Copy Operator files" +cd operator +cp ${SRC_DIR}/kubeturbo_operator_yamls/*.yaml . 
+ +# copy yaml files +echo "===> Copy yaml files" +cd ../yamls +cp ${SRC_DIR}/kubeturbo_yamls/kubeturbo_full.yaml . +cp ${SRC_DIR}/kubeturbo_yamls/kubeturbo_least_admin_full.yaml . +cp ${SRC_DIR}/kubeturbo_yamls/kubeturbo_reader_full.yaml . + +# Insert current version +echo "===> Updating Turbo version in yaml files" +cd .. +sed -i.bak "s|version: 1.0.0|version: ${VERSION}|" helm_chart/Chart.yaml +find ./ -type f -name '*.y*' -exec sed -i.bak "s||${VERSION}|g" {} + +find ./ -name '*.bak' -type f -delete +find ./ -name '*.md' -type f -delete +echo "See the [documentation](https://www.ibm.com/docs/en/tarm/latest?topic=targets-connecting-kubernetes-clusters)" > README.md + +# commit all modified source files to the public repo +echo "===> Commit modified files to public repo" +cd .. +git add . +if ! git diff --quiet --cached; then + git commit -m "kubeturbo deployment ${VERSION}" + git push +else + echo "No changed files" +fi + +# package the helm chart and upload to helm repo +echo "===> Package helm chart" +${HELM} package kubeturbo/helm_chart -d ${OUTPUT_DIR} + +echo "===> Update helm chart index" +git switch gh-pages +cp index.yaml .. +mkdir -p downloads/kubeturbo +cp ${OUTPUT_DIR}/kubeturbo-${VERSION}.* downloads/kubeturbo/ +${HELM} repo index .. --url https://ibm.github.io/${TC_PUBLIC_REPO}/downloads/kubeturbo --merge index.yaml +cp ../index.yaml . + +# commit packaged helm chart +echo "===> Commit packaged helm chart to helm chart repo" +git add . +git commit -m "kubeturbo helm chart ${VERSION}" +git push + +# cleanup +rm -rf ${OUTPUT_DIR} + +echo "" +echo "Update public repo complete." diff --git a/test/e2e/e2e_kt_reconcile.go b/test/e2e/e2e_kt_reconcile.go new file mode 100644 index 0000000..9f2d0c3 --- /dev/null +++ b/test/e2e/e2e_kt_reconcile.go @@ -0,0 +1,258 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + ktv1 "github.ibm.com/turbonomic/kubeturbo-deploy/api/v1" + + "github.ibm.com/turbonomic/kubeturbo-deploy/internal/constants" + "github.ibm.com/turbonomic/kubeturbo-deploy/test/utils" +) + +var _ = Describe("Test CR reconciliation", Ordered, func() { + var err error + var cmd *exec.Cmd + var tmpCRFile *os.File + var ktPodName string + var kt ktv1.Kubeturbo + + var before_test_k8s_context string + test_k8s_context := "kind-" + utils.KIND_CLUSTER + + // projectImage stores the name of the image used in the test + var PROJECT_IMAGE = utils.REGISTRY + "/" + utils.OPERATOR_NAME + ":" + utils.VERSION + + // path to the generated YAML bundle for the Kubeturbo operator + base, _ := os.Getwd() + YAML_BUNDLE_DIR := filepath.Join(base, "../../deploy/kubeturbo_operator_yamls") + YAML_BUNDLE_PATH, _ := filepath.Abs(filepath.Join(YAML_BUNDLE_DIR, "operator-bundle.yaml")) + + KT_CR_Name := "kubeturbo-release" + WAIT_PERIOD := 20 * time.Second + + BeforeAll(func() { + By("Check or create the kind cluster") + os.Setenv("KUBECONFIG_STR", utils.KIND_KUBECONFIG_STR) + cmd = exec.Command("make", "create-kind-cluster") + _, _ = utils.Run(cmd) + + By("Get current K8s cluster context") + before_test_k8s_context = utils.CurrentK8sContext() + + By("Switch to the Kind cluster") + err = utils.SwitchToContext(test_k8s_context) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Build test image") + cmd = exec.Command("make", "docker-build", 
fmt.Sprintf("IMG=%s", PROJECT_IMAGE)) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Load image to Kind cluster") + err = utils.LoadImageToKindClusterWithName(PROJECT_IMAGE) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Create namespace") + os.Setenv("NAMESPACE", utils.NAMESPACE) + cmd = exec.Command(utils.KUBECTL, "create", "ns", utils.NAMESPACE) + _, _ = utils.Run(cmd) + + By("Install Kubeturbo Operator") + cmd = exec.Command("make", "export_operator_yaml_bundle") + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + cmd = exec.Command(utils.KUBECTL, "apply", "-f", YAML_BUNDLE_PATH) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + // Create a temporary file in the system's default temporary directory + tmpCRFile, err = os.CreateTemp("", "kt-cr-*.json") + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + utils.LogMessage("INFO", "Temporary CR file created: %s\n", tmpCRFile.Name()) + }) + + AfterAll(func() { + By("Clean up") + cmd = exec.Command(utils.KUBECTL, "delete", "-f", tmpCRFile.Name()) + _, _ = utils.Run(cmd) + utils.LogMessage("INFO", "Temporary CR file remove: %s\n", tmpCRFile.Name()) + os.Remove(tmpCRFile.Name()) + + By("Uninstall the Operator") + cmd = exec.Command(utils.KUBECTL, "delete", "-f", YAML_BUNDLE_PATH, "--ignore-not-found=true") + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Switch back to the K8s cluster before test") + if before_test_k8s_context != "" && + before_test_k8s_context != test_k8s_context { + By("revert k8s context change") + err := utils.SwitchToContext(before_test_k8s_context) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + }) + + Context("Create plain CR", func() { + It("Apply CR", func() { + version := "8.14.6" + kt = utils.GenerateKtV1CRWithSpec(KT_CR_Name, &ktv1.KubeturboSpec{ + Image: ktv1.KubeturboImage{Tag: &version}, + ServerMeta: ktv1.KubeturboServerMeta{Version: &version}, + 
}) + ExpectWithOffset(1, utils.WriteToFile(tmpCRFile, kt)).NotTo(HaveOccurred()) + + cmd = exec.Command(utils.KUBECTL, "apply", "-f", tmpCRFile.Name()) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Validate installation") + assets := utils.GenerateKubeturboAssets(kt) + + By(fmt.Sprintf("Wait %s for operator to act", WAIT_PERIOD.String())) + time.Sleep(WAIT_PERIOD) + + By("Verify Assets creation") + ExpectWithOffset(1, utils.VerifyK8sAssets(assets)).NotTo(HaveOccurred()) + + By("Verify ClusterRoleBinding relationship") + ExpectWithOffset(1, utils.VerifyCRB(assets)).NotTo(HaveOccurred()) + + By("Verify Deployment image tag") + ExpectWithOffset(1, utils.VerifyImageTag(assets, constants.KubeturboContainerName, version)).NotTo(HaveOccurred()) + + By("Verify Deployment settings") + ExpectWithOffset(1, utils.VerifyDeployment(assets)).NotTo(HaveOccurred()) + + By("Caching running Kubeturbo pod name") + ktPodName, err = utils.GetRunningPodByDeployName(KT_CR_Name) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + }) + }) + + Context("Modify CR that restarts pod", func() { + version := "8.13.1" + targetName := "e2e-test-cluster" + It("Apply CR", func() { + kt.Spec = ktv1.KubeturboSpec{ + Image: ktv1.KubeturboImage{Tag: &version}, + ServerMeta: ktv1.KubeturboServerMeta{Version: &version}, + TargetConfig: ktv1.KubeturboTargetConfig{TargetName: &targetName}, + } + ExpectWithOffset(1, utils.WriteToFile(tmpCRFile, kt)).NotTo(HaveOccurred()) + + cmd = exec.Command(utils.KUBECTL, "apply", "-f", tmpCRFile.Name()) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Validate changes") + oldKtPodName := ktPodName + assets := utils.GenerateKubeturboAssets(kt) + + By(fmt.Sprintf("Wait %s for operator to act", WAIT_PERIOD.String())) + time.Sleep(WAIT_PERIOD) + + // expecting pod restart with a different tag + By("Verify Deployment image tag") + ExpectWithOffset(1, utils.VerifyImageTag(assets, 
constants.KubeturboContainerName, version)).NotTo(HaveOccurred()) + + By("Verify Deployment settings") + ExpectWithOffset(1, utils.VerifyDeployment(assets)).NotTo(HaveOccurred()) + + By("Caching running Kubeturbo pod name") + ktPodName, err = utils.GetRunningPodByDeployName(KT_CR_Name) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Verify the Kubeturbo pod restarts") + utils.LogMessage("INFO", "Pod restarts: %s -> %s ", oldKtPodName, ktPodName) + ExpectWithOffset(1, ktPodName).NotTo(Equal(oldKtPodName)) + }) + }) + + Context("Modify CR that should not restarts pod", func() { + logLevel := 5 + roleName := ktv1.RoleTypeAdmin + npMax := 100 + It("Apply CR", func() { + kt.Spec.RoleName = roleName + kt.Spec.Logging = ktv1.Logging{Level: &logLevel} + kt.Spec.NodePoolSize = ktv1.NodePoolSize{Max: &npMax} + ExpectWithOffset(1, utils.WriteToFile(tmpCRFile, kt)).NotTo(HaveOccurred()) + + cmd = exec.Command(utils.KUBECTL, "apply", "-f", tmpCRFile.Name()) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Validate changes") + oldKtPodName := ktPodName + assets := utils.GenerateKubeturboAssets(kt) + + By(fmt.Sprintf("Wait %s for operator to act", WAIT_PERIOD.String())) + time.Sleep(WAIT_PERIOD) + + // change roleName: update the clusterRole and clusterRoleBinding for Kubeturbo + By("Verify Assets creation") + ExpectWithOffset(1, utils.VerifyK8sAssets(assets)).NotTo(HaveOccurred()) + + By("Verify ClusterRoleBinding relationship") + ExpectWithOffset(1, utils.VerifyCRB(assets)).NotTo(HaveOccurred()) + + By("Caching running Kubeturbo pod name") + ktPodName, err = utils.GetRunningPodByDeployName(KT_CR_Name) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + // change logLevel: update the dynamic config in the configMap + // change npMax: update the dynamic config in the configMap + By("Verify the Kubeturbo pod not restarts") + ExpectWithOffset(1, ktPodName).To(Equal(oldKtPodName)) + }) + }) + + Context("Delete CR", func() { + It("Delete 
CR", func() { + cmd = exec.Command(utils.KUBECTL, "delete", "-f", tmpCRFile.Name()) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By(fmt.Sprintf("Wait %s for operator to act", WAIT_PERIOD.String())) + time.Sleep(WAIT_PERIOD) + + By("Verify created Kubeturbo resources are deleted") + cmd = exec.Command(utils.KUBECTL, + "-n", utils.NAMESPACE, + "get", "deploy,sa,cm,clusterrole,clusterrolebinding", + "-o", "name", + "-l", fmt.Sprintf("app.kubernetes.io/managed-by=%s", utils.OPERATOR_NAME), + ) + result, err := utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + // General check if all Kubeturbo resources that created + // by the operator are deleted when we deleting the CR + ExpectWithOffset(1, string(result)).Should(BeEmpty()) + }) + }) +}) diff --git a/test/e2e/e2e_operator_lifecycle.go b/test/e2e/e2e_operator_lifecycle.go new file mode 100644 index 0000000..ffa0c3c --- /dev/null +++ b/test/e2e/e2e_operator_lifecycle.go @@ -0,0 +1,153 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.ibm.com/turbonomic/kubeturbo-deploy/test/utils" +) + +var _ = Describe("Operator lifecycle", Ordered, func() { + var err error + var cmd *exec.Cmd + + var before_test_k8s_context string + test_k8s_context := "kind-" + utils.KIND_CLUSTER + + // projectImage stores the name of the image used in the test + var PROJECT_IMAGE = utils.REGISTRY + "/" + utils.OPERATOR_NAME + ":" + utils.VERSION + + // path to the generated YAML bundle for the Kubeturbo operator + base, _ := os.Getwd() + YAML_BUNDLE_DIR := filepath.Join(base, "../../deploy/kubeturbo_operator_yamls") + YAML_BUNDLE_PATH, _ := filepath.Abs(filepath.Join(YAML_BUNDLE_DIR, "operator-bundle.yaml")) + + BeforeAll(func() { + By("Check or create the kind cluster") + os.Setenv("KUBECONFIG_STR", utils.KIND_KUBECONFIG_STR) + cmd = exec.Command("make", "create-kind-cluster") + _, _ = utils.Run(cmd) + + By("Get current K8s cluster context") + before_test_k8s_context = utils.CurrentK8sContext() + + By("Switch to the Kind cluster") + err = utils.SwitchToContext(test_k8s_context) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Build test image") + cmd = exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", PROJECT_IMAGE)) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Load image to Kind cluster") + err = utils.LoadImageToKindClusterWithName(PROJECT_IMAGE) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + }) + + AfterAll(func() { + By("Switch back to the K8s cluster before test") + if before_test_k8s_context != "" && + before_test_k8s_context != test_k8s_context { + By("revert k8s context change") + err := utils.SwitchToContext(before_test_k8s_context) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + }) + + Context("YAML approach", func() { + It("Generate bundle", func() { + By("Create namespace") + os.Setenv("NAMESPACE", utils.NAMESPACE) + cmd = exec.Command(utils.KUBECTL, "create", "ns", utils.NAMESPACE) + _, _ = utils.Run(cmd) + + 
cmd = exec.Command("make", "export_operator_yaml_bundle") + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Install bundle") + cmd = exec.Command(utils.KUBECTL, "apply", "-f", YAML_BUNDLE_PATH) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Validate installation") + assets := utils.OPERATOR_ASSETS + + By("Verify Asset creation") + ExpectWithOffset(1, utils.VerifyK8sAssets(assets)).NotTo(HaveOccurred()) + + By("Verify ClusterRoleBinding relationship") + ExpectWithOffset(1, utils.VerifyCRB(assets)).NotTo(HaveOccurred()) + + By("Verify Deployment settings") + ExpectWithOffset(1, utils.VerifyDeployment(assets)).NotTo(HaveOccurred()) + + By("Uninstall bundle") + cmd = exec.Command(utils.KUBECTL, "delete", "-f", YAML_BUNDLE_PATH, "--ignore-not-found=true") + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + }) + }) + + Context("Direct deploy approach", func() { + It("Install CRDs", func() { + By("Create namespace") + os.Setenv("NAMESPACE", utils.NAMESPACE) + cmd = exec.Command(utils.KUBECTL, "create", "ns", utils.NAMESPACE) + _, _ = utils.Run(cmd) + + cmd = exec.Command("make", "install") + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Deploy operator") + cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", PROJECT_IMAGE)) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("Validate installation") + assets := utils.OPERATOR_ASSETS + + By("Verify Asset creation") + ExpectWithOffset(1, utils.VerifyK8sAssets(assets)).NotTo(HaveOccurred()) + + By("Verify ClusterRoleBinding relationship") + ExpectWithOffset(1, utils.VerifyCRB(assets)).NotTo(HaveOccurred()) + + By("Verify Deployment settings") + ExpectWithOffset(1, utils.VerifyDeployment(assets)).NotTo(HaveOccurred()) + + By("Undeploy operator") + cmd = exec.Command("make", "undeploy") + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + 
By("Uninstall CRDs") + cmd = exec.Command("make", "uninstall", "ignore-not-found=true") + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go new file mode 100644 index 0000000..066ee54 --- /dev/null +++ b/test/e2e/e2e_suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.ibm.com/turbonomic/kubeturbo-deploy/test/utils" +) + +// Run e2e tests using the Ginkgo runner. +func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + utils.LogMessage("INFO", "Starting kubeturbo-deploy suite") + RunSpecs(t, "e2e suite") +} diff --git a/test/utils/utils.go b/test/utils/utils.go new file mode 100644 index 0000000..512296c --- /dev/null +++ b/test/utils/utils.go @@ -0,0 +1,430 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package utils + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "strings" + + ktv1 "github.ibm.com/turbonomic/kubeturbo-deploy/api/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "github.com/onsi/ginkgo/v2" //nolint:golint,revive +) + +type K8SAssets struct { + ServiceAccount string + ClusterRole string + ClusterRoleBinding string + Deployment string + ConfigMap string +} + +const ( + prometheusOperatorVersion = "v0.68.0" + prometheusOperatorURL = "https://github.com/prometheus-operator/prometheus-operator/" + + "releases/download/%s/bundle.yaml" + + certmanagerVersion = "v1.5.3" + certmanagerURLTmpl = "https://github.com/jetstack/cert-manager/releases/download/%s/cert-manager.yaml" +) + +var ( + KUBECTL = EnvLookUp("KUBECTL", "kubectl") + KIND = EnvLookUp("KIND", "kind") + KIND_CLUSTER = EnvLookUp("KIND_CLUSTER", "kind") + KIND_KUBECONFIG = EnvLookUp("KIND_KUBECONFIG", "~/.kube/config") + NAMESPACE = EnvLookUp("NAMESPACE", "turbonomic") + REGISTRY = EnvLookUp("REGISTRY", "e2e-test") + OPERATOR_NAME = EnvLookUp("OPERATOR_NAME", "kubeturbo-operator") + VERSION = EnvLookUp("VERSION", "8.13.6") + KIND_KUBECONFIG_STR = fmt.Sprintf("--kubeconfig=%v", KIND_KUBECONFIG) + LOGGING_LEVEL = EnvLookUp("TESTING_LOGGING_LEVEL", "INFO") + DEFAULT_KT_VERSION = EnvLookUp("DEFAULT_KUBETURBO_VERSION", VERSION) + OPERATOR_ASSETS = K8SAssets{ + ServiceAccount: "serviceaccount/kubeturbo-operator", + ClusterRole: "clusterrole.rbac.authorization.k8s.io/kubeturbo-operator", + ClusterRoleBinding: "clusterrolebinding.rbac.authorization.k8s.io/kubeturbo-operator", + Deployment: "deployment.apps/kubeturbo-operator", + } + LOG_LEVELS = map[string]int{ + "ERROR": 1, + "WARN": 2, + "INFO": 3, + "DEBUG": 4, + } +) + +func LogMessage(level string, format string, args ...interface{}) { + normalizedLogLevel := strings.ToUpper(LOGGING_LEVEL) + currentLogLevel, ok := LOG_LEVELS[normalizedLogLevel] + if !ok { + LOGGING_LEVEL = "INFO" + LogMessage("WARN", "The given 
log level %s is invalid, default to INFO level", normalizedLogLevel) + return + } + + normalizedLogLevel = strings.ToUpper(level) + messageLogLevel, ok := LOG_LEVELS[normalizedLogLevel] + if !ok { + LogMessage("WARN", "The message sets to an invalid log level %s default to INFO level", level) + LogMessage("INFO", format, args...) + return + } + + if messageLogLevel <= currentLogLevel { + fmt.Fprintf(GinkgoWriter, "[%s] %s\n", normalizedLogLevel, fmt.Sprintf(format, args...)) + } +} + +// InstallPrometheusOperator installs the prometheus Operator to be used to export the enabled metrics. +func InstallPrometheusOperator() error { + url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) + cmd := exec.Command(KUBECTL, "create", "-f", url) + _, err := Run(cmd) + return err +} + +// Run executes the provided command within this context +func Run(cmd *exec.Cmd) ([]byte, error) { + dir, _ := GetProjectDir() + cmd.Dir = dir + + if err := os.Chdir(cmd.Dir); err != nil { + fmt.Fprintf(GinkgoWriter, "chdir dir: %s\n", err) + } + + cmd_name := cmd.Args[0] + if cmd_name == KUBECTL { + cmd.Args = append(cmd.Args, KIND_KUBECONFIG_STR) + } + + cmd.Env = append(os.Environ(), "GO111MODULE=on") + command := strings.Join(cmd.Args, " ") + LogMessage("INFO", "> %s", command) + output, err := cmd.CombinedOutput() + if err != nil { + return output, fmt.Errorf("%s failed with error: (%v) %s", command, err, string(output)) + } + LogMessage("DEBUG", "%s", output) + return output, nil +} + +// UninstallPrometheusOperator uninstalls the prometheus +func UninstallPrometheusOperator() { + url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion) + cmd := exec.Command(KUBECTL, "delete", "-f", url) + if _, err := Run(cmd); err != nil { + LogMessage("WARN", "%s", err.Error()) + } +} + +// UninstallCertManager uninstalls the cert manager +func UninstallCertManager() { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command(KUBECTL, "delete", "-f", 
url) + if _, err := Run(cmd); err != nil { + LogMessage("WARN", "%s", err.Error()) + } +} + +// InstallCertManager installs the cert manager bundle. +func InstallCertManager() error { + url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion) + cmd := exec.Command(KUBECTL, "apply", "-f", url) + if _, err := Run(cmd); err != nil { + return err + } + // Wait for cert-manager-webhook to be ready, which can take time if cert-manager + // was re-installed after uninstalling on a cluster. + cmd = exec.Command(KUBECTL, "wait", "deployment.apps/cert-manager-webhook", + "--for", "condition=Available", + "--namespace", "cert-manager", + "--timeout", "5m", + ) + + _, err := Run(cmd) + return err +} + +// LoadImageToKindCluster loads a local docker image to the kind cluster +func LoadImageToKindClusterWithName(name string) error { + kindOptions := []string{"load", "docker-image", name, "--name", KIND_CLUSTER} + cmd := exec.Command(KIND, kindOptions...) + _, err := Run(cmd) + return err +} + +func SwitchToContext(context string) (err error) { + LogMessage("INFO", "Switching to k8s context: %s", context) + cmd := exec.Command(KUBECTL, "config", "use-context", context) + _, err = Run(cmd) + return +} + +// GetNonEmptyLines converts given command output string into individual objects +// according to line breakers, and ignores the empty elements in it. 
+func GetNonEmptyLines(output string) []string { + var res []string + elements := strings.Split(output, "\n") + for _, element := range elements { + if element != "" { + res = append(res, element) + } + } + + return res +} + +// GetProjectDir will return the directory where the project is +func GetProjectDir() (string, error) { + wd, err := os.Getwd() + if err != nil { + return wd, err + } + wd = strings.Replace(wd, "/test/e2e", "", -1) + return wd, nil +} + +func EnvLookUp(env_var string, default_val string) (val string) { + val = default_val + if v, ok := os.LookupEnv(env_var); ok { + val = v + } + fmt.Fprintf(GinkgoWriter, "Addressing %s as: %s\n", env_var, val) + os.Setenv(env_var, val) + return +} + +func CurrentK8sContext() (currentContext string) { + cmd := exec.Command(KUBECTL, "config", "current-context") + output, _ := Run(cmd) + currentContext = GetNonEmptyLines(string(output))[0] + LogMessage("INFO", "Current k8s context is: %s", currentContext) + return +} + +func VerifyK8sAssets(assets K8SAssets) error { + var cmd *exec.Cmd + var err error + var errs []error + + // To verify if targeting k8s objects are created + k8s_targets := []string{assets.ServiceAccount, assets.ClusterRoleBinding, assets.ClusterRole, assets.Deployment, assets.ConfigMap} + for _, k8s_target := range k8s_targets { + if k8s_target == "" { + continue + } + cmd = exec.Command(KUBECTL, "-n", NAMESPACE, "get", k8s_target) + _, err = Run(cmd) + if err != nil { + errs = append(errs, err) + } + } + + return SummarizeErrors(errs) +} + +func VerifyCRB(assets K8SAssets) error { + var cmd *exec.Cmd + var err error + var errs []error + + // To verify the clusterrole_binding binds to right object + jsonPath := "-o=jsonpath='{.roleRef.kind}.{.roleRef.apiGroup}/{.roleRef.name}'" + cmd = exec.Command(KUBECTL, "get", assets.ClusterRoleBinding, jsonPath) + result, err := Run(cmd) + if err != nil { + errs = append(errs, err) + } + + resultStr := strings.ToLower(strings.Trim(string(result), "'")) + if 
resultStr != assets.ClusterRole { + errs = append(errs, fmt.Errorf("expecting %s binds to %s but got %s", assets.ClusterRoleBinding, assets.ClusterRole, resultStr)) + } + + jsonPath = fmt.Sprintf("-o=jsonpath='{range .subjects[?(@.namespace == \"%s\")]}{.kind}/{.name}{end}'", NAMESPACE) + cmd = exec.Command(KUBECTL, "get", assets.ClusterRoleBinding, jsonPath) + result, err = Run(cmd) + if err != nil { + errs = append(errs, err) + } + + resultStr = strings.ToLower(strings.Trim(string(result), "'")) + if resultStr != assets.ServiceAccount { + errs = append(errs, fmt.Errorf("expecting %s binds to %s but got %s", assets.ClusterRoleBinding, assets.ServiceAccount, resultStr)) + } + + return SummarizeErrors(errs) +} + +func VerifyDeployment(assets K8SAssets) error { + var cmd *exec.Cmd + var err error + var errs []error + + // To verify the deployment uses right serviceaccount + jsonPath := "-o=jsonpath='serviceaccount/{.spec.template.spec.serviceAccountName}'" + cmd = exec.Command(KUBECTL, "-n", NAMESPACE, "get", assets.Deployment, jsonPath) + result, err := Run(cmd) + if err != nil { + errs = append(errs, err) + } + + resultStr := strings.ToLower(strings.Trim(string(result), "'")) + if resultStr != assets.ServiceAccount { + errs = append(errs, fmt.Errorf("expecting %s uses %s but got %s", assets.Deployment, assets.ServiceAccount, resultStr)) + } + + // To verify the operator pod runs without any issue + LogMessage("INFO", "Wait until the deployment %s become ready", assets.Deployment) + cmd = exec.Command(KUBECTL, "-n", NAMESPACE, "rollout", "status", assets.Deployment, "--timeout=1m") + _, err = Run(cmd) + if err != nil { + errs = append(errs, err) + } + + return SummarizeErrors(errs) +} + +func VerifyImageTag(assets K8SAssets, containerName string, desireTag string) error { + var cmd *exec.Cmd + var err error + + jsonPath := fmt.Sprintf("-o=jsonpath='{.spec.template.spec.containers[?(@.name == \"%s\")].image}'", containerName) + cmd = exec.Command(KUBECTL, "-n", 
NAMESPACE, "get", assets.Deployment, jsonPath) + result, err := Run(cmd) + if err != nil { + return err + } + + resultStr := strings.Trim(string(result), "'") + parts := strings.Split(resultStr, ":") + tag := "latest" + if len(parts) == 2 { + tag = parts[1] + } + + if desireTag != tag { + return fmt.Errorf("expecting %s uses image tag %s but got %s", assets.Deployment, desireTag, tag) + } + + return nil +} + +func SummarizeErrors(errs []error) error { + var sb strings.Builder + + // Iterate through the errors and append to the builder if not nil. + for _, err := range errs { + if err != nil { + sb.WriteString(err.Error() + "\n") + } + } + + if len(sb.String()) == 0 { + return nil + } + return errors.New(sb.String()) +} + +func WriteToFile(targetFile *os.File, value any) (err error) { + file, err := os.Create(targetFile.Name()) + if err != nil { + LogMessage("ERROR", "Failed to open file: %s", err) + return err + } + defer file.Close() + + encoder := json.NewEncoder(file) + if err = encoder.Encode(value); err != nil { + LogMessage("ERROR", "Fail to encode %s to %s", value, file.Name()) + return err + } + return nil +} + +func GenerateKtV1CRWithSpec(name string, spec *ktv1.KubeturboSpec) ktv1.Kubeturbo { + return ktv1.Kubeturbo{ + TypeMeta: metav1.TypeMeta{ + Kind: "Kubeturbo", + APIVersion: "charts.helm.k8s.io/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: NAMESPACE, + }, + Spec: *spec, + } +} + +func GenerateKubeturboAssets(kt ktv1.Kubeturbo) K8SAssets { + // follow the same rule sets in internal/api/kubeturbo/reconciler.go + serviceAccount := "turbo-user" + if kt.Spec.ServiceAccountName != "" { + serviceAccount = kt.Spec.ServiceAccountName + } + + // follow the same rule sets in internal/api/kubeturbo/reconciler.go + clusterRole := "cluster-admin" + if kt.Spec.RoleName != "" { + clusterRole = kt.Spec.RoleName + if clusterRole == ktv1.RoleTypeAdmin || clusterRole == ktv1.RoleTypeReadOnly { + clusterRole = clusterRole + "-" + 
kt.ObjectMeta.Name + "-" + kt.ObjectMeta.Namespace + } + } + + // follow the same rule sets in internal/api/kubeturbo/reconciler.go + clusterRoleBinding := "turbo-all-binding" + if kt.Spec.RoleBinding != "" { + clusterRoleBinding = kt.Spec.RoleBinding + } + clusterRoleBinding = clusterRoleBinding + "-" + kt.ObjectMeta.Name + "-" + kt.ObjectMeta.Namespace + + // follow the same rule sets in internal/api/kubeturbo/reconciler.go + configMap := "turbo-config-" + kt.ObjectMeta.Name + + return K8SAssets{ + Deployment: fmt.Sprintf("deployment.apps/%s", kt.Name), + ConfigMap: fmt.Sprintf("configmap/%s", configMap), + ServiceAccount: fmt.Sprintf("serviceaccount/%s", serviceAccount), + ClusterRoleBinding: fmt.Sprintf("clusterrolebinding.rbac.authorization.k8s.io/%s", clusterRoleBinding), + ClusterRole: fmt.Sprintf("clusterrole.rbac.authorization.k8s.io/%s", clusterRole), + } +} + +func GetRunningPodByDeployName(deployName string) (string, error) { + cmd := exec.Command(KUBECTL, + "-n", NAMESPACE, + "get", "pods", + "-o", "name", + "--field-selector=status.phase=Running", + "-l", fmt.Sprintf("app.kubernetes.io/name=%s", deployName), + ) + result, err := Run(cmd) + if err != nil { + return "", err + } + + resultStr := strings.Trim(string(result), "'") + LogMessage("INFO", "The current running pod for %s is %s", deployName, resultStr) + return resultStr, nil +}