From 5dd1ea5d1fde584c51c199a47c58d12415920482 Mon Sep 17 00:00:00 2001
From: Simon Beck
Date: Tue, 6 Aug 2024 10:00:52 +0200
Subject: [PATCH] Add initial vcluster support

This will help with developing for split clusters, where Crossplane and
the services are not on the same cluster.
---
 Makefile                           | 109 +++++++++++++++++++++++++----
 Makefile.vars.mk                   |  10 +++
 README.md                          |  32 +++++++++
 argocd/controlplanesecret.yaml     |  13 ++++
 argocd/service-account-secret.yaml |   7 ++
 forgejo/values.yaml                |   2 +-
 kind/kind.mk                       |   2 +-
 prometheus/values_vcluster.yaml    |  26 +++++++
 vclusterconfig/values.yaml         |   6 ++
 9 files changed, 193 insertions(+), 14 deletions(-)
 create mode 100644 argocd/controlplanesecret.yaml
 create mode 100644 argocd/service-account-secret.yaml
 create mode 100644 prometheus/values_vcluster.yaml
 create mode 100644 vclusterconfig/values.yaml

diff --git a/Makefile b/Makefile
index 649d01e..b4767a0 100644
--- a/Makefile
+++ b/Makefile
@@ -37,18 +37,20 @@ lint: ## All-in-one linting
 	@echo 'Check for uncommitted changes ...'
 	git diff --exit-code
 
-kind-storage: kind-setup csi-host-path-setup
+kind-storage: kind-setup csi-host-path-setup vcluster-setup
 
 crossplane-setup: $(crossplane_sentinel) ## Install local Kubernetes cluster and install Crossplane
 
 $(crossplane_sentinel): export KUBECONFIG = $(KIND_KUBECONFIG)
-$(crossplane_sentinel): kind-setup csi-host-path-setup load-comp-image
+$(crossplane_sentinel): kind-setup csi-host-path-setup
 	helm repo add crossplane https://charts.crossplane.io/stable --force-update
+	if $(vcluster); then $(vcluster_bin) connect controlplane --namespace vcluster; fi
 	helm upgrade --install crossplane --create-namespace --namespace syn-crossplane crossplane/crossplane \
 		--set "args[0]='--debug'" \
 		--set "args[1]='--enable-environment-configs'" \
 		--set "args[2]='--enable-usages'" \
 		--wait
+	if $(vcluster); then $(vcluster_bin) disconnect; fi
 	@touch $@
 
 stackgres-setup: export KUBECONFIG = $(KIND_KUBECONFIG)
@@ -78,8 +80,21 @@ stackgres-setup: kind-setup csi-host-path-setup ## Install StackGres
 	encoded=$$(echo -n "$$NEW_PASSWORD" | base64) && \
 	kubectl patch secrets --namespace stackgres stackgres-restapi-admin --type json -p "[{\"op\":\"add\",\"path\":\"/data/clearPassword\", \"value\":\"$${encoded}\"}]" | true
 
-certmanager-setup: export KUBECONFIG = $(KIND_KUBECONFIG)
-certmanager-setup: kind-storage
+certmanager-setup: $(certmanager-sentinel)
+
+$(certmanager-sentinel): export KUBECONFIG = $(KIND_KUBECONFIG)
+$(certmanager-sentinel): kind-storage
+$(certmanager-sentinel):
+	if $(vcluster); then \
+		$(vcluster_bin) connect controlplane --namespace vcluster;\
+		$(MAKE) certmanager-install; \
+		$(vcluster_bin) disconnect; \
+	fi
+	$(MAKE) certmanager-install
+	@touch $@
+
+certmanager-install: export KUBECONFIG = $(KIND_KUBECONFIG)
+certmanager-install:
 	kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml
 	kubectl -n cert-manager wait --for condition=Available deployment/cert-manager --timeout 120s
 	kubectl -n cert-manager wait --for condition=Available deployment/cert-manager-webhook --timeout 120s
@@ -123,34 +138,49 @@ prometheus-setup: $(prometheus_sentinel) ## Install Prometheus stack
 
 $(prometheus_sentinel): export KUBECONFIG = $(KIND_KUBECONFIG)
 $(prometheus_sentinel): kind-setup-ingress
+	if $(vcluster); then \
+		$(vcluster_bin) connect controlplane --namespace vcluster; \
+		$(MAKE) prometheus-install -e PROM_VALUES=prometheus/values_vcluster.yaml; \
+		$(vcluster_bin) disconnect; \
+	fi
+	$(MAKE) prometheus-install
+	kubectl apply -f prometheus/netpol.yaml
+	@echo -e "***\n*** Installed Prometheus in http://prometheus.127.0.0.1.nip.io:8088/ and AlertManager in http://alertmanager.127.0.0.1.nip.io:8088/.\n***"
+	@touch $@
+
+prometheus-install: export KUBECONFIG = $(KIND_KUBECONFIG)
+prometheus-install:
 	helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
 	helm upgrade --install kube-prometheus \
 		--create-namespace \
 		--namespace prometheus-system \
 		--wait \
-		--values prometheus/values.yaml \
+		--values ${PROM_VALUES} \
 		prometheus-community/kube-prometheus-stack
 	kubectl -n prometheus-system wait --for condition=Available deployment/kube-prometheus-kube-prome-operator --timeout 120s
-	kubectl apply -f prometheus/netpol.yaml
-	@echo -e "***\n*** Installed Prometheus in http://prometheus.127.0.0.1.nip.io:8088/ and AlertManager in http://alertmanager.127.0.0.1.nip.io:8088/.\n***"
-	@touch $@
 
 load-comp-image: ## Load the appcat-comp image if it exists
 	[[ "$$(docker images -q ghcr.io/vshn/appcat 2> /dev/null)" != "" ]] && kind load docker-image --name kindev ghcr.io/vshn/appcat || true
 
+.PHONY: csi-host-path-setup
 csi-host-path-setup: $(csi_sentinel) ## Setup csi-driver-host-path and set as default, this provider supports resizing
 
 $(csi_sentinel): export KUBECONFIG = $(KIND_KUBECONFIG)
 $(csi_sentinel): unset-default-sc
+	$(MAKE) csi-install
+	@touch $@
+
+csi-install: export KUBECONFIG = $(KIND_KUBECONFIG)
+csi-install:
 	cd csi-host-path && \
 	kubectl apply -f snapshot-controller.yaml && \
 	kubectl apply -f storageclass.yaml && \
 	./deploy.sh
 	kubectl patch storageclass csi-hostpath-fast -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
-	@touch $@
 
 .PHONY: clean
 clean: kind-clean ## Clean up local dev environment
+	rm -f $(vcluster_bin)
 
 metallb-setup: $(metallb_sentinel) ## Install metallb as loadbalancer
@@ -194,16 +224,16 @@ $(espejo_sentinel):
 	kubectl apply -f espejo
 	touch $@
 
-forgejo-setup: $(forgejo_sentinel)
+forgejo-setup: $(forgejo_sentinel) ## Install local forgejo instance to host argocd repos
 
 $(forgejo_sentinel): export KUBECONFIG = $(KIND_KUBECONFIG)
 $(forgejo_sentinel):
 	helm upgrade --install forgejo -f forgejo/values.yaml -n forgejo --create-namespace oci://code.forgejo.org/forgejo-helm/forgejo
 	@echo -e "***\n*** Installed forgejo in http://forgejo.127.0.0.1.nip.io:8088\n***"
-	@echo -e "***\n*** credentials: gitea_admin:admin\n***"
+	@echo -e "***\n*** credentials: gitea_admin:adminadmin\n***"
 	touch $@
 
-argocd-setup: $(argocd_sentinel)
+argocd-setup: $(argocd_sentinel) ## Install argocd to automagically apply our component
 
 $(argocd_sentinel): export KUBECONFIG = $(KIND_KUBECONFIG)
 $(argocd_sentinel):
@@ -213,6 +243,61 @@ $(argocd_sentinel):
 	kubectl -n argocd patch cm argocd-cmd-params-cm -p '{"data": { "server.insecure": "true" } }'
 	kubectl -n argocd patch cm argocd-cm -p '{"data": { "timeout.reconciliation": "30s" } }'
 	kubectl -n argocd rollout restart deployment argocd-server
+	if $(vcluster); then \
+		$(MAKE) argocd-vcluster-auth ; \
+	fi
 	@echo -e "***\n*** Installed argocd in http://argocd.127.0.0.1.nip.io:8088\n***"
 	@echo -e "***\n*** credentials: admin:admin\n***"
 	touch $@
+
+.PHONY: argocd-vcluster-auth
+argocd-vcluster-auth: export KUBECONFIG = $(KIND_KUBECONFIG) ## Re-create argocd authentication for the vcluster, in case it breaks
+argocd-vcluster-auth: vcluster-setup
+argocd-vcluster-auth: vcluster=true
+argocd-vcluster-auth:
+	# The usual kubeconfig export doesn't work here for some reason...
+	export KUBECONFIG=$(KIND_KUBECONFIG) ; \
+	$(vcluster_bin) connect controlplane --namespace vcluster; \
+	kubectl create serviceaccount argocd; \
+	kubectl create clusterrolebinding argocd_admin --clusterrole=cluster-admin --serviceaccount=default:argocd ; \
+	kubectl apply -f argocd/service-account-secret.yaml ; \
+	sleep 1 ; \
+	export token=$$(kubectl get secret argocd-token -oyaml | yq '.data.token' | base64 -d) ; \
+	$(vcluster_bin) disconnect; \
+	kubectl delete -f argocd/controlplanesecret.yaml ; \
+	cat argocd/controlplanesecret.yaml | yq '.stringData.config = "{ \"bearerToken\":\""+ strenv(token) +"\", \"tlsClientConfig\": { \"insecure\": true }}"' | kubectl apply -f -
+
+.PHONY: install-vcluster-bin
+install-vcluster-bin: $(vcluster_bin)
+
+$(vcluster_bin): export GOOS = $(shell go env GOOS)
+$(vcluster_bin): export GOARCH = $(shell go env GOARCH)
+$(vcluster_bin): export GOBIN = $(go_bin)
+$(vcluster_bin): | $(go_bin)
+	if $(vcluster); then \
+		go install github.com/loft-sh/vcluster/cmd/vclusterctl@latest; \
+	fi
+
+
+.PHONY: vcluster-setup
+vcluster-setup: export KUBECONFIG = $(KIND_KUBECONFIG)
+vcluster-setup: install-vcluster-bin
+	if $(vcluster); then \
+		$(vcluster_bin) create controlplane --namespace vcluster --connect=false -f vclusterconfig/values.yaml || true; \
+	fi
+
+.PHONY: vcluster-in-cluster-kubeconfig
+vcluster-in-cluster-kubeconfig: export KUBECONFIG = $(KIND_KUBECONFIG) ## Prints out a kubeconfig for use within the main cluster
+vcluster-in-cluster-kubeconfig:
+	@export KUBECONFIG=$(KIND_KUBECONFIG) ; \
+	$(vcluster_bin) connect controlplane --namespace vcluster --print | yq '.clusters[0].cluster.server = "https://controlplane.vcluster"'
+
+.PHONY: vcluster-local-cluster-kubeconfig
+vcluster-local-cluster-kubeconfig: export KUBECONFIG = $(KIND_KUBECONFIG) ## Prints out a kubeconfig for use on the local machine
+vcluster-local-cluster-kubeconfig:
+	@export KUBECONFIG=$(KIND_KUBECONFIG) ; \
+	$(vcluster_bin) connect controlplane --namespace vcluster --print | yq
+
+.PHONY: vcluster-clean
+vcluster-clean: ## If you break Crossplane hard enough just remove the whole vcluster
+	$(vcluster_bin) rm controlplane || true
diff --git a/Makefile.vars.mk b/Makefile.vars.mk
index a0fe19c..a57df68 100644
--- a/Makefile.vars.mk
+++ b/Makefile.vars.mk
@@ -1,5 +1,6 @@
 ## These are some common variables for Make
 crossplane_sentinel = $(kind_dir)/crossplane-sentinel
+certmanager-sentinel = $(kind_dir)/certmanager-sentinel
 k8up_sentinel = $(kind_dir)/k8up-sentinel
 prometheus_sentinel = $(kind_dir)/prometheus-sentinel
 local_pv_sentinel = $(kind_dir)/local_pv
@@ -27,3 +28,12 @@ KIND_IMAGE ?= docker.io/kindest/node:$(KIND_NODE_VERSION)
 KIND_CMD ?= go run sigs.k8s.io/kind
 KIND_KUBECONFIG ?= $(kind_dir)/kind-kubeconfig-$(KIND_NODE_VERSION)
 KIND_CLUSTER ?= $(PROJECT_NAME)
+
+## PROMETHEUS
+PROM_VALUES=prometheus/values.yaml
+
+
+## VCLUSTER
+vcluster_bin = $(go_bin)/vclusterctl
+# enable or disable vcluster provisioning
+vcluster=false
diff --git a/README.md b/README.md
index c10942c..fa2ec38 100644
--- a/README.md
+++ b/README.md
@@ -47,6 +47,38 @@ mc alias set localnip http://minio.127.0.0.1.nip.io:8088 minioadmin minioadmin
 
 Minio console access: http://minio-gui.127.0.0.1.nip.io:8088
 
+## Vcluster
+
+To enable vcluster support, pass `-e vcluster=true` to make. Any make target that supports the vcluster will then automatically use it.
+
+There are also some helper targets for the vcluster:
+* vcluster-clean: removes the vcluster. Helpful if Crossplane is broken beyond repair.
+* vcluster-in-cluster-kubeconfig: generates a kubeconfig that can be used from within the main cluster, e.g. when deploying the controller or sli-exporter so it can connect to the control plane.
+* vcluster-local-cluster-kubeconfig: same as the above, but points to the vcluster proxy endpoint. Useful for debugging purposes.
+
+### How to use it in make
+
+If you need to install something in the control plane cluster from make, you can do it like this:
+
+```make
+.PHONY: app-setup
+app-setup:
+	$(vcluster_bin) connect controlplane --namespace vcluster
+	# install whatever you need here
+	$(vcluster_bin) disconnect
+```
+
+### Access vcluster
+
+If you need access to the vcluster from outside make (for example, when applying the AppCat component or other things), export the kind config and then:
+
+```bash
+kubectl config get-contexts
+# get the vcluster context
+# it's the one starting with vcluster_*
+kubectl config use-context vcluster_*...
+```
+
 ## Integration into other projects
 
 kindev is intended to be used by Crossplane providers as a development and test environment. It can be tied into other projects via a git submodule.
diff --git a/argocd/controlplanesecret.yaml b/argocd/controlplanesecret.yaml
new file mode 100644
index 0000000..280b706
--- /dev/null
+++ b/argocd/controlplanesecret.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: controlplane
+  namespace: argocd
+  labels:
+    argocd.argoproj.io/secret-type: cluster
+type: Opaque
+stringData:
+  name: controlplane
+  server: https://controlplane.vcluster.svc
+  # config: |
+  #   { "bearerToken": "", "tlsClientConfig": { "insecure": true }}
diff --git a/argocd/service-account-secret.yaml b/argocd/service-account-secret.yaml
new file mode 100644
index 0000000..433b01a
--- /dev/null
+++ b/argocd/service-account-secret.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: argocd-token
+  annotations:
+    kubernetes.io/service-account.name: "argocd"
+type: kubernetes.io/service-account-token
diff --git a/forgejo/values.yaml b/forgejo/values.yaml
index 539b12a..1bfbb0b 100644
--- a/forgejo/values.yaml
+++ b/forgejo/values.yaml
@@ -14,7 +14,7 @@ gitea:
   admin:
     # 'admin' is reserved and can't be used... 'gitea_admin' is the default.
     username: gitea_admin
-    password: admin
+    password: adminadmin
   config:
     repository:
       ENABLE_PUSH_CREATE_USER: 'true'
diff --git a/kind/kind.mk b/kind/kind.mk
index d57efee..f1800a0 100644
--- a/kind/kind.mk
+++ b/kind/kind.mk
@@ -46,7 +46,7 @@ $(KIND_KUBECONFIG): $(kind_bin)
 		--name $(KIND_CLUSTER) \
 		--image $(KIND_IMAGE) \
 		--config kind/config.yaml
-	$(kind_bin) get kubeconfig --name $(KIND_CLUSTER) > $(kind_dir)/kind-config
+	ln -s $(KIND_KUBECONFIG) $(kind_dir)/kind-config
 	@kubectl version
 	@kubectl cluster-info
 	@kubectl config use-context kind-$(KIND_CLUSTER)
diff --git a/prometheus/values_vcluster.yaml b/prometheus/values_vcluster.yaml
new file mode 100644
index 0000000..6a6ed9d
--- /dev/null
+++ b/prometheus/values_vcluster.yaml
@@ -0,0 +1,26 @@
+# See https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack
+
+kubeEtcd:
+  enabled: false
+kubeScheduler:
+  enabled: false
+kubeProxy:
+  enabled: false
+kubeControllerManager:
+  enabled: false
+grafana:
+  enabled: false
+nodeExporter:
+  enabled: false
+
+prometheus:
+  prometheusSpec:
+    # these will cause Prometheus to search in all namespaces
+    serviceMonitorSelectorNilUsesHelmValues: false
+    ruleSelectorNilUsesHelmValues: false
+    podMonitorSelectorNilUsesHelmValues: false
+
+# See https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-state-metrics
+kube-state-metrics:
+  metricLabelsAllowlist:
+    - namespaces=[*]
diff --git a/vclusterconfig/values.yaml b/vclusterconfig/values.yaml
new file mode 100644
index 0000000..3474ae1
--- /dev/null
+++ b/vclusterconfig/values.yaml
@@ -0,0 +1,6 @@
+# vcluster uses SQLite by default and basically just dies with our dev env, so deploy etcd instead
+controlPlane:
+  backingStore:
+    etcd:
+      deploy:
+        enabled: true
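
A minimal sketch of a vcluster-aware target for a consumer repo, following the same connect/install/disconnect pattern as `crossplane-setup` above. The `my-service-setup` target name, namespace, and chart reference are hypothetical placeholders; `$(vcluster)`, `$(vcluster_bin)`, and `$(KIND_KUBECONFIG)` are assumed to come from `Makefile.vars.mk`:

```make
# Hypothetical example target (not part of this patch); assumes the
# variables from Makefile.vars.mk are in scope.
.PHONY: my-service-setup
my-service-setup: export KUBECONFIG = $(KIND_KUBECONFIG)
my-service-setup: vcluster-setup
	# With vcluster=true this installs into the vcluster, otherwise into kind.
	if $(vcluster); then $(vcluster_bin) connect controlplane --namespace vcluster; fi
	helm upgrade --install my-service --create-namespace --namespace my-service my-repo/my-service --wait
	if $(vcluster); then $(vcluster_bin) disconnect; fi
```

Run it with `make my-service-setup -e vcluster=true` to target the vcluster, or without the flag to install directly into the kind cluster.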