From 95689239912963366fbe9aa354fa46d175cff5b1 Mon Sep 17 00:00:00 2001
From: esierra-stratio <102975650+esierra-stratio@users.noreply.github.com>
Date: Tue, 24 Sep 2024 11:23:49 +0200
Subject: [PATCH] [CLOUD-245] Deploy Flux during provisioning and [PLT-459] Support Kubernetes 1.30.X in cloud-provisioner (#540)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* version v0.18.0-alpha
* update docs for v0.17.0
* fix kind version in readme
* comments-update-buildcontext
* decoupling helm chart installation
* implementing getOverriddenCharts
* uncouple charts pull
* cleaning code
* add changes to changelog
* fixed cert-manager and aws-lb-controller installation by default
* fixed chart pull bugs
* integrated private cert-manager installation
* - Added cert-manager installation by default.
  - Defined helm charts to be installed generically, without specifying k8s version.
* add flux
* last changes for AWS
* EKS helm repository private
* Azure changes
* add globalnetpol for flux GCP
* delete debug prints
* add support for multiple k8s versions
* refactor print name
* add values for each kubernetes version
* replacement of namespace by kube-system
* added installedCRDs
* fixed tigera-operator version
* fix merge with master
* review chart values for k8s 1.28
* refactor code
* refactor code
* refactor code
* support 1.38 k8s
* add GCP CCM
* add GCP CCM installation
* fix correct gcp ccm version
* add gcp ccm from cluster.yaml helm repository
* delete unnecessary folder
* extend helmrelease and helmrepository parameters
* fix global netpol name
* delete unnecessary param in keos.yaml
* add stable release gcp ccm
* fix wait for metrics-server to be ready
* add resourcequota template GKE
* install capx before reconciling
* fix azuredisk driver
* removed region from gcp credentials
* refactor core dns logic
* fix helmrelease gcp ccm
* move coredns files to the correct folders
* add calico in GKE as netpol engine
* add calico in GKE as netpol engine
* fix calico installation logic
* changed gcp default disk type for storageclass

---------

Co-authored-by: Benjamin Elder
Co-authored-by: Daman
Co-authored-by: Kubernetes Prow Robot
Co-authored-by: lreciomelero <120394823+lreciomelero@users.noreply.github.com>
Co-authored-by: lreciomelero
Co-authored-by: luisreciomelero
Co-authored-by: Unai Arríen
---
 CHANGELOG.md | 3 +
 DEPENDENCIES | 4 +-
 bin/change-version.sh | 8 +-
 bin/images/azure/imagenes-capz.txt | 2 +-
 go.mod | 16 +-
 go.sum | 69 +-
 .../create/actions/createworker/aws.go | 220 +++-
 .../create/actions/createworker/azure.go | 225 ++++-
 .../actions/createworker/createworker.go | 584 +++++------
 .../files/aws/allow-egress-imds_gnetpol.yaml | 4 +-
 .../createworker/files/aws/aws-node_rbac.yaml | 30 +
 .../flux2_azurepodidentityexception.yaml | 9 +
 .../files/common/calico-metrics.yaml | 20 -
 .../files/gcp/allow-egress-imds_gnetpol.yaml | 2 +-
 .../create/actions/createworker/gcp.go | 109 +-
 .../actions/createworker/keosinstaller.go | 2 -
 .../create/actions/createworker/provider.go | 938 ++++++++++++++----
 ...-cloud-controller-manager-helm-values.tmpl | 9 +
 .../28/aws-ebs-csi-driver-helm-values.tmpl | 31 +
 ...-load-balancer-controller-helm-values.tmpl | 2 +
 ...-cloud-controller-manager-helm-values.tmpl | 11 +
 .../29/aws-ebs-csi-driver-helm-values.tmpl | 31 +
 ...-load-balancer-controller-helm-values.tmpl | 2 +
 ...-cloud-controller-manager-helm-values.tmpl | 12 +
 .../30/aws-ebs-csi-driver-helm-values.tmpl | 31 +
 ...-load-balancer-controller-helm-values.tmpl | 2 +
.../azure/28/azuredisk-azure-json.tmpl | 39 + .../28/azuredisk-csi-driver-helm-values.tmpl | 8 + .../28/azurefile-csi-driver-helm-values.tmpl | 5 + .../28/cloud-provider-azure-helm-values.tmpl | 10 + .../azure/29/azuredisk-azure-json.tmpl | 39 + .../29/azuredisk-csi-driver-helm-values.tmpl | 8 + .../29/azurefile-csi-driver-helm-values.tmpl | 5 + .../29/cloud-provider-azure-helm-values.tmpl | 10 + .../azure/30/azuredisk-azure-json.tmpl | 39 + .../30/azuredisk-csi-driver-helm-values.tmpl | 8 + .../30/azurefile-csi-driver-helm-values.tmpl | 5 + .../30/cloud-provider-azure-helm-values.tmpl | 11 + .../common/28/cert-manager-helm-values.tmpl | 17 + .../28/cluster-autoscaler-helm-values.tmpl | 14 + .../28/flux2-helm-chart-values_configmap.tmpl | 17 + .../common/28/flux2-helm-values.tmpl | 29 + .../common/28/flux2_helmrelease.tmpl | 35 + .../common/28/flux2_helmrepository.tmpl | 24 + .../28/tigera-operator-helm-values.tmpl | 65 ++ .../common/29/cert-manager-helm-values.tmpl | 15 + .../29/cluster-autoscaler-helm-values.tmpl | 14 + .../29/flux2-helm-chart-values_configmap.tmpl | 17 + .../common/29/flux2-helm-values.tmpl | 29 + .../common/29/flux2_helmrelease.tmpl | 35 + .../common/29/flux2_helmrepository.tmpl | 24 + .../29/tigera-operator-helm-values.tmpl | 73 ++ .../common/30/cert-manager-helm-values.tmpl | 15 + .../30/cluster-autoscaler-helm-values.tmpl | 14 + .../30/flux2-helm-chart-values_configmap.tmpl | 17 + .../common/30/flux2-helm-values.tmpl | 29 + .../common/30/flux2_helmrelease.tmpl | 35 + .../common/30/flux2_helmrepository.tmpl | 24 + .../tigera-operator-helm-values.tmpl} | 0 .../gcp/{ => 28}/coredns_configmap.tmpl | 0 ...cp-compute-persistent-disk-csi-driver.tmpl | 4 +- .../templates/gcp/{ => 28}/resourcequota.tmpl | 0 .../coredns_configmap.tmpl} | 6 + ...cp-compute-persistent-disk-csi-driver.tmpl | 708 +++++++++++++ .../templates/gcp/29/resourcequota.tmpl | 18 + .../templates/gcp/30/coredns_configmap.tmpl | 34 + ...-cloud-controller-manager-helm-values.tmpl | 13 + ...p-compute-persistent-disk-csi-driver.tmpl} | 37 +- .../templates/gcp/30/resourcequota.tmpl | 18 + .../providers/docker/stratio/Dockerfile | 38 +- pkg/cluster/internal/validate/common.go | 11 +- pkg/cmd/kind/version/version.go | 2 +- pkg/commons/cluster.go | 28 +- pkg/commons/utils.go | 6 +- .../stratio-aws-unmanaged-policy.json | 1 + .../attachments/stratio-eks-policy.json | 1 + .../azure-aks-images.adoc | 4 +- .../azure-vms-images.adoc | 4 +- .../stratio-aws-unmanaged-policy.json | 1 + .../attachments/stratio-eks-policy.json | 1 + .../azure-aks-images.adoc | 4 +- .../azure-vms-images.adoc | 4 +- 82 files changed, 3316 insertions(+), 732 deletions(-) create mode 100644 pkg/cluster/internal/create/actions/createworker/files/aws/aws-node_rbac.yaml create mode 100644 pkg/cluster/internal/create/actions/createworker/files/azure/flux2_azurepodidentityexception.yaml create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/aws/28/aws-cloud-controller-manager-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/aws/28/aws-ebs-csi-driver-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/aws/28/aws-load-balancer-controller-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/aws/29/aws-cloud-controller-manager-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/aws/29/aws-ebs-csi-driver-helm-values.tmpl create mode 100644 
pkg/cluster/internal/create/actions/createworker/templates/aws/29/aws-load-balancer-controller-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/aws/30/aws-cloud-controller-manager-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/aws/30/aws-ebs-csi-driver-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/aws/30/aws-load-balancer-controller-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/azure/28/azuredisk-azure-json.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/azure/28/azuredisk-csi-driver-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/azure/28/azurefile-csi-driver-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/azure/28/cloud-provider-azure-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/azure/29/azuredisk-azure-json.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/azure/29/azuredisk-csi-driver-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/azure/29/azurefile-csi-driver-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/azure/29/cloud-provider-azure-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/azure/30/azuredisk-azure-json.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/azure/30/azuredisk-csi-driver-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/azure/30/azurefile-csi-driver-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/azure/30/cloud-provider-azure-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/28/cert-manager-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/28/cluster-autoscaler-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2-helm-chart-values_configmap.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2_helmrelease.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2_helmrepository.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/28/tigera-operator-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/29/cert-manager-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/29/cluster-autoscaler-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2-helm-chart-values_configmap.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2_helmrelease.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2_helmrepository.tmpl create mode 100644 
pkg/cluster/internal/create/actions/createworker/templates/common/29/tigera-operator-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/30/cert-manager-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/30/cluster-autoscaler-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2-helm-chart-values_configmap.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2-helm-values.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2_helmrelease.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2_helmrepository.tmpl rename pkg/cluster/internal/create/actions/createworker/templates/common/{calico-helm-values.tmpl => 30/tigera-operator-helm-values.tmpl} (100%) rename pkg/cluster/internal/create/actions/createworker/templates/gcp/{ => 28}/coredns_configmap.tmpl (100%) rename pkg/cluster/internal/create/actions/createworker/templates/gcp/{ => 28}/gcp-compute-persistent-disk-csi-driver.tmpl (99%) rename pkg/cluster/internal/create/actions/createworker/templates/gcp/{ => 28}/resourcequota.tmpl (100%) rename pkg/cluster/internal/create/actions/createworker/templates/gcp/{coredns-patch_configmap.tmpl => 29/coredns_configmap.tmpl} (88%) create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/gcp/29/gcp-compute-persistent-disk-csi-driver.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/gcp/29/resourcequota.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/gcp/30/coredns_configmap.tmpl create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/gcp/30/gcp-cloud-controller-manager-helm-values.tmpl rename pkg/cluster/internal/create/actions/createworker/{files/gcp/gcp-compute-persistent-disk-csi-driver.yaml => templates/gcp/30/gcp-compute-persistent-disk-csi-driver.tmpl} (92%) create mode 100644 pkg/cluster/internal/create/actions/createworker/templates/gcp/30/resourcequota.tmpl diff --git a/CHANGELOG.md b/CHANGELOG.md index 570f07872c..e1a665469d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,8 @@ * [Core] Ensure CoreDNS replicas are assigned to different nodes * [Core] Added the default creation of volumes for containerd, etcd and root, if not indicated in the keoscluster +* [Core] Support k8s v1.30 +* [Core] Deprecated Kubernetes versions prior to 1.28 ## 0.17.0-0.5.2 (2024-06-25) @@ -18,6 +20,7 @@ * [Core] Update runc golang module to fix GHSA-xr7r-f8xq-vfvv * [Core] Improve command execution retries +* [Core] Uncouple chart installation from Dockerfile * [Core] Support k8s v1.28 * [Core] Fix panic when keos_version is not defined * [Core] Script the upgrade diff --git a/DEPENDENCIES b/DEPENDENCIES index 0bc43d4dcb..44a946f5d8 100644 --- a/DEPENDENCIES +++ b/DEPENDENCIES @@ -37,8 +37,8 @@ aws: node-driver-registrar: v2.8.0-eks-1-27-3 volume-modifier-for-k8s: v0.1.0 azure: - cluster-api-azure: v1.11.4 - cluster-api-azure-templates: v1.11.4 + cluster-api-azure: v1.12.4 + cluster-api-azure-templates: v1.12.4 unmanaged: cloud-provider-azure-chart: v1.28.5 azure-cloud-controller-manager: v1.24.18 diff --git a/bin/change-version.sh b/bin/change-version.sh index 330c16ac1b..1523c43276 100755 --- a/bin/change-version.sh +++ b/bin/change-version.sh @@ -10,5 +10,11 @@ else VERSION=$1 fi 
+VERSION_GO_FILE="$BASEDIR/pkg/cmd/kind/version/version.go" +CORE_VERSION=$(echo "$VERSION" | sed -E "s/-.*//") + echo "Modifying cloud-provisioner version to: $1" -echo $VERSION > VERSION +echo $VERSION > $BASEDIR/VERSION + +sed -i "s/\(const versionCore = \"\)[^\"]*\"/\10.17.0-$CORE_VERSION\"/" "$VERSION_GO_FILE" +sed -i "s/\(const versionPreRelease = \"\)[^\"]*\"/\1SNAPSHOT\"/" "$VERSION_GO_FILE" diff --git a/bin/images/azure/imagenes-capz.txt b/bin/images/azure/imagenes-capz.txt index d323c88595..815a203b7c 100644 --- a/bin/images/azure/imagenes-capz.txt +++ b/bin/images/azure/imagenes-capz.txt @@ -1,3 +1,3 @@ mcr.microsoft.com/oss/azure/aad-pod-identity/nmi:v1.8.14 -registry.k8s.io/cluster-api-azure/cluster-api-azure-controller:v1.11.4 +registry.k8s.io/cluster-api-azure/cluster-api-azure-controller:v1.12.4 mcr.microsoft.com/k8s/azureserviceoperator:v2.3.0 diff --git a/go.mod b/go.mod index 418024ce10..b86c3bbafb 100644 --- a/go.mod +++ b/go.mod @@ -32,6 +32,7 @@ require ( require ( github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.4.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v3 v3.0.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.2.0 github.com/aws/aws-sdk-go-v2 v1.19.0 github.com/aws/aws-sdk-go-v2/service/ec2 v1.105.1 golang.org/x/exp v0.0.0-20231006140011-7918f672742d @@ -39,22 +40,15 @@ require ( ) require ( - github.com/checkpoint-restore/go-criu/v5 v5.3.0 // indirect - github.com/cilium/ebpf v0.7.0 // indirect - github.com/containerd/console v1.0.3 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.1.9 // indirect github.com/containers/storage v1.51.0 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect - github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/distribution/reference v0.5.0 // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker v24.0.7+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.0 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/gorilla/mux v1.8.0 // indirect @@ -66,21 +60,14 @@ require ( github.com/moby/sys/mountinfo v0.7.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/mrunalp/fileutils v0.5.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc5 // indirect github.com/opencontainers/runc v1.1.12 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect - github.com/opencontainers/selinux v1.11.0 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/seccomp/libseccomp-golang v0.10.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/ulikunitz/xz v0.5.11 // indirect - github.com/urfave/cli v1.22.12 // indirect github.com/vbatts/tar-split v0.11.5 // indirect - github.com/vishvananda/netlink v1.2.1-beta.2 // indirect - github.com/vishvananda/netns v0.0.4 // indirect google.golang.org/genproto/googleapis/rpc 
v0.0.0-20231106174013-bbf56f31fb17 // indirect ) @@ -121,5 +108,4 @@ require ( google.golang.org/appengine v1.6.8 // indirect google.golang.org/grpc v1.61.0 // indirect google.golang.org/protobuf v1.31.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 084a21eb07..25dc6730ca 100644 --- a/go.sum +++ b/go.sum @@ -15,17 +15,17 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontai github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v3 v3.0.0/go.mod h1:JZHrk5tfE4/xpxweWhcG3PafI/PV9ULUSltVBBFG9N4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.2.0 h1:z4YeiSXxnUI+PqB46Yj6MZA3nwb1CcJIkEMDrzUd8Cs= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.2.0/go.mod h1:rko9SzMxcMk0NJsNAxALEGaTYyy79bNRwxgJfrH0Spw= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.2.0 h1:iGj7n4SmssnseLryJRs/0lb4Db129ioYOCPSPC+vEsw= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.2.0/go.mod h1:qeBrdANBgW4QsU1bF5/9qjrPRwFIt+AnOMxyH5Bwkhk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions v1.2.0 h1:Pmy0+3ox1IC3sp6musv87BFPIdQbqyPFjn7I8I0o2Js= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions v1.2.0/go.mod h1:ThfyMjs6auYrWPnYJjI3H4H++oVPrz01pizpu8lfl3A= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= -github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= @@ -64,15 +64,11 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.18.6 h1:rIFn5J3yDoeuKCE9sESXqM5POTAh github.com/aws/aws-sdk-go-v2/service/sts v1.18.6/go.mod h1:48WJ9l3dwP0GSHWGc5sFGGlCkuA82Mc2xnw+T6Q8aDw= github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/checkpoint-restore/go-criu/v5 v5.3.0 h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8= -github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= -github.com/cilium/ebpf v0.7.0 
h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k= -github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containers/common v0.57.4 h1:kmfBad92kUjP5X44BPpOwMe+eZQqaKETfS+ASeL0g+g= github.com/containers/common v0.57.4/go.mod h1:o3L3CyOI9yr+JC8l4dZgvqTxcjs3qdKmkek00uchgvw= github.com/containers/image/v5 v5.29.2 h1:b8U0XYWhaQbKucK73IbmSm8WQyKAhKDbAHQc45XlsOw= @@ -83,21 +79,15 @@ github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOj github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys= github.com/containers/storage v1.51.0 h1:AowbcpiWXzAjHosKz7MKvPEqpyX+ryZA/ZurytRrFNA= github.com/containers/storage v1.51.0/go.mod h1:ybl8a3j1PPtpyaEi/5A6TOFs+5TrEyObeKJzVtkUlfc= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= @@ -106,6 +96,7 @@ github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E1 github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= 
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -116,7 +107,6 @@ github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJ github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= @@ -125,9 +115,6 @@ github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/j github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -154,7 +141,6 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -178,8 +164,6 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -207,29 +191,26 @@ github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= 
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mrunalp/fileutils v0.5.1 h1:F+S7ZlNKnrwHfSwdlgNSkKo67ReVf8o9fel6C3dkm/Q= -github.com/mrunalp/fileutils v0.5.1/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM= github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= -github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40= -github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M= github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= -github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= @@ -240,22 +221,19 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= -github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sosedoff/ansible-vault-go v0.1.1 h1:3yU5yxPnK70Exemc0FKFPYCul7YB1EM9Of1Dd7xBzFI= github.com/sosedoff/ansible-vault-go v0.1.1/go.mod h1:u74h49t5XRrBQpubnCr8PXRABdzTEuM+nHUn1mGYgtI= -github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -275,15 +253,8 @@ github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtse github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8= -github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= -github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= @@ -295,8 +266,6 @@ golang.org/x/crypto 
v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o= -golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -326,16 +295,12 @@ golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -393,7 +358,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -403,14 +367,11 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/errgo.v2 v2.1.0/go.mod 
h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/pkg/cluster/internal/create/actions/createworker/aws.go b/pkg/cluster/internal/create/actions/createworker/aws.go index d85207fae7..320009572f 100644 --- a/pkg/cluster/internal/create/actions/createworker/aws.go +++ b/pkg/cluster/internal/create/actions/createworker/aws.go @@ -51,14 +51,22 @@ type AWSBuilder struct { csiNamespace string } +type lbControllerHelmParams struct { + ClusterName string + Private bool + KeosRegUrl string + AccountID string + RoleName string +} + func newAWSBuilder() *AWSBuilder { return &AWSBuilder{} } func (b *AWSBuilder) setCapx(managed bool) { b.capxProvider = "aws" - b.capxVersion = "v2.2.1" - b.capxImageVersion = "v2.2.1" + b.capxVersion = "v2.5.2" + b.capxImageVersion = "v2.5.2" b.capxName = "capa" b.capxManaged = managed b.csiNamespace = "kube-system" @@ -100,6 +108,80 @@ func (b *AWSBuilder) setSC(p ProviderParams) { } } +var awsCharts = ChartsDictionary{ + Charts: map[string]map[string]map[string]commons.ChartEntry{ + "28": { + "managed": { + "aws-load-balancer-controller": {Repository: "https://aws.github.io/eks-charts", Version: "1.8.0", Namespace: "kube-system", Pull: false}, + "cluster-autoscaler": {Repository: "https://kubernetes.github.io/autoscaler", Version: "9.34.1", Namespace: "kube-system", Pull: false}, + "tigera-operator": {Repository: "https://docs.projectcalico.org/charts", Version: "v3.27.3", Namespace: "tigera-operator", Pull: false}, + }, + "unmanaged": { + "aws-cloud-controller-manager": {Repository: "https://kubernetes.github.io/cloud-provider-aws", Version: "0.0.8", Namespace: "kube-system", Pull: true}, + "aws-ebs-csi-driver": {Repository: "https://kubernetes-sigs.github.io/aws-ebs-csi-driver", Version: "2.31.0", Namespace: "kube-system", Pull: false}, + "cluster-autoscaler": {Repository: "https://kubernetes.github.io/autoscaler", Version: "9.34.1", Namespace: "kube-system", Pull: false}, + "tigera-operator": {Repository: "https://docs.projectcalico.org/charts", Version: "v3.27.3", Namespace: "tigera-operator", Pull: true}, + }, + }, + "29": { + "managed": { + "aws-load-balancer-controller": {Repository: "https://aws.github.io/eks-charts", Version: "1.8.0", Namespace: "kube-system", Pull: false}, + "cluster-autoscaler": {Repository: "https://kubernetes.github.io/autoscaler", Version: "9.35.0", Namespace: "kube-system", Pull: false}, + "tigera-operator": {Repository: 
"https://docs.projectcalico.org/charts", Version: "v3.27.3", Namespace: "tigera-operator", Pull: false}, + }, + "unmanaged": { + "aws-cloud-controller-manager": {Repository: "https://kubernetes.github.io/cloud-provider-aws", Version: "0.0.8", Namespace: "kube-system", Pull: true}, + "aws-ebs-csi-driver": {Repository: "https://kubernetes-sigs.github.io/aws-ebs-csi-driver", Version: "2.31.0", Namespace: "kube-system", Pull: false}, + "cluster-autoscaler": {Repository: "https://kubernetes.github.io/autoscaler", Version: "9.35.0", Namespace: "kube-system", Pull: false}, + "tigera-operator": {Repository: "https://docs.projectcalico.org/charts", Version: "v3.27.3", Namespace: "tigera-operator", Pull: true}, + }, + }, + "30": { + "managed": { + "aws-load-balancer-controller": {Repository: "https://aws.github.io/eks-charts", Version: "1.8.1", Namespace: "kube-system", Pull: false}, + "cluster-autoscaler": {Repository: "https://kubernetes.github.io/autoscaler", Version: "9.37.0", Namespace: "kube-system", Pull: false}, + "tigera-operator": {Repository: "https://docs.projectcalico.org/charts", Version: "v3.28.0", Namespace: "tigera-operator", Pull: false}, + }, + "unmanaged": { + "aws-cloud-controller-manager": {Repository: "https://kubernetes.github.io/cloud-provider-aws", Version: "0.0.8", Namespace: "kube-system", Pull: true}, + "aws-ebs-csi-driver": {Repository: "https://kubernetes-sigs.github.io/aws-ebs-csi-driver", Version: "2.31.0", Namespace: "kube-system", Pull: false}, + "cluster-autoscaler": {Repository: "https://kubernetes.github.io/autoscaler", Version: "9.37.0", Namespace: "kube-system", Pull: false}, + "tigera-operator": {Repository: "https://docs.projectcalico.org/charts", Version: "v3.28.0", Namespace: "tigera-operator", Pull: true}, + }, + }, + }, +} + +func (b *AWSBuilder) pullProviderCharts(n nodes.Node, clusterConfigSpec *commons.ClusterConfigSpec, keosSpec commons.KeosSpec, clusterCredentials commons.ClusterCredentials, clusterType string) error { + if clusterConfigSpec.EKSLBController && clusterType == "managed" { + for name, chart := range awsCharts.Charts[majorVersion][clusterType] { + if name == "aws-load-balancer-controller" { + chart.Pull = true + awsCharts.Charts[majorVersion][clusterType][name] = chart + } + } + } + return pullGenericCharts(n, clusterConfigSpec, keosSpec, clusterCredentials, awsCharts, clusterType) + +} + +func (b *AWSBuilder) getProviderCharts(clusterConfigSpec *commons.ClusterConfigSpec, keosSpec commons.KeosSpec, clusterType string) map[string]commons.ChartEntry { + return getGenericCharts(clusterConfigSpec, keosSpec, awsCharts, clusterType) +} + +func (b *AWSBuilder) getOverriddenCharts(charts *[]commons.Chart, clusterConfigSpec *commons.ClusterConfigSpec, clusterType string) []commons.Chart { + providerCharts := ConvertToChart(awsCharts.Charts[majorVersion][clusterType]) + for _, ovChart := range clusterConfigSpec.Charts { + for _, chart := range *providerCharts { + if chart.Name == ovChart.Name { + chart.Version = ovChart.Version + } + } + } + *charts = append(*charts, *providerCharts...) 
+ return *charts +} + func (b *AWSBuilder) getProvider() Provider { return Provider{ capxProvider: b.capxProvider, @@ -122,69 +204,106 @@ func (b *AWSBuilder) installCloudProvider(n nodes.Node, k string, privateParams } else { podsCidrBlock = "192.168.0.0/16" } - c := "helm install aws-cloud-controller-manager /stratio/helm/aws-cloud-controller-manager" + - " --kubeconfig " + k + - " --namespace kube-system" + - " --set args[0]=\"--v=2\"" + - " --set args[1]=\"--cloud-provider=aws\"" + - " --set args[2]=\"--cluster-cidr=" + podsCidrBlock + "\"" + - " --set args[3]=\"--cluster-name=" + keosCluster.Metadata.Name + "\"" - if privateParams.Private { - c += " --set image.repository=" + privateParams.KeosRegUrl + "/provider-aws/cloud-controller-manager" + cloudControllerManagerValuesFile := "/kind/aws-cloud-controller-manager-helm-values.yaml" + cloudControllerManagerHelmParams := cloudControllerHelmParams{ + ClusterName: privateParams.KeosCluster.Metadata.Name, + Private: privateParams.Private, + KeosRegUrl: privateParams.KeosRegUrl, + PodsCidr: podsCidrBlock, } - _, err := commons.ExecuteCommand(n, c, 3, 5) + // Generate the CCM helm values + cloudControllerManagerHelmValues, err := getManifest(b.capxProvider, "aws-cloud-controller-manager-helm-values.tmpl", majorVersion, cloudControllerManagerHelmParams) if err != nil { - return errors.Wrap(err, "failed to deploy aws-cloud-controller-manager Helm Chart") + return errors.Wrap(err, "failed to create cloud controller manager Helm chart values file") + } + c := "echo '" + cloudControllerManagerHelmValues + "' > " + cloudControllerManagerValuesFile + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to create cloud controller manager Helm chart values file") } - return nil -} -func (b *AWSBuilder) installCSI(n nodes.Node, k string, privateParams PrivateParams) error { - c := "helm install aws-ebs-csi-driver /stratio/helm/aws-ebs-csi-driver" + + c = "helm install aws-cloud-controller-manager /stratio/helm/aws-cloud-controller-manager" + " --kubeconfig " + k + - " --namespace " + b.csiNamespace + - " --set controller.podAnnotations.\"cluster-autoscaler\\.kubernetes\\.io/safe-to-evict-local-volumes=socket-dir\"" + " --namespace kube-system" + + " --values " + cloudControllerManagerValuesFile + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to deploy aws-cloud-controller-manager Helm Chart") + } - if privateParams.Private { - c += " --set image.repository=" + privateParams.KeosRegUrl + "/ebs-csi-driver/aws-ebs-csi-driver" + - " --set sidecars.provisioner.image.repository=" + privateParams.KeosRegUrl + "/eks-distro/kubernetes-csi/external-provisioner" + - " --set sidecars.attacher.image.repository=" + privateParams.KeosRegUrl + "/eks-distro/kubernetes-csi/external-attacher" + - " --set sidecars.snapshotter.image.repository=" + privateParams.KeosRegUrl + "/eks-distro/kubernetes-csi/external-snapshotter/csi-snapshotter" + - " --set sidecars.livenessProbe.image.repository=" + privateParams.KeosRegUrl + "/eks-distro/kubernetes-csi/livenessprobe" + - " --set sidecars.resizer.image.repository=" + privateParams.KeosRegUrl + "/eks-distro/kubernetes-csi/external-resizer" + - " --set sidecars.nodeDriverRegistrar.image.repository=" + privateParams.KeosRegUrl + "/eks-distro/kubernetes-csi/node-driver-registrar" + - " --set sidecars.volumemodifier.image.repository=" + privateParams.KeosRegUrl + "/ebs-csi-driver/volume-modifier-for-k8s" + return nil +} +func (b 
*AWSBuilder) installCSI(n nodes.Node, k string, privateParams PrivateParams, providerParams ProviderParams, chartsList map[string]commons.ChartEntry) error { + csiName := "aws-ebs-csi-driver" + csiValuesFile := "/kind/" + csiName + "-helm-values.yaml" + csiEntry := chartsList[csiName] + csiHelmReleaseParams := fluxHelmReleaseParams{ + ChartRepoRef: "keos", + ChartName: csiName, + ChartNamespace: csiEntry.Namespace, + ChartVersion: csiEntry.Version, } - _, err := commons.ExecuteCommand(n, c, 3, 5) + if !privateParams.HelmPrivate { + csiHelmReleaseParams.ChartRepoRef = csiName + } + // Generate the csiName-csi helm values + csiHelmValues, getManifestErr := getManifest(privateParams.KeosCluster.Spec.InfraProvider, csiName+"-helm-values.tmpl", majorVersion, privateParams) + if getManifestErr != nil { + return errors.Wrap(getManifestErr, "failed to generate "+csiName+"-csi helm values") + } + + c := "echo '" + csiHelmValues + "' > " + csiValuesFile + _, err := commons.ExecuteCommand(n, c, 5, 3) if err != nil { - return errors.Wrap(err, "failed to deploy AWS EBS CSI driver Helm Chart") + return errors.Wrap(err, "failed to create "+csiName+" Helm chart values file") + } + if err := configureHelmRelease(n, kubeconfigPath, "flux2_helmrelease.tmpl", csiHelmReleaseParams, privateParams.KeosCluster.Spec.HelmRepository); err != nil { + return err } return nil } -func installLBController(n nodes.Node, k string, privateParams PrivateParams, p ProviderParams) error { +func installLBController(n nodes.Node, k string, privateParams PrivateParams, p ProviderParams, chartsList map[string]commons.ChartEntry) error { + lbControllerName := "aws-load-balancer-controller" + lbControllerValuesFile := "/kind/" + lbControllerName + "-helm-values.yaml" + lbControllerEntry := chartsList[lbControllerName] clusterName := p.ClusterName - roleName := p.ClusterName + "-lb-controller-manager" + roleName := clusterName + "-lb-controller-manager" accountID := p.Credentials["AccountID"] - c := "helm install aws-load-balancer-controller /stratio/helm/aws-load-balancer-controller" + - " --kubeconfig " + k + - " --namespace kube-system" + - " --set clusterName=" + clusterName + - " --set podDisruptionBudget.minAvailable=1" + - " --set serviceAccount.annotations.\"eks\\.amazonaws\\.com/role-arn\"=arn:aws:iam::" + accountID + ":role/" + roleName - if privateParams.Private { - c += " --set image.repository=" + privateParams.KeosRegUrl + "/eks/aws-load-balancer-controller" + lbControllerManagerHelmParams := lbControllerHelmParams{ + ClusterName: privateParams.KeosCluster.Metadata.Name, + Private: privateParams.Private, + KeosRegUrl: privateParams.KeosRegUrl, + AccountID: accountID, + RoleName: roleName, } - _, err := commons.ExecuteCommand(n, c, 3, 5) + lbControllerHelmReleaseParams := fluxHelmReleaseParams{ + ChartRepoRef: "keos", + ChartName: lbControllerName, + ChartNamespace: lbControllerEntry.Namespace, + ChartVersion: lbControllerEntry.Version, + } + if !privateParams.HelmPrivate { + lbControllerHelmReleaseParams.ChartRepoRef = lbControllerName + } + // Generate the aws lb controller helm values + lbControllerHelmValues, getManifestErr := getManifest(privateParams.KeosCluster.Spec.InfraProvider, lbControllerName+"-helm-values.tmpl", majorVersion, lbControllerManagerHelmParams) + if getManifestErr != nil { + return errors.Wrap(getManifestErr, "failed to generate "+lbControllerName+"-csi helm values") + } + c := "echo '" + lbControllerHelmValues + "' > " + lbControllerValuesFile + _, err := commons.ExecuteCommand(n, c, 5, 3) if 
err != nil { - return errors.Wrap(err, "failed to deploy aws-load-balancer-controller Helm Chart") + return errors.Wrap(err, "failed to create "+lbControllerName+" Helm chart values file") + } + if err := configureHelmRelease(n, kubeconfigPath, "flux2_helmrelease.tmpl", lbControllerHelmReleaseParams, privateParams.KeosCluster.Spec.HelmRepository); err != nil { + return err } - return nil } @@ -212,14 +331,16 @@ spec: // Create the eks.config file in the container eksConfigPath := "/kind/eks.config" c = "echo \"" + eksConfigData + "\" > " + eksConfigPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create eks.config") } // Run clusterawsadm with the eks.config file previously created (this will create or update the CloudFormation stack in AWS) c = "clusterawsadm bootstrap iam create-cloudformation-stack --config " + eksConfigPath - _, err = commons.ExecuteCommand(n, c, 3, 5, envVars) + + _, err = commons.ExecuteCommand(n, c, 5, 3, envVars) if err != nil { return errors.Wrap(err, "failed to run clusterawsadm") } @@ -282,13 +403,15 @@ func (b *AWSBuilder) configureStorageClass(n nodes.Node, k string) error { if b.capxManaged { // Remove annotation from default storage class c = "kubectl --kubeconfig " + k + ` get sc -o jsonpath='{.items[?(@.metadata.annotations.storageclass\.kubernetes\.io/is-default-class=="true")].metadata.name}'` - output, err := commons.ExecuteCommand(n, c, 3, 5) + + output, err := commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to get default storage class") } if strings.TrimSpace(output) != "" && strings.TrimSpace(output) != "No resources found" { c = "kubectl --kubeconfig " + k + " annotate sc " + strings.TrimSpace(output) + " " + defaultScAnnotation + "-" - _, err = commons.ExecuteCommand(n, c, 3, 5) + + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to remove annotation from default storage class") } @@ -349,7 +472,8 @@ func (b *AWSBuilder) postInstallPhase(n nodes.Node, k string) error { var coreDNSPDBName = "coredns" c := "kubectl --kubeconfig " + kubeconfigPath + " get pdb " + coreDNSPDBName + " -n kube-system" - _, err := commons.ExecuteCommand(n, c, 3, 5) + + _, err := commons.ExecuteCommand(n, c, 5, 3) if err != nil { err = installCorednsPdb(n) if err != nil { diff --git a/pkg/cluster/internal/create/actions/createworker/azure.go b/pkg/cluster/internal/create/actions/createworker/azure.go index 0e23333395..0aaf1f3787 100644 --- a/pkg/cluster/internal/create/actions/createworker/azure.go +++ b/pkg/cluster/internal/create/actions/createworker/azure.go @@ -27,6 +27,7 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" "gopkg.in/yaml.v3" "sigs.k8s.io/kind/pkg/cluster/nodes" @@ -53,14 +54,49 @@ type AzureBuilder struct { csiNamespace string } +var azureCharts = ChartsDictionary{ + Charts: map[string]map[string]map[string]commons.ChartEntry{ + "28": { + "managed": {}, + "unmanaged": { + "azuredisk-csi-driver": {Repository: "https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts", Namespace: "kube-system", Version: "v1.28.7", Pull: false}, + "azurefile-csi-driver": {Repository: "https://raw.githubusercontent.com/kubernetes-sigs/azurefile-csi-driver/master/charts", Namespace: "kube-system", 
Version: "v1.28.7", Pull: false}, + "cloud-provider-azure": {Repository: "https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo", Namespace: "kube-system", Version: "v1.28.5", Pull: true}, + "cluster-autoscaler": {Repository: "https://kubernetes.github.io/autoscaler", Version: "9.34.1", Namespace: "kube-system", Pull: false}, + "tigera-operator": {Repository: "https://docs.projectcalico.org/charts", Version: "v3.27.3", Namespace: "tigera-operator", Pull: true}, + }, + }, + "29": { + "managed": {}, + "unmanaged": { + "azuredisk-csi-driver": {Repository: "https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts", Namespace: "kube-system", Version: "v1.29.5", Pull: false}, + "azurefile-csi-driver": {Repository: "https://raw.githubusercontent.com/kubernetes-sigs/azurefile-csi-driver/master/charts", Namespace: "kube-system", Version: "v1.29.5", Pull: false}, + "cloud-provider-azure": {Repository: "https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo", Namespace: "kube-system", Version: "v1.29.6", Pull: true}, + "cluster-autoscaler": {Repository: "https://kubernetes.github.io/autoscaler", Version: "9.35.0", Namespace: "kube-system", Pull: false}, + "tigera-operator": {Repository: "https://docs.projectcalico.org/charts", Version: "v3.27.3", Namespace: "tigera-operator", Pull: true}, + }, + }, + "30": { + "managed": {}, + "unmanaged": { + "azuredisk-csi-driver": {Repository: "https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts", Namespace: "kube-system", Version: "v1.30.1", Pull: false}, + "azurefile-csi-driver": {Repository: "https://raw.githubusercontent.com/kubernetes-sigs/azurefile-csi-driver/master/charts", Namespace: "kube-system", Version: "v1.30.2", Pull: false}, + "cloud-provider-azure": {Repository: "https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo", Namespace: "kube-system", Version: "v1.30.4", Pull: true}, + "cluster-autoscaler": {Repository: "https://kubernetes.github.io/autoscaler", Version: "9.37.0", Namespace: "kube-system", Pull: false}, + "tigera-operator": {Repository: "https://docs.projectcalico.org/charts", Version: "v3.28.0", Namespace: "tigera-operator", Pull: true}, + }, + }, + }, +} + func newAzureBuilder() *AzureBuilder { return &AzureBuilder{} } func (b *AzureBuilder) setCapx(managed bool) { b.capxProvider = "azure" - b.capxVersion = "v1.11.4" - b.capxImageVersion = "v1.11.4" + b.capxVersion = "v1.12.4" + b.capxImageVersion = "v1.12.4" b.capxName = "capz" b.capxManaged = managed b.csiNamespace = "kube-system" @@ -91,6 +127,27 @@ func (b *AzureBuilder) setSC(p ProviderParams) { } } +func (b *AzureBuilder) pullProviderCharts(n nodes.Node, clusterConfigSpec *commons.ClusterConfigSpec, keosSpec commons.KeosSpec, clusterCredentials commons.ClusterCredentials, clusterType string) error { + return pullGenericCharts(n, clusterConfigSpec, keosSpec, clusterCredentials, azureCharts, clusterType) +} + +func (b *AzureBuilder) getProviderCharts(clusterConfigSpec *commons.ClusterConfigSpec, keosSpec commons.KeosSpec, clusterType string) map[string]commons.ChartEntry { + return getGenericCharts(clusterConfigSpec, keosSpec, azureCharts, clusterType) +} + +func (b *AzureBuilder) getOverriddenCharts(charts *[]commons.Chart, clusterConfigSpec *commons.ClusterConfigSpec, clusterType string) []commons.Chart { + providerCharts := ConvertToChart(azureCharts.Charts[majorVersion][clusterType]) + for _, ovChart := range 
clusterConfigSpec.Charts { + for _, chart := range *providerCharts { + if chart.Name == ovChart.Name { + chart.Version = ovChart.Version + } + } + } + *charts = append(*charts, *providerCharts...) + return *charts +} + func (b *AzureBuilder) setCapxEnvVars(p ProviderParams) { b.capxEnvVars = []string{ "AZURE_CLIENT_SECRET_B64=" + base64.StdEncoding.EncodeToString([]byte(p.Credentials["ClientSecret"])), @@ -128,56 +185,144 @@ func (b *AzureBuilder) installCloudProvider(n nodes.Node, k string, privateParam } else { podsCidrBlock = "192.168.0.0/16" } - c := "helm install cloud-provider-azure /stratio/helm/cloud-provider-azure" + - " --kubeconfig " + k + - " --namespace kube-system" + - " --set infra.clusterName=" + keosCluster.Metadata.Name + - " --set cloudControllerManager.configureCloudRoutes=false" + - " --set 'cloudControllerManager.clusterCIDR=" + podsCidrBlock + "'" - if privateParams.Private { - c += " --set cloudControllerManager.imageRepository=" + privateParams.KeosRegUrl + "/oss/kubernetes" + - " --set cloudNodeManager.imageRepository=" + privateParams.KeosRegUrl + "/oss/kubernetes" + cloudControllerManagerValuesFile := "/kind/cloud-provider-" + keosCluster.Spec.InfraProvider + "-helm-values.yaml" + cloudControllerManagerHelmParams := cloudControllerHelmParams{ + ClusterName: privateParams.KeosCluster.Metadata.Name, + Private: privateParams.Private, + KeosRegUrl: privateParams.KeosRegUrl, + PodsCidr: podsCidrBlock, } - _, err := commons.ExecuteCommand(n, c, 3, 5) + + // Generate the CCM helm values + cloudControllerManagerHelmValues, err := getManifest(b.capxProvider, "cloud-provider-"+keosCluster.Spec.InfraProvider+"-helm-values.tmpl", majorVersion, cloudControllerManagerHelmParams) + if err != nil { + return errors.Wrap(err, "failed to create cloud controller manager Helm chart values file") + } + c := "echo '" + cloudControllerManagerHelmValues + "' > " + cloudControllerManagerValuesFile + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to create cloud controller manager Helm chart values file") + } + + c = "helm install cloud-provider-azure /stratio/helm/cloud-provider-azure" + + " --kubeconfig " + k + + " --namespace kube-system" + + " --set cloudControllerManager.replicas=1" + + " --values " + cloudControllerManagerValuesFile + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to deploy cloud-provider-azure Helm Chart") } + return nil } -func (b *AzureBuilder) installCSI(n nodes.Node, k string, privateParams PrivateParams) error { +func (b *AzureBuilder) installCSI(n nodes.Node, kubeconfigPath string, privateParams PrivateParams, providerParams ProviderParams, chartsList map[string]commons.ChartEntry) error { var c string var err error - // Deploy disk CSI driver - c = "helm install azuredisk-csi-driver /stratio/helm/azuredisk-csi-driver " + - " --kubeconfig " + k + - " --namespace " + b.csiNamespace + - " --set controller.podAnnotations.\"cluster-autoscaler\\.kubernetes\\.io/safe-to-evict-local-volumes=socket-dir\\,azure-cred\"" - - if privateParams.Private { - c += " --set image.baseRepo=" + privateParams.KeosRegUrl - } + // Workaround for azuredisk driver issue with MachineDeployments (standalone VMs) + // See: https://kubernetes.slack.com/archives/C5HJXTT9Q/p1726137253181949 + if !privateParams.KeosCluster.Spec.ControlPlane.Managed { + var ctx = context.Background() + azureDiskSecretFile := "/kind/azuredisk-azure.json" + azureDiskNamespace := 
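// Note on (b *AzureBuilder) getOverriddenCharts above (and its GCP counterpart
// further down): the inner loop ranges over *providerCharts by value, so, if
// commons.Chart is a plain struct, assigning ovChart.Version to the loop copy
// never reaches the slice that is later appended to *charts, and the user
// override is lost. A minimal sketch of an index-based loop that would persist
// the override (assuming Chart is a value type with Name/Version fields):
for _, ovChart := range clusterConfigSpec.Charts {
	for i := range *providerCharts {
		if (*providerCharts)[i].Name == ovChart.Name {
			// Write through the slice index so the change survives the append.
			(*providerCharts)[i].Version = ovChart.Version
		}
	}
}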
chartsList["azuredisk-csi-driver"].Namespace + nodesIdentity := privateParams.KeosCluster.Spec.Security.NodesIdentity + + matchResourceGroup := strings.Split(nodesIdentity, "resourceGroups/") + var resourceGroupName string + if len(matchResourceGroup) > 1 { + resourceGroupName = strings.Split(matchResourceGroup[1], "/")[0] + } else { + resourceGroupName = "" + } + matchIdentity := strings.Split(nodesIdentity, "userAssignedIdentities/") + var managedIdentityName string + if len(matchIdentity) > 1 { + managedIdentityName = strings.Split(matchIdentity[1], "/")[0] + } else { + managedIdentityName = "" + } + if resourceGroupName == "" || managedIdentityName == "" { + return errors.New("failed to extract resource group name or managed identity name from managed identity") + } - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to deploy Azure Disk CSI driver Helm Chart") - } + cfg, err := commons.AzureGetConfig(providerParams.Credentials) + if err != nil { + return err + } + msiClient, err := armmsi.NewClientFactory(providerParams.Credentials["SubscriptionID"], cfg, nil) + if err != nil { + return errors.Wrap(err, "failed to create msi client") + } - // Deploy file CSI driver - c = "helm install azurefile-csi-driver /stratio/helm/azurefile-csi-driver " + - " --kubeconfig " + k + - " --namespace " + b.csiNamespace + - " --set controller.podAnnotations.\"cluster-autoscaler\\.kubernetes\\.io/safe-to-evict-local-volumes=socket-dir\\,azure-cred\"" + managedIdentity, err := msiClient.NewUserAssignedIdentitiesClient().Get(ctx, resourceGroupName, managedIdentityName, nil) + if err != nil { + return errors.Wrap(err, "failed to retrieve managed identity info") + } + // Extract the principalId + objectIDIdentity := *managedIdentity.Properties.ClientID + + azureDiskParams := struct { + TenantID string + SubscriptionID string + KeosClusterName string + Region string + Networks commons.Networks + UserAssignedIdentityID string + }{ + TenantID: providerParams.Credentials["TenantID"], + SubscriptionID: providerParams.Credentials["SubscriptionID"], + KeosClusterName: providerParams.ClusterName, + Region: providerParams.Region, + Networks: privateParams.KeosCluster.Spec.Networks, + UserAssignedIdentityID: objectIDIdentity, + } - if privateParams.Private { - c += " --set image.baseRepo=" + privateParams.KeosRegUrl + // Generate azuredisk driver secret + azureDiskSecret, getManifestErr := getManifest(privateParams.KeosCluster.Spec.InfraProvider, "azuredisk-azure-json.tmpl", majorVersion, azureDiskParams) + if getManifestErr != nil { + return errors.Wrap(getManifestErr, "failed to generate azuredisk driver config") + } + c = "echo '" + azureDiskSecret + "' > " + azureDiskSecretFile + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to create azuredisk driver config") + } + c = "kubectl --kubeconfig " + kubeconfigPath + " create secret generic azure-cloud-provider -n " + + azureDiskNamespace + " --from-file=cloud-config=/kind/azuredisk-azure.json" + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to create azuredisk secret") + } } - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to deploy Azure File CSI driver Helm Chart") + for _, csiName := range []string{"azuredisk-csi-driver", "azurefile-csi-driver"} { + csiValuesFile := "/kind/" + csiName + "-helm-values.yaml" + csiEntry := chartsList[csiName] + csiHelmReleaseParams := 
fluxHelmReleaseParams{ + ChartRepoRef: "keos", + ChartName: csiName, + ChartNamespace: csiEntry.Namespace, + ChartVersion: csiEntry.Version, + } + if !privateParams.HelmPrivate { + csiHelmReleaseParams.ChartRepoRef = csiName + } + // Generate the csiName-csi helm values + csiHelmValues, getManifestErr := getManifest(privateParams.KeosCluster.Spec.InfraProvider, csiName+"-helm-values.tmpl", majorVersion, privateParams) + if getManifestErr != nil { + return errors.Wrap(getManifestErr, "failed to generate "+csiName+"-csi helm values") + } + c = "echo '" + csiHelmValues + "' > " + csiValuesFile + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to create "+csiName+" Helm chart values file") + } + if err := configureHelmRelease(n, kubeconfigPath, "flux2_helmrelease.tmpl", csiHelmReleaseParams, privateParams.KeosCluster.Spec.HelmRepository); err != nil { + return err + } } return nil @@ -229,14 +374,15 @@ func (b *AzureBuilder) configureStorageClass(n nodes.Node, k string) error { if b.capxManaged { // Remove annotation from default storage class c = "kubectl --kubeconfig " + k + ` get sc -o jsonpath='{.items[?(@.metadata.annotations.storageclass\.kubernetes\.io/is-default-class=="true")].metadata.name}'` - output, err := commons.ExecuteCommand(n, c, 3, 5) + output, err := commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to get default storage class") } if strings.TrimSpace(output) != "" && strings.TrimSpace(output) != "No resources found" { c = "kubectl --kubeconfig " + k + " annotate sc " + strings.TrimSpace(output) + " " + defaultScAnnotation + "-" - _, err = commons.ExecuteCommand(n, c, 3, 5) + + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to remove annotation from default storage class") } @@ -326,6 +472,7 @@ func (b *AzureBuilder) getOverrideVars(p ProviderParams, networks commons.Networ func (b *AzureBuilder) postInstallPhase(n nodes.Node, k string) error { var coreDNSPDBName = "coredns" + if b.capxManaged { coreDNSPDBName = "coredns-pdb" @@ -350,7 +497,7 @@ func (b *AzureBuilder) postInstallPhase(n nodes.Node, k string) error { } c := "kubectl --kubeconfig " + kubeconfigPath + " get pdb " + coreDNSPDBName + " -n kube-system" - _, err := commons.ExecuteCommand(n, c, 3, 5) + _, err := commons.ExecuteCommand(n, c, 5, 3) if err != nil { err = installCorednsPdb(n) if err != nil { diff --git a/pkg/cluster/internal/create/actions/createworker/createworker.go b/pkg/cluster/internal/create/actions/createworker/createworker.go index 42ab83fc21..5defbf13f9 100644 --- a/pkg/cluster/internal/create/actions/createworker/createworker.go +++ b/pkg/cluster/internal/create/actions/createworker/createworker.go @@ -25,6 +25,7 @@ import ( "encoding/json" "io" "os" + "regexp" "strings" "sigs.k8s.io/kind/pkg/cluster/internal/create/actions" @@ -57,6 +58,12 @@ type HelmRegistry struct { Type string } +type CMHelmRelease struct { + CMName string + CMNamespace string + CMValue string +} + const ( kubeconfigPath = "/kind/worker-cluster.kubeconfig" workKubeconfigPath = ".kube/config" @@ -67,7 +74,7 @@ const ( cniDefaultFile = "/kind/manifests/default-cni.yaml" storageDefaultPath = "/kind/manifests/default-storage.yaml" infraGCPVersion = "v1.6.1" - infraAWSVersion = "v2.2.1" + infraAWSVersion = "v2.5.2" ) var PathsToBackupLocally = []string{ @@ -75,15 +82,21 @@ var PathsToBackupLocally = []string{ "/kind/manifests", } +var majorVersion = "" + //go:embed 
files/common/allow-all-egress_netpol.yaml var allowCommonEgressNetPol string //go:embed files/gcp/rbac-loadbalancing.yaml var rbacInternalLoadBalancing string +//go:embed files/aws/aws-node_rbac.yaml +var rbacAWSNode string + //go:embed files/gcp/coredns_*.yaml var gcpCoreDNSDeploy embed.FS + // NewAction returns a new action for installing default CAPI func NewAction(vaultPassword string, descriptorPath string, moveManagement bool, avoidCreation bool, keosCluster commons.KeosCluster, clusterCredentials commons.ClusterCredentials, clusterConfig *commons.ClusterConfig) actions.Action { return &action{ @@ -103,6 +116,7 @@ func (a *action) Execute(ctx *actions.ActionContext) error { var err error var keosRegistry KeosRegistry var helmRegistry HelmRegistry + majorVersion = strings.Split(a.keosCluster.Spec.K8SVersion, ".")[1] // Get the target node n, err := ctx.GetNode() @@ -123,6 +137,20 @@ func (a *action) Execute(ctx *actions.ActionContext) error { infra := newInfra(providerBuilder) provider := infra.buildProvider(providerParams) + ctx.Status.Start("Pulling initial Helm Charts 🧭") + + err = loginHelmRepo(n, a.keosCluster, a.clusterCredentials, &helmRegistry, infra, providerParams) + if err != nil { + return err + } + + err = infra.pullProviderCharts(n, &a.clusterConfig.Spec, a.keosCluster.Spec, a.clusterCredentials) + if err != nil { + return err + } + + ctx.Status.End(true) + for _, registry := range a.keosCluster.Spec.DockerRegistries { if registry.KeosRegistry { keosRegistry.url = registry.URL @@ -142,6 +170,7 @@ func (a *action) Execute(ctx *actions.ActionContext) error { } awsEKSEnabled := a.keosCluster.Spec.InfraProvider == "aws" && a.keosCluster.Spec.ControlPlane.Managed + azAKSEnabled := a.keosCluster.Spec.InfraProvider == "azure" && a.keosCluster.Spec.ControlPlane.Managed isMachinePool := a.keosCluster.Spec.InfraProvider != "aws" && a.keosCluster.Spec.ControlPlane.Managed gcpGKEEnabled := a.keosCluster.Spec.InfraProvider == "gcp" && a.keosCluster.Spec.ControlPlane.Managed @@ -151,6 +180,7 @@ func (a *action) Execute(ctx *actions.ActionContext) error { KeosCluster: a.keosCluster, KeosRegUrl: keosRegistry.url, Private: a.clusterConfig.Spec.Private, + HelmPrivate: a.clusterConfig.Spec.PrivateHelmRepo, } } else { privateParams = PrivateParams{ @@ -165,22 +195,25 @@ func (a *action) Execute(ctx *actions.ActionContext) error { defer ctx.Status.End(false) c = `sed -i 's/@sha256:[[:alnum:]_-].*$//g' ` + cniDefaultFile - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return err } c = `sed -i 's|docker.io|` + keosRegistry.url + `|g' /kind/manifests/default-cni.yaml` - _, err = commons.ExecuteCommand(n, c, 3, 5) + + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return err } c = `sed -i 's/{{ .PodSubnet }}/10.244.0.0\/16/g' /kind/manifests/default-cni.yaml` - _, err = commons.ExecuteCommand(n, c, 3, 5) + + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return err } c = `cat /kind/manifests/default-cni.yaml | kubectl apply -f -` - _, err = commons.ExecuteCommand(n, c, 3, 5) + + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return err } @@ -189,7 +222,8 @@ func (a *action) Execute(ctx *actions.ActionContext) error { ctx.Status.Start("Deleting local storage plugin 🎖️") defer ctx.Status.End(false) c = `kubectl delete -f ` + storageDefaultPath + ` --force` - _, err = commons.ExecuteCommand(n, c, 3, 5) + + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return err } @@ -197,22 +231,11 @@ 
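// Note on majorVersion above: despite its name it holds the Kubernetes *minor*
// version, which is the key used by the per-version chart dictionaries ("28",
// "29", "30") and, presumably, by the versioned template lookups done via
// getManifest. Illustration only (not code from the patch):
k8sVersion := "1.30.4"                            // spec.k8sVersion from the descriptor
minor := strings.Split(k8sVersion, ".")[1]        // "30"
_ = azureCharts.Charts[minor]["unmanaged"]        // selects the 1.30.x chart set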
func (a *action) Execute(ctx *actions.ActionContext) error { } + chartsList := infra.getProviderCharts(&a.clusterConfig.Spec, a.keosCluster.Spec) + ctx.Status.Start("Installing CAPx 🎖️") defer ctx.Status.End(false) - helmRegistry.Type = a.keosCluster.Spec.HelmRepository.Type - helmRegistry.URL = a.keosCluster.Spec.HelmRepository.URL - if a.keosCluster.Spec.HelmRepository.Type != "generic" { - urlLogin := strings.Split(strings.Split(helmRegistry.URL, "//")[1], "/")[0] - helmRegistry.User, helmRegistry.Pass, err = infra.getRegistryCredentials(providerParams, urlLogin) - if err != nil { - return errors.Wrap(err, "failed to get helm registry credentials") - } - } else { - helmRegistry.User = a.clusterCredentials.HelmRepositoryCredentials["User"] - helmRegistry.Pass = a.clusterCredentials.HelmRepositoryCredentials["Pass"] - } - for _, registry := range a.keosCluster.Spec.DockerRegistries { if registry.KeosRegistry { keosRegistry.url = registry.URL @@ -236,7 +259,8 @@ func (a *action) Execute(ctx *actions.ActionContext) error { " --docker-server=" + strings.Split(keosRegistry.url, "/")[0] + " --docker-username=" + keosRegistry.user + " --docker-password=" + keosRegistry.pass - _, err = commons.ExecuteCommand(n, c, 3, 5) + + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create docker-registry secret") } @@ -247,7 +271,8 @@ func (a *action) Execute(ctx *actions.ActionContext) error { // Create provider-system namespace c = "kubectl create namespace " + provider.capxName + "-system" - _, err = commons.ExecuteCommand(n, c, 3, 5) + + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create "+provider.capxName+"-system namespace") } @@ -258,26 +283,40 @@ func (a *action) Execute(ctx *actions.ActionContext) error { " --docker-username=" + keosRegistry.user + " --docker-password=" + keosRegistry.pass + " --namespace=" + provider.capxName + "-system" - _, err = commons.ExecuteCommand(n, c, 3, 5) + + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create docker-registry secret") } // Add imagePullSecrets to infrastructure-components.yaml c = "sed -i '/containers:/i\\ imagePullSecrets:\\n - name: regcred' " + infraComponents - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to add imagePullSecrets to infrastructure-components.yaml") } } - if privateParams.Private { - err = provider.deployCertManager(n, keosRegistry.url, "") - if err != nil { - return err - } + certManagerVersion := getChartVersion(a.clusterConfig.Spec.Charts, "cert-manager") + if certManagerVersion == "" { + return errors.New("Cert manager helm chart version cannot be found ") + } + err = provider.deployCertManager(n, keosRegistry.url, "", privateParams, make(map[string]commons.ChartEntry)) + if err != nil { + return err + } + + c = "echo \"cert-manager:\" >> /root/.cluster-api/clusterctl.yaml && " + + "echo \" version: " + certManagerVersion + "\" >> /root/.cluster-api/clusterctl.yaml " + + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to set cert-manager version in clusterctl config") + } + + if privateParams.Private { c = "echo \"images:\" >> /root/.cluster-api/clusterctl.yaml && " + "echo \" cluster-api:\" >> /root/.cluster-api/clusterctl.yaml && " + "echo \" repository: " + keosRegistry.url + "/cluster-api\" >> /root/.cluster-api/clusterctl.yaml && " + @@ 
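// For reference, the two echo commands above append the lines "cert-manager:"
// and " version: <certManagerVersion>" (the version resolved via
// getChartVersion from clusterConfig.spec.charts) to
// /root/.cluster-api/clusterctl.yaml, which pins the cert-manager chart
// version that clusterctl installs instead of its built-in default.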
-295,15 +334,13 @@ func (a *action) Execute(ctx *actions.ActionContext) error { "echo \" repository: " + keosRegistry.url + "/cluster-api-azure\" >> /root/.cluster-api/clusterctl.yaml && " + "echo \" cert-manager:\" >> /root/.cluster-api/clusterctl.yaml && " + "echo \" repository: " + keosRegistry.url + "/cert-manager\" >> /root/.cluster-api/clusterctl.yaml " - - _, err = commons.ExecuteCommand(n, c, 3, 5) - + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to add private image registry clusterctl config") } c = `sed -i 's/@sha256:[[:alnum:]_-].*$//g' /root/.cluster-api/local-repository/infrastructure-gcp/` + infraGCPVersion + `/infrastructure-components.yaml` - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return err } @@ -313,8 +350,7 @@ func (a *action) Execute(ctx *actions.ActionContext) error { "echo \" repository: " + keosRegistry.url + "/cluster-api-gcp\" >> /root/.cluster-api/clusterctl.yaml && " + "echo \" tag: " + provider.capxImageVersion + "\" >> /root/.cluster-api/clusterctl.yaml " - _, err = commons.ExecuteCommand(n, c, 3, 5) - + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to overwrite image registry clusterctl config") } @@ -332,15 +368,22 @@ func (a *action) Execute(ctx *actions.ActionContext) error { ctx.Status.Start("Generating secrets file 📝🗝️") defer ctx.Status.End(false) - commons.EnsureSecretsFile(a.keosCluster.Spec, a.vaultPassword, a.clusterCredentials) + err = commons.EnsureSecretsFile(a.keosCluster.Spec, a.vaultPassword, a.clusterCredentials) + if err != nil { + return errors.Wrap(err, "failed to ensure the secrets file") + } - commons.RewriteDescriptorFile(a.descriptorPath) + err = commons.RewriteDescriptorFile(a.descriptorPath) + if err != nil { + return errors.Wrap(err, "failed to rewrite the descriptor file") + } defer ctx.Status.End(true) // End Generating secrets file // Create namespace for CAPI clusters (it must exists) c = "kubectl create ns " + capiClustersNamespace - _, err = commons.ExecuteCommand(n, c, 3, 5) + + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create cluster's Namespace") } @@ -348,7 +391,8 @@ func (a *action) Execute(ctx *actions.ActionContext) error { // Create the allow-all-egress network policy file in the container allowCommonEgressNetPolPath := "/kind/allow-all-egress_netpol.yaml" c = "echo \"" + allowCommonEgressNetPol + "\" > " + allowCommonEgressNetPolPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to write the allow-all-egress network policy") } @@ -381,7 +425,7 @@ func (a *action) Execute(ctx *actions.ActionContext) error { if a.clusterConfig != nil { // Apply cluster manifests c = "kubectl apply -f " + manifestsPath + "/clusterconfig.yaml" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 5) if err != nil { return errors.Wrap(err, "failed to apply clusterconfig manifests") } @@ -389,20 +433,20 @@ func (a *action) Execute(ctx *actions.ActionContext) error { // Apply cluster manifests c = "kubectl apply -f " + manifestsPath + "/keoscluster.yaml" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 10, 5) if err != nil { return errors.Wrap(err, "failed to apply keoscluster manifests") } c = "kubectl -n " + capiClustersNamespace + " get cluster " + 
a.keosCluster.Metadata.Name - _, err = commons.ExecuteCommand(n, c, 3, 45) + _, err = commons.ExecuteCommand(n, c, 25, 5) if err != nil { return errors.Wrap(err, "failed to wait for cluster") } // Wait for the control plane initialization c = "kubectl -n " + capiClustersNamespace + " wait --for=condition=ControlPlaneInitialized --timeout=25m cluster " + a.keosCluster.Metadata.Name - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create the workload cluster") } @@ -414,14 +458,14 @@ func (a *action) Execute(ctx *actions.ActionContext) error { // Get the workload cluster kubeconfig c = "clusterctl -n " + capiClustersNamespace + " get kubeconfig " + a.keosCluster.Metadata.Name + " | tee " + kubeconfigPath - kubeconfig, err := commons.ExecuteCommand(n, c, 3, 5) + kubeconfig, err := commons.ExecuteCommand(n, c, 5, 3) if err != nil || kubeconfig == "" { return errors.Wrap(err, "failed to get workload cluster kubeconfig") } // Create worker-kubeconfig secret for keos cluster c = "kubectl -n " + capiClustersNamespace + " create secret generic worker-kubeconfig --from-file " + kubeconfigPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create worker-kubeconfig secret") } @@ -443,90 +487,107 @@ func (a *action) Execute(ctx *actions.ActionContext) error { // Install unmanaged cluster addons if !a.keosCluster.Spec.ControlPlane.Managed { - if a.keosCluster.Spec.InfraProvider != "gcp" { - ctx.Status.Start("Installing cloud-provider in workload cluster ☁️") - defer ctx.Status.End(false) - err = infra.installCloudProvider(n, kubeconfigPath, privateParams) - if err != nil { - return errors.Wrap(err, "failed to install external cloud-provider in workload cluster") - } - ctx.Status.End(true) // End Installing cloud-provider in workload cluster - } else if !gcpGKEEnabled { - // XXX Ref kubernetes/kubernetes#86793 Starting from v1.18, gcp cloud-controller-manager requires RBAC to patch,update service/status (in-tree) - ctx.Status.Start("Creating Kubernetes RBAC for internal loadbalancing 🔐") - defer ctx.Status.End(false) + ctx.Status.Start("Installing cloud-provider in workload cluster ☁️") + defer ctx.Status.End(false) - requiredInternalNginx, err := infra.internalNginx(providerParams, a.keosCluster.Spec.Networks) - if err != nil { - return err - } - if requiredInternalNginx { - rbacInternalLoadBalancingPath := "/kind/internalloadbalancing_rbac.yaml" - // Deploy Kubernetes RBAC internal loadbalancing - c = "echo \"" + rbacInternalLoadBalancing + "\" > " + rbacInternalLoadBalancingPath - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to write the kubernetes RBAC internal loadbalancing") - } - c = "kubectl --kubeconfig " + kubeconfigPath + " apply -f " + rbacInternalLoadBalancingPath - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to the kubernetes RBAC internal loadbalancing") - } - } - ctx.Status.End(true) + err = infra.installCloudProvider(n, kubeconfigPath, privateParams) + if err != nil { + return errors.Wrap(err, "failed to install external cloud-provider in workload cluster") } + ctx.Status.End(true) // End Installing cloud-provider in workload cluster + } + + if !azAKSEnabled { ctx.Status.Start("Installing Calico in workload cluster 🔌") defer ctx.Status.End(false) - err = installCalico(n, kubeconfigPath, 
privateParams, allowCommonEgressNetPolPath, false) - if err != nil { - return errors.Wrap(err, "failed to install Calico in workload cluster") - } - ctx.Status.End(true) // End Installing Calico in workload cluster + isNetPolEngine := gcpGKEEnabled || awsEKSEnabled - ctx.Status.Start("Installing CSI in workload cluster 💾") - defer ctx.Status.End(false) + err = installCalico(n, kubeconfigPath, privateParams, isNetPolEngine, false) - err = infra.installCSI(n, kubeconfigPath, privateParams) if err != nil { - return errors.Wrap(err, "failed to install CSI in workload cluster") + return errors.Wrap(err, "failed to install Calico in workload cluster") } - ctx.Status.End(true) + // After calico is installed patch the tigera-operator clusterrole to allow resourcequotas creation + if gcpGKEEnabled { + c = "kubectl --kubeconfig " + kubeconfigPath + " get clusterrole tigera-operator -o jsonpath='{.rules}'" + tigerarules, err := commons.ExecuteCommand(n, c, 3, 5) + if err != nil { + return errors.Wrap(err, "failed to get tigera-operator clusterrole rules") + } + var rules []json.RawMessage + err = json.Unmarshal([]byte(tigerarules), &rules) + if err != nil { + return errors.Wrap(err, "failed to parse tigera-operator clusterrole rules") + } + // create, delete + rules = append(rules, json.RawMessage(`{"apiGroups": [""],"resources": ["resourcequotas"],"verbs": ["create"]}`)) + newtigerarules, err := json.Marshal(rules) + if err != nil { + return errors.Wrap(err, "failed to marshal tigera-operator clusterrole rules") + } + c = "kubectl --kubeconfig " + kubeconfigPath + " patch clusterrole tigera-operator -p '{\"rules\": " + string(newtigerarules) + "}'" + _, err = commons.ExecuteCommand(n, c, 3, 5) + if err != nil { + return errors.Wrap(err, "failed to patch tigera-operator clusterrole") + } + } + ctx.Status.End(true) // End Installing Calico in workload cluster } ctx.Status.Start("Preparing nodes in workload cluster 📦") defer ctx.Status.End(false) + if awsEKSEnabled { c = "kubectl -n capa-system rollout restart deployment capa-controller-manager" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to reload capa-controller-manager") } + c = "kubectl -n capa-system rollout status deployment capa-controller-manager" + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to wait for capa-controller-manager") + } + // Patch aws-node clusterrole with the required permissions + // https://github.com/aws/amazon-vpc-cni-k8s?tab=readme-ov-file#annotate_pod_ip-v193 + rbacAWSNodePath := "/kind/aws-node_rbac.yaml" + + // Deploy Kubernetes additional RBAC aws node + c = "echo \"" + rbacAWSNode + "\" > " + rbacAWSNodePath + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to write the kubernetes additional RBAC aws node") + } + c = "kubectl --kubeconfig " + kubeconfigPath + " apply -f " + rbacAWSNodePath + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to apply the kubernetes additional RBAC aws node") + } } if isMachinePool { // Wait for all the machine pools to be ready c = "kubectl -n " + capiClustersNamespace + " wait --for=condition=Ready --timeout=15m --all mp" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create the worker Cluster") } - // Wait for container metrics to be available c = 
"kubectl --kubeconfig " + kubeconfigPath + " -n kube-system rollout status deployment -l k8s-app=metrics-server --timeout=90s" - _, err = commons.ExecuteCommand(n, c, 3, 15) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to wait for container metrics to be available") } } else { // Wait for all the machine deployments to be ready c = "kubectl -n " + capiClustersNamespace + " wait --for=condition=Ready --timeout=15m --all md" - _, err = commons.ExecuteCommand(n, c, 3, 5) + + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create the worker Cluster") } @@ -534,18 +595,13 @@ func (a *action) Execute(ctx *actions.ActionContext) error { if !a.keosCluster.Spec.ControlPlane.Managed && *a.keosCluster.Spec.ControlPlane.HighlyAvailable { // Wait for all control planes to be ready - c = "kubectl -n " + capiClustersNamespace + " wait --for=jsonpath=\"{.status.readyReplicas}\"=3 --timeout 10m kubeadmcontrolplanes " + a.keosCluster.Metadata.Name + "-control-plane" - _, err = commons.ExecuteCommand(n, c, 3, 5) + c = "kubectl -n " + capiClustersNamespace + + " wait --for=jsonpath=\"{.status.readyReplicas}\"=3" + + " --timeout 10m kubeadmcontrolplanes " + a.keosCluster.Metadata.Name + "-control-plane" + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create the worker Cluster") } - if a.keosCluster.Spec.InfraProvider == "azure" { - c = "kubectl --kubeconfig " + kubeconfigPath + " scale deployment cloud-controller-manager -n kube-system --replicas=2" - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to scale deployment cloud-controller-manager") - } - } } ctx.Status.End(true) // End Preparing nodes in workload cluster @@ -582,7 +638,7 @@ func (a *action) Execute(ctx *actions.ActionContext) error { combinedCoreDNS := combinedCoreDNSContents.String() coreDNSTemplate := "/kind/coredns-configmap.yaml" - coreDNSConfigmap, err := getManifest(a.keosCluster.Spec.InfraProvider, "coredns_configmap.tmpl", a.keosCluster.Spec) + coreDNSConfigmap, err := getManifest(a.keosCluster.Spec.InfraProvider, "coredns_configmap.tmpl", majorVersion,a.keosCluster.Spec) if err != nil { return errors.Wrap(err, "failed to get CoreDNS file") } @@ -624,36 +680,13 @@ func (a *action) Execute(ctx *actions.ActionContext) error { } } - if awsEKSEnabled { - c = "kubectl --kubeconfig " + kubeconfigPath + " get clusterrole aws-node -o jsonpath='{.rules}'" - awsnoderules, err := commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to get aws-node clusterrole rules") - } - var rules []json.RawMessage - err = json.Unmarshal([]byte(awsnoderules), &rules) - if err != nil { - return errors.Wrap(err, "failed to parse aws-node clusterrole rules") - } - rules = append(rules, json.RawMessage(`{"apiGroups": [""],"resources": ["pods"],"verbs": ["patch"]}`)) - newawsnoderules, err := json.Marshal(rules) - if err != nil { - return errors.Wrap(err, "failed to marshal aws-node clusterrole rules") - } - c = "kubectl --kubeconfig " + kubeconfigPath + " patch clusterrole aws-node -p '{\"rules\": " + string(newawsnoderules) + "}'" - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to patch aws-node clusterrole") - } - } - // Ensure CoreDNS replicas are assigned to different nodes - // once more than 2 control planes or workers are running - c = "kubectl --kubeconfig " + kubeconfigPath + " -n 
kube-system rollout restart deployment coredns" - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to restart coredns deployment") - } + // once more than 2 control planes or workers are running + c = "kubectl --kubeconfig " + kubeconfigPath + " -n kube-system rollout restart deployment coredns" + _, err = commons.ExecuteCommand(n, c, 3, 5) + if err != nil { + return errors.Wrap(err, "failed to restart coredns deployment") + } // Wait for CoreDNS deployment to be ready c = "kubectl --kubeconfig " + kubeconfigPath + " -n kube-system rollout status deployment coredns" @@ -662,41 +695,42 @@ func (a *action) Execute(ctx *actions.ActionContext) error { return errors.Wrap(err, "failed to wait for coredns ready") } - // Apply custom CoreDNS configuration - if len(a.keosCluster.Spec.Dns.Forwarders) > 0 && (!awsEKSEnabled || !gcpGKEEnabled) { - ctx.Status.Start("Customizing CoreDNS configuration 🪡") - defer ctx.Status.End(false) - - err = customCoreDNS(n, a.keosCluster) - if err != nil { - return errors.Wrap(err, "failed to customized CoreDNS configuration") - } - - ctx.Status.End(true) // End Customizing CoreDNS configuration - } - ctx.Status.Start("Installing CAPx in workload cluster 🎖️") defer ctx.Status.End(false) - - if privateParams.Private { - err = provider.deployCertManager(n, keosRegistry.url, kubeconfigPath) - if err != nil { - return err - } + err = provider.deployCertManager(n, keosRegistry.url, kubeconfigPath, privateParams, chartsList) + if err != nil { + return err } - err = provider.installCAPXWorker(n, a.keosCluster, kubeconfigPath, allowCommonEgressNetPolPath) + err = provider.installCAPXWorker(n, a.keosCluster, kubeconfigPath) if err != nil { return err } - err = provider.configCAPIWorker(n, a.keosCluster, kubeconfigPath, allowCommonEgressNetPolPath) + err = provider.configCAPIWorker(n, a.keosCluster, kubeconfigPath) if err != nil { return err } - ctx.Status.End(true) // End Installing CAPx in workload cluster + ctx.Status.Start("Configuring Flux in workload cluster 🧭") + defer ctx.Status.End(false) + + err = configureFlux(n, kubeconfigPath, privateParams, helmRegistry, a.keosCluster.Spec, chartsList) + if err != nil { + return errors.Wrap(err, "failed to install Flux in workload cluster") + } + ctx.Status.End(true) // End Installing Flux in workload cluster + + ctx.Status.Start("Reconciling the existing Helm charts in workload cluster 🧲") + defer ctx.Status.End(false) + + err = reconcileCharts(n, kubeconfigPath, privateParams, a.keosCluster.Spec, chartsList) + if err != nil { + return errors.Wrap(err, "failed to reconcile with Flux the existing Helm charts in workload cluster") + } + ctx.Status.End(true) // End Installing Flux in workload cluster + ctx.Status.Start("Enabling workload cluster's self-healing 🏥") defer ctx.Status.End(false) @@ -707,172 +741,152 @@ func (a *action) Execute(ctx *actions.ActionContext) error { ctx.Status.End(true) // End Enabling workload cluster's self-healing - ctx.Status.Start("Installing StorageClass in workload cluster 💾") + ctx.Status.Start("Configuring Network Policy Engine in workload cluster 🚧") defer ctx.Status.End(false) - err = infra.configureStorageClass(n, kubeconfigPath) + // Allow egress in tigera-operator namespace + c = "kubectl --kubeconfig " + kubeconfigPath + " -n tigera-operator apply -f " + allowCommonEgressNetPolPath + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { - return errors.Wrap(err, "failed to configure StorageClass in workload cluster") + return 
errors.Wrap(err, "failed to apply tigera-operator egress NetworkPolicy") + } + + // Allow egress in calico-system namespace + c = "kubectl --kubeconfig " + kubeconfigPath + " -n calico-system apply -f " + allowCommonEgressNetPolPath + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to apply calico-system egress NetworkPolicy") } - ctx.Status.End(true) // End Installing StorageClass in workload cluster - - - ctx.Status.End(true) // End Installing CAPx in workload cluster - // Use Calico as network policy engine in managed systems - if provider.capxProvider != "azure" { - ctx.Status.Start("Configuring Network Policy Engine in workload cluster 🚧") - defer ctx.Status.End(false) + // Allow egress in CAPX's Namespace + c = "kubectl --kubeconfig " + kubeconfigPath + " -n " + provider.capxName + "-system apply -f " + allowCommonEgressNetPolPath + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to apply CAPX's NetworkPolicy in workload cluster") + } - // Use Calico as network policy engine in managed systems - if a.keosCluster.Spec.ControlPlane.Managed { + capiDeployments := []struct { + name string + namespace string + }{ + {name: "capi-controller-manager", namespace: "capi-system"}, + {name: "capi-kubeadm-control-plane-controller-manager", namespace: "capi-kubeadm-control-plane-system"}, + {name: "capi-kubeadm-bootstrap-controller-manager", namespace: "capi-kubeadm-bootstrap-system"}, + } + allowedNamePattern := regexp.MustCompile(`^capi-kubeadm-(control-plane|bootstrap)-controller-manager$`) - err = installCalico(n, kubeconfigPath, privateParams, allowCommonEgressNetPolPath, true) + // Allow egress in CAPI's Namespaces + for _, deployment := range capiDeployments { + if !provider.capxManaged || (provider.capxManaged && !allowedNamePattern.MatchString(deployment.name)) { + c = "kubectl --kubeconfig " + kubeconfigPath + " -n " + deployment.namespace + " apply -f " + allowCommonEgressNetPolPath + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { - return errors.Wrap(err, "failed to install Network Policy Engine in workload cluster") + return errors.Wrap(err, "failed to apply CAPI's egress NetworkPolicy in namespace "+deployment.namespace) } } + } - // Create the allow and deny (global) network policy file in the container - denyallEgressIMDSGNetPolPath := "/kind/deny-all-egress-imds_gnetpol.yaml" - allowCAPXEgressIMDSGNetPolPath := "/kind/allow-egress-imds_gnetpol.yaml" + // Allow egress in cert-manager Namespace + c = "kubectl --kubeconfig " + kubeconfigPath + " -n cert-manager apply -f " + allowCommonEgressNetPolPath + _, err = commons.ExecuteCommand(n, c, 5, 3) - // Allow egress in kube-system Namespace - c = "kubectl --kubeconfig " + kubeconfigPath + " -n kube-system apply -f " + allowCommonEgressNetPolPath - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to apply kube-system egress NetworkPolicy") - } - denyEgressIMDSGNetPol, err := provider.getDenyAllEgressIMDSGNetPol() - if err != nil { - return err - } + if err != nil { + return errors.Wrap(err, "failed to apply cert-manager's NetworkPolicy") + } - c = "echo \"" + denyEgressIMDSGNetPol + "\" > " + denyallEgressIMDSGNetPolPath - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to write the deny-all-traffic-to-aws-imds global network policy") - } - allowEgressIMDSGNetPol, err := provider.getAllowCAPXEgressIMDSGNetPol() - if err != nil { - 
return err - } + ctx.Status.End(true) // End Configuring Network Policy Engine in workload cluster - c = "echo \"" + allowEgressIMDSGNetPol + "\" > " + allowCAPXEgressIMDSGNetPolPath - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to write the allow-traffic-to-aws-imds-capa global network policy") - } + if !a.keosCluster.Spec.ControlPlane.Managed { - // Deny CAPA egress to AWS IMDS - c = "kubectl --kubeconfig " + kubeconfigPath + " apply -f " + denyallEgressIMDSGNetPolPath - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to apply deny IMDS traffic GlobalNetworkPolicy") - } + ctx.Status.Start("Installing CSI in workload cluster 💾") + defer ctx.Status.End(false) - // Allow CAPA egress to AWS IMDS - c = "kubectl --kubeconfig " + kubeconfigPath + " apply -f " + allowCAPXEgressIMDSGNetPolPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + err = infra.installCSI(n, kubeconfigPath, privateParams, providerParams, chartsList) if err != nil { - return errors.Wrap(err, "failed to apply allow CAPX as egress GlobalNetworkPolicy") + return errors.Wrap(err, "failed to install CSI in workload cluster") } - // After calico is installed and network policies are applied, patch the tigera-operator clusterrole to allow resourcequotas creation - if gcpGKEEnabled { - c = "kubectl --kubeconfig " + kubeconfigPath + " get clusterrole tigera-operator -o jsonpath='{.rules}'" - tigerarules, err := commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to get tigera-operator clusterrole rules") - } - var rules []json.RawMessage - err = json.Unmarshal([]byte(tigerarules), &rules) - if err != nil { - return errors.Wrap(err, "failed to parse tigera-operator clusterrole rules") - } - // create, delete - rules = append(rules, json.RawMessage(`{"apiGroups": [""],"resources": ["resourcequotas"],"verbs": ["create"]}`)) - newtigerarules, err := json.Marshal(rules) - if err != nil { - return errors.Wrap(err, "failed to marshal tigera-operator clusterrole rules") - } - c = "kubectl --kubeconfig " + kubeconfigPath + " patch clusterrole tigera-operator -p '{\"rules\": " + string(newtigerarules) + "}'" - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to patch tigera-operator clusterrole") - } - } - ctx.Status.End(true) // End Installing Network Policy Engine in workload cluster + ctx.Status.End(true) + } + ctx.Status.Start("Installing StorageClass in workload cluster 💾") + defer ctx.Status.End(false) + + err = infra.configureStorageClass(n, kubeconfigPath) + if err != nil { + return errors.Wrap(err, "failed to configure StorageClass in workload cluster") + } + ctx.Status.End(true) // End Installing StorageClass in workload cluster if a.keosCluster.Spec.DeployAutoscaler && !isMachinePool { ctx.Status.Start("Installing cluster-autoscaler in workload cluster 🗚") defer ctx.Status.End(false) - c = "helm install cluster-autoscaler /stratio/helm/cluster-autoscaler" + - " --kubeconfig " + kubeconfigPath + - " --namespace kube-system" + - " --set autoDiscovery.clusterName=" + a.keosCluster.Metadata.Name + - " --set autoDiscovery.labels[0].namespace=cluster-" + a.keosCluster.Metadata.Name + - " --set cloudProvider=clusterapi" + - " --set clusterAPIMode=incluster-incluster" + - " --set replicaCount=2" - - if privateParams.Private { - c += " --set image.repository=" + keosRegistry.url + "/autoscaling/cluster-autoscaler" + err = deployClusterAutoscaler(n, chartsList, 
privateParams, capiClustersNamespace, a.moveManagement) + if err != nil { + return errors.Wrap(err, "failed to install cluster-autoscaler in workload cluster") } - _, err = commons.ExecuteCommand(n, c, 3, 5) + ctx.Status.End(true) // End Installing cluster-autoscaler in workload cluster + } + + ctx.Status.Start("Installing keos cluster operator in workload cluster 💻") + defer ctx.Status.End(false) + + err = provider.deployClusterOperator(n, privateParams, a.clusterCredentials, keosRegistry, a.clusterConfig, kubeconfigPath, true, helmRegistry) + if err != nil { + return errors.Wrap(err, "failed to deploy cluster operator in workload cluster") + } + + ctx.Status.End(true) // Installing keos cluster operator in workload cluster + + // Apply custom CoreDNS configuration + if len(a.keosCluster.Spec.Dns.Forwarders) > 0 && (!awsEKSEnabled || !gcpGKEEnabled) { + ctx.Status.Start("Customizing CoreDNS configuration 🪡") + defer ctx.Status.End(false) + + err = customCoreDNS(n, a.keosCluster) if err != nil { - return errors.Wrap(err, "failed to deploy cluster-autoscaler in workload cluster") + return errors.Wrap(err, "failed to customized CoreDNS configuration") } - if !a.moveManagement { - autoscalerRBACPath := "/kind/autoscaler_rbac.yaml" + ctx.Status.End(true) // End Customizing CoreDNS configuration + } - autoscalerRBAC, err := getManifest("common", "autoscaler_rbac.tmpl", a.keosCluster) - if err != nil { - return errors.Wrap(err, "failed to get CA RBAC file") - } + if provider.capxProvider == "gcp" { + // XXX Ref kubernetes/kubernetes#86793 Starting from v1.18, gcp cloud-controller-manager requires RBAC to patch,update service/status (in-tree) + ctx.Status.Start("Creating Kubernetes RBAC for internal loadbalancing 🔐") + defer ctx.Status.End(false) - c = "echo '" + autoscalerRBAC + "' > " + autoscalerRBACPath - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to create CA RBAC file") - } + requiredInternalNginx, err := infra.internalNginx(providerParams, a.keosCluster.Spec.Networks) + if err != nil { + return err + } - // Create namespace for CAPI clusters (it must exists) in worker cluster - c = "kubectl --kubeconfig " + kubeconfigPath + " create ns " + capiClustersNamespace - _, err = commons.ExecuteCommand(n, c, 3, 5) + if requiredInternalNginx { + rbacInternalLoadBalancingPath := "/kind/internalloadbalancing_rbac.yaml" + + // Deploy Kubernetes RBAC internal loadbalancing + c = "echo \"" + rbacInternalLoadBalancing + "\" > " + rbacInternalLoadBalancingPath + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { - return errors.Wrap(err, "failed to create manifests Namespace") + return errors.Wrap(err, "failed to write the kubernetes RBAC internal loadbalancing") } - c = "kubectl --kubeconfig " + kubeconfigPath + " apply -f " + autoscalerRBACPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + c = "kubectl --kubeconfig " + kubeconfigPath + " apply -f " + rbacInternalLoadBalancingPath + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { - return errors.Wrap(err, "failed to apply CA RBAC") + return errors.Wrap(err, "failed to the kubernetes RBAC internal loadbalancing") } } - - ctx.Status.End(true) + ctx.Status.End(true) // End Creating Kubernetes RBAC for internal loadbalancing } - ctx.Status.Start("Installing keos cluster operator in workload cluster 💻") - defer ctx.Status.End(false) - - err = provider.deployClusterOperator(n, privateParams, a.clusterCredentials, keosRegistry, a.clusterConfig, kubeconfigPath, true, helmRegistry) - if 
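// Note on the CoreDNS guard above: a cluster is never EKS and GKE at the same
// time, so (!awsEKSEnabled || !gcpGKEEnabled) is always true and the custom
// configuration is applied even on managed EKS/GKE clusters. If the intent is
// to skip those managed offerings (which ship their own CoreDNS), the
// condition would presumably be:
//
//   if len(a.keosCluster.Spec.Dns.Forwarders) > 0 && !awsEKSEnabled && !gcpGKEEnabled {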
err != nil { - return errors.Wrap(err, "failed to deploy cluster operator in workload cluster") - } - - ctx.Status.End(true) - if awsEKSEnabled && a.clusterConfig.Spec.EKSLBController { ctx.Status.Start("Installing AWS LB controller in workload cluster ⚖️") defer ctx.Status.End(false) - err = installLBController(n, kubeconfigPath, privateParams, providerParams) + err = installLBController(n, kubeconfigPath, privateParams, providerParams, chartsList) if err != nil { return errors.Wrap(err, "failed to install AWS LB controller in workload cluster") @@ -891,13 +905,13 @@ func (a *action) Execute(ctx *actions.ActionContext) error { } c = "mkdir -p " + cloudProviderBackupPath + " && chmod -R 0755 " + cloudProviderBackupPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create cloud-provisioner backup directory") } c = "clusterctl move -n " + capiClustersNamespace + " --to-directory " + cloudProviderBackupPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to backup cloud-provisioner Objects") } @@ -917,17 +931,17 @@ func (a *action) Execute(ctx *actions.ActionContext) error { defer ctx.Status.End(false) c = "helm uninstall cluster-operator -n kube-system" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "Uninstalling cluster-operator") } // Create namespace, if not exists, for CAPI clusters in worker cluster c = "kubectl --kubeconfig " + kubeconfigPath + " get ns " + capiClustersNamespace - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { c = "kubectl --kubeconfig " + kubeconfigPath + " create ns " + capiClustersNamespace - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create manifests Namespace") } @@ -935,14 +949,14 @@ func (a *action) Execute(ctx *actions.ActionContext) error { // Pivot management role to worker cluster c = "clusterctl move -n " + capiClustersNamespace + " --to-kubeconfig " + kubeconfigPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to pivot management role to worker cluster") } // Wait for keoscluster-controller-manager deployment to be ready c = "kubectl --kubeconfig " + kubeconfigPath + " rollout status deploy keoscluster-controller-manager -n kube-system --timeout=5m" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to wait for keoscluster controller ready") } @@ -950,21 +964,21 @@ func (a *action) Execute(ctx *actions.ActionContext) error { if a.clusterConfig != nil { c = "kubectl -n " + capiClustersNamespace + " patch clusterconfig " + a.clusterConfig.Metadata.Name + " -p '{\"metadata\":{\"ownerReferences\":null,\"finalizers\":null}}' --type=merge" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to remove clusterconfig ownerReferences and finalizers") } // Move clusterConfig to workload cluster c = "kubectl -n " + capiClustersNamespace + " get clusterconfig " + a.clusterConfig.Metadata.Name + " -o json | kubectl apply --kubeconfig " + kubeconfigPath + " -f-" - _, err = 
commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to move clusterconfig to workload cluster") } // Delete clusterconfig in management cluster c = "kubectl -n " + capiClustersNamespace + " delete clusterconfig " + a.clusterConfig.Metadata.Name - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to delete clusterconfig in management cluster") } @@ -973,20 +987,20 @@ func (a *action) Execute(ctx *actions.ActionContext) error { // Move keoscluster to workload cluster c = "kubectl -n " + capiClustersNamespace + " get keoscluster " + a.keosCluster.Metadata.Name + " -o json | jq 'del(.status)' | kubectl apply --kubeconfig " + kubeconfigPath + " -f-" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to move keoscluster to workload cluster") } c = "kubectl -n " + capiClustersNamespace + " patch keoscluster " + a.keosCluster.Metadata.Name + " -p '{\"metadata\":{\"finalizers\":null}}' --type=merge" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to scale keoscluster deployment to 1") } // Delete keoscluster in management cluster c = "kubectl -n " + capiClustersNamespace + " delete keoscluster " + a.keosCluster.Metadata.Name - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to delete keoscluster in management cluster") } diff --git a/pkg/cluster/internal/create/actions/createworker/files/aws/allow-egress-imds_gnetpol.yaml b/pkg/cluster/internal/create/actions/createworker/files/aws/allow-egress-imds_gnetpol.yaml index 9e9ba4b345..9f06b798e6 100644 --- a/pkg/cluster/internal/create/actions/createworker/files/aws/allow-egress-imds_gnetpol.yaml +++ b/pkg/cluster/internal/create/actions/createworker/files/aws/allow-egress-imds_gnetpol.yaml @@ -2,7 +2,7 @@ apiVersion: crd.projectcalico.org/v1 kind: GlobalNetworkPolicy metadata: - name: allow-traffic-to-aws-imds-capa + name: allow-traffic-to-aws-imds spec: egress: - action: Allow @@ -12,6 +12,6 @@ spec: protocol: TCP order: 0 namespaceSelector: kubernetes.io/metadata.name in { 'kube-system', 'capa-system' } - selector: app.kubernetes.io/name in {'aws-ebs-csi-driver', 'aws-load-balancer-controller' } || cluster.x-k8s.io/provider == 'infrastructure-aws' || k8s-app == 'aws-cloud-controller-manager' + selector: app.kubernetes.io/name == 'aws-load-balancer-controller' || cluster.x-k8s.io/provider == 'infrastructure-aws' || k8s-app == 'aws-cloud-controller-manager' || app in {'ebs-csi-controller', 'source-controller'} types: - Egress diff --git a/pkg/cluster/internal/create/actions/createworker/files/aws/aws-node_rbac.yaml b/pkg/cluster/internal/create/actions/createworker/files/aws/aws-node_rbac.yaml new file mode 100644 index 0000000000..9ee68c6abe --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/files/aws/aws-node_rbac.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: aws-node + k8s-app: aws-node + name: aws-node-annotate-pod-ip +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: aws-node + name: 
aws-node-annotate-pod-ip +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: aws-node-annotate-pod-ip +subjects: +- kind: ServiceAccount + name: aws-node + namespace: kube-system \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/files/azure/flux2_azurepodidentityexception.yaml b/pkg/cluster/internal/create/actions/createworker/files/azure/flux2_azurepodidentityexception.yaml new file mode 100644 index 0000000000..301d3ab603 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/files/azure/flux2_azurepodidentityexception.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: aadpodidentity.k8s.io/v1 +kind: AzurePodIdentityException +metadata: + name: flux-source-controller + namespace: kube-system +spec: + podLabels: + app: source-controller \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/files/common/calico-metrics.yaml b/pkg/cluster/internal/create/actions/createworker/files/common/calico-metrics.yaml index f348ae26a3..e63c5eef60 100644 --- a/pkg/cluster/internal/create/actions/createworker/files/common/calico-metrics.yaml +++ b/pkg/cluster/internal/create/actions/createworker/files/common/calico-metrics.yaml @@ -10,29 +10,9 @@ metadata: labels: k8s-app: calico-node spec: - clusterIP: None selector: k8s-app: calico-node ports: - name: metrics port: 9191 targetPort: 9191 ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - prometheus.io/port: "9093" - prometheus.io/scrape: "true" - name: calico-typha-metrics - namespace: calico-system - labels: - k8s-app: calico-typha -spec: - clusterIP: None - selector: - k8s-app: calico-typha - ports: - - name: metrics - port: 9093 - targetPort: 9093 diff --git a/pkg/cluster/internal/create/actions/createworker/files/gcp/allow-egress-imds_gnetpol.yaml b/pkg/cluster/internal/create/actions/createworker/files/gcp/allow-egress-imds_gnetpol.yaml index 3d4d38283f..2830b4e9a0 100644 --- a/pkg/cluster/internal/create/actions/createworker/files/gcp/allow-egress-imds_gnetpol.yaml +++ b/pkg/cluster/internal/create/actions/createworker/files/gcp/allow-egress-imds_gnetpol.yaml @@ -12,6 +12,6 @@ spec: protocol: TCP order: 0 namespaceSelector: kubernetes.io/metadata.name in { 'kube-system', 'capg-system' } - selector: app == 'gcp-compute-persistent-disk-csi-driver' || cluster.x-k8s.io/provider == 'infrastructure-gcp' + selector: app in {'gcp-compute-persistent-disk-csi-driver','source-controller'} || cluster.x-k8s.io/provider == 'infrastructure-gcp' || || app.kubernetes.io/name == 'gcp-cloud-controller-manager' types: - Egress diff --git a/pkg/cluster/internal/create/actions/createworker/gcp.go b/pkg/cluster/internal/create/actions/createworker/gcp.go index c0dee11e80..40cfb91cc3 100644 --- a/pkg/cluster/internal/create/actions/createworker/gcp.go +++ b/pkg/cluster/internal/create/actions/createworker/gcp.go @@ -49,6 +49,40 @@ type GCPBuilder struct { csiNamespace string } +var googleCharts = ChartsDictionary{ + Charts: map[string]map[string]map[string]commons.ChartEntry{ + "28": { + "managed": { + "tigera-operator": {Repository: "https://docs.projectcalico.org/charts", Version: "v3.27.3", Namespace: "tigera-operator", Pull: true}, + }, + "unmanaged": { + "cluster-autoscaler": {Repository: "https://kubernetes.github.io/autoscaler", Version: "9.34.1", Namespace: "kube-system", Pull: false}, + "tigera-operator": {Repository: "https://docs.projectcalico.org/charts", Version: "v3.27.3", Namespace: "tigera-operator", Pull: true}, + }, + }, + "29": { + 
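// Note on the gcp/allow-egress-imds_gnetpol.yaml hunk above: the added
// selector contains a doubled operator ("... cluster.x-k8s.io/provider ==
// 'infrastructure-gcp' || || app.kubernetes.io/name == ..."); Calico selector
// syntax does not accept an empty operand, so a single "||" is presumably
// intended there.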
"managed": { + "tigera-operator": {Repository: "https://docs.projectcalico.org/charts", Version: "v3.27.3", Namespace: "tigera-operator", Pull: true}, + }, + "unmanaged": { + "cluster-autoscaler": {Repository: "https://kubernetes.github.io/autoscaler", Version: "9.35.0", Namespace: "kube-system", Pull: false}, + "tigera-operator": {Repository: "https://docs.projectcalico.org/charts", Version: "v3.27.3", Namespace: "tigera-operator", Pull: true}, + }, + }, + "30": { + "managed": { + "tigera-operator": {Repository: "https://docs.projectcalico.org/charts", Version: "v3.27.3", Namespace: "tigera-operator", Pull: true}, + }, + "unmanaged": { + // "default" repository defaults to the descriptor Helm repository + "gcp-cloud-controller-manager": {Repository: "default", Version: "1.30.0", Namespace: "kube-system", Pull: true}, + "cluster-autoscaler": {Repository: "https://kubernetes.github.io/autoscaler", Version: "9.37.0", Namespace: "kube-system", Pull: false}, + "tigera-operator": {Repository: "https://docs.projectcalico.org/charts", Version: "v3.27.3", Namespace: "tigera-operator", Pull: true}, + }, + }, + }, +} + func newGCPBuilder() *GCPBuilder { return &GCPBuilder{} } @@ -96,11 +130,7 @@ func (b *GCPBuilder) setSC(p ProviderParams) { b.scProvisioner = "pd.csi.storage.gke.io" if b.scParameters.Type == "" { - if p.StorageClass.Class == "premium" { - b.scParameters.Type = "pd-ssd" - } else { - b.scParameters.Type = "pd-standard" - } + b.scParameters.Type = "pd-ssd" } if p.StorageClass.EncryptionKey != "" { @@ -108,6 +138,27 @@ func (b *GCPBuilder) setSC(p ProviderParams) { } } +func (b *GCPBuilder) pullProviderCharts(n nodes.Node, clusterConfigSpec *commons.ClusterConfigSpec, keosSpec commons.KeosSpec, clusterCredentials commons.ClusterCredentials, clusterType string) error { + return pullGenericCharts(n, clusterConfigSpec, keosSpec, clusterCredentials, googleCharts, clusterType) +} + +func (b *GCPBuilder) getProviderCharts(clusterConfigSpec *commons.ClusterConfigSpec, keosSpec commons.KeosSpec, clusterType string) map[string]commons.ChartEntry { + return getGenericCharts(clusterConfigSpec, keosSpec, googleCharts, clusterType) +} + +func (b *GCPBuilder) getOverriddenCharts(charts *[]commons.Chart, clusterConfigSpec *commons.ClusterConfigSpec, clusterType string) []commons.Chart { + providerCharts := ConvertToChart(googleCharts.Charts[majorVersion][clusterType]) + for _, ovChart := range clusterConfigSpec.Charts { + for _, chart := range *providerCharts { + if chart.Name == ovChart.Name { + chart.Version = ovChart.Version + } + } + } + *charts = append(*charts, *providerCharts...) 
+ return *charts +} + func (b *GCPBuilder) getProvider() Provider { return Provider{ capxProvider: b.capxProvider, @@ -123,10 +174,46 @@ } func (b *GCPBuilder) installCloudProvider(n nodes.Node, k string, privateParams PrivateParams) error { + var podsCidrBlock string + keosCluster := privateParams.KeosCluster + if keosCluster.Spec.Networks.PodsCidrBlock != "" { + podsCidrBlock = keosCluster.Spec.Networks.PodsCidrBlock + } else { + podsCidrBlock = "192.168.0.0/16" + } + + cloudControllerManagerValuesFile := "/kind/gcp-cloud-controller-manager-helm-values.yaml" + cloudControllerManagerHelmParams := cloudControllerHelmParams{ + ClusterName: privateParams.KeosCluster.Metadata.Name, + Private: privateParams.Private, + KeosRegUrl: privateParams.KeosRegUrl, + PodsCidr: podsCidrBlock, + } + + // Generate the CCM helm values + cloudControllerManagerHelmValues, err := getManifest(b.capxProvider, "gcp-cloud-controller-manager-helm-values.tmpl", majorVersion, cloudControllerManagerHelmParams) + if err != nil { + return errors.Wrap(err, "failed to generate gcp-cloud-controller-manager helm values") + } + c := "echo '" + cloudControllerManagerHelmValues + "' > " + cloudControllerManagerValuesFile + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to create cloud controller manager Helm chart values file") + } + + c = "helm install gcp-cloud-controller-manager /stratio/helm/gcp-cloud-controller-manager" + + " --kubeconfig " + k + + " --namespace kube-system" + + " --values " + cloudControllerManagerValuesFile + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to deploy gcp-cloud-controller-manager Helm Chart") + } + return nil } -func (b *GCPBuilder) installCSI(n nodes.Node, k string, privateParams PrivateParams) error { +func (b *GCPBuilder) installCSI(n nodes.Node, k string, privateParams PrivateParams, providerParams ProviderParams, chartsList map[string]commons.ChartEntry) error { var c string var err error var cmd exec.Cmd @@ -134,12 +221,12 @@ func (b *GCPBuilder) installCSI(n nodes.Node, k string, privateParams PrivatePar // Create CSI secret in CSI namespace secret, _ := b64.StdEncoding.DecodeString(strings.Split(b.capxEnvVars[0], "GCP_B64ENCODED_CREDENTIALS=")[1]) c = "kubectl --kubeconfig " + k + " -n " + b.csiNamespace + " create secret generic cloud-sa --from-literal=cloud-sa.json='" + string(secret) + "'" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create CSI secret in CSI namespace") } - csiManifests, err := getManifest(privateParams.KeosCluster.Spec.InfraProvider, "gcp-compute-persistent-disk-csi-driver.tmpl", privateParams) + csiManifests, err := getManifest(privateParams.KeosCluster.Spec.InfraProvider, "gcp-compute-persistent-disk-csi-driver.tmpl", majorVersion, privateParams) if err != nil { return errors.Wrap(err, "failed to get CSI driver manifests") } @@ -177,13 +264,13 @@ func (b *GCPBuilder) configureStorageClass(n nodes.Node, k string) error { if b.capxManaged { // Remove annotation from default storage class c = "kubectl --kubeconfig " + k + ` get sc -o jsonpath='{.items[?(@.metadata.annotations.storageclass\.kubernetes\.io/is-default-class=="true")].metadata.name}'` - output, err := commons.ExecuteCommand(n, c, 3, 5) + output, err := commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to get default storage 
class") } if strings.TrimSpace(output) != "" && strings.TrimSpace(output) != "No resources found" { c = "kubectl --kubeconfig " + k + " annotate sc " + strings.TrimSpace(output) + " " + defaultScAnnotation + "-" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to remove annotation from default storage class") } @@ -257,7 +344,7 @@ func (b *GCPBuilder) postInstallPhase(n nodes.Node, k string) error { var coreDNSPDBName = "coredns" c := "kubectl --kubeconfig " + kubeconfigPath + " get pdb " + coreDNSPDBName + " -n kube-system" - _, err := commons.ExecuteCommand(n, c, 3, 5) + _, err := commons.ExecuteCommand(n, c, 5, 3) if err != nil { err = installCorednsPdb(n) if err != nil { diff --git a/pkg/cluster/internal/create/actions/createworker/keosinstaller.go b/pkg/cluster/internal/create/actions/createworker/keosinstaller.go index 341937fd92..02daca0c77 100644 --- a/pkg/cluster/internal/create/actions/createworker/keosinstaller.go +++ b/pkg/cluster/internal/create/actions/createworker/keosinstaller.go @@ -45,7 +45,6 @@ type KEOSDescriptor struct { Azure struct { Enabled bool `yaml:"enabled"` AKS bool `yaml:"aks"` - ResourceGroup string `yaml:"resource_group"` } `yaml:"azure,omitempty"` GCP struct { Enabled bool `yaml:"enabled"` @@ -116,7 +115,6 @@ func createKEOSDescriptor(keosCluster commons.KeosCluster, storageClass string) if keosCluster.Spec.InfraProvider == "azure" { keosDescriptor.Azure.Enabled = true keosDescriptor.Azure.AKS = keosCluster.Spec.ControlPlane.Managed - keosDescriptor.Azure.ResourceGroup = keosCluster.Metadata.Name } // GCP diff --git a/pkg/cluster/internal/create/actions/createworker/provider.go b/pkg/cluster/internal/create/actions/createworker/provider.go index 5b59666831..52cd92cecc 100644 --- a/pkg/cluster/internal/create/actions/createworker/provider.go +++ b/pkg/cluster/internal/create/actions/createworker/provider.go @@ -24,6 +24,7 @@ import ( "fmt" "io" "path/filepath" + "reflect" "regexp" "strconv" "time" @@ -47,6 +48,9 @@ var denyAllEgressIMDSgnpFiles embed.FS //go:embed files/*/allow-egress-imds_gnetpol.yaml var allowEgressIMDSgnpFiles embed.FS +//go:embed files/azure/flux2_azurepodidentityexception.yaml +var azureFlux2PodIdentityException string + var stratio_helm_repo string //go:embed files/*/*_pdb.yaml @@ -56,12 +60,10 @@ const ( CAPICoreProvider = "cluster-api" CAPIBootstrapProvider = "kubeadm" CAPIControlPlaneProvider = "kubeadm" - CAPIVersion = "v1.5.3" + CAPIVersion = "v1.7.4" scName = "keos" - certManagerVersion = "v1.12.3" - postInstallAnnotation = "cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes" corednsPdbPath = "/kind/coredns_pdb.yaml" @@ -73,18 +75,26 @@ const ( //go:embed files/common/calico-metrics.yaml var calicoMetrics string +type ChartsDictionary struct { + Charts map[string]map[string]map[string]commons.ChartEntry +} + type PrivateParams struct { KeosCluster commons.KeosCluster KeosRegUrl string Private bool + HelmPrivate bool } type PBuilder interface { setCapx(managed bool) setCapxEnvVars(p ProviderParams) setSC(p ProviderParams) + pullProviderCharts(n nodes.Node, clusterConfigSpec *commons.ClusterConfigSpec, keosSpec commons.KeosSpec, clusterCredentials commons.ClusterCredentials, clusterType string) error + getProviderCharts(clusterConfigSpec *commons.ClusterConfigSpec, keosSpec commons.KeosSpec, clusterType string) map[string]commons.ChartEntry + getOverriddenCharts(charts *[]commons.Chart, clusterConfigSpec *commons.ClusterConfigSpec, 
clusterType string) []commons.Chart installCloudProvider(n nodes.Node, k string, privateParams PrivateParams) error - installCSI(n nodes.Node, k string, privateParams PrivateParams) error + installCSI(n nodes.Node, k string, privateParams PrivateParams, providerParams ProviderParams, chartsList map[string]commons.ChartEntry) error getProvider() Provider configureStorageClass(n nodes.Node, k string) error internalNginx(p ProviderParams, networks commons.Networks) (bool, error) @@ -152,6 +162,34 @@ type calicoHelmParams struct { Annotations map[string]string } +type commonHelmParams struct { + KeosRegUrl string + Private bool +} + +type cloudControllerHelmParams struct { + ClusterName string + Private bool + KeosRegUrl string + PodsCidr string +} + +type fluxHelmRepositoryParams struct { + ChartName string + ChartRepoUrl string + ChartRepoScheme string + Spec commons.KeosSpec + HelmRepoCreds HelmRegistry + RepositoryInterval string +} + +type fluxHelmReleaseParams struct { + ChartName string + ChartNamespace string + ChartRepoRef string + ChartVersion string +} + var scTemplate = DefaultStorageClass{ APIVersion: "storage.k8s.io/v1", Kind: "StorageClass", @@ -168,6 +206,41 @@ var scTemplate = DefaultStorageClass{ VolumeBindingMode: "WaitForFirstConsumer", } +var commonsCharts = ChartsDictionary{ + Charts: map[string]map[string]map[string]commons.ChartEntry{ + "28": { + "managed": { + "cert-manager": {Repository: "https://charts.jetstack.io", Version: "v1.14.5", Namespace: "cert-manager", Pull: true}, + "flux2": {Repository: "https://fluxcd-community.github.io/helm-charts", Version: "2.12.2", Namespace: "kube-system", Pull: true}, + }, + "unmanaged": { + "cert-manager": {Repository: "https://charts.jetstack.io", Version: "v1.14.5", Namespace: "cert-manager", Pull: true}, + "flux2": {Repository: "https://fluxcd-community.github.io/helm-charts", Version: "2.12.2", Namespace: "kube-system", Pull: true}, + }, + }, + "29": { + "managed": { + "cert-manager": {Repository: "https://charts.jetstack.io", Version: "v1.14.5", Namespace: "cert-manager", Pull: true}, + "flux2": {Repository: "https://fluxcd-community.github.io/helm-charts", Version: "2.12.2", Namespace: "kube-system", Pull: true}, + }, + "unmanaged": { + "cert-manager": {Repository: "https://charts.jetstack.io", Version: "v1.14.5", Namespace: "cert-manager", Pull: true}, + "flux2": {Repository: "https://fluxcd-community.github.io/helm-charts", Version: "2.12.2", Namespace: "kube-system", Pull: true}, + }, + }, + "30": { + "managed": { + "cert-manager": {Repository: "https://charts.jetstack.io", Version: "v1.14.5", Namespace: "cert-manager", Pull: true}, + "flux2": {Repository: "https://fluxcd-community.github.io/helm-charts", Version: "2.12.2", Namespace: "kube-system", Pull: true}, + }, + "unmanaged": { + "cert-manager": {Repository: "https://charts.jetstack.io", Version: "v1.14.5", Namespace: "cert-manager", Pull: true}, + "flux2": {Repository: "https://fluxcd-community.github.io/helm-charts", Version: "2.12.2", Namespace: "kube-system", Pull: true}, + }, + }, + }, +} + func getBuilder(builderType string) PBuilder { if builderType == "aws" { return newAWSBuilder() @@ -196,12 +269,103 @@ func (i *Infra) buildProvider(p ProviderParams) Provider { return i.builder.getProvider() } +func (i *Infra) getProviderCharts(clusterConfigSpec *commons.ClusterConfigSpec, keosSpec commons.KeosSpec) map[string]commons.ChartEntry { + clusterType := "managed" + if !keosSpec.ControlPlane.Managed { + clusterType = "unmanaged" + } + + commonsChartsList := 
getGenericCharts(clusterConfigSpec, keosSpec, commonsCharts, clusterType) + + providerChartsList := i.builder.getProviderCharts(clusterConfigSpec, keosSpec, clusterType) + + completedChartsList := make(map[string]commons.ChartEntry) + for key, value := range commonsChartsList { + completedChartsList[key] = value + } + for key, value := range providerChartsList { + completedChartsList[key] = value + } + + return completedChartsList +} + +func (i *Infra) pullProviderCharts(n nodes.Node, clusterConfigSpec *commons.ClusterConfigSpec, keosSpec commons.KeosSpec, clusterCredentials commons.ClusterCredentials) error { + clusterType := "managed" + if !keosSpec.ControlPlane.Managed { + clusterType = "unmanaged" + } + + if err := pullGenericCharts(n, clusterConfigSpec, keosSpec, clusterCredentials, commonsCharts, clusterType); err != nil { + return err + } + + if err := i.builder.pullProviderCharts(n, clusterConfigSpec, keosSpec, clusterCredentials, clusterType); err != nil { + return err + } + clusterConfigSpec.Charts = i.getOverriddenCharts(clusterConfigSpec, clusterType) + return nil + +} + +func (i *Infra) getOverriddenCharts(clusterConfigSpec *commons.ClusterConfigSpec, clusterType string) []commons.Chart { + charts := ConvertToChart(commonsCharts.Charts[majorVersion][clusterType]) + for _, ovChart := range clusterConfigSpec.Charts { + for _, chart := range *charts { + if chart.Name == ovChart.Name { + chart.Version = ovChart.Version + } + } + } + return i.builder.getOverriddenCharts(charts, clusterConfigSpec, clusterType) +} + +func ConvertToChart(chartEntries map[string]commons.ChartEntry) *[]commons.Chart { + var charts []commons.Chart + for name, entry := range chartEntries { + if entry.Pull { + chart := commons.Chart{ + Name: name, + Version: entry.Version, + } + charts = append(charts, chart) + } + + } + return &charts +} + +func getGenericCharts(clusterConfigSpec *commons.ClusterConfigSpec, keosSpec commons.KeosSpec, chartDictionary ChartsDictionary, clusterType string) map[string]commons.ChartEntry { + chartsToInstall := chartDictionary.Charts[majorVersion][clusterType] + + for _, overrideChart := range clusterConfigSpec.Charts { + chart := chartsToInstall[overrideChart.Name] + if !reflect.DeepEqual(chart, commons.ChartEntry{}) { + + chart.Version = overrideChart.Version + chartsToInstall[overrideChart.Name] = chart + } + } + if clusterConfigSpec.PrivateHelmRepo { + for name, entry := range chartsToInstall { + entry.Repository = keosSpec.HelmRepository.URL + chartsToInstall[name] = entry + } + } + return chartsToInstall +} + +func pullGenericCharts(n nodes.Node, clusterConfigSpec *commons.ClusterConfigSpec, keosSpec commons.KeosSpec, clusterCredentials commons.ClusterCredentials, chartDictionary ChartsDictionary, clusterType string) error { + chartsToInstall := getGenericCharts(clusterConfigSpec, keosSpec, chartDictionary, clusterType) + return pullCharts(n, chartsToInstall, keosSpec, clusterCredentials) +} + func (i *Infra) installCloudProvider(n nodes.Node, k string, privateParams PrivateParams) error { return i.builder.installCloudProvider(n, k, privateParams) } -func (i *Infra) installCSI(n nodes.Node, k string, privateParams PrivateParams) error { - return i.builder.installCSI(n, k, privateParams) +func (i *Infra) installCSI(n nodes.Node, k string, privateParams PrivateParams, providerParams ProviderParams, chartsList map[string]commons.ChartEntry) error { + return i.builder.installCSI(n, k, privateParams, providerParams, chartsList) } func (i *Infra) configureStorageClass(n 
nodes.Node, k string) error { @@ -268,44 +432,37 @@ func getcapxPDB(commonsPDBLocalPath string) (string, error) { return string(capaPDBContent), nil } -func (p *Provider) deployCertManager(n nodes.Node, keosRegistryUrl string, kubeconfigPath string) error { - c := "kubectl create -f " + CAPILocalRepository + "/cert-manager/" + certManagerVersion + "/cert-manager.crds.yaml" - if kubeconfigPath != "" { - c += " --kubeconfig " + kubeconfigPath +func (p *Provider) deployCertManager(n nodes.Node, keosRegistryUrl string, kubeconfigPath string, privateParams PrivateParams, chartsList map[string]commons.ChartEntry) error { + certManagerValuesFile := "/kind/cert-manager-helm-values.yaml" + certManagerHelmParams := commonHelmParams{ + KeosRegUrl: keosRegistryUrl, + Private: privateParams.Private, } - _, err := commons.ExecuteCommand(n, c, 3, 5) + certManagerHelmValues, err := getManifest("common", "cert-manager-helm-values.tmpl", majorVersion, certManagerHelmParams) if err != nil { - return errors.Wrap(err, "failed to create cert-manager crds") + return errors.Wrap(err, "failed to generate cert-manager helm values") } - c = "kubectl create ns cert-manager" - if kubeconfigPath != "" { - c += " --kubeconfig " + kubeconfigPath - } - _, err = commons.ExecuteCommand(n, c, 3, 5) + c := "echo '" + certManagerHelmValues + "' > " + certManagerValuesFile + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { - return errors.Wrap(err, "failed to create cert-manager namespace") + return errors.Wrap(err, "failed to create cert-manager Helm chart values file") } - c = "helm install --wait cert-manager /stratio/helm/cert-manager" + - " --set namespace=cert-manager" + - " --set cainjector.image.repository=" + keosRegistryUrl + "/jetstack/cert-manager-cainjector" + - " --set webhook.image.repository=" + keosRegistryUrl + "/jetstack/cert-manager-webhook" + - " --set acmesolver.image.repository=" + keosRegistryUrl + "/jetstack/cert-manager-acmesolver" + - " --set startupapicheck.image.repository=" + keosRegistryUrl + "/jetstack/cert-manager-ctl" + - " --set image.repository=" + keosRegistryUrl + "/jetstack/cert-manager-controller" - + " --namespace=cert-manager" + + " --create-namespace" + + " --values " + certManagerValuesFile if kubeconfigPath != "" { - c += " --kubeconfig " + kubeconfigPath + c = c + " --kubeconfig " + kubeconfigPath } - - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to deploy cert-manager Helm Chart") } return nil } + func (p *Provider) deployClusterOperator(n nodes.Node, privateParams PrivateParams, clusterCredentials commons.ClusterCredentials, keosRegistry KeosRegistry, clusterConfig *commons.ClusterConfig, kubeconfigPath string, firstInstallation bool, helmRepoCreds HelmRegistry) error { var c string var err error @@ -331,19 +488,19 @@ func (p *Provider) deployClusterOperator(n nodes.Node, privateParams PrivatePara if firstInstallation && keosCluster.Spec.InfraProvider == "aws" && strings.HasPrefix(keosCluster.Spec.HelmRepository.URL, "s3://") { c = "mkdir -p ~/.aws" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create aws config file") } c = "echo [default] > ~/.aws/config && " + "echo region = " + keosCluster.Spec.Region + " >> ~/.aws/config" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create aws 
config file") } awsCredentials := "[default]\naws_access_key_id = " + clusterCredentials.ProviderCredentials["AccessKey"] + "\naws_secret_access_key = " + clusterCredentials.ProviderCredentials["SecretKey"] + "\n" c = "echo '" + awsCredentials + "' > ~/.aws/credentials" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create aws credentials file") } @@ -373,7 +530,7 @@ func (p *Provider) deployClusterOperator(n nodes.Node, privateParams PrivatePara } // Write keoscluster file c = "echo '" + string(clusterConfigYAML) + "' > " + manifestsPath + "/clusterconfig.yaml" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to write the keoscluster file") } @@ -385,103 +542,127 @@ func (p *Provider) deployClusterOperator(n nodes.Node, privateParams PrivatePara } // Write keoscluster file c = "echo '" + string(keosClusterYAML) + "' > " + manifestsPath + "/keoscluster.yaml" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to write the keoscluster file") } // Add helm repository helmRepository.url = keosCluster.Spec.HelmRepository.URL - if strings.HasPrefix(keosCluster.Spec.HelmRepository.URL, "oci://") { + if strings.HasPrefix(helmRepository.url, "oci://") { stratio_helm_repo = helmRepoCreds.URL - urlLogin := strings.Split(strings.Split(keosCluster.Spec.HelmRepository.URL, "//")[1], "/")[0] - - c = "helm registry login " + urlLogin + " --username " + helmRepoCreds.User + " --password " + helmRepoCreds.Pass - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to add and authenticate to helm repository: "+helmRepoCreds.URL) - } - } else if keosCluster.Spec.HelmRepository.AuthRequired { - helmRepository.user = clusterCredentials.HelmRepositoryCredentials["User"] - helmRepository.pass = clusterCredentials.HelmRepositoryCredentials["Pass"] - stratio_helm_repo = "stratio-helm-repo" - c = "helm repo add " + stratio_helm_repo + " " + helmRepoCreds.URL + " --username " + helmRepoCreds.User + " --password " + helmRepoCreds.Pass - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to add and authenticate to helm repository: "+helmRepository.url) - } } else { stratio_helm_repo = "stratio-helm-repo" - c = "helm repo add " + stratio_helm_repo + " " + helmRepoCreds.URL - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to add helm repository: "+helmRepoCreds.URL) - } } if firstInstallation { // Pull cluster-operator helm chart c = "helm pull " + stratio_helm_repo + "/cluster-operator --version " + chartVersion + " --untar --untardir /stratio/helm" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to pull cluster-operator helm chart") } } - } - - // Create the docker registries credentials secret for keoscluster-controller-manager - if clusterCredentials.DockerRegistriesCredentials != nil && firstInstallation { - jsonDockerRegistriesCredentials, err := json.Marshal(clusterCredentials.DockerRegistriesCredentials) - if err != nil { - return errors.Wrap(err, "failed to marshal docker registries credentials") + // Create the docker registries credentials secret for keoscluster-controller-manager + if 
clusterCredentials.DockerRegistriesCredentials != nil && firstInstallation { + jsonDockerRegistriesCredentials, err := json.Marshal(clusterCredentials.DockerRegistriesCredentials) + if err != nil { + return errors.Wrap(err, "failed to marshal docker registries credentials") + } + c = "kubectl -n kube-system create secret generic keoscluster-registries --from-literal=credentials='" + string(jsonDockerRegistriesCredentials) + "'" + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to create keoscluster-registries secret") + } + } + // Deploy cluster-operator chart + c = "helm install --wait cluster-operator /stratio/helm/cluster-operator" + + " --namespace kube-system" + + " --set provider=" + keosCluster.Spec.InfraProvider + + " --set app.containers.controllerManager.image.registry=" + keosRegistry.url + + " --set app.containers.controllerManager.image.repository=stratio/cluster-operator" + + " --set app.containers.controllerManager.imagePullSecrets.enabled=true" + if clusterOperatorImage != "" { + c += " --set app.containers.controllerManager.image.tag=" + clusterOperatorImage } - c = "kubectl -n kube-system create secret generic keoscluster-registries --from-literal=credentials='" + string(jsonDockerRegistriesCredentials) + "'" - if kubeconfigPath != "" { - c = c + " --kubeconfig " + kubeconfigPath + if privateParams.Private { + c += " --set app.containers.kubeRbacProxy.image=" + keosRegistry.url + "/stratio/kube-rbac-proxy:v0.13.1" } - _, err = commons.ExecuteCommand(n, c, 3, 5) + if keosCluster.Spec.InfraProvider == "azure" { + c += " --set secrets.azure.clientIDBase64=" + strings.Split(p.capxEnvVars[1], "AZURE_CLIENT_ID_B64=")[1] + + " --set secrets.azure.clientSecretBase64=" + strings.Split(p.capxEnvVars[0], "AZURE_CLIENT_SECRET_B64=")[1] + + " --set secrets.azure.subscriptionIDBase64=" + strings.Split(p.capxEnvVars[2], "AZURE_SUBSCRIPTION_ID_B64=")[1] + + " --set secrets.azure.tenantIDBase64=" + strings.Split(p.capxEnvVars[3], "AZURE_TENANT_ID_B64=")[1] + } else if keosCluster.Spec.InfraProvider == "gcp" { + c += " --set secrets.common.credentialsBase64=" + strings.Split(p.capxEnvVars[0], "GCP_B64ENCODED_CREDENTIALS=")[1] + } else if keosCluster.Spec.InfraProvider == "aws" { + c += " --set secrets.common.credentialsBase64=" + strings.Split(p.capxEnvVars[3], "AWS_B64ENCODED_CREDENTIALS=")[1] + } + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { - return errors.Wrap(err, "failed to create keoscluster-registries secret") + return errors.Wrap(err, "failed to deploy cluster-operator chart") } - } - - // Deploy cluster-operator chart - c = "helm install --wait cluster-operator /stratio/helm/cluster-operator" + - " --namespace kube-system" + - " --set provider=" + keosCluster.Spec.InfraProvider + - " --set app.containers.controllerManager.image.registry=" + keosRegistry.url + - " --set app.containers.controllerManager.image.repository=stratio/cluster-operator" - if clusterOperatorImage != "" { - c += " --set app.containers.controllerManager.image.tag=" + clusterOperatorImage - } - if privateParams.Private { - c += " --set app.containers.kubeRbacProxy.image=" + keosRegistry.url + "/stratio/kube-rbac-proxy:v0.13.1" - } - if keosCluster.Spec.InfraProvider == "azure" { - c += " --set secrets.azure.clientIDBase64=" + strings.Split(p.capxEnvVars[1], "AZURE_CLIENT_ID_B64=")[1] + - " --set secrets.azure.clientSecretBase64=" + strings.Split(p.capxEnvVars[0], "AZURE_CLIENT_SECRET_B64=")[1] + - " --set secrets.azure.subscriptionIDBase64=" + 
strings.Split(p.capxEnvVars[2], "AZURE_SUBSCRIPTION_ID_B64=")[1] + - " --set secrets.azure.tenantIDBase64=" + strings.Split(p.capxEnvVars[3], "AZURE_TENANT_ID_B64=")[1] - } else if keosCluster.Spec.InfraProvider == "gcp" { - c += " --set secrets.common.credentialsBase64=" + strings.Split(p.capxEnvVars[0], "GCP_B64ENCODED_CREDENTIALS=")[1] - } else if keosCluster.Spec.InfraProvider == "aws" { - c += " --set secrets.common.credentialsBase64=" + strings.Split(p.capxEnvVars[3], "AWS_B64ENCODED_CREDENTIALS=")[1] - } - if kubeconfigPath == "" { - c += " --set app.containers.controllerManager.imagePullSecrets.enabled=true" + - " --set app.containers.controllerManager.imagePullSecrets.name=regcred" } else { - c += " --set app.replicas=2" + " --kubeconfig " + kubeconfigPath - } + helmValuesClusterOperatorFile := "/kind/cluster-operator-helm-values.yaml" + c = "helm get values cluster-operator" + + " --namespace kube-system --all > " + + helmValuesClusterOperatorFile + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to create cluster-operator helm values file") + } - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to deploy cluster-operator chart") + // Read the YAML file + c = "cat " + helmValuesClusterOperatorFile + helmValuesClusterOperatorData, err := commons.ExecuteCommand(n, c, 5, 3) + if err != nil || helmValuesClusterOperatorData == "" { + return errors.Wrap(err, "failed to read HelmRelease values file") + } + // Unmarshal YAML data into a map + var helmReleaseValues map[string]interface{} + if err := yaml.Unmarshal([]byte(helmValuesClusterOperatorData), &helmReleaseValues); err != nil { + return errors.Wrap(err, "failed to unmarshal HelmRelease values file") + } + + // Convert app field to map[string]interface{} + appValues, _ := helmReleaseValues["app"].(map[string]interface{}) + + // Update the app.replicas + appValues["replicas"] = 2 + // Update the app.containers.controllerManager.imagePullSecrets.enabled + nested := appValues["containers"].(map[string]interface{}) + nested = nested["controllerManager"].(map[string]interface{}) + nested = nested["imagePullSecrets"].(map[string]interface{}) + nested["enabled"] = false + // Update the 'app' field in the original map + helmReleaseValues["app"] = appValues + // Marshal the updated data back to YAML + updatedHelmValuesClusterOperatorData, err := yaml.Marshal(&helmReleaseValues) + if err != nil { + return errors.Wrap(err, "failed to marshal updated HelmRelease values content") + } + // Write the updated YAML data back to the file + c = "echo '" + string(updatedHelmValuesClusterOperatorData) + "' > " + helmValuesClusterOperatorFile + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to write updated HelmRelease values file") + } + + clusterOperatorHelmReleaseParams := fluxHelmReleaseParams{ + ChartName: "cluster-operator", + ChartNamespace: "kube-system", + ChartRepoRef: "keos", + ChartVersion: chartVersion, + } + // Create Helm release using the fluxHelmReleaseParams + if err := configureHelmRelease(n, kubeconfigPath, "flux2_helmrelease.tmpl", clusterOperatorHelmReleaseParams, privateParams.KeosCluster.Spec.HelmRepository); err != nil { + return err + } } // Wait for cluster-operator deployment c = "kubectl -n kube-system rollout status deploy/keoscluster-controller-manager --timeout=3m" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return 
errors.Wrap(err, "failed to wait for cluster-operator deployment") } @@ -492,13 +673,13 @@ func (p *Provider) deployClusterOperator(n nodes.Node, privateParams PrivatePara return nil } -func installCalico(n nodes.Node, k string, privateParams PrivateParams, allowCommonEgressNetPolPath string, isNetPolEngine bool) error { +func installCalico(n nodes.Node, k string, privateParams PrivateParams, isNetPolEngine bool, dryRun bool) error { var c string var cmd exec.Cmd var err error keosCluster := privateParams.KeosCluster - calicoTemplate := "/kind/calico-helm-values.yaml" + calicoTemplate := "/kind/tigera-operator-helm-values.yaml" calicoHelmParams := calicoHelmParams{ Spec: keosCluster.Spec, @@ -509,56 +690,344 @@ func installCalico(n nodes.Node, k string, privateParams PrivateParams, allowCom postInstallAnnotation: "var-lib-calico", }, } + // Generate the calico helm values - calicoHelmValues, err := getManifest("common", "calico-helm-values.tmpl", calicoHelmParams) + calicoHelmValues, err := getManifest("common", "tigera-operator-helm-values.tmpl", majorVersion, calicoHelmParams) if err != nil { return errors.Wrap(err, "failed to generate calico helm values") } c = "echo '" + calicoHelmValues + "' > " + calicoTemplate - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create Calico Helm chart values file") } - c = "helm install calico /stratio/helm/tigera-operator" + + if !dryRun { + c = "helm install tigera-operator /stratio/helm/tigera-operator" + + " --kubeconfig " + k + + " --namespace tigera-operator" + + " --create-namespace" + + " --values " + calicoTemplate + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to deploy Calico Helm Chart") + } + + // Wait for calico-system namespace to be created + c = "kubectl --kubeconfig " + kubeconfigPath + " get ns calico-system" + _, err = commons.ExecuteCommand(n, c, 20, 5) + if err != nil { + return errors.Wrap(err, "failed to wait for calico-system namespace") + } + + // Create calico metrics services + cmd = n.Command("kubectl", "--kubeconfig", k, "apply", "-f", "-") + if err = cmd.SetStdin(strings.NewReader(calicoMetrics)).Run(); err != nil { + return errors.Wrap(err, "failed to create calico metrics services") + } + } + return nil +} + +func deployClusterAutoscaler(n nodes.Node, chartsList map[string]commons.ChartEntry, privateParams PrivateParams, capiClustersNamespace string, moveManagement bool) error { + helmValuesCAFile := "/kind/cluster-autoscaler-helm-values.yaml" + clusterAutoscalerEntry := chartsList["cluster-autoscaler"] + clusterAutoscalerHelmReleaseParams := fluxHelmReleaseParams{ + ChartRepoRef: "keos", + ChartName: "cluster-autoscaler", + ChartNamespace: clusterAutoscalerEntry.Namespace, + ChartVersion: clusterAutoscalerEntry.Version, + } + if !privateParams.HelmPrivate { + clusterAutoscalerHelmReleaseParams.ChartRepoRef = "cluster-autoscaler" + } + + helmValuesCA, err := getManifest("common", "cluster-autoscaler-helm-values.tmpl", majorVersion, privateParams) + if err != nil { + return errors.Wrap(err, "failed to get CA helm values") + } + c := "echo '" + helmValuesCA + "' > " + helmValuesCAFile + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to create CA helm values file") + } + // Create Helm release using the fluxHelmReleaseParams + if err := configureHelmRelease(n, kubeconfigPath, "flux2_helmrelease.tmpl", 
clusterAutoscalerHelmReleaseParams, privateParams.KeosCluster.Spec.HelmRepository); err != nil { + return err + } + if !moveManagement { + autoscalerRBACPath := "/kind/autoscaler_rbac.yaml" + + autoscalerRBAC, err := getManifest("common", "autoscaler_rbac.tmpl", "", privateParams.KeosCluster) + if err != nil { + return errors.Wrap(err, "failed to get CA RBAC file") + } + + c = "echo '" + autoscalerRBAC + "' > " + autoscalerRBACPath + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to create CA RBAC file") + } + + // Create namespace for CAPI clusters (it must exist) in worker cluster + c = "kubectl --kubeconfig " + kubeconfigPath + " create ns " + capiClustersNamespace + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to create manifests Namespace") + } + + c = "kubectl --kubeconfig " + kubeconfigPath + " apply -f " + autoscalerRBACPath + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to apply CA RBAC") + } + } + return nil +} + +func configureFlux(n nodes.Node, k string, privateParams PrivateParams, helmRepoCreds HelmRegistry, keosClusterSpec commons.KeosSpec, chartsList map[string]commons.ChartEntry) error { + var c string + var err error + + fluxTemplate := "/kind/flux2-helm-values.yaml" + keosChartRepoScheme := "default" + chartRepoScheme := "default" + + fluxHelmParams := commonHelmParams{ + KeosRegUrl: privateParams.KeosRegUrl, + Private: privateParams.Private, + } + + // Make flux work after capz-nmi deployment in Azure + if keosClusterSpec.InfraProvider == "azure" { + azureFlux2PodIdentityExceptionPath := "/kind/flux2_azurepodidentityexception.yaml" + c := "echo \"" + azureFlux2PodIdentityException + "\" > " + azureFlux2PodIdentityExceptionPath + _, err := commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to write the flux2 azure pod identity exception") + } + // Apply the AzurePodIdentityException + c = "kubectl --kubeconfig " + k + " apply -f " + azureFlux2PodIdentityExceptionPath + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to deploy Flux2 azure pod identity exception") + } + } + + // Generate the flux helm values + fluxHelmValues, err := getManifest("common", "flux2-helm-values.tmpl", majorVersion, fluxHelmParams) + + if err != nil { + return errors.Wrap(err, "failed to generate flux helm values") + } + + c = "echo '" + fluxHelmValues + "' > " + fluxTemplate + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to create Flux Helm chart values file") + } + + c = "helm install flux2 /stratio/helm/flux2" + " --kubeconfig " + k + - " --namespace tigera-operator" + + " --namespace kube-system" + " --create-namespace" + - " --values " + calicoTemplate - _, err = commons.ExecuteCommand(n, c, 3, 5) + " --values " + fluxTemplate + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { - return errors.Wrap(err, "failed to deploy Calico Helm Chart") + return errors.Wrap(err, "failed to deploy Flux Helm Chart") + } + + // Set repository scheme for private case + if strings.HasPrefix(helmRepoCreds.URL, "oci://") { + keosChartRepoScheme = "oci" } - // Allow egress in tigera-operator namespace - c = "kubectl --kubeconfig " + kubeconfigPath + " -n tigera-operator apply -f " + allowCommonEgressNetPolPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + var helmRepositoryInterval = "10m" + + // Create fluxHelmRepositoryParams for 
the private case + fluxHelmRepositoryParams := fluxHelmRepositoryParams{ + ChartName: "keos", + ChartRepoUrl: helmRepoCreds.URL, + ChartRepoScheme: keosChartRepoScheme, + Spec: keosClusterSpec, + HelmRepoCreds: helmRepoCreds, + RepositoryInterval: helmRepositoryInterval, + } + + if fluxHelmRepositoryParams.ChartName == "keos" && keosClusterSpec.HelmRepository.RepositoryInterval != "" { + fluxHelmRepositoryParams.RepositoryInterval = keosClusterSpec.HelmRepository.RepositoryInterval + } + + // Create Helm repository using the fluxHelmRepositoryParams + if err := configureHelmRepository(n, k, "flux2_helmrepository.tmpl", fluxHelmRepositoryParams); err != nil { + return err + } + + // Update fluxHelmRepositoryParams if not private + if !privateParams.HelmPrivate { + // Iterate through charts and create Helm repositories and releases + for name, entry := range chartsList { + if entry.Repository != "default" { + // Set repository scheme if it's oci + if strings.HasPrefix(entry.Repository, "oci://") { + chartRepoScheme = "oci" + } + + // Update fluxHelmRepositoryParams if not private + fluxHelmRepositoryParams.ChartName = name + fluxHelmRepositoryParams.ChartRepoScheme = chartRepoScheme + fluxHelmRepositoryParams.ChartRepoUrl = entry.Repository + + // Create Helm repository using the fluxHelmRepositoryParams + if err := configureHelmRepository(n, k, "flux2_helmrepository.tmpl", fluxHelmRepositoryParams); err != nil { + return err + } + } + } + } + return nil +} + +func reconcileCharts(n nodes.Node, k string, privateParams PrivateParams, keosClusterSpec commons.KeosSpec, chartsList map[string]commons.ChartEntry) error { + // Iterate through charts and create Helm repositories and releases + for name, entry := range chartsList { + // Create fluxHelmReleaseParams for the current entry + fluxHelmReleaseParams := fluxHelmReleaseParams { + ChartRepoRef: "keos", + } + // Update fluxHelmRepositoryParams if not private + if !privateParams.HelmPrivate && entry.Repository != "default" { + fluxHelmReleaseParams.ChartRepoRef = name + } + + fluxAdoptedCharts := regexp.MustCompile(`^(tigera-operator|.+-cloud-controller-manager|cloud-provider-azure|flux2|cert-manager)$`) + + // Adopt helm charts already deployed: tigera-operator and cloud-provider + if fluxAdoptedCharts.MatchString(name) { + fluxHelmReleaseParams.ChartName = name + fluxHelmReleaseParams.ChartNamespace = entry.Namespace + fluxHelmReleaseParams.ChartVersion = entry.Version + // tigera-operator-helm-values.yaml is required to install Calico as Network Policy engine + + if err := configureHelmRelease(n, k, "flux2_helmrelease.tmpl", fluxHelmReleaseParams, keosClusterSpec.HelmRepository); err != nil { + return err + } + } + } + return nil +} + +func configureHelmRepository(n nodes.Node, k string, templatePath string, params fluxHelmRepositoryParams) error { + // Generate HelmRepository manifest + fluxHelmRepository, err := getManifest("common", templatePath, majorVersion, params) + if err != nil { + return errors.Wrap(err, "failed to generate "+params.ChartName+" HelmRepository") + } + + // Write HelmRepository manifest to file + fluxHelmRepositoryTemplate := "/kind/" + params.ChartName + "_helmrepository.yaml" + c := "echo '" + fluxHelmRepository + "' > " + fluxHelmRepositoryTemplate + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to create "+params.ChartName+" Flux HelmRepository file") + } + + // Apply HelmRepository + c = "kubectl --kubeconfig " + k + " apply -f " + fluxHelmRepositoryTemplate + _, 
err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to deploy "+params.ChartName+" Flux HelmRepository") + } + return nil +} + +func configureHelmRelease(n nodes.Node, k string, templatePath string, params fluxHelmReleaseParams, helmRepository commons.HelmRepository) error { + valuesFile := "/kind/" + params.ChartName + "-helm-values.yaml" + + // Create default HelmRelease configmap + c := "kubectl --kubeconfig " + kubeconfigPath + " " + + "-n " + params.ChartNamespace + " create configmap " + + "00-" + params.ChartName + "-helm-chart-default-values " + + "--from-file=values.yaml=" + valuesFile + _, err := commons.ExecuteCommand(n, c, 5, 3) if err != nil { - return errors.Wrap(err, "failed to apply tigera-operator egress NetworkPolicy") + return errors.Wrap(err, "failed to deploy "+params.ChartName+" HelmRelease default configuration map") } - // Wait for calico-system namespace to be created - c = "kubectl --kubeconfig " + kubeconfigPath + " get ns calico-system" - _, err = commons.ExecuteCommand(n, c, 3, 30) + // Create override HelmRelease configmap + c = "kubectl --kubeconfig " + kubeconfigPath + " " + + "-n " + params.ChartNamespace + " create configmap " + + "01-" + params.ChartName + "-helm-chart-override-values " + + "--from-literal=values.yaml=\"\"" + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { - return errors.Wrap(err, "failed to wait for calico-system namespace") + return errors.Wrap(err, "failed to deploy "+params.ChartName+" HelmRelease override configmap") } - // Allow egress in calico-system namespace - c = "kubectl --kubeconfig " + kubeconfigPath + " -n calico-system apply -f " + allowCommonEgressNetPolPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + var defaultHelmReleaseInterval = "1m" + var defaultHelmReleaseRetries = 3 + var defaultHelmReleaseSourceInterval = "1m" + + completedfluxHelmReleaseParams := struct { + ChartName string + ChartNamespace string + ChartRepoRef string + ChartVersion string + HelmReleaseInterval string + HelmReleaseRetries int + HelmReleaseSourceInterval string + }{ + ChartName: params.ChartName, + ChartNamespace: params.ChartNamespace, + ChartRepoRef: params.ChartRepoRef, + ChartVersion: params.ChartVersion, + HelmReleaseInterval: defaultHelmReleaseInterval, + HelmReleaseRetries: defaultHelmReleaseRetries, + HelmReleaseSourceInterval: defaultHelmReleaseSourceInterval, + } + + if completedfluxHelmReleaseParams.ChartRepoRef == "keos" { + if helmRepository.ReleaseInterval != "" { + completedfluxHelmReleaseParams.HelmReleaseInterval = helmRepository.ReleaseInterval + } + if helmRepository.ReleaseSourceInterval != "" { + completedfluxHelmReleaseParams.HelmReleaseSourceInterval = helmRepository.ReleaseSourceInterval + } + if helmRepository.ReleaseRetries != nil { + completedfluxHelmReleaseParams.HelmReleaseRetries = *helmRepository.ReleaseRetries + } + } + + // Generate HelmRelease manifest + fluxHelmHelmRelease, err := getManifest("common", templatePath, majorVersion, completedfluxHelmReleaseParams) if err != nil { - return errors.Wrap(err, "failed to apply calico-system egress NetworkPolicy") + return errors.Wrap(err, "failed to generate "+params.ChartName+" HelmHelmRelease") } - // Create calico metrics services - cmd = n.Command("kubectl", "--kubeconfig", k, "apply", "-f", "-") - if err = cmd.SetStdin(strings.NewReader(calicoMetrics)).Run(); err != nil { - return errors.Wrap(err, "failed to create calico metrics services") + // Write HelmHelmRelease manifest to file + 
fluxHelmHelmReleaseTemplate := "/kind/" + params.ChartName + "_helmrelease.yaml" + c = "echo '" + fluxHelmHelmRelease + "' > " + fluxHelmHelmReleaseTemplate + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to create "+params.ChartName+" Flux HelmHelmRelease file") } + // Apply HelmHelmRelease + c = "kubectl --kubeconfig " + k + " apply -f " + fluxHelmHelmReleaseTemplate + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to deploy "+params.ChartName+" Flux HelmHelmRelease") + } + // Wait for HelmRelease to become ready + c = "kubectl --kubeconfig " + kubeconfigPath + " " + + "-n " + params.ChartNamespace + " wait helmrelease/" + params.ChartName + + " --for=condition=ready --timeout=5m" + _, err = commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to wait for "+params.ChartName+" HelmRelease to become ready") + } return nil } @@ -575,34 +1044,34 @@ func customCoreDNS(n nodes.Node, keosCluster commons.KeosCluster) error { coreDNSSuffix = "-aks" } - coreDNSConfigmap, err := getManifest(keosCluster.Spec.InfraProvider, "coredns-patch_configmap"+coreDNSSuffix+".tmpl", keosCluster.Spec) + coreDNSConfigmap, err := getManifest(keosCluster.Spec.InfraProvider, "coredns_configmap"+coreDNSSuffix+".tmpl", "", keosCluster.Spec) if err != nil { return errors.Wrap(err, "failed to get CoreDNS file") } c = "echo '" + coreDNSConfigmap + "' > " + coreDNSTemplate - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create CoreDNS configmap file") } // Patch configmap c = "kubectl --kubeconfig " + kubeconfigPath + " -n kube-system patch cm " + coreDNSPatchFile + " --patch-file " + coreDNSTemplate - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to customize coreDNS patching ConfigMap") } // Rollout restart to catch the made changes c = "kubectl --kubeconfig " + kubeconfigPath + " -n kube-system rollout restart deploy coredns" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to redeploy coreDNS") } // Wait until CoreDNS completely rollout c = "kubectl --kubeconfig " + kubeconfigPath + " -n kube-system rollout status deploy coredns --timeout=3m" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to wait for the customatization of CoreDNS configmap") } @@ -611,7 +1080,7 @@ func customCoreDNS(n nodes.Node, keosCluster commons.KeosCluster) error { } // installCAPXWorker installs CAPX in the worker cluster -func (p *Provider) installCAPXWorker(n nodes.Node, keosCluster commons.KeosCluster, kubeconfigPath string, allowAllEgressNetPolPath string) error { +func (p *Provider) installCAPXWorker(n nodes.Node, keosCluster commons.KeosCluster, kubeconfigPath string) error { var c string var err error @@ -620,7 +1089,7 @@ func (p *Provider) installCAPXWorker(n nodes.Node, keosCluster commons.KeosClust if p.capxProvider == "azure" { // Create capx namespace c = "kubectl --kubeconfig " + kubeconfigPath + " create namespace " + p.capxName + "-system" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create CAPx namespace") } @@ -632,7 +1101,7 @@ func (p 
*Provider) installCAPXWorker(n nodes.Node, keosCluster commons.KeosClust c := fmt.Sprintf( "kubectl --kubeconfig %s -n %s create secret generic cluster-identity-secret --from-literal=clientSecret='%s'", kubeconfigPath, namespace, clientSecret) - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create CAPx secret") } @@ -644,7 +1113,7 @@ func (p *Provider) installCAPXWorker(n nodes.Node, keosCluster commons.KeosClust " --bootstrap " + CAPIBootstrapProvider + ":" + CAPIVersion + " --control-plane " + CAPIControlPlaneProvider + ":" + CAPIVersion + " --infrastructure " + p.capxProvider + ":" + p.capxVersion - _, err = commons.ExecuteCommand(n, c, 3, 5, p.capxEnvVars) + _, err = commons.ExecuteCommand(n, c, 5, 3, p.capxEnvVars) if err != nil { return errors.Wrap(err, "failed to install CAPX in workload cluster") } @@ -657,17 +1126,17 @@ func (p *Provider) installCAPXWorker(n nodes.Node, keosCluster commons.KeosClust Namespace string }{{"capi", "capi-system"}, {p.capxName, p.capxName + "-system"}} for _, d := range deploys { - resourceQuota, err := getManifest("gcp", "resourcequota.tmpl", d) + resourceQuota, err := getManifest("gcp", "resourcequota.tmpl", majorVersion, d) if err != nil { return errors.Wrap(err, "failed to get ResourceQuota template") } c = "echo '" + resourceQuota + "' > " + resourceQuotaPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to save ResourceQuota manifest") } c = "kubectl --kubeconfig " + kubeconfigPath + " apply -f " + resourceQuotaPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to apply ResourceQuota manifest") } @@ -676,18 +1145,19 @@ func (p *Provider) installCAPXWorker(n nodes.Node, keosCluster commons.KeosClust // Manually assign PriorityClass to capx service c = "kubectl --kubeconfig " + kubeconfigPath + " -n " + p.capxName + "-system get deploy " + p.capxName + "-controller-manager -o jsonpath='{.spec.template.spec.priorityClassName}'" - priorityClassName, err := commons.ExecuteCommand(n, c, 3, 5) + priorityClassName, err := commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to get priorityClass for "+p.capxName+"-controller-manager") } + if priorityClassName != "system-node-critical" { c = "kubectl --kubeconfig " + kubeconfigPath + " -n " + p.capxName + "-system patch deploy " + p.capxName + "-controller-manager -p '{\"spec\": {\"template\": {\"spec\": {\"priorityClassName\": \"system-node-critical\"}}}}' --type=merge" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to assigned priorityClass to "+p.capxName+"-controller-manager") } c = "kubectl --kubeconfig " + kubeconfigPath + " -n " + p.capxName + "-system rollout status deploy " + p.capxName + "-controller-manager --timeout 60s" - _, err = commons.ExecuteCommand(n, c, 3, 30) + _, err = commons.ExecuteCommand(n, c, 30, 3) if err != nil { return errors.Wrap(err, "failed to check rollout status for "+p.capxName+"-controller-manager") } @@ -695,45 +1165,38 @@ func (p *Provider) installCAPXWorker(n nodes.Node, keosCluster commons.KeosClust // Scale CAPX to 2 replicas c = "kubectl --kubeconfig " + kubeconfigPath + " -n " + p.capxName + "-system scale --replicas 2 deploy " + p.capxName + "-controller-manager" - 
_, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to scale CAPX in workload cluster") } c = "kubectl --kubeconfig " + kubeconfigPath + " -n " + p.capxName + "-system rollout status deploy " + p.capxName + "-controller-manager --timeout 60s" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to check rollout status for "+p.capxName+"-controller-manager") } // Define PodDisruptionBudget for capx services - capxPDB, err := getManifest("common", "capx_pdb.tmpl", keosCluster.Spec) + capxPDB, err := getManifest("common", "capx_pdb.tmpl", "", keosCluster.Spec) if err != nil { return errors.Wrap(err, "failed to get PodDisruptionBudget file") } c = "echo '" + capxPDB + "' > " + capxPDBPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create PodDisruptionBudget file") } c = "kubectl --kubeconfig " + kubeconfigPath + " apply -f " + capxPDBPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to apply "+p.capxName+" PodDisruptionBudget") } - // Allow egress in CAPX's Namespace - c = "kubectl --kubeconfig " + kubeconfigPath + " -n " + p.capxName + "-system apply -f " + allowAllEgressNetPolPath - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to apply CAPX's NetworkPolicy in workload cluster") - } - return nil } -func (p *Provider) configCAPIWorker(n nodes.Node, keosCluster commons.KeosCluster, kubeconfigPath string, allowCommonEgressNetPolPath string) error { +func (p *Provider) configCAPIWorker(n nodes.Node, keosCluster commons.KeosCluster, kubeconfigPath string) error { var c string var err error var capiKubeadmReplicas int @@ -761,7 +1224,7 @@ func (p *Provider) configCAPIWorker(n nodes.Node, keosCluster commons.KeosCluste for _, deployment := range capiDeployments { if !p.capxManaged || (p.capxManaged && !allowedNamePattern.MatchString(deployment.name)) { c = "kubectl --kubeconfig " + kubeconfigPath + " -n " + deployment.namespace + " patch deploy " + deployment.name + " -p '{\"spec\": {\"template\": {\"spec\": {\"priorityClassName\": \"system-node-critical\"}}}}' --type=merge" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to assigned priorityClass to "+deployment.name) } @@ -771,12 +1234,12 @@ func (p *Provider) configCAPIWorker(n nodes.Node, keosCluster commons.KeosCluste // Manually assign PriorityClass to nmi if p.capxProvider == "azure" { c = "kubectl --kubeconfig " + kubeconfigPath + " -n " + p.capxName + "-system patch ds capz-nmi -p '{\"spec\": {\"template\": {\"spec\": {\"priorityClassName\": \"system-node-critical\"}}}}' --type=merge" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to assigned priorityClass to nmi") } c = "kubectl --kubeconfig " + kubeconfigPath + " -n " + p.capxName + "-system rollout status ds capz-nmi --timeout 90s" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to check rollout status for nmi") } @@ -784,12 +1247,11 @@ func (p *Provider) configCAPIWorker(n nodes.Node, keosCluster 
commons.KeosCluste // Scale number of replicas to 2 for capi service c = "kubectl --kubeconfig " + kubeconfigPath + " -n capi-system scale deploy capi-controller-manager --replicas 2" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to scale the CAPI Deployment") } - c = "kubectl --kubeconfig " + kubeconfigPath + " -n capi-system rollout status deploy capi-controller-manager --timeout 60s" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to check rollout status for capi-controller-manager") } @@ -798,12 +1260,12 @@ func (p *Provider) configCAPIWorker(n nodes.Node, keosCluster commons.KeosCluste for _, deployment := range capiDeployments { if deployment.name != "capi-controller-manager" { c = "kubectl --kubeconfig " + kubeconfigPath + " -n " + deployment.namespace + " scale --replicas " + strconv.Itoa(capiKubeadmReplicas) + " deploy " + deployment.name - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to scale the "+deployment.name+" deployment") } c = "kubectl --kubeconfig " + kubeconfigPath + " -n " + deployment.namespace + " rollout status deploy " + deployment.name + " --timeout 60s" - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to check rollout status for "+deployment.name) } @@ -811,38 +1273,21 @@ func (p *Provider) configCAPIWorker(n nodes.Node, keosCluster commons.KeosCluste } // Define PodDisruptionBudget for capi services - capiPDB, err := getManifest("common", "capi_pdb.tmpl", keosCluster.Spec) + capiPDB, err := getManifest("common", "capi_pdb.tmpl", "", keosCluster.Spec) if err != nil { return errors.Wrap(err, "failed to get PodDisruptionBudget file") } c = "echo '" + capiPDB + "' > " + capiPDBPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create PodDisruptionBudget file") } c = "kubectl --kubeconfig " + kubeconfigPath + " apply -f " + capiPDBPath - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to apply "+p.capxName+" PodDisruptionBudget") - } - - // Allow egress in CAPI's Namespaces - for _, deployment := range capiDeployments { - if !p.capxManaged || (p.capxManaged && !allowedNamePattern.MatchString(deployment.name)) { - c = "kubectl --kubeconfig " + kubeconfigPath + " -n " + deployment.namespace + " apply -f " + allowCommonEgressNetPolPath - _, err = commons.ExecuteCommand(n, c, 3, 5) - if err != nil { - return errors.Wrap(err, "failed to apply CAPI's egress NetworkPolicy in namespace "+deployment.namespace) - } - } - } + _, err = commons.ExecuteCommand(n, c, 5, 3) - // Allow egress in cert-manager Namespace - c = "kubectl --kubeconfig " + kubeconfigPath + " -n cert-manager apply -f " + allowCommonEgressNetPolPath - _, err = commons.ExecuteCommand(n, c, 3, 5) if err != nil { - return errors.Wrap(err, "failed to apply cert-manager's NetworkPolicy") + return errors.Wrap(err, "failed to apply "+p.capxName+" PodDisruptionBudget") } return nil @@ -856,7 +1301,7 @@ func (p *Provider) installCAPXLocal(n nodes.Node) error { if p.capxProvider == "azure" { // Create capx namespace c = "kubectl create namespace " + p.capxName + "-system" - _, err = commons.ExecuteCommand(n, c, 3, 
5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create CAPx namespace") } @@ -869,7 +1314,7 @@ func (p *Provider) installCAPXLocal(n nodes.Node) error { "kubectl -n %s create secret generic cluster-identity-secret "+ "--from-literal=clientSecret='%s' ", namespace, clientSecret) - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create CAPx secret") } @@ -880,7 +1325,7 @@ func (p *Provider) installCAPXLocal(n nodes.Node) error { " --bootstrap " + CAPIBootstrapProvider + ":" + CAPIVersion + " --control-plane " + CAPIControlPlaneProvider + ":" + CAPIVersion + " --infrastructure " + p.capxProvider + ":" + p.capxVersion - _, err = commons.ExecuteCommand(n, c, 3, 5, p.capxEnvVars) + _, err = commons.ExecuteCommand(n, c, 5, 3, p.capxEnvVars) if err != nil { return errors.Wrap(err, "failed to install CAPX in local cluster") } @@ -901,10 +1346,12 @@ func enableSelfHealing(n nodes.Node, keosCluster commons.KeosCluster, namespace } } - generateMHCManifest(n, keosCluster.Metadata.Name, namespace, machineHealthCheckControlPlaneNodePath, machineRole, controlplane_maxunhealty) - + err = generateMHCManifest(n, keosCluster.Metadata.Name, namespace, machineHealthCheckControlPlaneNodePath, machineRole, controlplane_maxunhealty) + if err != nil { + return errors.Wrap(err, "failed to create the MachineHealthCheck manifest") + } c = "kubectl -n " + namespace + " apply -f " + machineHealthCheckControlPlaneNodePath - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to apply the MachineHealthCheck manifest") } @@ -917,10 +1364,12 @@ func enableSelfHealing(n nodes.Node, keosCluster commons.KeosCluster, namespace workernode_maxunhealty = *clusterConfig.Spec.WorkersConfig.MaxUnhealthy } } - generateMHCManifest(n, keosCluster.Metadata.Name, namespace, machineHealthCheckWorkerNodePath, machineRole, workernode_maxunhealty) - + err = generateMHCManifest(n, keosCluster.Metadata.Name, namespace, machineHealthCheckWorkerNodePath, machineRole, workernode_maxunhealty) + if err != nil { + return errors.Wrap(err, "failed to create the MachineHealthCheck manifest") + } c = "kubectl -n " + namespace + " apply -f " + machineHealthCheckWorkerNodePath - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to apply the MachineHealthCheck manifest") } @@ -955,7 +1404,7 @@ spec: timeout: 180s` c = "echo \"" + machineHealthCheck + "\" > " + manifestPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to write the MachineHealthCheck manifest") } @@ -963,8 +1412,11 @@ spec: return nil } -func getManifest(parentPath string, name string, params interface{}) (string, error) { - templatePath := filepath.Join("templates", parentPath, name) +func getManifest(parentPath string, name string, majorVersion string, params interface{}) (string, error) { + templatePath := filepath.Join("templates", parentPath, majorVersion, name) + if majorVersion == "" { + templatePath = filepath.Join("templates", parentPath, name) + } var tpl bytes.Buffer t, err := template.New("").ParseFS(ctel, templatePath) @@ -981,7 +1433,7 @@ func getManifest(parentPath string, name string, params interface{}) (string, er func patchDeploy(n nodes.Node, k string, ns string, 
deployName string, patch string) error { c := "kubectl --kubeconfig " + k + " patch deploy -n " + ns + " " + deployName + " -p '" + patch + "'" - _, err := commons.ExecuteCommand(n, c, 3, 5) + _, err := commons.ExecuteCommand(n, c, 5, 3) if err != nil { return err } @@ -990,7 +1442,7 @@ func patchDeploy(n nodes.Node, k string, ns string, deployName string, patch str func rolloutStatus(n nodes.Node, k string, ns string, deployName string) error { c := "kubectl --kubeconfig " + k + " rollout status deploy -n " + ns + " " + deployName + " --timeout=5m" - _, err := commons.ExecuteCommand(n, c, 3, 5) + _, err := commons.ExecuteCommand(n, c, 5, 3) return err } @@ -1004,15 +1456,99 @@ func installCorednsPdb(n nodes.Node) error { } c := "echo \"" + corednsPDB + "\" > " + corednsPdbPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to create coredns PodDisruptionBudget file") } c = "kubectl --kubeconfig " + kubeconfigPath + " apply -f " + corednsPdbPath - _, err = commons.ExecuteCommand(n, c, 3, 5) + _, err = commons.ExecuteCommand(n, c, 5, 3) if err != nil { return errors.Wrap(err, "failed to apply coredns PodDisruptionBudget") } return nil } + +func pullCharts(n nodes.Node, charts map[string]commons.ChartEntry, keosSpec commons.KeosSpec, clusterCredentials commons.ClusterCredentials) error { + for name, chart := range charts { + // Set default repository if needed + if chart.Repository == "default" { + chart.Repository = keosSpec.HelmRepository.URL + } + // Check if the chart needs to be pulled + if chart.Pull { + var c string + if strings.HasPrefix(chart.Repository, "oci://") { + c = "helm pull " + chart.Repository + "/" + name + " --version " + chart.Version + " --untar --untardir /stratio/helm" + } else { + c = "helm pull " + name + " --version " + chart.Version + " --repo " + chart.Repository + " --untar --untardir /stratio/helm" + } + // Add authentication if required + if chart.Repository == keosSpec.HelmRepository.URL && keosSpec.HelmRepository.AuthRequired { + c = c + " --username " + clusterCredentials.HelmRepositoryCredentials["User"] + " --password " + clusterCredentials.HelmRepositoryCredentials["Pass"] + } + // Execute the command + _, err := commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to pull the helm chart: "+fmt.Sprint(chart)) + } + } + } + return nil +} + +func loginHelmRepo(n nodes.Node, keosCluster commons.KeosCluster, clusterCredentials commons.ClusterCredentials, helmRepoCreds *HelmRegistry, infra *Infra, providerParams ProviderParams) error { + + var helmRepository helmRepository + var err error + + helmRepoCreds.Type = keosCluster.Spec.HelmRepository.Type + helmRepoCreds.URL = keosCluster.Spec.HelmRepository.URL + if keosCluster.Spec.HelmRepository.Type != "generic" { + urlLogin := strings.Split(strings.Split(helmRepoCreds.URL, "//")[1], "/")[0] + helmRepoCreds.User, helmRepoCreds.Pass, err = infra.getRegistryCredentials(providerParams, urlLogin) + if err != nil { + return errors.Wrap(err, "failed to get helm registry credentials") + } + } else { + helmRepoCreds.User = clusterCredentials.HelmRepositoryCredentials["User"] + helmRepoCreds.Pass = clusterCredentials.HelmRepositoryCredentials["Pass"] + } + + if strings.HasPrefix(keosCluster.Spec.HelmRepository.URL, "oci://") { + stratio_helm_repo = helmRepoCreds.URL + urlLogin := strings.Split(strings.Split(keosCluster.Spec.HelmRepository.URL, 
"//")[1], "/")[0] + + c := "helm registry login " + urlLogin + " --username " + helmRepoCreds.User + " --password " + helmRepoCreds.Pass + _, err := commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to add and authenticate to helm repository: "+helmRepoCreds.URL) + } + } else if keosCluster.Spec.HelmRepository.AuthRequired { + helmRepository.user = clusterCredentials.HelmRepositoryCredentials["User"] + helmRepository.pass = clusterCredentials.HelmRepositoryCredentials["Pass"] + stratio_helm_repo = "stratio-helm-repo" + c := "helm repo add " + stratio_helm_repo + " " + helmRepoCreds.URL + " --username " + helmRepoCreds.User + " --password " + helmRepoCreds.Pass + _, err := commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to add and authenticate to helm repository: "+helmRepository.url) + } + } else { + stratio_helm_repo = "stratio-helm-repo" + c := "helm repo add " + stratio_helm_repo + " " + helmRepoCreds.URL + _, err := commons.ExecuteCommand(n, c, 5, 3) + if err != nil { + return errors.Wrap(err, "failed to add helm repository: "+helmRepoCreds.URL) + } + } + return nil +} + +func getChartVersion(charts []commons.Chart, chartName string) string { + for _, chart := range charts { + if chart.Name == chartName { + return chart.Version + } + } + return "" +} diff --git a/pkg/cluster/internal/create/actions/createworker/templates/aws/28/aws-cloud-controller-manager-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/aws/28/aws-cloud-controller-manager-helm-values.tmpl new file mode 100644 index 0000000000..7c8dbf5e85 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/aws/28/aws-cloud-controller-manager-helm-values.tmpl @@ -0,0 +1,9 @@ +args: +- --v=2 +- --cloud-provider=aws +- --cluster-cidr={{ $.PodsCidr }} +- --cluster-name={{ $.ClusterName }} + +image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}registry.k8s.io{{ end }}/provider-aws/cloud-controller-manager + diff --git a/pkg/cluster/internal/create/actions/createworker/templates/aws/28/aws-ebs-csi-driver-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/aws/28/aws-ebs-csi-driver-helm-values.tmpl new file mode 100644 index 0000000000..a1f0e4fc71 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/aws/28/aws-ebs-csi-driver-helm-values.tmpl @@ -0,0 +1,31 @@ +# Default values for aws-ebs-csi-driver. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/ebs-csi-driver/aws-ebs-csi-driver + + +sidecars: + provisioner: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/external-provisioner + attacher: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/external-attacher + snapshotter: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/external-snapshotter/csi-snapshotter + livenessProbe: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/livenessprobe + resizer: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/external-resizer + nodeDriverRegistrar: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/node-driver-registrar + volumemodifier: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/ebs-csi-driver/volume-modifier-for-k8s + \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/aws/28/aws-load-balancer-controller-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/aws/28/aws-load-balancer-controller-helm-values.tmpl new file mode 100644 index 0000000000..d6e8bdda5d --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/aws/28/aws-load-balancer-controller-helm-values.tmpl @@ -0,0 +1,2 @@ +image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks/aws-load-balancer-controller diff --git a/pkg/cluster/internal/create/actions/createworker/templates/aws/29/aws-cloud-controller-manager-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/aws/29/aws-cloud-controller-manager-helm-values.tmpl new file mode 100644 index 0000000000..c15c61c36b --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/aws/29/aws-cloud-controller-manager-helm-values.tmpl @@ -0,0 +1,11 @@ +args: +- --v=2 +- --cloud-provider=aws +- --cluster-cidr={{ $.PodsCidr }} +- --cluster-name={{ $.ClusterName }} + +hostNetworking: true + +image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}registry.k8s.io{{ end }}/provider-aws/cloud-controller-manager + diff --git a/pkg/cluster/internal/create/actions/createworker/templates/aws/29/aws-ebs-csi-driver-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/aws/29/aws-ebs-csi-driver-helm-values.tmpl new file mode 100644 index 0000000000..a1f0e4fc71 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/aws/29/aws-ebs-csi-driver-helm-values.tmpl @@ -0,0 +1,31 @@ +# Default values for aws-ebs-csi-driver. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/ebs-csi-driver/aws-ebs-csi-driver + + +sidecars: + provisioner: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/external-provisioner + attacher: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/external-attacher + snapshotter: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/external-snapshotter/csi-snapshotter + livenessProbe: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/livenessprobe + resizer: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/external-resizer + nodeDriverRegistrar: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/node-driver-registrar + volumemodifier: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/ebs-csi-driver/volume-modifier-for-k8s + \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/aws/29/aws-load-balancer-controller-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/aws/29/aws-load-balancer-controller-helm-values.tmpl new file mode 100644 index 0000000000..d6e8bdda5d --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/aws/29/aws-load-balancer-controller-helm-values.tmpl @@ -0,0 +1,2 @@ +image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks/aws-load-balancer-controller diff --git a/pkg/cluster/internal/create/actions/createworker/templates/aws/30/aws-cloud-controller-manager-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/aws/30/aws-cloud-controller-manager-helm-values.tmpl new file mode 100644 index 0000000000..826c67c7e8 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/aws/30/aws-cloud-controller-manager-helm-values.tmpl @@ -0,0 +1,12 @@ +args: +- --v=2 +- --cloud-provider=aws +- --cluster-cidr={{ $.PodsCidr }} +- --cluster-name={{ $.ClusterName }} + +# https://github.com/projectcalico/calico/issues/8453 +hostNetworking: true + +image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}registry.k8s.io{{ end }}/provider-aws/cloud-controller-manager + tag: v1.30.1 \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/aws/30/aws-ebs-csi-driver-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/aws/30/aws-ebs-csi-driver-helm-values.tmpl new file mode 100644 index 0000000000..a1f0e4fc71 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/aws/30/aws-ebs-csi-driver-helm-values.tmpl @@ -0,0 +1,31 @@ +# Default values for aws-ebs-csi-driver. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/ebs-csi-driver/aws-ebs-csi-driver + + +sidecars: + provisioner: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/external-provisioner + attacher: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/external-attacher + snapshotter: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/external-snapshotter/csi-snapshotter + livenessProbe: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/livenessprobe + resizer: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/external-resizer + nodeDriverRegistrar: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks-distro/kubernetes-csi/node-driver-registrar + volumemodifier: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/ebs-csi-driver/volume-modifier-for-k8s + \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/aws/30/aws-load-balancer-controller-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/aws/30/aws-load-balancer-controller-helm-values.tmpl new file mode 100644 index 0000000000..d6e8bdda5d --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/aws/30/aws-load-balancer-controller-helm-values.tmpl @@ -0,0 +1,2 @@ +image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}public.ecr.aws{{ end }}/eks/aws-load-balancer-controller diff --git a/pkg/cluster/internal/create/actions/createworker/templates/azure/28/azuredisk-azure-json.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/azure/28/azuredisk-azure-json.tmpl new file mode 100644 index 0000000000..75aa811e82 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/azure/28/azuredisk-azure-json.tmpl @@ -0,0 +1,39 @@ +{ + "cloud": "AzurePublicCloud", + "tenantId": "{{ $.TenantID }}", + "subscriptionId": "{{ $.SubscriptionID }}", + "resourceGroup": "{{ $.KeosClusterName }}", + "securityGroupName": "{{ $.KeosClusterName }}-node-nsg", + "securityGroupResourceGroup": "{{ $.KeosClusterName }}", + "location": "{{ $.Region }}", + "vmType": "standard", + {{- if ne $.Networks.ResourceGroup "" }} + "vnetResourceGroup": "{{ $.Networks.ResourceGroup }}", + {{- else }} + "vnetResourceGroup": "{{ $.KeosClusterName }}", + {{- end}} + {{- if $.Networks }} + "vnetName": {{- if ne $.Networks.VPCID "" }} "{{ $.Networks.VPCID }}", {{- else }} "{{ $.KeosClusterName }}-vnet", {{- end }} + {{- if $.Networks.Subnets }} + {{- $found := false }} + {{- range $.Networks.Subnets }} + {{- if and (not $found) (eq .Role "node") }} + "subnetName": "{{ $.SubnetId }}", + {{- $found = true }} + {{- end }} + {{- end }} + {{- else }} + "subnetName": "node-subnet", + {{- end }} + {{- else }} + "vnetName": "{{ $.KeosClusterName }}-vnet", + "subnetName": "node-subnet", + {{- end }} + "routeTableName": "{{ $.KeosClusterName }}-node-routetable", + "loadBalancerSku": "Standard", + "loadBalancerName": "", + "maximumLoadBalancerRuleCount": 250, + "useManagedIdentityExtension": true, + "useInstanceMetadata": true, + "userAssignedIdentityID": "{{ $.UserAssignedIdentityID }}" +} diff 
--git a/pkg/cluster/internal/create/actions/createworker/templates/azure/28/azuredisk-csi-driver-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/azure/28/azuredisk-csi-driver-helm-values.tmpl new file mode 100644 index 0000000000..10509f75b7 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/azure/28/azuredisk-csi-driver-helm-values.tmpl @@ -0,0 +1,8 @@ +controller: + podAnnotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: socket-dir,azure-cred + tolerations: {} + vmType: standard + +image: + baseRepo: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}mcr.microsoft.com{{ end }} diff --git a/pkg/cluster/internal/create/actions/createworker/templates/azure/28/azurefile-csi-driver-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/azure/28/azurefile-csi-driver-helm-values.tmpl new file mode 100644 index 0000000000..e07296f3c8 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/azure/28/azurefile-csi-driver-helm-values.tmpl @@ -0,0 +1,5 @@ +controller: + podAnnotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: socket-dir,azure-cred +image: + baseRepo: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}mcr.microsoft.com{{ end }} \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/azure/28/cloud-provider-azure-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/azure/28/cloud-provider-azure-helm-values.tmpl new file mode 100644 index 0000000000..a2913d17b1 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/azure/28/cloud-provider-azure-helm-values.tmpl @@ -0,0 +1,10 @@ +--- +cloudControllerManager: + clusterCIDR: {{ $.PodsCidr }} + configureCloudRoutes: false + imageRepository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}mcr.microsoft.com{{ end }}/oss/kubernetes + replicas: 2 +cloudNodeManager: + imageRepository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}mcr.microsoft.com{{ end }}/oss/kubernetes +infra: + clusterName: {{ $.ClusterName }} diff --git a/pkg/cluster/internal/create/actions/createworker/templates/azure/29/azuredisk-azure-json.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/azure/29/azuredisk-azure-json.tmpl new file mode 100644 index 0000000000..75aa811e82 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/azure/29/azuredisk-azure-json.tmpl @@ -0,0 +1,39 @@ +{ + "cloud": "AzurePublicCloud", + "tenantId": "{{ $.TenantID }}", + "subscriptionId": "{{ $.SubscriptionID }}", + "resourceGroup": "{{ $.KeosClusterName }}", + "securityGroupName": "{{ $.KeosClusterName }}-node-nsg", + "securityGroupResourceGroup": "{{ $.KeosClusterName }}", + "location": "{{ $.Region }}", + "vmType": "standard", + {{- if ne $.Networks.ResourceGroup "" }} + "vnetResourceGroup": "{{ $.Networks.ResourceGroup }}", + {{- else }} + "vnetResourceGroup": "{{ $.KeosClusterName }}", + {{- end}} + {{- if $.Networks }} + "vnetName": {{- if ne $.Networks.VPCID "" }} "{{ $.Networks.VPCID }}", {{- else }} "{{ $.KeosClusterName }}-vnet", {{- end }} + {{- if $.Networks.Subnets }} + {{- $found := false }} + {{- range $.Networks.Subnets }} + {{- if and (not $found) (eq .Role "node") }} + "subnetName": "{{ $.SubnetId }}", + {{- $found = true }} + {{- end }} + {{- end }} + {{- else }} + "subnetName": "node-subnet", + {{- end }} + {{- else }} + "vnetName": "{{ $.KeosClusterName }}-vnet", + "subnetName": "node-subnet", + {{- end }} + 
"routeTableName": "{{ $.KeosClusterName }}-node-routetable", + "loadBalancerSku": "Standard", + "loadBalancerName": "", + "maximumLoadBalancerRuleCount": 250, + "useManagedIdentityExtension": true, + "useInstanceMetadata": true, + "userAssignedIdentityID": "{{ $.UserAssignedIdentityID }}" +} diff --git a/pkg/cluster/internal/create/actions/createworker/templates/azure/29/azuredisk-csi-driver-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/azure/29/azuredisk-csi-driver-helm-values.tmpl new file mode 100644 index 0000000000..10509f75b7 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/azure/29/azuredisk-csi-driver-helm-values.tmpl @@ -0,0 +1,8 @@ +controller: + podAnnotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: socket-dir,azure-cred + tolerations: {} + vmType: standard + +image: + baseRepo: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}mcr.microsoft.com{{ end }} diff --git a/pkg/cluster/internal/create/actions/createworker/templates/azure/29/azurefile-csi-driver-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/azure/29/azurefile-csi-driver-helm-values.tmpl new file mode 100644 index 0000000000..e07296f3c8 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/azure/29/azurefile-csi-driver-helm-values.tmpl @@ -0,0 +1,5 @@ +controller: + podAnnotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: socket-dir,azure-cred +image: + baseRepo: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}mcr.microsoft.com{{ end }} \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/azure/29/cloud-provider-azure-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/azure/29/cloud-provider-azure-helm-values.tmpl new file mode 100644 index 0000000000..a2913d17b1 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/azure/29/cloud-provider-azure-helm-values.tmpl @@ -0,0 +1,10 @@ +--- +cloudControllerManager: + clusterCIDR: {{ $.PodsCidr }} + configureCloudRoutes: false + imageRepository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}mcr.microsoft.com{{ end }}/oss/kubernetes + replicas: 2 +cloudNodeManager: + imageRepository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}mcr.microsoft.com{{ end }}/oss/kubernetes +infra: + clusterName: {{ $.ClusterName }} diff --git a/pkg/cluster/internal/create/actions/createworker/templates/azure/30/azuredisk-azure-json.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/azure/30/azuredisk-azure-json.tmpl new file mode 100644 index 0000000000..75aa811e82 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/azure/30/azuredisk-azure-json.tmpl @@ -0,0 +1,39 @@ +{ + "cloud": "AzurePublicCloud", + "tenantId": "{{ $.TenantID }}", + "subscriptionId": "{{ $.SubscriptionID }}", + "resourceGroup": "{{ $.KeosClusterName }}", + "securityGroupName": "{{ $.KeosClusterName }}-node-nsg", + "securityGroupResourceGroup": "{{ $.KeosClusterName }}", + "location": "{{ $.Region }}", + "vmType": "standard", + {{- if ne $.Networks.ResourceGroup "" }} + "vnetResourceGroup": "{{ $.Networks.ResourceGroup }}", + {{- else }} + "vnetResourceGroup": "{{ $.KeosClusterName }}", + {{- end}} + {{- if $.Networks }} + "vnetName": {{- if ne $.Networks.VPCID "" }} "{{ $.Networks.VPCID }}", {{- else }} "{{ $.KeosClusterName }}-vnet", {{- end }} + {{- if $.Networks.Subnets }} + {{- $found := false }} + {{- range $.Networks.Subnets }} 
+ {{- if and (not $found) (eq .Role "node") }} + "subnetName": "{{ $.SubnetId }}", + {{- $found = true }} + {{- end }} + {{- end }} + {{- else }} + "subnetName": "node-subnet", + {{- end }} + {{- else }} + "vnetName": "{{ $.KeosClusterName }}-vnet", + "subnetName": "node-subnet", + {{- end }} + "routeTableName": "{{ $.KeosClusterName }}-node-routetable", + "loadBalancerSku": "Standard", + "loadBalancerName": "", + "maximumLoadBalancerRuleCount": 250, + "useManagedIdentityExtension": true, + "useInstanceMetadata": true, + "userAssignedIdentityID": "{{ $.UserAssignedIdentityID }}" +} diff --git a/pkg/cluster/internal/create/actions/createworker/templates/azure/30/azuredisk-csi-driver-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/azure/30/azuredisk-csi-driver-helm-values.tmpl new file mode 100644 index 0000000000..10509f75b7 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/azure/30/azuredisk-csi-driver-helm-values.tmpl @@ -0,0 +1,8 @@ +controller: + podAnnotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: socket-dir,azure-cred + tolerations: {} + vmType: standard + +image: + baseRepo: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}mcr.microsoft.com{{ end }} diff --git a/pkg/cluster/internal/create/actions/createworker/templates/azure/30/azurefile-csi-driver-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/azure/30/azurefile-csi-driver-helm-values.tmpl new file mode 100644 index 0000000000..e07296f3c8 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/azure/30/azurefile-csi-driver-helm-values.tmpl @@ -0,0 +1,5 @@ +controller: + podAnnotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: socket-dir,azure-cred +image: + baseRepo: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}mcr.microsoft.com{{ end }} \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/azure/30/cloud-provider-azure-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/azure/30/cloud-provider-azure-helm-values.tmpl new file mode 100644 index 0000000000..0fb9a47099 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/azure/30/cloud-provider-azure-helm-values.tmpl @@ -0,0 +1,11 @@ +--- +cloudControllerManager: + clusterCIDR: {{ $.PodsCidr }} + # "false" for Azure CNI and "true" for other network plugins + configureCloudRoutes: true + imageRepository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}mcr.microsoft.com{{ end }}/oss/kubernetes + replicas: 2 +cloudNodeManager: + imageRepository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}mcr.microsoft.com{{ end }}/oss/kubernetes +infra: + clusterName: {{ $.ClusterName }} diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/28/cert-manager-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/28/cert-manager-helm-values.tmpl new file mode 100644 index 0000000000..de1f4a3cbd --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/28/cert-manager-helm-values.tmpl @@ -0,0 +1,17 @@ +installCRDs: true +image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}quay.io{{ end }}/jetstack/cert-manager-controller +acmesolver: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}quay.io{{ end }}/jetstack/cert-manager-acmesolver +cainjector: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}quay.io{{ end 
}}/jetstack/cert-manager-cainjector +startupapicheck: + extraArgs: [] + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}quay.io{{ end }}/jetstack/cert-manager-ctl +webhook: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}quay.io{{ end }}/jetstack/cert-manager-webhook + diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/28/cluster-autoscaler-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/28/cluster-autoscaler-helm-values.tmpl new file mode 100644 index 0000000000..17654c2d01 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/28/cluster-autoscaler-helm-values.tmpl @@ -0,0 +1,14 @@ +autoDiscovery: + clusterName: {{ $.KeosCluster.Metadata.Name }} + labels: + - namespace: cluster-{{ $.KeosCluster.Metadata.Name }} + roles: + - worker + tags: + - k8s.io/cluster-autoscaler/enabled + - k8s.io/cluster-autoscaler/{{ $.KeosCluster.Metadata.Name }} +cloudProvider: clusterapi + +image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}registry.k8s.io{{ end }}/autoscaling/cluster-autoscaler +replicaCount: 2 diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2-helm-chart-values_configmap.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2-helm-chart-values_configmap.tmpl new file mode 100644 index 0000000000..0cdd6edead --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2-helm-chart-values_configmap.tmpl @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: 00-{{ $.CMName }}-helm-chart-default-values + namespace: {{ $.CMNamespace }} +data: + values.yaml: |- + {{- indent 4 $.CMValue }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: 01-{{ $.CMName }}-helm-chart-override-values + namespace: {{ $.CMNamespace }} +data: + values.yaml: "" \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2-helm-values.tmpl new file mode 100644 index 0000000000..a6a65d6ab8 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2-helm-values.tmpl @@ -0,0 +1,29 @@ +cli: + image: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}ghcr.io{{ end }}/fluxcd/flux-cli + +# controllers + +helmController: + image: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}ghcr.io{{ end }}/fluxcd/helm-controller + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: temp + +imageAutomationController: + create: false + +imageReflectionController: + create: false + +kustomizeController: + create: false + +notificationController: + create: false + +policies: + create: false + +sourceController: + image: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}ghcr.io{{ end }}/fluxcd/source-controller + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: data,tmp diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2_helmrelease.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2_helmrelease.tmpl new file mode 100644 index 0000000000..22f0ff4529 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2_helmrelease.tmpl @@ -0,0 +1,35 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: {{ $.ChartName }} + namespace: 
{{ $.ChartNamespace }} +spec: + releaseName: {{ $.ChartName }} + chart: + spec: + chart: {{ $.ChartName }} + version: "{{ $.ChartVersion }}" + sourceRef: + kind: HelmRepository + name: {{ $.ChartRepoRef }} + namespace: kube-system + interval: 1m + interval: 1m + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + crds: CreateReplace + remediation: + retries: 3 + remediateLastFailure: true + force: false + valuesFrom: + - kind: ConfigMap + name: 00-{{ $.ChartName }}-helm-chart-default-values + valuesKey: values.yaml + - kind: ConfigMap + name: 01-{{ $.ChartName }}-helm-chart-override-values + valuesKey: values.yaml \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2_helmrepository.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2_helmrepository.tmpl new file mode 100644 index 0000000000..48f1793c7e --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/28/flux2_helmrepository.tmpl @@ -0,0 +1,24 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: {{ $.ChartName }} + namespace: kube-system +spec: + url: {{ $.ChartRepoUrl }} + type: {{ $.ChartRepoScheme }} + interval: 10m + provider: {{ $.Spec.InfraProvider }} + {{- if and ($.Spec.HelmRepository.AuthRequired) (eq $.ChartName "keos") }} + secretRef: + name: {{ $.ChartName }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $.ChartName }} + namespace: kube-system +stringData: + username: {{ $.HelmRepoCreds.User }} + password: {{ $.HelmRepoCreds.Pass }} + {{- end }} \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/28/tigera-operator-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/28/tigera-operator-helm-values.tmpl new file mode 100644 index 0000000000..19f85d4794 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/28/tigera-operator-helm-values.tmpl @@ -0,0 +1,65 @@ +--- +apiServer: + enabled: false +calicoctl: +{{- if $.Private }} + image: {{ $.KeosRegUrl }}/calico/ctl +{{- else }} + image: docker.io/calico/ctl +{{- end }} +installation: + calicoNetwork: + bgp: {{- if or ($.Spec.ControlPlane.Managed) (eq $.Spec.InfraProvider "azure") }} Disabled {{- else }} Enabled {{- end }} + {{- if not $.Spec.ControlPlane.Managed }} + {{- if eq $.Spec.InfraProvider "azure" }} + mtu: 1350 + {{- end }} + ipPools: + - cidr: {{- if $.Spec.Networks.PodsCidrBlock }} {{ $.Spec.Networks.PodsCidrBlock }} {{- else }} 192.168.0.0/16 {{- end }} + encapsulation: {{- if eq $.Spec.InfraProvider "azure" }} VXLAN {{- else }} IPIP {{- end }} + {{- end }} + cni: + {{- if and ($.Spec.ControlPlane.Managed) (eq $.Spec.InfraProvider "aws") }} + ipam: + type: AmazonVPC + type: AmazonVPC + {{- else }} + ipam: + type: Calico + type: Calico + {{- end }} + enabled: true + kubernetesProvider: {{- if and ($.Spec.ControlPlane.Managed) (eq $.Spec.InfraProvider "aws") }} EKS {{- else }} "" {{- end }} + nodeMetricsPort: 9191 + {{- if $.Private }} + registry: {{ $.KeosRegUrl }} + {{- else }} + registry: docker.io + {{- end }} + typhaMetricsPort: 9093 +# NodeSelector for the tigera/operator pod. +nodeSelector: + kubernetes.io/os: linux +# Custom annotations for the tigera/operator pod. +podAnnotations: +{{- range $key, $value := $.Annotations }} + {{ $key }}: {{ $value }} +{{- end }} +# Custom labels for the tigera/operator pod. 
+podLabels: {} +# Resource requests and limits for the tigera/operator pod. +resources: {} +# Image and registry configuration for the tigera/operator pod. +tigeraOperator: +{{- if $.Private }} + registry: {{ $.KeosRegUrl }} +{{- else }} + registry: quay.io +{{- end }} + image: tigera/operator +# Tolerations for the tigera/operator pod. +tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/29/cert-manager-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/29/cert-manager-helm-values.tmpl new file mode 100644 index 0000000000..a4332b927e --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/29/cert-manager-helm-values.tmpl @@ -0,0 +1,15 @@ +installCRDs: true +image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}quay.io{{ end }}/jetstack/cert-manager-controller +acmesolver: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}quay.io{{ end }}/jetstack/cert-manager-acmesolver +cainjector: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}quay.io{{ end }}/jetstack/cert-manager-cainjector +startupapicheck: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}quay.io{{ end }}/jetstack/cert-manager-ctl +webhook: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}quay.io{{ end }}/jetstack/cert-manager-webhook \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/29/cluster-autoscaler-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/29/cluster-autoscaler-helm-values.tmpl new file mode 100644 index 0000000000..17654c2d01 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/29/cluster-autoscaler-helm-values.tmpl @@ -0,0 +1,14 @@ +autoDiscovery: + clusterName: {{ $.KeosCluster.Metadata.Name }} + labels: + - namespace: cluster-{{ $.KeosCluster.Metadata.Name }} + roles: + - worker + tags: + - k8s.io/cluster-autoscaler/enabled + - k8s.io/cluster-autoscaler/{{ $.KeosCluster.Metadata.Name }} +cloudProvider: clusterapi + +image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}registry.k8s.io{{ end }}/autoscaling/cluster-autoscaler +replicaCount: 2 diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2-helm-chart-values_configmap.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2-helm-chart-values_configmap.tmpl new file mode 100644 index 0000000000..0cdd6edead --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2-helm-chart-values_configmap.tmpl @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: 00-{{ $.CMName }}-helm-chart-default-values + namespace: {{ $.CMNamespace }} +data: + values.yaml: |- + {{- indent 4 $.CMValue }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: 01-{{ $.CMName }}-helm-chart-override-values + namespace: {{ $.CMNamespace }} +data: + values.yaml: "" \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2-helm-values.tmpl new file mode 100644 index 0000000000..a6a65d6ab8 --- /dev/null +++ 
b/pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2-helm-values.tmpl @@ -0,0 +1,29 @@ +cli: + image: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}ghcr.io{{ end }}/fluxcd/flux-cli + +# controllers + +helmController: + image: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}ghcr.io{{ end }}/fluxcd/helm-controller + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: temp + +imageAutomationController: + create: false + +imageReflectionController: + create: false + +kustomizeController: + create: false + +notificationController: + create: false + +policies: + create: false + +sourceController: + image: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}ghcr.io{{ end }}/fluxcd/source-controller + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: data,tmp diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2_helmrelease.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2_helmrelease.tmpl new file mode 100644 index 0000000000..22f0ff4529 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2_helmrelease.tmpl @@ -0,0 +1,35 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: {{ $.ChartName }} + namespace: {{ $.ChartNamespace }} +spec: + releaseName: {{ $.ChartName }} + chart: + spec: + chart: {{ $.ChartName }} + version: "{{ $.ChartVersion }}" + sourceRef: + kind: HelmRepository + name: {{ $.ChartRepoRef }} + namespace: kube-system + interval: 1m + interval: 1m + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + crds: CreateReplace + remediation: + retries: 3 + remediateLastFailure: true + force: false + valuesFrom: + - kind: ConfigMap + name: 00-{{ $.ChartName }}-helm-chart-default-values + valuesKey: values.yaml + - kind: ConfigMap + name: 01-{{ $.ChartName }}-helm-chart-override-values + valuesKey: values.yaml \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2_helmrepository.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2_helmrepository.tmpl new file mode 100644 index 0000000000..48f1793c7e --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/29/flux2_helmrepository.tmpl @@ -0,0 +1,24 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: {{ $.ChartName }} + namespace: kube-system +spec: + url: {{ $.ChartRepoUrl }} + type: {{ $.ChartRepoScheme }} + interval: 10m + provider: {{ $.Spec.InfraProvider }} + {{- if and ($.Spec.HelmRepository.AuthRequired) (eq $.ChartName "keos") }} + secretRef: + name: {{ $.ChartName }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $.ChartName }} + namespace: kube-system +stringData: + username: {{ $.HelmRepoCreds.User }} + password: {{ $.HelmRepoCreds.Pass }} + {{- end }} \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/29/tigera-operator-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/29/tigera-operator-helm-values.tmpl new file mode 100644 index 0000000000..e9a1313fc2 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/29/tigera-operator-helm-values.tmpl @@ -0,0 +1,73 @@ +--- +apiServer: + enabled: false +calicoctl: +{{- if $.Private }} + image: {{ $.KeosRegUrl }}/calico/ctl +{{- else }} + image: docker.io/calico/ctl +{{- end }} 
+installation: + calicoNetwork: + bgp: {{- if or ($.Spec.ControlPlane.Managed) (eq $.Spec.InfraProvider "azure") }} Disabled {{- else }} Enabled {{- end }} + {{- if not $.Spec.ControlPlane.Managed }} + {{- if eq $.Spec.InfraProvider "azure" }} + mtu: 1350 + {{- end }} + {{- if not $.IsNetPolEngine }} + ipPools: + - cidr: {{- if $.Spec.Networks.PodsCidrBlock }} {{ $.Spec.Networks.PodsCidrBlock }} {{- else }} 192.168.0.0/16 {{- end }} + encapsulation: {{- if eq $.Spec.InfraProvider "azure" }} VXLAN {{- else }} IPIP {{- end }} + {{- end }} + {{- end }} + cni: + {{- if and $.Spec.ControlPlane.Managed (eq $.Spec.InfraProvider "aws") }} + ipam: + type: AmazonVPC + type: AmazonVPC + {{- else if and $.Spec.ControlPlane.Managed (eq $.Spec.InfraProvider "gcp") $.IsNetPolEngine }} + ipam: + type: HostLocal + type: GKE + {{- else }} + ipam: + type: Calico + type: Calico + {{- end }} + {{- if not $.IsNetPolEngine }} + enabled: true + {{- end }} + kubernetesProvider: {{- if and $.Spec.ControlPlane.Managed (eq $.Spec.InfraProvider "aws") }} EKS {{- else if and $.Spec.ControlPlane.Managed (eq $.Spec.InfraProvider "gcp") }} GKE {{- else }} "" {{- end }} + nodeMetricsPort: 9191 + {{- if $.Private }} + registry: {{ $.KeosRegUrl }} + {{- else }} + registry: docker.io + {{- end }} + typhaMetricsPort: 9093 +# NodeSelector for the tigera/operator pod. +nodeSelector: + kubernetes.io/os: linux +# Custom annotations for the tigera/operator pod. +podAnnotations: +{{- range $key, $value := $.Annotations }} + {{ $key }}: {{ $value }} +{{- end }} +# Custom labels for the tigera/operator pod. +podLabels: {} +# Resource requests and limits for the tigera/operator pod. +resources: {} +# Image and registry configuration for the tigera/operator pod. +tigeraOperator: +{{- if $.Private }} + registry: {{ $.KeosRegUrl }} +{{- else }} + registry: quay.io +{{- end }} + image: tigera/operator +# Tolerations for the tigera/operator pod. 
+tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/30/cert-manager-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/30/cert-manager-helm-values.tmpl new file mode 100644 index 0000000000..a4332b927e --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/30/cert-manager-helm-values.tmpl @@ -0,0 +1,15 @@ +installCRDs: true +image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}quay.io{{ end }}/jetstack/cert-manager-controller +acmesolver: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}quay.io{{ end }}/jetstack/cert-manager-acmesolver +cainjector: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}quay.io{{ end }}/jetstack/cert-manager-cainjector +startupapicheck: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}quay.io{{ end }}/jetstack/cert-manager-ctl +webhook: + image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}quay.io{{ end }}/jetstack/cert-manager-webhook \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/30/cluster-autoscaler-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/30/cluster-autoscaler-helm-values.tmpl new file mode 100644 index 0000000000..17654c2d01 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/30/cluster-autoscaler-helm-values.tmpl @@ -0,0 +1,14 @@ +autoDiscovery: + clusterName: {{ $.KeosCluster.Metadata.Name }} + labels: + - namespace: cluster-{{ $.KeosCluster.Metadata.Name }} + roles: + - worker + tags: + - k8s.io/cluster-autoscaler/enabled + - k8s.io/cluster-autoscaler/{{ $.KeosCluster.Metadata.Name }} +cloudProvider: clusterapi + +image: + repository: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}registry.k8s.io{{ end }}/autoscaling/cluster-autoscaler +replicaCount: 2 diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2-helm-chart-values_configmap.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2-helm-chart-values_configmap.tmpl new file mode 100644 index 0000000000..0cdd6edead --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2-helm-chart-values_configmap.tmpl @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: 00-{{ $.CMName }}-helm-chart-default-values + namespace: {{ $.CMNamespace }} +data: + values.yaml: |- + {{- indent 4 $.CMValue }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: 01-{{ $.CMName }}-helm-chart-override-values + namespace: {{ $.CMNamespace }} +data: + values.yaml: "" \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2-helm-values.tmpl new file mode 100644 index 0000000000..a6a65d6ab8 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2-helm-values.tmpl @@ -0,0 +1,29 @@ +cli: + image: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}ghcr.io{{ end }}/fluxcd/flux-cli + +# controllers + +helmController: + image: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}ghcr.io{{ end }}/fluxcd/helm-controller + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: temp + 
+imageAutomationController: + create: false + +imageReflectionController: + create: false + +kustomizeController: + create: false + +notificationController: + create: false + +policies: + create: false + +sourceController: + image: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}ghcr.io{{ end }}/fluxcd/source-controller + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: data,tmp diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2_helmrelease.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2_helmrelease.tmpl new file mode 100644 index 0000000000..90c598ac85 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2_helmrelease.tmpl @@ -0,0 +1,35 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: {{ $.ChartName }} + namespace: {{ $.ChartNamespace }} +spec: + releaseName: {{ $.ChartName }} + chart: + spec: + chart: {{ $.ChartName }} + version: "{{ $.ChartVersion }}" + sourceRef: + kind: HelmRepository + name: {{ $.ChartRepoRef }} + namespace: kube-system + interval: {{ $.HelmReleaseSourceInterval }} + interval: {{ $.HelmReleaseInterval }} + install: + crds: CreateReplace + remediation: + retries: {{ $.HelmReleaseRetries }} + upgrade: + crds: CreateReplace + remediation: + retries: {{ $.HelmReleaseRetries }} + remediateLastFailure: true + force: false + valuesFrom: + - kind: ConfigMap + name: 00-{{ $.ChartName }}-helm-chart-default-values + valuesKey: values.yaml + - kind: ConfigMap + name: 01-{{ $.ChartName }}-helm-chart-override-values + valuesKey: values.yaml \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2_helmrepository.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2_helmrepository.tmpl new file mode 100644 index 0000000000..8af04cc49e --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/common/30/flux2_helmrepository.tmpl @@ -0,0 +1,24 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: {{ $.ChartName }} + namespace: kube-system +spec: + url: {{ $.ChartRepoUrl }} + type: {{ $.ChartRepoScheme }} + interval: {{ $.RepositoryInterval }} + provider: {{ $.Spec.InfraProvider }} + {{- if and ($.Spec.HelmRepository.AuthRequired) (eq $.ChartName "keos") }} + secretRef: + name: {{ $.ChartName }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $.ChartName }} + namespace: kube-system +stringData: + username: {{ $.HelmRepoCreds.User }} + password: {{ $.HelmRepoCreds.Pass }} + {{- end }} \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/common/calico-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/common/30/tigera-operator-helm-values.tmpl similarity index 100% rename from pkg/cluster/internal/create/actions/createworker/templates/common/calico-helm-values.tmpl rename to pkg/cluster/internal/create/actions/createworker/templates/common/30/tigera-operator-helm-values.tmpl diff --git a/pkg/cluster/internal/create/actions/createworker/templates/gcp/coredns_configmap.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/gcp/28/coredns_configmap.tmpl similarity index 100% rename from pkg/cluster/internal/create/actions/createworker/templates/gcp/coredns_configmap.tmpl rename to pkg/cluster/internal/create/actions/createworker/templates/gcp/28/coredns_configmap.tmpl diff --git 
a/pkg/cluster/internal/create/actions/createworker/templates/gcp/gcp-compute-persistent-disk-csi-driver.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/gcp/28/gcp-compute-persistent-disk-csi-driver.tmpl similarity index 99% rename from pkg/cluster/internal/create/actions/createworker/templates/gcp/gcp-compute-persistent-disk-csi-driver.tmpl rename to pkg/cluster/internal/create/actions/createworker/templates/gcp/28/gcp-compute-persistent-disk-csi-driver.tmpl index 771a3f5ea5..3c156d847a 100644 --- a/pkg/cluster/internal/create/actions/createworker/templates/gcp/gcp-compute-persistent-disk-csi-driver.tmpl +++ b/pkg/cluster/internal/create/actions/createworker/templates/gcp/28/gcp-compute-persistent-disk-csi-driver.tmpl @@ -410,12 +410,14 @@ metadata: name: csi-gce-pd-controller namespace: kube-system spec: - replicas: 1 + replicas: 2 selector: matchLabels: app: gcp-compute-persistent-disk-csi-driver template: metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: socket-dir labels: app: gcp-compute-persistent-disk-csi-driver spec: diff --git a/pkg/cluster/internal/create/actions/createworker/templates/gcp/resourcequota.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/gcp/28/resourcequota.tmpl similarity index 100% rename from pkg/cluster/internal/create/actions/createworker/templates/gcp/resourcequota.tmpl rename to pkg/cluster/internal/create/actions/createworker/templates/gcp/28/resourcequota.tmpl diff --git a/pkg/cluster/internal/create/actions/createworker/templates/gcp/coredns-patch_configmap.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/gcp/29/coredns_configmap.tmpl similarity index 88% rename from pkg/cluster/internal/create/actions/createworker/templates/gcp/coredns-patch_configmap.tmpl rename to pkg/cluster/internal/create/actions/createworker/templates/gcp/29/coredns_configmap.tmpl index fcfdea7733..7008726736 100644 --- a/pkg/cluster/internal/create/actions/createworker/templates/gcp/coredns-patch_configmap.tmpl +++ b/pkg/cluster/internal/create/actions/createworker/templates/gcp/29/coredns_configmap.tmpl @@ -1,3 +1,9 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system data: Corefile: | .:53 { diff --git a/pkg/cluster/internal/create/actions/createworker/templates/gcp/29/gcp-compute-persistent-disk-csi-driver.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/gcp/29/gcp-compute-persistent-disk-csi-driver.tmpl new file mode 100644 index 0000000000..be117e2b3a --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/gcp/29/gcp-compute-persistent-disk-csi-driver.tmpl @@ -0,0 +1,708 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: gce-pd-csi-driver +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-gce-pd-controller-sa + namespace: gce-pd-csi-driver +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-gce-pd-node-sa + namespace: gce-pd-csi-driver +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + k8s-app: gcp-compute-persistent-disk-csi-driver + name: csi-gce-pd-leaderelection-role + namespace: gce-pd-csi-driver +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: csi-gce-pd-attacher-role +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + 
- update + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - storage.k8s.io + resources: + - csinodes + verbs: + - get + - list + - watch +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: csi-gce-pd-controller-deploy +rules: +- apiGroups: + - policy + resourceNames: + - csi-gce-pd-controller-psp + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: csi-gce-pd-node-deploy +rules: +- apiGroups: + - policy + resourceNames: + - csi-gce-pd-node-psp + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: csi-gce-pd-provisioner-role +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - create + - delete +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - storage.k8s.io + resources: + - csinodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list +- apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: csi-gce-pd-resizer-role +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims/status + verbs: + - update + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: csi-gce-pd-snapshotter-role +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - get + - list + - watch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - create + - get + - list + - watch + - update + - delete + - patch +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents/status + verbs: + - update + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: gcp-compute-persistent-disk-csi-driver + name: csi-gce-pd-controller-leaderelection-binding + namespace: gce-pd-csi-driver +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: csi-gce-pd-leaderelection-role +subjects: +- kind: ServiceAccount + name: csi-gce-pd-controller-sa 
+ namespace: gce-pd-csi-driver +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: csi-gce-pd-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: csi-gce-pd-node-deploy +subjects: +- kind: ServiceAccount + name: csi-gce-pd-controller-sa + namespace: gce-pd-csi-driver +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: csi-gce-pd-controller-attacher-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: csi-gce-pd-attacher-role +subjects: +- kind: ServiceAccount + name: csi-gce-pd-controller-sa + namespace: gce-pd-csi-driver +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: csi-gce-pd-controller-deploy +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: csi-gce-pd-controller-deploy +subjects: +- kind: ServiceAccount + name: csi-gce-pd-controller-sa + namespace: gce-pd-csi-driver +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: csi-gce-pd-controller-provisioner-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: csi-gce-pd-provisioner-role +subjects: +- kind: ServiceAccount + name: csi-gce-pd-controller-sa + namespace: gce-pd-csi-driver +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: csi-gce-pd-controller-snapshotter-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: csi-gce-pd-snapshotter-role +subjects: +- kind: ServiceAccount + name: csi-gce-pd-controller-sa + namespace: gce-pd-csi-driver +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: csi-gce-pd-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: csi-gce-pd-node-deploy +subjects: +- kind: ServiceAccount + name: csi-gce-pd-node-sa + namespace: gce-pd-csi-driver +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: csi-gce-pd-resizer-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: csi-gce-pd-resizer-role +subjects: +- kind: ServiceAccount + name: csi-gce-pd-controller-sa + namespace: gce-pd-csi-driver +--- +apiVersion: scheduling.k8s.io/v1 +description: This priority class should be used for the GCE PD CSI driver controller + deployment only. +globalDefault: false +kind: PriorityClass +metadata: + name: csi-gce-pd-controller +value: 900000000 +--- +apiVersion: scheduling.k8s.io/v1 +description: This priority class should be used for the GCE PD CSI driver node deployment + only. 
+globalDefault: false +kind: PriorityClass +metadata: + name: csi-gce-pd-node +value: 900001000 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: csi-gce-pd-controller + namespace: gce-pd-csi-driver +spec: + replicas: 2 + selector: + matchLabels: + app: gcp-compute-persistent-disk-csi-driver + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: socket-dir + labels: + app: gcp-compute-persistent-disk-csi-driver + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - gcp-compute-persistent-disk-csi-driver + topologyKey: "kubernetes.io/hostname" + weight: 100 + containers: + - args: + - --v=5 + - --csi-address=/csi/csi.sock + - --feature-gates=Topology=true + - --http-endpoint=:22011 + - --leader-election-namespace=$(PDCSI_NAMESPACE) + - --timeout=250s + - --extra-create-metadata + - --leader-election + - --default-fstype=ext4 + - --controller-publish-readonly=true + env: + - name: PDCSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if $.Private }} + image: {{ $.KeosRegUrl }}/sig-storage/csi-provisioner:v3.4.0 + {{- else }} + image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0 + {{- end }} + livenessProbe: + failureThreshold: 1 + httpGet: + path: /healthz/leader-election + port: http-endpoint + initialDelaySeconds: 10 + periodSeconds: 20 + timeoutSeconds: 10 + name: csi-provisioner + ports: + - containerPort: 22011 + name: http-endpoint + protocol: TCP + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=5 + - --csi-address=/csi/csi.sock + - --http-endpoint=:22012 + - --leader-election + - --leader-election-namespace=$(PDCSI_NAMESPACE) + - --timeout=250s + - --max-grpc-log-length=10000 + - --default-fstype=ext4 + env: + - name: PDCSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if $.Private }} + image: {{ $.KeosRegUrl }}/sig-storage/csi-attacher:v4.2.0 + {{- else }} + image: registry.k8s.io/sig-storage/csi-attacher:v4.2.0 + {{- end }} + livenessProbe: + failureThreshold: 1 + httpGet: + path: /healthz/leader-election + port: http-endpoint + initialDelaySeconds: 10 + periodSeconds: 20 + timeoutSeconds: 10 + name: csi-attacher + ports: + - containerPort: 22012 + name: http-endpoint + protocol: TCP + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=5 + - --csi-address=/csi/csi.sock + - --http-endpoint=:22013 + - --leader-election + - --leader-election-namespace=$(PDCSI_NAMESPACE) + - --handle-volume-inuse-error=false + env: + - name: PDCSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if $.Private }} + image: {{ $.KeosRegUrl }}/sig-storage/csi-resizer:v1.7.0 + {{- else }} + image: registry.k8s.io/sig-storage/csi-resizer:v1.7.0 + {{- end }} + livenessProbe: + failureThreshold: 1 + httpGet: + path: /healthz/leader-election + port: http-endpoint + initialDelaySeconds: 10 + periodSeconds: 20 + timeoutSeconds: 10 + name: csi-resizer + ports: + - containerPort: 22013 + name: http-endpoint + protocol: TCP + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=5 + - --csi-address=/csi/csi.sock + - --metrics-address=:22014 + - --leader-election + - --leader-election-namespace=$(PDCSI_NAMESPACE) + - --timeout=300s + env: + - name: PDCSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if $.Private }} + image: {{ $.KeosRegUrl 
}}/sig-storage/csi-snapshotter:v6.1.0 + {{- else }} + image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0 + {{- end }} + name: csi-snapshotter + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=5 + - --endpoint=unix:/csi/csi.sock + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /etc/cloud-sa/cloud-sa.json + {{- if $.Private }} + image: {{ $.KeosRegUrl }}/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.10.1 + {{- else }} + image: registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.10.1 + {{- end }} + name: gce-pd-driver + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /etc/cloud-sa + name: cloud-sa-volume + readOnly: true + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: csi-gce-pd-controller + serviceAccountName: csi-gce-pd-controller-sa + volumes: + - emptyDir: {} + name: socket-dir + - name: cloud-sa-volume + secret: + secretName: cloud-sa +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: csi-gce-pd-node + namespace: gce-pd-csi-driver +spec: + selector: + matchLabels: + app: gcp-compute-persistent-disk-csi-driver + template: + metadata: + labels: + app: gcp-compute-persistent-disk-csi-driver + spec: + containers: + - args: + - --v=5 + - --csi-address=/csi/csi.sock + - --kubelet-registration-path=/var/lib/kubelet/plugins/pd.csi.storage.gke.io/csi.sock + env: + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + {{- if $.Private }} + image: {{ $.KeosRegUrl }}/sig-storage/csi-node-driver-registrar:v2.7.0 + {{- else }} + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0 + {{- end }} + name: csi-driver-registrar + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - args: + - --v=5 + - --endpoint=unix:/csi/csi.sock + - --run-controller-service=false + {{- if $.Private }} + image: {{ $.KeosRegUrl }}/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.10.1 + {{- else }} + image: registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.10.1 + {{- end }} + name: gce-pd-driver + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: kubelet-dir + - mountPath: /csi + name: plugin-dir + - mountPath: /dev + name: device-dir + - mountPath: /etc/udev + name: udev-rules-etc + - mountPath: /lib/udev + name: udev-rules-lib + - mountPath: /run/udev + name: udev-socket + - mountPath: /sys + name: sys + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: csi-gce-pd-node + serviceAccountName: csi-gce-pd-node-sa + tolerations: + - operator: Exists + volumes: + - hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: kubelet-dir + - hostPath: + path: /var/lib/kubelet/plugins/pd.csi.storage.gke.io/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /dev + type: Directory + name: device-dir + - hostPath: + path: /etc/udev + type: Directory + name: udev-rules-etc + - hostPath: + path: /lib/udev + type: Directory + name: udev-rules-lib + - hostPath: + path: /run/udev + type: Directory + name: udev-socket + - hostPath: + path: /sys + type: Directory + name: sys +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: pd.csi.storage.gke.io +spec: + attachRequired: true + podInfoOnMount: false \ No newline at end of file 
diff --git a/pkg/cluster/internal/create/actions/createworker/templates/gcp/29/resourcequota.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/gcp/29/resourcequota.tmpl new file mode 100644 index 0000000000..3e395db4f1 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/gcp/29/resourcequota.tmpl @@ -0,0 +1,18 @@ +--- +apiVersion: v1 +kind: ResourceQuota +metadata: + labels: + addonmanager.kubernetes.io/mode: Reconcile + name: {{ .Name }}-critical-pods + namespace: {{ .Namespace }} +spec: + hard: + pods: 1G + scopeSelector: + matchExpressions: + - operator: In + scopeName: PriorityClass + values: + - system-node-critical + - system-cluster-critical diff --git a/pkg/cluster/internal/create/actions/createworker/templates/gcp/30/coredns_configmap.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/gcp/30/coredns_configmap.tmpl new file mode 100644 index 0000000000..7008726736 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/gcp/30/coredns_configmap.tmpl @@ -0,0 +1,34 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system +data: + Corefile: | + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + {{- if gt (len $.Dns.Forwarders) 0 }} + forward .{{ range $i, $server := .Dns.Forwarders }} {{ $server }}{{ end }} { + prefer_udp + } + {{- else }} + forward . /etc/resolv.conf { + max_concurrent 1000 + } + {{- end }} + cache 30 + loop + reload + loadbalance + } \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/gcp/30/gcp-cloud-controller-manager-helm-values.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/gcp/30/gcp-cloud-controller-manager-helm-values.tmpl new file mode 100644 index 0000000000..e4c97e3f90 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/gcp/30/gcp-cloud-controller-manager-helm-values.tmpl @@ -0,0 +1,13 @@ +args: +- --cloud-provider=gce +- --leader-elect=true +- --use-service-account-credentials +- --allocate-node-cidrs=true +- --cluster-cidr={{ $.PodsCidr }} +- --v=2 +- --cloud-config=/etc/kubernetes/gce.conf + +image: + registry: {{ if $.Private }}{{ $.KeosRegUrl }}{{ else }}gcr.io{{ end }} + repository: k8s-staging-cloud-provider-gcp/cloud-controller-manager + tag: release-1.30 \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/files/gcp/gcp-compute-persistent-disk-csi-driver.yaml b/pkg/cluster/internal/create/actions/createworker/templates/gcp/30/gcp-compute-persistent-disk-csi-driver.tmpl similarity index 92% rename from pkg/cluster/internal/create/actions/createworker/files/gcp/gcp-compute-persistent-disk-csi-driver.yaml rename to pkg/cluster/internal/create/actions/createworker/templates/gcp/30/gcp-compute-persistent-disk-csi-driver.tmpl index b074e4739e..18ec8ac019 100644 --- a/pkg/cluster/internal/create/actions/createworker/files/gcp/gcp-compute-persistent-disk-csi-driver.yaml +++ b/pkg/cluster/internal/create/actions/createworker/templates/gcp/30/gcp-compute-persistent-disk-csi-driver.tmpl @@ -1,4 +1,9 @@ apiVersion: v1 +kind: Namespace +metadata: + name: gce-pd-csi-driver +--- +apiVersion: v1 kind: ServiceAccount metadata: name: csi-gce-pd-controller-sa @@ -410,7 +415,7 @@ metadata: name: csi-gce-pd-controller namespace: kube-system spec: - replicas: 1 + replicas: 2 selector: 
matchLabels: app: gcp-compute-persistent-disk-csi-driver @@ -450,7 +455,11 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + {{- if $.Private }} + image: {{ $.KeosRegUrl }}/sig-storage/csi-provisioner:v3.4.0 + {{- else }} image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0 + {{- end }} livenessProbe: failureThreshold: 1 httpGet: @@ -481,7 +490,11 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + {{- if $.Private }} + image: {{ $.KeosRegUrl }}/sig-storage/csi-attacher:v4.2.0 + {{- else }} image: registry.k8s.io/sig-storage/csi-attacher:v4.2.0 + {{- end }} livenessProbe: failureThreshold: 1 httpGet: @@ -510,7 +523,11 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + {{- if $.Private }} + image: {{ $.KeosRegUrl }}/sig-storage/csi-resizer:v1.7.0 + {{- else }} image: registry.k8s.io/sig-storage/csi-resizer:v1.7.0 + {{- end }} livenessProbe: failureThreshold: 1 httpGet: @@ -539,7 +556,11 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + {{- if $.Private }} + image: {{ $.KeosRegUrl }}/sig-storage/csi-snapshotter:v6.1.0 + {{- else }} image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0 + {{- end }} name: csi-snapshotter volumeMounts: - mountPath: /csi @@ -550,7 +571,11 @@ spec: env: - name: GOOGLE_APPLICATION_CREDENTIALS value: /etc/cloud-sa/cloud-sa.json + {{- if $.Private }} + image: {{ $.KeosRegUrl }}/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.10.1 + {{- else }} image: registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.10.1 + {{- end }} name: gce-pd-driver volumeMounts: - mountPath: /csi @@ -594,7 +619,11 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + {{- if $.Private }} + image: {{ $.KeosRegUrl }}/sig-storage/csi-node-driver-registrar:v2.7.0 + {{- else }} image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0 + {{- end }} name: csi-driver-registrar volumeMounts: - mountPath: /csi @@ -605,7 +634,11 @@ spec: - --v=5 - --endpoint=unix:/csi/csi.sock - --run-controller-service=false + {{- if $.Private }} + image: {{ $.KeosRegUrl }}/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.10.1 + {{- else }} image: registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.10.1 + {{- end }} name: gce-pd-driver securityContext: privileged: true @@ -672,4 +705,4 @@ metadata: name: pd.csi.storage.gke.io spec: attachRequired: true - podInfoOnMount: false + podInfoOnMount: false \ No newline at end of file diff --git a/pkg/cluster/internal/create/actions/createworker/templates/gcp/30/resourcequota.tmpl b/pkg/cluster/internal/create/actions/createworker/templates/gcp/30/resourcequota.tmpl new file mode 100644 index 0000000000..3e395db4f1 --- /dev/null +++ b/pkg/cluster/internal/create/actions/createworker/templates/gcp/30/resourcequota.tmpl @@ -0,0 +1,18 @@ +--- +apiVersion: v1 +kind: ResourceQuota +metadata: + labels: + addonmanager.kubernetes.io/mode: Reconcile + name: {{ .Name }}-critical-pods + namespace: {{ .Namespace }} +spec: + hard: + pods: 1G + scopeSelector: + matchExpressions: + - operator: In + scopeName: PriorityClass + values: + - system-node-critical + - system-cluster-critical diff --git a/pkg/cluster/internal/providers/docker/stratio/Dockerfile b/pkg/cluster/internal/providers/docker/stratio/Dockerfile index 665d254659..1e0ff2cf80 100644 --- a/pkg/cluster/internal/providers/docker/stratio/Dockerfile +++ b/pkg/cluster/internal/providers/docker/stratio/Dockerfile @@ -7,27 +7,15 @@ ENV CLUSTER_TOPOLOGY=true ENV CLUSTERCTL_DISABLE_VERSIONCHECK=true 
# Tools versions -ENV CLUSTERCTL=v1.5.3 -ENV CLUSTERAWSADM=v2.2.1 +ENV CLUSTERCTL=v1.7.4 +ENV CLUSTERAWSADM=v2.5.2 ENV HELM=v3.13.1 -# Helm charts -ENV HELM_EXPERIMENTAL_OCI=1 -ENV CLOUD_PROVIDER_AWS_CHART=0.0.8 -ENV AWS_LOAD_BALANCER_CONTROLLER_CHART=1.6.2 -ENV AWS_EBS_CSI_DRIVER_CHART=v2.20.0 -ENV AZUREDISK_CSI_DRIVER_CHART=v1.28.7 -ENV AZUREFILE_CSI_DRIVER_CHART=v1.28.7 -ENV CLOUD_PROVIDER_AZURE_CHART=v1.28.5 -ENV CLUSTER_AUTOSCALER_CHART=9.29.1 -ENV TIGERA_OPERATOR_CHART=v3.26.1 -ENV CERT_MANAGER_CHART_VERSION=v1.12.3 - # Cluster-api artifacts ENV CAPI_REPO=/root/.cluster-api/local-repository -ENV CAPA=v2.2.1 +ENV CAPA=v2.5.2 ENV CAPG=v1.6.1 -ENV CAPZ=v1.11.4 +ENV CAPZ=v1.12.4 # Install vim RUN apt-get update && apt-get install -y \ @@ -57,21 +45,9 @@ RUN curl -L https://get.helm.sh/helm-${HELM}-linux-amd64.tar.gz -o /root/helm.ta && rm -rf /root/linux-amd64 /root/helm.tar.gz \ && chmod +x /usr/local/bin/helm \ && helm plugin install https://github.com/hypnoglow/helm-s3.git - -RUN mkdir -p ${CAPI_REPO}/cert-manager/${CERT_MANAGER_CHART_VERSION} \ - && curl -LJ -o ${CAPI_REPO}/cert-manager/${CERT_MANAGER_CHART_VERSION}/cert-manager.crds.yaml https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.crds.yaml - -# Download helm charts -RUN mkdir -p /stratio/helm \ - && for i in $(seq 1 3); do timeout 5 helm pull aws-cloud-controller-manager --version ${CLOUD_PROVIDER_AWS_CHART} --repo https://kubernetes.github.io/cloud-provider-aws --untar --untardir /stratio/helm && break; done \ - && for i in $(seq 1 3); do timeout 5 helm pull aws-load-balancer-controller --version ${AWS_LOAD_BALANCER_CONTROLLER_CHART} --repo https://aws.github.io/eks-charts --untar --untardir /stratio/helm && break; done \ - && for i in $(seq 1 3); do timeout 5 helm pull aws-ebs-csi-driver --version ${AWS_EBS_CSI_DRIVER_CHART} --repo https://kubernetes-sigs.github.io/aws-ebs-csi-driver --untar --untardir /stratio/helm && break; done \ - && for i in $(seq 1 3); do timeout 5 helm pull azuredisk-csi-driver --version ${AZUREDISK_CSI_DRIVER_CHART} --repo https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts --untar --untardir /stratio/helm && break; done \ - && for i in $(seq 1 3); do timeout 5 helm pull azurefile-csi-driver --version ${AZUREFILE_CSI_DRIVER_CHART} --repo https://raw.githubusercontent.com/kubernetes-sigs/azurefile-csi-driver/master/charts --untar --untardir /stratio/helm && break; done \ - && for i in $(seq 1 3); do timeout 5 helm pull cloud-provider-azure --version ${CLOUD_PROVIDER_AZURE_CHART} --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo --untar --untardir /stratio/helm && break; done \ - && for i in $(seq 1 3); do timeout 5 helm pull cluster-autoscaler --version ${CLUSTER_AUTOSCALER_CHART} --repo https://kubernetes.github.io/autoscaler --untar --untardir /stratio/helm && break; done \ - && for i in $(seq 1 3); do timeout 5 helm pull tigera-operator --version ${TIGERA_OPERATOR_CHART} --repo https://docs.projectcalico.org/charts --untar --untardir /stratio/helm && break; done \ - && for i in $(seq 1 3); do timeout 5 helm pull cert-manager --version ${CERT_MANAGER_CHART_VERSION} --repo https://charts.jetstack.io --untar --untardir /stratio/helm && break; done + +# Create helm charts directory +RUN mkdir -p /stratio/helm # Prepare cluster-api private repository RUN mkdir -p ${CAPI_REPO}/infrastructure-aws/${CAPA} ${CAPI_REPO}/infrastructure-gcp/${CAPG} ${CAPI_REPO}/infrastructure-azure/${CAPZ} 
${CAPI_REPO}/cluster-api/${CLUSTERCTL} ${CAPI_REPO}/bootstrap-kubeadm/${CLUSTERCTL} ${CAPI_REPO}/control-plane-kubeadm/${CLUSTERCTL} ${CROSSPLANE_CACHE} \ diff --git a/pkg/cluster/internal/validate/common.go b/pkg/cluster/internal/validate/common.go index ef5a95292f..607007c727 100644 --- a/pkg/cluster/internal/validate/common.go +++ b/pkg/cluster/internal/validate/common.go @@ -33,7 +33,7 @@ const ( MinWorkerNodeNameLength = 3 ) -var k8sVersionSupported = []string{ "1.28", "1.29", "1.30"} +var k8sVersionSupported = []string{"1.28", "1.29", "1.30"} func validateCommon(spec commons.KeosSpec, clusterConfigSpec commons.ClusterConfigSpec) error { var err error @@ -58,6 +58,15 @@ func validateClusterConfig(spec commons.KeosSpec, clusterConfigSpec commons.Clus return errors.New("spec: Invalid value: \"controlplane_config.max_unhealthy\" in clusterConfig: This field cannot be set with managed cluster") } } + for i, chart := range clusterConfigSpec.Charts { + for j, chartCheck := range clusterConfigSpec.Charts { + if i != j { + if chart.Name == chartCheck.Name { + return errors.New("spec: Invalid value: more than one version specified for the same chart " + chart.Name + ": " + chart.Version + ", " + chartCheck.Version) + } + } + } + } return nil } diff --git a/pkg/cmd/kind/version/version.go b/pkg/cmd/kind/version/version.go index b90134a49b..3b301dfea9 100644 --- a/pkg/cmd/kind/version/version.go +++ b/pkg/cmd/kind/version/version.go @@ -55,7 +55,7 @@ func DisplayVersion() string { // versionCore is the core portion of the kind CLI version per Semantic Versioning 2.0.0 -const versionCore = "0.17.0-0.4.0" +const versionCore = "0.17.0-0.6.0" // versionPreRelease is the base pre-release portion of the kind CLI version per // Semantic Versioning 2.0.0 diff --git a/pkg/commons/cluster.go b/pkg/commons/cluster.go index 67c3da74dd..4b0e308846 100644 --- a/pkg/commons/cluster.go +++ b/pkg/commons/cluster.go @@ -37,7 +37,7 @@ var DeviceNameRegex = "^/dev/(sd[a-z]|xvd([a-d]|[a-d][a-z]|[e-z]))$" var AWSVolumeType = "gp3" var AzureVMsVolumeType = "Standard_LRS" -var GCPVMsVolumeType = "pd-standard" +var GCPVMsVolumeType = "pd-ssd" type Resource struct { APIVersion string `yaml:"apiVersion" validate:"required"` @@ -74,6 +74,20 @@ type ClusterConfigSpec struct { WorkersConfig WorkersConfig `yaml:"workers_config"` ClusterOperatorVersion string `yaml:"cluster_operator_version,omitempty"` ClusterOperatorImageVersion string `yaml:"cluster_operator_image_version,omitempty"` + PrivateHelmRepo bool `yaml:"private_helm_repo"` + Charts []Chart `yaml:"charts,omitempty"` +} + +type Chart struct { + Name string `yaml:"name,omitempty"` + Version string `yaml:"version,omitempty"` +} + +type ChartEntry struct { + Repository string + Version string + Namespace string + Pull bool } type ControlplaneConfig struct { @@ -289,7 +303,6 @@ type GCPCredentials struct { PrivateKey string `yaml:"private_key"` ClientEmail string `yaml:"client_email"` ClientID string `yaml:"client_id"` - Region string `yaml:"region"` } type DockerRegistryCredentials struct { @@ -312,9 +325,13 @@ type HelmRepositoryCredentials struct { } type HelmRepository struct { - AuthRequired bool `yaml:"auth_required" validate:"boolean"` - URL string `yaml:"url" validate:"required"` - Type string `yaml:"type,omitempty" validate:"oneof='ecr' 'acr' 'gar' 'generic'"` + AuthRequired bool `yaml:"auth_required" validate:"boolean"` + URL string `yaml:"url" validate:"required"` + Type string `yaml:"type,omitempty" validate:"oneof='ecr' 'acr' 'gar' 'generic'"` +
ReleaseInterval string `yaml:"release_interval,omitempty"` + ReleaseRetries *int `yaml:"release_retries,omitempty"` + ReleaseSourceInterval string `yaml:"release_source_interval,omitempty"` + RepositoryInterval string `yaml:"repository_interval,omitempty"` } type AWS struct { @@ -586,7 +603,6 @@ func GetClusterDescriptor(descriptorPath string) (*KeosCluster, *ClusterConfig, if err != nil { return nil, nil, err } - err = validate.Struct(clusterConfig) if err != nil { return nil, nil, err diff --git a/pkg/commons/utils.go b/pkg/commons/utils.go index 244d445bec..e4970a3a98 100644 --- a/pkg/commons/utils.go +++ b/pkg/commons/utils.go @@ -72,6 +72,10 @@ func EnsureSecretsFile(spec KeosSpec, vaultPassword string, clusterCredentials C helmRepository := clusterCredentials.HelmRepositoryCredentials github_token := clusterCredentials.GithubToken + if spec.InfraProvider == "gcp" || spec.ControlPlane.Managed { + credentials["region"] = spec.Region + } + _, err = os.Stat(secretPath) if err != nil { secretMap := map[string]interface{}{} @@ -240,7 +244,7 @@ func removeKey(nodes []*yaml.Node, key string) []*yaml.Node { return newNodes } -func ExecuteCommand(n nodes.Node, command string, retries int, timeout int, envVars ...[]string) (string, error) { +func ExecuteCommand(n nodes.Node, command string, timeout int, retries int, envVars ...[]string) (string, error) { var err error var raw bytes.Buffer cmd := n.Command("sh", "-c", command) diff --git a/stratio-docs/en/modules/ROOT/assets/attachments/stratio-aws-unmanaged-policy.json b/stratio-docs/en/modules/ROOT/assets/attachments/stratio-aws-unmanaged-policy.json index d199167d01..0055ed0edf 100644 --- a/stratio-docs/en/modules/ROOT/assets/attachments/stratio-aws-unmanaged-policy.json +++ b/stratio-docs/en/modules/ROOT/assets/attachments/stratio-aws-unmanaged-policy.json @@ -7,6 +7,7 @@ "ec2:CreateTags", "ec2:DescribeAddresses", "ec2:DescribeAvailabilityZones", + "ec2:DescribeDhcpOptions", "ec2:DescribeImages", "ec2:DescribeInstances", "ec2:DescribeInstanceTypes", diff --git a/stratio-docs/en/modules/ROOT/assets/attachments/stratio-eks-policy.json b/stratio-docs/en/modules/ROOT/assets/attachments/stratio-eks-policy.json index 3d2d0b45c6..d40fd72011 100644 --- a/stratio-docs/en/modules/ROOT/assets/attachments/stratio-eks-policy.json +++ b/stratio-docs/en/modules/ROOT/assets/attachments/stratio-eks-policy.json @@ -14,6 +14,7 @@ "ec2:DisassociateRouteTable", "ec2:DescribeAddresses", "ec2:DescribeAvailabilityZones", + "ec2:DescribeDhcpOptions", "ec2:DescribeImages", "ec2:DescribeInstances", "ec2:DescribeInstanceTypes", diff --git a/stratio-docs/en/modules/operations-manual/pages/offline-installation/azure-aks-images.adoc b/stratio-docs/en/modules/operations-manual/pages/offline-installation/azure-aks-images.adoc index 780daa8766..6cc62b80b7 100644 --- a/stratio-docs/en/modules/operations-manual/pages/offline-installation/azure-aks-images.adoc +++ b/stratio-docs/en/modules/operations-manual/pages/offline-installation/azure-aks-images.adoc @@ -6,14 +6,14 @@ | Chart | Version | Public image | Version | *Installed by Cluster API - CAPZ* -| *v1.11.4* +| *v1.12.4* | | | | | registry.k8s.io/cluster-api-azure/cluster-api-azure-controller -| v1.11.4 +| v1.12.4 | | diff --git a/stratio-docs/en/modules/operations-manual/pages/offline-installation/azure-vms-images.adoc b/stratio-docs/en/modules/operations-manual/pages/offline-installation/azure-vms-images.adoc index f0875933eb..7d00e95659 100644 --- 
a/stratio-docs/en/modules/operations-manual/pages/offline-installation/azure-vms-images.adoc +++ b/stratio-docs/en/modules/operations-manual/pages/offline-installation/azure-vms-images.adoc @@ -6,14 +6,14 @@ | Chart | Version | Public image | Version | *Installed by Cluster API - CAPZ* -| *v1.11.4* +| *v1.12.4* | | | | | registry.k8s.io/cluster-api-azure/cluster-api-azure-controller -| v1.11.4 +| v1.12.4 | | diff --git a/stratio-docs/es/modules/ROOT/assets/attachments/stratio-aws-unmanaged-policy.json b/stratio-docs/es/modules/ROOT/assets/attachments/stratio-aws-unmanaged-policy.json index d199167d01..0055ed0edf 100644 --- a/stratio-docs/es/modules/ROOT/assets/attachments/stratio-aws-unmanaged-policy.json +++ b/stratio-docs/es/modules/ROOT/assets/attachments/stratio-aws-unmanaged-policy.json @@ -7,6 +7,7 @@ "ec2:CreateTags", "ec2:DescribeAddresses", "ec2:DescribeAvailabilityZones", + "ec2:DescribeDhcpOptions", "ec2:DescribeImages", "ec2:DescribeInstances", "ec2:DescribeInstanceTypes", diff --git a/stratio-docs/es/modules/ROOT/assets/attachments/stratio-eks-policy.json b/stratio-docs/es/modules/ROOT/assets/attachments/stratio-eks-policy.json index 3d2d0b45c6..d40fd72011 100644 --- a/stratio-docs/es/modules/ROOT/assets/attachments/stratio-eks-policy.json +++ b/stratio-docs/es/modules/ROOT/assets/attachments/stratio-eks-policy.json @@ -14,6 +14,7 @@ "ec2:DisassociateRouteTable", "ec2:DescribeAddresses", "ec2:DescribeAvailabilityZones", + "ec2:DescribeDhcpOptions", "ec2:DescribeImages", "ec2:DescribeInstances", "ec2:DescribeInstanceTypes", diff --git a/stratio-docs/es/modules/operations-manual/pages/offline-installation/azure-aks-images.adoc b/stratio-docs/es/modules/operations-manual/pages/offline-installation/azure-aks-images.adoc index 827a12468a..0883423b7f 100644 --- a/stratio-docs/es/modules/operations-manual/pages/offline-installation/azure-aks-images.adoc +++ b/stratio-docs/es/modules/operations-manual/pages/offline-installation/azure-aks-images.adoc @@ -6,14 +6,14 @@ | _Chart_ | Versión | Imagen pública | Versión | *Instalado por Cluster API - CAPZ* -| *v1.11.4* +| *v1.12.4* | | | | | registry.k8s.io/cluster-api-azure/cluster-api-azure-controller -| v1.11.4 +| v1.12.4 | | diff --git a/stratio-docs/es/modules/operations-manual/pages/offline-installation/azure-vms-images.adoc b/stratio-docs/es/modules/operations-manual/pages/offline-installation/azure-vms-images.adoc index 441e989342..af44e20d12 100644 --- a/stratio-docs/es/modules/operations-manual/pages/offline-installation/azure-vms-images.adoc +++ b/stratio-docs/es/modules/operations-manual/pages/offline-installation/azure-vms-images.adoc @@ -6,14 +6,14 @@ | _Chart_ | Versión | Imagen pública | Versión | *Instalado por Cluster API - CAPZ* -| *v1.11.4* +| *v1.12.4* | | | | | registry.k8s.io/cluster-api-azure/cluster-api-azure-controller -| v1.11.4 +| v1.12.4 | |