From 185d220fc3435139667da0bdba0e484cae80b4d9 Mon Sep 17 00:00:00 2001 From: Steve Hipwell Date: Fri, 16 Feb 2024 04:31:29 +0000 Subject: [PATCH] chore(chart): Updated release logic to use SemVer (#5561) Signed-off-by: Steve Hipwell --- .../actions/e2e/install-karpenter/action.yaml | 6 ++-- Makefile | 2 +- charts/karpenter/Chart.yaml | 20 ++++++++++- charts/karpenter/README.md.gotmpl | 2 +- hack/release/common.sh | 14 ++++---- hack/toolchain.sh | 2 +- .../preview/contributing/development-guide.md | 3 +- website/content/en/preview/faq.md | 4 +-- .../getting-started-with-karpenter/_index.md | 16 ++++----- .../scripts/step01-config.sh | 2 +- .../scripts/step02-create-cluster.sh | 8 ++--- .../scripts/step03-iam-cloud-formation.sh | 4 +-- .../scripts/step09-add-prometheus-grafana.sh | 4 +-- .../scripts/step16-delete-node.sh | 2 +- .../scripts/step17-cleanup.sh | 2 +- .../migrating-from-cas/_index.md | 4 +-- .../migrating-from-cas/scripts/step01-env.sh | 2 +- .../scripts/step03-node-policies.sh | 8 ++--- .../scripts/step04-controller-iam.sh | 6 ++-- .../scripts/step05-tag-subnets.sh | 8 ++--- .../scripts/step06-tag-security-groups.sh | 16 ++++----- .../scripts/step09-deploy.sh | 6 ++-- .../scripts/step12-scale-multiple-ng.sh | 6 ++-- .../scripts/step12-scale-single-ng.sh | 4 +-- .../en/preview/reference/cloudformation.md | 4 +-- website/content/en/preview/troubleshooting.md | 10 +++--- .../en/preview/upgrading/compatibility.md | 2 +- .../en/preview/upgrading/upgrade-guide.md | 36 +++++++++---------- website/hugo.yaml | 14 ++++---- 29 files changed, 118 insertions(+), 99 deletions(-) diff --git a/.github/actions/e2e/install-karpenter/action.yaml b/.github/actions/e2e/install-karpenter/action.yaml index edb7f0c6895c..a8463f054508 100644 --- a/.github/actions/e2e/install-karpenter/action.yaml +++ b/.github/actions/e2e/install-karpenter/action.yaml @@ -57,16 +57,16 @@ runs: aws eks update-kubeconfig --name "$CLUSTER_NAME" # Parse minor version to determine whether to enable the webhooks - RELEASE_VERSION_MINOR="${K8S_VERSION#*.}" + K8S_VERSION_MINOR="${K8S_VERSION#*.}" WEBHOOK_ENABLED=false - if (( RELEASE_VRESION_MINOR < 25 )); then + if (( K8S_VERSION_MINOR < 25 )); then WEBHOOK_ENABLED=true fi # Remove service account annotation when dropping support for 1.23 helm upgrade --install karpenter "oci://$ECR_ACCOUNT_ID.dkr.ecr.$ECR_REGION.amazonaws.com/karpenter/snapshot/karpenter" \ -n kube-system \ - --version "v0-$(git rev-parse HEAD)" \ + --version "0-$(git rev-parse HEAD)" \ --set logLevel=debug \ --set webhook.enabled=${WEBHOOK_ENABLED} \ --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::$ACCOUNT_ID:role/karpenter-irsa-$CLUSTER_NAME" \ diff --git a/Makefile b/Makefile index 437361eb8200..1257cfe3cfe3 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,7 @@ HELM_OPTS ?= --set serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn=${K # CR for local builds of Karpenter KARPENTER_NAMESPACE ?= kube-system -KARPENTER_VERSION ?= $(shell git tag --sort=committerdate | tail -1) +KARPENTER_VERSION ?= $(shell git tag --sort=committerdate | tail -1 | cut -d"v" -f2) KO_DOCKER_REPO ?= ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/dev # Common Directories diff --git a/charts/karpenter/Chart.yaml b/charts/karpenter/Chart.yaml index cadda4114984..60da1f8e84c1 100644 --- a/charts/karpenter/Chart.yaml +++ b/charts/karpenter/Chart.yaml @@ -13,4 +13,22 @@ keywords: home: https://karpenter.sh/ icon: 
https://repository-images.githubusercontent.com/278480393/dab059c8-caa1-4b55-aaa7-3d30e47a5616 sources: - - https://github.com/aws/karpenter/ + - https://github.com/aws/karpenter-provider-aws/ +annotations: + artifacthub.io/alternativeName: karpenter-provider-aws + artifacthub.io/crds: | + - kind: EC2NodeClass + version: v1beta1 + name: ec2nodeclasses.karpenter.k8s.aws + displayName: EC2NodeClass + description: EC2NodeClass is the Schema for the EC2NodeClass API. + - kind: NodeClaim + version: v1beta1 + name: nodeclaims.karpenter.sh + displayName: NodeClaim + description: NodeClaim is the Schema for the NodeClaims API. + - kind: NodePool + version: v1beta1 + name: nodepools.karpenter.sh + displayName: NodePool + description: NodePool is the Schema for the NodePools API. diff --git a/charts/karpenter/README.md.gotmpl b/charts/karpenter/README.md.gotmpl index ed86390ec4df..19f7e7754dba 100644 --- a/charts/karpenter/README.md.gotmpl +++ b/charts/karpenter/README.md.gotmpl @@ -14,7 +14,7 @@ You can follow the detailed installation instruction in the [documentation](http ```bash helm upgrade --install --namespace karpenter --create-namespace \ karpenter oci://public.ecr.aws/karpenter/{{ template "chart.name" . }} \ - --version v{{ template "chart.version" . }} \ + --version {{ template "chart.version" . }} \ --set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}" \ --set settings.clusterName=${CLUSTER_NAME} \ --set settings.interruptionQueue=${CLUSTER_NAME} \ diff --git a/hack/release/common.sh b/hack/release/common.sh index 0e058134d596..2a80f15f9ba0 100644 --- a/hack/release/common.sh +++ b/hack/release/common.sh @@ -35,7 +35,7 @@ snapshot() { echo "Release Type: snapshot Release Version: ${RELEASE_VERSION} Commit: $(git rev-parse HEAD) -Helm Chart Version $(helmChartVersion $RELEASE_VERSION)" +Helm Chart Version $(helmChartVersion "${RELEASE_VERSION}")" authenticatePrivateRepo buildImages "${SNAPSHOT_REPO_ECR}" @@ -49,7 +49,7 @@ release() { echo "Release Type: stable Release Version: ${RELEASE_VERSION} Commit: $(git rev-parse HEAD) -Helm Chart Version $(helmChartVersion $RELEASE_VERSION)" +Helm Chart Version $(helmChartVersion "${RELEASE_VERSION}")" authenticate buildImages "${RELEASE_REPO_ECR}" @@ -88,20 +88,20 @@ releaseType(){ RELEASE_VERSION=$1 if [[ "${RELEASE_VERSION}" == v* ]]; then - echo $RELEASE_TYPE_STABLE + echo "${RELEASE_TYPE_STABLE}" else - echo $RELEASE_TYPE_SNAPSHOT + echo "${RELEASE_TYPE_SNAPSHOT}" fi } helmChartVersion(){ RELEASE_VERSION=$1 if [[ $(releaseType "$RELEASE_VERSION") == "$RELEASE_TYPE_STABLE" ]]; then - echo "$RELEASE_VERSION" + echo "${RELEASE_VERSION#v}" fi if [[ $(releaseType "$RELEASE_VERSION") == "$RELEASE_TYPE_SNAPSHOT" ]]; then - echo "v${CURRENT_MAJOR_VERSION}-${RELEASE_VERSION}" + echo "${CURRENT_MAJOR_VERSION}-${RELEASE_VERSION}" fi } @@ -131,7 +131,7 @@ publishHelmChart() { cd charts helm dependency update "${CHART_NAME}" helm lint "${CHART_NAME}" - helm package "${CHART_NAME}" --version "$HELM_CHART_VERSION" + helm package "${CHART_NAME}" --version "${HELM_CHART_VERSION}" helm push "${HELM_CHART_FILE_NAME}" "oci://${RELEASE_REPO}" rm "${HELM_CHART_FILE_NAME}" cd .. 
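The release logic above is the heart of this change: `releaseType` keys off the `v` prefix of a git tag, and `helmChartVersion` now emits a pure SemVer chart version, stripping the `v` from stable tags and prefixing snapshot identifiers with the current major version. Below is a standalone sketch of that mapping, not part of the patch; `chart_version` is a hypothetical helper name, and `CURRENT_MAJOR_VERSION="0"` is assumed to match hack/release/common.sh at the time of this change.

```bash
#!/usr/bin/env bash
# Illustrative only -- mirrors the releaseType()/helmChartVersion() logic
# in hack/release/common.sh after this patch.
CURRENT_MAJOR_VERSION="0"

chart_version() {
  local release_version="$1"
  if [[ "${release_version}" == v* ]]; then
    # Stable release tag: strip the leading "v" so the chart version is pure SemVer.
    echo "${release_version#v}"
  else
    # Snapshot release (e.g. a commit SHA): anchor it to the current major version.
    echo "${CURRENT_MAJOR_VERSION}-${release_version}"
  fi
}

chart_version "v0.34.0"                 # -> 0.34.0
chart_version "$(git rev-parse HEAD)"   # -> 0-<commit-sha>
```

This matters because Helm treats OCI chart versions as strict SemVer; as the troubleshooting docs in this patch note, OCI charts require an exact version match, so `v0.34.0` and `0.34.0` are not interchangeable the way they were with the old chart repository.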
diff --git a/hack/toolchain.sh b/hack/toolchain.sh index 0ebbd86d5207..e1f89bc46bf8 100755 --- a/hack/toolchain.sh +++ b/hack/toolchain.sh @@ -33,7 +33,7 @@ kubebuilder() { sudo mkdir -p ${KUBEBUILDER_ASSETS} sudo chown "${USER}" ${KUBEBUILDER_ASSETS} arch=$(go env GOARCH) - ln -sf $(setup-envtest use -p path "${K8S_VERSION}" --arch="${arch}" --bin-dir="${KUBEBUILDER_ASSETS}")/* ${KUBEBUILDER_ASSETS} + ln -sf "$(setup-envtest use -p path "${K8S_VERSION}" --arch="${arch}" --bin-dir="${KUBEBUILDER_ASSETS}")"/* ${KUBEBUILDER_ASSETS} find $KUBEBUILDER_ASSETS # Install latest binaries for 1.25.x (contains CEL fix) diff --git a/website/content/en/preview/contributing/development-guide.md b/website/content/en/preview/contributing/development-guide.md index e803169d6c5a..2afbb4d2ead7 100644 --- a/website/content/en/preview/contributing/development-guide.md +++ b/website/content/en/preview/contributing/development-guide.md @@ -73,7 +73,8 @@ make test # E2E correctness tests ### Change Log Level -By default, `make apply` will set the log level to debug. You can change the log level by setting the log level in your helm values. +By default, `make apply` will set the log level to debug. You can change the log level by setting `logLevel` in your Helm values. + ```bash --set logLevel=debug ``` diff --git a/website/content/en/preview/faq.md b/website/content/en/preview/faq.md index 23cb4f03aa84..e8c6ad455bbf 100644 --- a/website/content/en/preview/faq.md +++ b/website/content/en/preview/faq.md @@ -26,7 +26,7 @@ Karpenter has multiple mechanisms for configuring the [operating system]({{< ref Karpenter is flexible to multi-architecture configurations using [well known labels]({{< ref "./concepts/scheduling/#supported-labels">}}). ### What RBAC access is required? -All the required RBAC rules can be found in the helm chart template. See [clusterrole-core.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/clusterrole-core.yaml), [clusterrole.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/clusterrole.yaml), [rolebinding.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/rolebinding.yaml), and [role.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/role.yaml) files for details. +All the required RBAC rules can be found in the Helm chart template. See [clusterrole-core.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/clusterrole-core.yaml), [clusterrole.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/clusterrole.yaml), [rolebinding.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/rolebinding.yaml), and [role.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/role.yaml) files for details. ### Can I run Karpenter outside of a Kubernetes cluster? Yes, as long as the controller has network and IAM/RBAC access to the Kubernetes API and your provider API. @@ -202,7 +202,7 @@ Use your existing upgrade mechanisms to upgrade your core add-ons in Kubernetes Karpenter requires proper permissions in the `KarpenterNode IAM Role` and the `KarpenterController IAM Role`.
To upgrade Karpenter to version `$VERSION`, make sure that the `KarpenterNode IAM Role` and the `KarpenterController IAM Role` have the right permission described in `https://karpenter.sh/$VERSION/getting-started/getting-started-with-karpenter/cloudformation.yaml`. -Next, locate `KarpenterController IAM Role` ARN (i.e., ARN of the resource created in [Create the KarpenterController IAM Role](../getting-started/getting-started-with-karpenter/#create-the-karpentercontroller-iam-role)) and pass them to the helm upgrade command. +Next, locate the `KarpenterController IAM Role` ARN (i.e., the ARN of the resource created in [Create the KarpenterController IAM Role](../getting-started/getting-started-with-karpenter/#create-the-karpentercontroller-iam-role)) and pass it to the Helm upgrade command. {{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh" language="bash"%}} For information on upgrading Karpenter, see the [Upgrade Guide]({{< ref "./upgrading/upgrade-guide/" >}}). diff --git a/website/content/en/preview/getting-started/getting-started-with-karpenter/_index.md b/website/content/en/preview/getting-started/getting-started-with-karpenter/_index.md index 03a2c66af11a..d99ade42ba70 100644 --- a/website/content/en/preview/getting-started/getting-started-with-karpenter/_index.md +++ b/website/content/en/preview/getting-started/getting-started-with-karpenter/_index.md @@ -44,9 +44,9 @@ authenticate properly by running `aws sts get-caller-identity`. After setting up the tools, set the Karpenter and Kubernetes version: ```bash -export KARPENTER_NAMESPACE=kube-system -export KARPENTER_VERSION=v0.34.0 -export K8S_VERSION={{< param "latest_k8s_version" >}} +export KARPENTER_NAMESPACE="kube-system" +export KARPENTER_VERSION="{{< param "latest_release_version" >}}" +export K8S_VERSION="{{< param "latest_k8s_version" >}}" ``` Then set the following environment variable: @@ -58,7 +58,7 @@ If you open a new shell to run steps in this procedure, you need to set some or To remind yourself of these values, type: ```bash -echo $KARPENTER_NAMESPACE $KARPENTER_VERSION $K8S_VERSION $CLUSTER_NAME $AWS_DEFAULT_REGION $AWS_ACCOUNT_ID $TEMPOUT +echo "${KARPENTER_NAMESPACE}" "${KARPENTER_VERSION}" "${K8S_VERSION}" "${CLUSTER_NAME}" "${AWS_DEFAULT_REGION}" "${AWS_ACCOUNT_ID}" "${TEMPOUT}" ``` {{% /alert %}} @@ -75,7 +75,7 @@ The following cluster configuration will: * Use [AWS EKS managed node groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for the kube-system and karpenter namespaces. Uncomment fargateProfiles settings (and comment out managedNodeGroups settings) to use Fargate for both namespaces instead. * Set KARPENTER_IAM_ROLE_ARN variables. * Create a role to allow spot instances. -* Run helm to install karpenter +* Run Helm to install Karpenter {{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh" language="bash"%}} @@ -97,11 +97,11 @@ If you need Karpenter to manage the DNS service pods' capacity, this means that {{% /alert %}} {{% alert title="Common Expression Language/Webhooks Notice" color="warning" %}} -Karpenter supports using [Kubernetes Common Expression Language](https://kubernetes.io/docs/reference/using-api/cel/) for validating its Custom Resource Definitions out-of-the-box; however, this feature is not supported on versions of Kubernetes < 1.25.
If you are running an earlier version of Kubernetes, you will need to use the Karpenter admission webhooks for validation instead. You can enable these webhooks with `--set webhook.enabled=true` when applying the Karpenter helm chart. +Karpenter supports using [Kubernetes Common Expression Language](https://kubernetes.io/docs/reference/using-api/cel/) for validating its Custom Resource Definitions out-of-the-box; however, this feature is not supported on versions of Kubernetes < 1.25. If you are running an earlier version of Kubernetes, you will need to use the Karpenter admission webhooks for validation instead. You can enable these webhooks with `--set webhook.enabled=true` when applying the Karpenter Helm chart. {{% /alert %}} {{% alert title="Pod Identity Supports Notice" color="warning" %}} -Karpenter now supports using [Pod Identity](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html) to authenticate AWS SDK to make API requests to AWS services using AWS Identity and Access Management (IAM) permissions. This feature not supported on versions of Kubernetes < 1.24. If you are running an earlier version of Kubernetes, you will need to use the [IAM Roles for Service Accounts(IRSA)](https://docs.aws.amazon.com/emr/latest/EMR-on-EKS-DevelopmentGuide/setting-up-enable-IAM.html) for pod authentication instead. You can enable these IRSA with `--set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}"` when applying the Karpenter helm chart. +Karpenter now supports using [Pod Identity](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html) to authenticate the AWS SDK when making API requests to AWS services using AWS Identity and Access Management (IAM) permissions. This feature is not supported on versions of Kubernetes < 1.24. If you are running an earlier version of Kubernetes, you will need to use [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/emr/latest/EMR-on-EKS-DevelopmentGuide/setting-up-enable-IAM.html) for pod authentication instead. You can enable IRSA with `--set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}"` when applying the Karpenter Helm chart. {{% /alert %}} {{% alert title="Warning" color="warning" %}} @@ -177,7 +177,7 @@ The section below covers advanced installation techniques for installing Karpent ### Private Clusters -You can optionally install Karpenter on a [private cluster](https://docs.aws.amazon.com/eks/latest/userguide/private-clusters.html#private-cluster-requirements) using the `eksctl` installation by setting `privateCluster.enabled` to true in your [ClusterConfig](https://eksctl.io/usage/eks-private-cluster/#eks-fully-private-cluster) and by setting `--set settings.isolatedVPC=true` when installing the `karpenter` helm chart. +You can optionally install Karpenter on a [private cluster](https://docs.aws.amazon.com/eks/latest/userguide/private-clusters.html#private-cluster-requirements) using the `eksctl` installation by setting `privateCluster.enabled` to true in your [ClusterConfig](https://eksctl.io/usage/eks-private-cluster/#eks-fully-private-cluster) and by setting `--set settings.isolatedVPC=true` when installing the `karpenter` Helm chart.
```bash privateCluster: diff --git a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step01-config.sh b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step01-config.sh index a3af512d02ac..bbaa418df349 100755 --- a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step01-config.sh +++ b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step01-config.sh @@ -2,4 +2,4 @@ export AWS_PARTITION="aws" # if you are not using standard partitions, you may n export CLUSTER_NAME="${USER}-karpenter-demo" export AWS_DEFAULT_REGION="us-west-2" export AWS_ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)" -export TEMPOUT=$(mktemp) +export TEMPOUT="$(mktemp)" diff --git a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh index 7a7b57c05a4c..cdad96eeca23 100755 --- a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh +++ b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh @@ -1,4 +1,4 @@ -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \ +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \ && aws cloudformation deploy \ --stack-name "Karpenter-${CLUSTER_NAME}" \ --template-file "${TEMPOUT}" \ @@ -26,7 +26,7 @@ iam: - arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME} ## Optionally run on fargate or on k8s 1.23 -# Pod Identity is not available on fargate +# Pod Identity is not available on fargate # https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html # iam: # withOIDC: true @@ -67,7 +67,7 @@ addons: # - namespace: "${KARPENTER_NAMESPACE}" EOF -export CLUSTER_ENDPOINT="$(aws eks describe-cluster --name ${CLUSTER_NAME} --query "cluster.endpoint" --output text)" +export CLUSTER_ENDPOINT="$(aws eks describe-cluster --name "${CLUSTER_NAME}" --query "cluster.endpoint" --output text)" export KARPENTER_IAM_ROLE_ARN="arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAME}-karpenter" -echo $CLUSTER_ENDPOINT $KARPENTER_IAM_ROLE_ARN +echo "${CLUSTER_ENDPOINT} ${KARPENTER_IAM_ROLE_ARN}" diff --git a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh index b8e610f7bee6..54e826db269b 100755 --- a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh +++ b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh @@ -1,6 +1,6 @@ -TEMPOUT=$(mktemp) +TEMPOUT="$(mktemp)" -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \ +curl -fsSL 
https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \ && aws cloudformation deploy \ --stack-name "Karpenter-${CLUSTER_NAME}" \ --template-file "${TEMPOUT}" \ diff --git a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh index a539f7491e10..ddb4cb02e7f7 100755 --- a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh +++ b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh @@ -4,8 +4,8 @@ helm repo update kubectl create namespace monitoring -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/prometheus-values.yaml | tee prometheus-values.yaml +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/prometheus-values.yaml | tee prometheus-values.yaml helm install --namespace monitoring prometheus prometheus-community/prometheus --values prometheus-values.yaml -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/grafana-values.yaml | tee grafana-values.yaml +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/grafana-values.yaml | tee grafana-values.yaml helm install --namespace monitoring grafana grafana-charts/grafana --values grafana-values.yaml diff --git a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step16-delete-node.sh b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step16-delete-node.sh index 9d431160dda0..93b1ac792ade 100755 --- a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step16-delete-node.sh +++ b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step16-delete-node.sh @@ -1 +1 @@ -kubectl delete node $NODE_NAME +kubectl delete node "${NODE_NAME}" diff --git a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step17-cleanup.sh b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step17-cleanup.sh index dc84b673389f..f694ac3797f4 100755 --- a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step17-cleanup.sh +++ b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step17-cleanup.sh @@ -1,6 +1,6 @@ helm uninstall karpenter --namespace "${KARPENTER_NAMESPACE}" aws cloudformation delete-stack --stack-name "Karpenter-${CLUSTER_NAME}" -aws ec2 describe-launch-templates --filters Name=tag:karpenter.k8s.aws/cluster,Values=${CLUSTER_NAME} | +aws ec2 describe-launch-templates --filters "Name=tag:karpenter.k8s.aws/cluster,Values=${CLUSTER_NAME}" | jq -r ".LaunchTemplates[].LaunchTemplateName" | xargs -I{} aws ec2 delete-launch-template --launch-template-name {} eksctl delete cluster --name "${CLUSTER_NAME}" diff --git 
a/website/content/en/preview/getting-started/migrating-from-cas/_index.md b/website/content/en/preview/getting-started/migrating-from-cas/_index.md index cfd3b82fec51..24ab981ebbfc 100644 --- a/website/content/en/preview/getting-started/migrating-from-cas/_index.md +++ b/website/content/en/preview/getting-started/migrating-from-cas/_index.md @@ -92,10 +92,10 @@ One for your Karpenter node role and one for your existing node group. First set the Karpenter release you want to deploy. ```bash -export KARPENTER_VERSION={{< param "latest_release_version" >}} +export KARPENTER_VERSION="{{< param "latest_release_version" >}}" ``` -We can now generate a full Karpenter deployment yaml from the helm chart. +We can now generate a full Karpenter deployment yaml from the Helm chart. {{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step08-generate-chart.sh" language="bash" %}} diff --git a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step01-env.sh b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step01-env.sh index 20645685137b..f456eddc75ee 100644 --- a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step01-env.sh +++ b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step01-env.sh @@ -1,6 +1,6 @@ AWS_PARTITION="aws" # if you are not using standard partitions, you may need to configure to aws-cn / aws-us-gov AWS_REGION="$(aws configure list | grep region | tr -s " " | cut -d" " -f3)" -OIDC_ENDPOINT="$(aws eks describe-cluster --name ${CLUSTER_NAME} \ +OIDC_ENDPOINT="$(aws eks describe-cluster --name "${CLUSTER_NAME}" \ --query "cluster.identity.oidc.issuer" --output text)" AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' \ --output text) diff --git a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step03-node-policies.sh b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step03-node-policies.sh index d57f79039d04..fbc5455e541b 100644 --- a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step03-node-policies.sh +++ b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step03-node-policies.sh @@ -1,11 +1,11 @@ aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \ - --policy-arn arn:${AWS_PARTITION}:iam::aws:policy/AmazonEKSWorkerNodePolicy + --policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonEKSWorkerNodePolicy" aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \ - --policy-arn arn:${AWS_PARTITION}:iam::aws:policy/AmazonEKS_CNI_Policy + --policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonEKS_CNI_Policy" aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \ - --policy-arn arn:${AWS_PARTITION}:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly + --policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \ - --policy-arn arn:${AWS_PARTITION}:iam::aws:policy/AmazonSSMManagedInstanceCore + --policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonSSMManagedInstanceCore" diff --git a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step04-controller-iam.sh b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step04-controller-iam.sh index 70c59e826393..cc3d7f929986 100644 --- a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step04-controller-iam.sh 
+++ b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step04-controller-iam.sh @@ -19,7 +19,7 @@ cat << EOF > controller-trust-policy.json } EOF -aws iam create-role --role-name KarpenterControllerRole-${CLUSTER_NAME} \ +aws iam create-role --role-name "KarpenterControllerRole-${CLUSTER_NAME}" \ --assume-role-policy-document file://controller-trust-policy.json cat << EOF > controller-policy.json @@ -138,6 +138,6 @@ cat << EOF > controller-policy.json } EOF -aws iam put-role-policy --role-name KarpenterControllerRole-${CLUSTER_NAME} \ - --policy-name KarpenterControllerPolicy-${CLUSTER_NAME} \ +aws iam put-role-policy --role-name "KarpenterControllerRole-${CLUSTER_NAME}" \ + --policy-name "KarpenterControllerPolicy-${CLUSTER_NAME}" \ --policy-document file://controller-policy.json diff --git a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step05-tag-subnets.sh b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step05-tag-subnets.sh index de972ea2bddd..47df188dc87d 100644 --- a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step05-tag-subnets.sh +++ b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step05-tag-subnets.sh @@ -1,6 +1,6 @@ -for NODEGROUP in $(aws eks list-nodegroups --cluster-name ${CLUSTER_NAME} \ - --query 'nodegroups' --output text); do aws ec2 create-tags \ +for NODEGROUP in $(aws eks list-nodegroups --cluster-name "${CLUSTER_NAME}" --query 'nodegroups' --output text); do + aws ec2 create-tags \ --tags "Key=karpenter.sh/discovery,Value=${CLUSTER_NAME}" \ - --resources $(aws eks describe-nodegroup --cluster-name ${CLUSTER_NAME} \ - --nodegroup-name $NODEGROUP --query 'nodegroup.subnets' --output text ) + --resources "$(aws eks describe-nodegroup --cluster-name "${CLUSTER_NAME}" \ + --nodegroup-name "${NODEGROUP}" --query 'nodegroup.subnets' --output text )" done diff --git a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step06-tag-security-groups.sh b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step06-tag-security-groups.sh index 397e40904cee..c63bde3b78dc 100644 --- a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step06-tag-security-groups.sh +++ b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step06-tag-security-groups.sh @@ -1,22 +1,22 @@ -NODEGROUP=$(aws eks list-nodegroups --cluster-name ${CLUSTER_NAME} \ +NODEGROUP=$(aws eks list-nodegroups --cluster-name "${CLUSTER_NAME}" \ --query 'nodegroups[0]' --output text) -LAUNCH_TEMPLATE=$(aws eks describe-nodegroup --cluster-name ${CLUSTER_NAME} \ - --nodegroup-name ${NODEGROUP} --query 'nodegroup.launchTemplate.{id:id,version:version}' \ +LAUNCH_TEMPLATE=$(aws eks describe-nodegroup --cluster-name "${CLUSTER_NAME}" \ + --nodegroup-name "${NODEGROUP}" --query 'nodegroup.launchTemplate.{id:id,version:version}' \ --output text | tr -s "\t" ",") # If your EKS setup is configured to use only Cluster security group, then please execute - SECURITY_GROUPS=$(aws eks describe-cluster \ - --name ${CLUSTER_NAME} --query "cluster.resourcesVpcConfig.clusterSecurityGroupId" --output text) + --name "${CLUSTER_NAME}" --query "cluster.resourcesVpcConfig.clusterSecurityGroupId" --output text) # If your setup uses the security groups in the Launch template of a managed node group, then : -SECURITY_GROUPS=$(aws ec2 describe-launch-template-versions \ - --launch-template-id ${LAUNCH_TEMPLATE%,*} --versions ${LAUNCH_TEMPLATE#*,} \ 
+SECURITY_GROUPS="$(aws ec2 describe-launch-template-versions \ + --launch-template-id "${LAUNCH_TEMPLATE%,*}" --versions "${LAUNCH_TEMPLATE#*,}" \ --query 'LaunchTemplateVersions[0].LaunchTemplateData.[NetworkInterfaces[0].Groups||SecurityGroupIds]' \ - --output text) + --output text)" aws ec2 create-tags \ --tags "Key=karpenter.sh/discovery,Value=${CLUSTER_NAME}" \ - --resources ${SECURITY_GROUPS} + --resources "${SECURITY_GROUPS}" diff --git a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step09-deploy.sh b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step09-deploy.sh index 51714d78f6dd..e46742fd22ea 100644 --- a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step09-deploy.sh +++ b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step09-deploy.sh @@ -1,8 +1,8 @@ kubectl create namespace "${KARPENTER_NAMESPACE}" || true kubectl create -f \ - https://raw.githubusercontent.com/aws/karpenter-provider-aws/${KARPENTER_VERSION}/pkg/apis/crds/karpenter.sh_nodepools.yaml + "https://raw.githubusercontent.com/aws/karpenter-provider-aws/v${KARPENTER_VERSION}/pkg/apis/crds/karpenter.sh_nodepools.yaml" kubectl create -f \ - https://raw.githubusercontent.com/aws/karpenter-provider-aws/${KARPENTER_VERSION}/pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml + "https://raw.githubusercontent.com/aws/karpenter-provider-aws/v${KARPENTER_VERSION}/pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml" kubectl create -f \ - https://raw.githubusercontent.com/aws/karpenter-provider-aws/${KARPENTER_VERSION}/pkg/apis/crds/karpenter.sh_nodeclaims.yaml + "https://raw.githubusercontent.com/aws/karpenter-provider-aws/v${KARPENTER_VERSION}/pkg/apis/crds/karpenter.sh_nodeclaims.yaml" kubectl apply -f karpenter.yaml diff --git a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step12-scale-multiple-ng.sh b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step12-scale-multiple-ng.sh index d88a6d5c7236..a28c9759bcdb 100644 --- a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step12-scale-multiple-ng.sh +++ b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step12-scale-multiple-ng.sh @@ -1,5 +1,5 @@ -for NODEGROUP in $(aws eks list-nodegroups --cluster-name ${CLUSTER_NAME} \ - --query 'nodegroups' --output text); do aws eks update-nodegroup-config --cluster-name ${CLUSTER_NAME} \ - --nodegroup-name ${NODEGROUP} \ +for NODEGROUP in $(aws eks list-nodegroups --cluster-name "${CLUSTER_NAME}" \ + --query 'nodegroups' --output text); do aws eks update-nodegroup-config --cluster-name "${CLUSTER_NAME}" \ + --nodegroup-name "${NODEGROUP}" \ --scaling-config "minSize=1,maxSize=1,desiredSize=1" done diff --git a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step12-scale-single-ng.sh b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step12-scale-single-ng.sh index 51ad964c28a7..efce68394779 100644 --- a/website/content/en/preview/getting-started/migrating-from-cas/scripts/step12-scale-single-ng.sh +++ b/website/content/en/preview/getting-started/migrating-from-cas/scripts/step12-scale-single-ng.sh @@ -1,3 +1,3 @@ -aws eks update-nodegroup-config --cluster-name ${CLUSTER_NAME} \ - --nodegroup-name ${NODEGROUP} \ +aws eks update-nodegroup-config --cluster-name "${CLUSTER_NAME}" \ + --nodegroup-name "${NODEGROUP}" \ --scaling-config "minSize=2,maxSize=2,desiredSize=2" diff --git 
a/website/content/en/preview/reference/cloudformation.md b/website/content/en/preview/reference/cloudformation.md index 707349166ac3..1a984bbdb671 100644 --- a/website/content/en/preview/reference/cloudformation.md +++ b/website/content/en/preview/reference/cloudformation.md @@ -17,8 +17,8 @@ These descriptions should allow you to understand: To download a particular version of `cloudformation.yaml`, set the version and use `curl` to pull the file to your local system: ```bash -export KARPENTER_VERSION={{< param "latest_release_version" >}} -curl https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > cloudformation.yaml +export KARPENTER_VERSION="{{< param "latest_release_version" >}}" +curl https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > cloudformation.yaml ``` Following some header information, the rest of the `cloudformation.yaml` file describes the resources that CloudFormation deploys. diff --git a/website/content/en/preview/troubleshooting.md b/website/content/en/preview/troubleshooting.md index c33f04264e3b..7bc963e05115 100644 --- a/website/content/en/preview/troubleshooting.md +++ b/website/content/en/preview/troubleshooting.md @@ -29,7 +29,7 @@ Update the zap-logger-config "level" and restart the Karpenter pod(s) to enable #### Debug logging via Helm -You can enable debug logging during installation with helm by setting the option `logLevel`. +You can enable debug logging during installation with Helm by setting the option `logLevel`. ``` helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter \ @@ -89,18 +89,18 @@ To do so on AWS increase the `minimum` and `desired` parameters on the node grou ### Helm Error When Pulling the Chart -If Helm is showing an error when trying to install Karpenter helm charts: +If Helm is showing an error when trying to install Karpenter Helm charts: - Ensure you are using a newer Helm version, Helm started supporting OCI images since v3.8.0. - Helm does not have an `helm repo add` concept in OCI, so to install Karpenter you no longer need this - Verify that the image you are trying to pull actually exists in [gallery.ecr.aws/karpenter](https://gallery.ecr.aws/karpenter/karpenter) -- Sometimes Helm generates a generic error, you can add the --debug switch to any of the helm commands in this doc for more verbose error messages +- Sometimes Helm generates a generic error; you can add the --debug switch to any of the Helm commands in this doc for more verbose error messages - If you are getting a 403 forbidden error, you can try `docker logout public.ecr.aws` as explained [here](https://docs.aws.amazon.com/AmazonECR/latest/public/public-troubleshooting.html) -- If you are receiving this error: `Error: failed to download "oci://public.ecr.aws/karpenter/karpenter" at version "0.17.0"`, then you need to prepend a `v` to the version number: `v0.17.0`. Before Karpenter moved to OCI helm charts (pre-v0.17.0), both `v0.16.0` and `0.16.0` would work, but OCI charts require an exact version match. +- If you are receiving this error: `Error: failed to download "oci://public.ecr.aws/karpenter/karpenter" at version "0.17.0"`, then you need to prepend a `v` to the version number: `v0.17.0`.
Before Karpenter moved to OCI Helm charts (pre-v0.17.0), both `v0.16.0` and `0.16.0` would work, but OCI charts require an exact version match. ### Helm Error when installing the `karpenter-crd` chart -Karpenter v0.26.1+ introduced the `karpenter-crd` helm chart. When installing this chart on your cluster, if you have previously added the Karpenter CRDs to your cluster through the `karpenter` controller chart or through `kubectl replace`, Helm will reject the install of the chart due to `invalid ownership metadata`. +Karpenter v0.26.1+ introduced the `karpenter-crd` Helm chart. When installing this chart on your cluster, if you have previously added the Karpenter CRDs to your cluster through the `karpenter` controller chart or through `kubectl replace`, Helm will reject the install of the chart due to `invalid ownership metadata`. - In the case of `invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"` run: diff --git a/website/content/en/preview/upgrading/compatibility.md b/website/content/en/preview/upgrading/compatibility.md index 296fd9475134..e2e8c55a3700 100644 --- a/website/content/en/preview/upgrading/compatibility.md +++ b/website/content/en/preview/upgrading/compatibility.md @@ -35,7 +35,7 @@ For more information on Karpenter's support for these keys, view [this tracking {{% /alert %}} {{% alert title="Note" color="warning" %}} -Karpenter supports using [Kubernetes Common Expression Language](https://kubernetes.io/docs/reference/using-api/cel/) for validating its Custom Resource Definitions out-of-the-box; however, this feature is not supported on versions of Kubernetes < 1.25. If you are running an earlier version of Kubernetes, you will need to use the Karpenter admission webhooks for validation instead. You can enable these webhooks with `--set webhook.enabled=true` when applying the Karpenter helm chart. +Karpenter supports using [Kubernetes Common Expression Language](https://kubernetes.io/docs/reference/using-api/cel/) for validating its Custom Resource Definitions out-of-the-box; however, this feature is not supported on versions of Kubernetes < 1.25. If you are running an earlier version of Kubernetes, you will need to use the Karpenter admission webhooks for validation instead. You can enable these webhooks with `--set webhook.enabled=true` when applying the Karpenter Helm chart. {{% /alert %}} ## Compatibility issues diff --git a/website/content/en/preview/upgrading/upgrade-guide.md b/website/content/en/preview/upgrading/upgrade-guide.md index 4af14699c5f6..2b908fbbd36c 100644 --- a/website/content/en/preview/upgrading/upgrade-guide.md +++ b/website/content/en/preview/upgrading/upgrade-guide.md @@ -13,19 +13,19 @@ This guide contains information needed to upgrade to the latest release of Karpe ### CRD Upgrades Karpenter ships with a few Custom Resource Definitions (CRDs). These CRDs are published: -* As an independent helm chart [karpenter-crd](https://gallery.ecr.aws/karpenter/karpenter-crd) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter-crd) that can be used by Helm to manage the lifecycle of these CRDs. To upgrade or install `karpenter-crd` run: +* As an independent Helm chart [karpenter-crd](https://gallery.ecr.aws/karpenter/karpenter-crd) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter-crd) that can be used by Helm to manage the lifecycle of these CRDs. 
To upgrade or install `karpenter-crd` run: ```bash KARPENTER_NAMESPACE=kube-system - helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version vx.y.z --namespace "${KARPENTER_NAMESPACE}" --create-namespace + helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version x.y.z --namespace "${KARPENTER_NAMESPACE}" --create-namespace ``` {{% alert title="Note" color="warning" %}} If you get the error `invalid ownership metadata; label validation error:` while installing the `karpenter-crd` chart from an older version of Karpenter, follow the [Troubleshooting Guide]({{}}) for details on how to resolve these errors. {{% /alert %}} -* As part of the helm chart [karpenter](https://gallery.ecr.aws/karpenter/karpenter) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter/crds). Helm [does not manage the lifecycle of CRDs using this method](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/), the tool will only install the CRD during the first installation of the helm chart. Subsequent chart upgrades will not add or remove CRDs, even if the CRDs have changed. When CRDs are changed, we will make a note in the version's upgrade guide. +* As part of the Helm chart [karpenter](https://gallery.ecr.aws/karpenter/karpenter) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter/crds). Helm [does not manage the lifecycle of CRDs using this method](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/); the tool will only install the CRD during the first installation of the Helm chart. Subsequent chart upgrades will not add or remove CRDs, even if the CRDs have changed. When CRDs are changed, we will make a note in the version's upgrade guide. -In general, you can reapply the CRDs in the `crds` directory of the Karpenter helm chart: +In general, you can reapply the CRDs in the `crds` directory of the Karpenter Helm chart: ```shell kubectl apply -f https://raw.githubusercontent.com/aws/karpenter{{< githubRelRef >}}pkg/apis/crds/karpenter.sh_nodepools.yaml @@ -69,7 +69,7 @@ v0.33.0+ _only_ supports Karpenter v1beta1 APIs and will not work with existing * `v0.33.x` disables mutating and validating webhooks by default in favor of using [Common Expression Language for CRD validation](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation). The Common Expression Language Validation Feature [is enabled by default on EKS 1.25](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-rules). If you are using Kubernetes version >= 1.25, no further action is required. If you are using a Kubernetes version below 1.25, you now need to set `DISABLE_WEBHOOK=false` in your container environment variables or `--set webhook.enabled=true` if using Helm. View the [Webhook Support Deprecated in Favor of CEL Section of the v1beta1 Migration Guide]({{}}). * `v0.33.x` drops support for passing settings through the `karpenter-global-settings` ConfigMap. You should pass settings through the container environment variables in the Karpenter deployment manifest. View the [Global Settings Section of the v1beta1 Migration Guide]({{}}) for more details. * `v0.33.x` enables `Drift=true` by default in the `FEATURE_GATES`.
If you previously didn't enable the feature gate, Karpenter will now check if there is a difference between the desired state of your nodes declared in your NodePool and the actual state of your nodes. View the [Drift Section of Disruption Conceptual Docs]({{}}) for more details. -* `v0.33.x` drops looking up the `zap-logger-config` through ConfigMap discovery. Instead, Karpenter now expects the logging config to be mounted on the filesystem if you are using this to configure Zap logging. This is not enabled by default, but can be enabled through `--set logConfig.enabled=true` in the helm values. If you are setting any values in the `logConfig` from the `v0.32.x` upgrade, such as `logConfig.logEncoding`, note that you will have to explicitly set `logConfig.enabled=true` alongside it. Also, note that setting the Zap logging config is a deprecated feature in beta and is planned to be dropped at v1. View the [Logging Configuration Section of the v1beta1 Migration Guide]({{}}) for more details. +* `v0.33.x` drops looking up the `zap-logger-config` through ConfigMap discovery. Instead, Karpenter now expects the logging config to be mounted on the filesystem if you are using this to configure Zap logging. This is not enabled by default, but can be enabled through `--set logConfig.enabled=true` in the Helm values. If you are setting any values in the `logConfig` from the `v0.32.x` upgrade, such as `logConfig.logEncoding`, note that you will have to explicitly set `logConfig.enabled=true` alongside it. Also, note that setting the Zap logging config is a deprecated feature in beta and is planned to be dropped at v1. View the [Logging Configuration Section of the v1beta1 Migration Guide]({{}}) for more details. * `v0.33.x` change the default `LOG_LEVEL` from `debug` to `info` by default. If you are still enabling logging configuration through the `zap-logger-config`, no action is required. * `v0.33.x` drops support for comma delimited lists on tags for `SubnetSelectorTerm`, `SecurityGroupsSelectorTerm`, and `AMISelectorTerm`. Karpenter now supports multiple terms for each of the selectors which means that we can specify a more explicit OR-based constraint through separate terms rather than a comma-delimited list of values. @@ -86,7 +86,7 @@ Note that if you are rolling back after upgrading to v0.32.0, note that v0.31.4 * Karpenter now serves the webhook prometheus metrics server on port `8001`. If this port is already in-use on the pod or you are running in `hostNetworking` mode, you may need to change this port value. You can configure this port value through the `WEBHOOK_METRICS_PORT` environment variable or the `webhook.metrics.port` value if installing via Helm. * Karpenter now exposes the ability to disable webhooks through the `webhook.enabled=false` value. This value will disable the webhook server and will prevent any permissions, mutating or validating webhook configurations from being deployed to the cluster. * Karpenter now moves all logging configuration for the Zap logger into the `logConfig` values block. Configuring Karpenter logging with this mechanism _is_ deprecated and will be dropped at v1. Karpenter now only surfaces logLevel through the `logLevel` helm value. If you need more advanced configuration due to log parsing constraints, we recommend configuring your log parser to handle Karpenter's Zap JSON logging. -* The default log encoding changed from `console` to `json`. If you were previously not setting the type of log encoding, this default will change with the helm chart. 
If you were setting the value through `logEncoding`, this value will continue to work until v0.33.x but it is deprecated in favor of `logConfig.logEncoding` +* The default log encoding changed from `console` to `json`. If you were previously not setting the type of log encoding, this default will change with the Helm chart. If you were setting the value through `logEncoding`, this value will continue to work until v0.33.x but it is deprecated in favor of `logConfig.logEncoding`. * Karpenter now uses the `karpenter.sh/disruption:NoSchedule=disrupting` taint instead of the upstream `node.kubernetes.io/unschedulable` taint for nodes spawned with a NodePool to prevent pods from scheduling to nodes being disrupted. Pods that previously tolerated the `node.kubernetes.io/unschedulable` taint that previously weren't evicted during termination will now be evicted. This most notably affects DaemonSets, which have the `node.kubernetes.io/unschedulable` toleration by default, where Karpenter will now remove these pods during termination. If you want your specific pods to not be evicted when nodes are scaled down, you should add a toleration to the pods with the following: `Key=karpenter.sh/disruption, Effect=NoSchedule, Operator=Equals, Values=disrupting`. * Note: Karpenter will continue to use the old `node.kubernetes.io/unschedulable` taint for nodes spawned with a Provisioner. @@ -98,7 +98,7 @@ Note that if you are rolling back after upgrading to v0.32.0, note that v0.31.4 * Karpenter will now [statically drift]({{}}) on both Provisioner and AWSNodeTemplate Fields. For Provisioner Static Drift, the `karpenter.sh/provisioner-hash` annotation must be present on both the Provisioner and Machine. For AWSNodeTemplate drift, the `karpenter.k8s.aws/nodetemplate-hash` annotation must be present on the AWSNodeTemplate and Machine. Karpenter will not add these annotations to pre-existing nodes, so each of these nodes will need to be recycled one time for the annotations to be added. * Karpenter will now fail validation on AWSNodeTemplates and Provisioner `spec.provider` that have `amiSelectors`, `subnetSelectors`, or `securityGroupSelectors` set with a combination of id selectors (`aws-ids`, `aws::ids`) and other selectors. -* Karpenter now statically sets the `securityContext` at both the pod and container-levels and doesn't allow override values to be passed through the helm chart. This change was made to adhere to [Restricted Pod Security Standard](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted), which follows pod hardening best practices. +* Karpenter now statically sets the `securityContext` at both the pod and container-levels and doesn't allow override values to be passed through the Helm chart. This change was made to adhere to [Restricted Pod Security Standard](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted), which follows pod hardening best practices. {{% alert title="Note" color="primary" %}} If you have sidecar containers configured to run alongside Karpenter that cannot tolerate the [pod-wide `securityContext` constraints](https://github.com/aws/karpenter/blob/v0.30.0/charts/karpenter/templates/deployment.yaml#L40), you will need to specify overrides to the sidecar `securityContext` in your deployment.
@@ -110,7 +110,7 @@ If you have sidecar containers configured to run alongside Karpenter that cannot Karpenter `v0.29.1` contains a [file descriptor and memory leak bug](https://github.com/aws/karpenter/issues/4296) that leads to Karpenter getting OOMKilled and restarting at the point that it hits its memory or file descriptor limit. Karpenter `>v0.29.2` fixes this leak. {{% /alert %}} -* Karpenter has changed the default metrics service port from 8080 to 8000 and the default webhook service port from 443 to 8443. In `v0.28.0`, the Karpenter pod port was changed to 8000, but referenced the service by name, allowing users to scrape the service at port 8080 for metrics. `v0.29.0` aligns the two ports so that service and pod metrics ports are the same. These ports are set by the `controller.metrics.port` and `webhook.port` helm chart values, so if you have previously set these to non-default values, you may need to update your Prometheus scraper to match these new values. +* Karpenter has changed the default metrics service port from 8080 to 8000 and the default webhook service port from 443 to 8443. In `v0.28.0`, the Karpenter pod port was changed to 8000, but referenced the service by name, allowing users to scrape the service at port 8080 for metrics. `v0.29.0` aligns the two ports so that service and pod metrics ports are the same. These ports are set by the `controller.metrics.port` and `webhook.port` Helm chart values, so if you have previously set these to non-default values, you may need to update your Prometheus scraper to match these new values. * Karpenter will now reconcile nodes that are drifted due to their Security Groups or their Subnets. If your AWSNodeTemplate's Security Groups differ from the Security Groups used for an instance, Karpenter will consider it drifted. If the Subnet used by an instance is not contained in the allowed list of Subnets for an AWSNodeTemplate, Karpenter will also consider it drifted. * Since Karpenter uses tags for discovery of Subnets and SecurityGroups, check the [Threat Model]({{}}) to see how to manage this IAM Permission. @@ -121,7 +121,7 @@ Karpenter `v0.29.1` contains a [file descriptor and memory leak bug](https://git Karpenter `v0.28.0` is incompatible with Kubernetes version 1.26+, which can result in additional node scale outs when using `--cloudprovider=external`, which is the default for the EKS Optimized AMI. See: https://github.com/aws/karpenter-core/pull/375. Karpenter `>v0.28.1` fixes this issue and is compatible with Kubernetes version 1.26+. {{% /alert %}} -* The `extraObjects` value is now removed from the Helm chart. Having this value in the chart proved to not work in the majority of Karpenter installs and often led to anti-patterns, where the Karpenter resources installed to manage Karpenter's capacity were directly tied to the install of the Karpenter controller deployments. The Karpenter team recommends that, if you want to install Karpenter manifests alongside the Karpenter helm chart, to do so by creating a separate chart for the manifests, creating a dependency on the controller chart. +* The `extraObjects` value is now removed from the Helm chart. Having this value in the chart proved to not work in the majority of Karpenter installs and often led to anti-patterns, where the Karpenter resources installed to manage Karpenter's capacity were directly tied to the install of the Karpenter controller deployments. 
The Karpenter team recommends that, if you want to install Karpenter manifests alongside the Karpenter Helm chart, you do so by creating a separate chart for the manifests, creating a dependency on the controller chart. * The `aws.nodeNameConvention` setting is now removed from the [`karpenter-global-settings`]({{}}) ConfigMap. Because Karpenter is now driving its orchestration of capacity through Machines, it no longer needs to know the node name, making this setting obsolete. Karpenter ignores configuration that it doesn't recognize in the [`karpenter-global-settings`]({{}}) ConfigMap, so leaving the `aws.nodeNameConvention` in the ConfigMap will simply cause this setting to be ignored. * Karpenter now defines a set of "restricted tags" which can't be overridden with custom tagging in the AWSNodeTemplate or in the [`karpenter-global-settings`]({{}}) ConfigMap. If you are currently using any of these tag overrides when tagging your instances, webhook validation will now fail. These tags include: @@ -180,8 +180,8 @@ kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh ### Upgrading to v0.26.0+ * The `karpenter.sh/do-not-evict` annotation no longer blocks node termination when running `kubectl delete node`. This annotation on pods will only block automatic deprovisioning that is considered "voluntary," that is, disruptions that can be avoided. Disruptions that Karpenter deems as "involuntary" and will ignore the `karpenter.sh/do-not-evict` annotation include spot interruption and manual deletion of the node. See [Disabling Deprovisioning]({{}}) for more details. -* Default resources `requests` and `limits` are removed from the Karpenter's controller deployment through the Helm chart. If you have not set custom resource `requests` or `limits` in your helm values and are using Karpenter's defaults, you will now need to set these values in your helm chart deployment. -* The `controller.image` value in the helm chart has been broken out to a map consisting of `controller.image.repository`, `controller.image.tag`, and `controller.image.digest`. If manually overriding the `controller.image`, you will need to update your values to the new design. +* Default resources `requests` and `limits` are removed from Karpenter's controller deployment through the Helm chart. If you have not set custom resource `requests` or `limits` in your Helm values and are using Karpenter's defaults, you will now need to set these values in your Helm chart deployment. +* The `controller.image` value in the Helm chart has been broken out to a map consisting of `controller.image.repository`, `controller.image.tag`, and `controller.image.digest`. If manually overriding the `controller.image`, you will need to update your values to the new design. ### Upgrading to v0.25.0+ * Cluster Endpoint can now be automatically discovered. If you are using Amazon Elastic Kubernetes Service (EKS), you can now omit the `clusterEndpoint` field in your configuration. In order to allow the resolving, you have to add the permission `eks:DescribeCluster` to the Karpenter Controller IAM role. @@ -198,7 +198,7 @@ kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh * Prior to v0.20.0, Karpenter would prioritize certain instance type categories absent of any requirements in the Provisioner. v0.20.0+ removes prioritizing these instance type categories ("m", "c", "r", "a", "t", "i") in code.
Bare Metal and GPU instance types are still deprioritized and only used if no other instance types are compatible with the node requirements. Since Karpenter does not prioritize any instance types, if you do not want exotic instance types and are not using the runtime Provisioner defaults, you will need to specify this in the Provisioner.
### Upgrading to v0.19.0+
-* The karpenter webhook and controller containers are combined into a single binary, which requires changes to the helm chart. If your Karpenter installation (helm or otherwise) currently customizes the karpenter webhook, your deployment tooling may require minor changes.
+* The Karpenter webhook and controller containers are combined into a single binary, which requires changes to the Helm chart. If your Karpenter installation (Helm or otherwise) currently customizes the Karpenter webhook, your deployment tooling may require minor changes.
* Karpenter now supports native interruption handling. If you were previously using Node Termination Handler for spot interruption handling and health events, you will need to remove the component from your cluster before enabling `aws.interruptionQueueName`. For more details on Karpenter's interruption handling, see the [Interruption Handling Docs]({{< ref "../concepts/disruption/#interruption" >}}).
* Instance category defaults are now explicitly persisted in the Provisioner, rather than handled implicitly in memory. By default, Provisioners will limit instance category to c,m,r. If any instance type constraints are applied, it will override this default. If you have created Provisioners in the past with unconstrained instance type, family, or category, Karpenter will now more flexibly use instance types than before. If you would like to apply these constraints, they must be included in the Provisioner CRD.
* Karpenter CRD raw YAML URLs have migrated from `https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.19.3/charts/karpenter/crds/...` to `https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.19.3/pkg/apis/crds/...`. If you reference static Karpenter CRDs or rely on `kubectl replace -f` to apply these CRDs from their remote location, you will need to migrate to the new location.
@@ -219,11 +219,11 @@ kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh
### Upgrading to v0.17.0+
Karpenter's Helm chart package is now stored in [Karpenter's OCI (Open Container Initiative) registry](https://gallery.ecr.aws/karpenter/karpenter). The Helm CLI supports the new format since [v3.8.0+](https://helm.sh/docs/topics/registries/).
-With this change [charts.karpenter.sh](https://charts.karpenter.sh/) is no longer updated but preserved to allow using older Karpenter versions. For examples on working with the Karpenter helm charts look at [Install Karpenter Helm Chart]({{< ref "../getting-started/getting-started-with-karpenter/#install-karpenter-helm-chart" >}}).
+With this change [charts.karpenter.sh](https://charts.karpenter.sh/) is no longer updated but preserved to allow using older Karpenter versions. For examples of working with the Karpenter Helm charts, see [Install Karpenter Helm Chart]({{< ref "../getting-started/getting-started-with-karpenter/#install-karpenter-helm-chart" >}}).
Users who have scripted the installation or upgrading of Karpenter need to adjust their scripts with the following changes:
-1. There is no longer a need to add the Karpenter helm repo to helm
-2. The full URL of the Helm chart needs to be present when using the helm commands
+1. There is no longer a need to add the Karpenter Helm repo with `helm repo add`
+2. The full URL of the Helm chart needs to be present when using the `helm` CLI
3. If you were not prepending a `v` to the version (i.e. `0.17.0`), you will need to do so with the OCI chart, `v0.17.0`.
### Upgrading to v0.16.2+
@@ -270,14 +270,14 @@ aws ec2 delete-launch-template --launch-template-id
```
* v0.14.0 introduces support for custom AMIs without the need for an entire launch template. You must add the `ec2:DescribeImages` permission to the Karpenter Controller Role for this feature to work. This permission is needed for Karpenter to discover custom images specified. Read the [Custom AMI documentation here]({{}}) to get started
-* v0.14.0 adds an an additional default toleration (CriticalAddonOnly=Exists) to the Karpenter helm chart. This may cause Karpenter to run on nodes with that use this Taint which previously would not have been schedulable. This can be overridden by using `--set tolerations[0]=null`.
+* v0.14.0 adds an additional default toleration (`CriticalAddonsOnly=Exists`) to the Karpenter Helm chart. This may cause Karpenter to run on nodes that use this taint, which previously would not have been schedulable. This can be overridden by using `--set tolerations[0]=null`.
* v0.14.0 deprecates the `AWS_ENI_LIMITED_POD_DENSITY` environment variable in-favor of specifying `spec.kubeletConfiguration.maxPods` on the Provisioner. `AWS_ENI_LIMITED_POD_DENSITY` will continue to work when `maxPods` is not set on the Provisioner. If `maxPods` is set, it will override `AWS_ENI_LIMITED_POD_DENSITY` on that specific Provisioner.
### Upgrading to v0.13.0+
* v0.13.0 introduces a new CRD named `AWSNodeTemplate` which can be used to specify AWS Cloud Provider parameters. Everything that was previously specified under `spec.provider` in the Provisioner resource, can now be specified in the spec of the new resource. The use of `spec.provider` is deprecated but will continue to function to maintain backwards compatibility for the current API version (v1alpha5) of the Provisioner resource. v0.13.0 also introduces support for custom user data that doesn't require the use of a custom launch template. The user data can be specified in-line in the AWSNodeTemplate resource.
-  If you are upgrading from v0.10.1 - v0.11.1, a new CRD `awsnodetemplate` was added. In v0.12.0, this crd was renamed to `awsnodetemplates`. Since helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade:
+  If you are upgrading from v0.10.1 - v0.11.1, a new CRD `awsnodetemplate` was added. In v0.12.0, this CRD was renamed to `awsnodetemplates`. Since Helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade:
  1. Make sure any `awsnodetemplate` manifests are saved somewhere so that they can be reapplied to the cluster.
  2. `kubectl delete crd awsnodetemplate`
  3. `kubectl apply -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.13.2/charts/karpenter/crds/karpenter.k8s.aws_awsnodetemplates.yaml`
@@ -287,7 +287,7 @@ aws ec2 delete-launch-template --launch-template-id
### Upgrading to v0.12.0+
* v0.12.0 adds an OwnerReference to each Node created by a provisioner. Previously, deleting a provisioner would orphan nodes.
Now, deleting a provisioner will cause Kubernetes [cascading delete](https://kubernetes.io/docs/concepts/architecture/garbage-collection/#cascading-deletion) logic to gracefully terminate the nodes using the Karpenter node finalizer. You may still orphan nodes by removing the owner reference.
-* If you are upgrading from v0.10.1 - v0.11.1, a new CRD `awsnodetemplate` was added. In v0.12.0, this crd was renamed to `awsnodetemplates`. Since helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade:
+* If you are upgrading from v0.10.1 - v0.11.1, a new CRD `awsnodetemplate` was added. In v0.12.0, this CRD was renamed to `awsnodetemplates`. Since Helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade:
  1. Make sure any `awsnodetemplate` manifests are saved somewhere so that they can be reapplied to the cluster.
  2. `kubectl delete crd awsnodetemplate`
  3. `kubectl apply -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.12.1/charts/karpenter/crds/karpenter.k8s.aws_awsnodetemplates.yaml`
diff --git a/website/hugo.yaml b/website/hugo.yaml
index 1c246e32a722..fd4ccfa685f6 100644
--- a/website/hugo.yaml
+++ b/website/hugo.yaml
@@ -46,8 +46,8 @@ imaging:
   quality: 75
   anchor: smart
 params:
-  copyright: 'Amazon.com, Inc. or its affiliates.'
-  github_repo: 'https://github.com/aws/karpenter-provider-aws'
+  copyright: "Amazon.com, Inc. or its affiliates."
+  github_repo: "https://github.com/aws/karpenter-provider-aws"
   github_subdir: website
   github_branch: main
   images:
@@ -69,13 +69,13 @@ params:
   links:
     developer:
       - name: GitHub
-        url: 'https://github.com/aws/karpenter-provider-aws'
+        url: "https://github.com/aws/karpenter-provider-aws"
         icon: fab fa-github
         desc: Development takes place here!
       - name: Slack
-        url: 'https://slack.k8s.io/'
+        url: "https://slack.k8s.io/"
         icon: fab fa-slack
-        desc: 'Chat with us on Slack in the #aws-provider channel'
+        desc: "Chat with us on Slack in the #aws-provider channel"
   latest_release_version: v0.34
   latest_k8s_version: 1.29
   versions:
@@ -88,9 +88,9 @@ menu:
   main:
     - name: GitHub
       weight: 99
-      url: 'https://github.com/aws/karpenter-provider-aws'
+      url: "https://github.com/aws/karpenter-provider-aws"
       pre:
     - name: Docs
       weight: 20
-      url: 'docs'
+      url: "docs"
       pre:
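
To illustrate the port change called out in the `v0.29.0` upgrade notes above: after upgrading, metrics move from service port 8080 to 8000. Below is a minimal sketch of verifying the new port; it assumes a release named `karpenter` in a `karpenter` namespace (adjust both to your install), and `/metrics` as the standard Prometheus endpoint.

```bash
# Port-forward the Karpenter service and confirm metrics are served on 8000.
# Release name, namespace, and port are assumptions based on a default install.
kubectl -n karpenter port-forward svc/karpenter 8000:8000 &
PF_PID=$!
sleep 2
curl -s http://localhost:8000/metrics | head -n 5
kill "${PF_PID}"
```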
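The `v0.19.0+`/`v0.20.0+` notes above say the old in-memory c/m/r instance-category defaults must now be persisted in the Provisioner itself if you want to keep them. A minimal sketch of doing so follows, assuming the v1alpha5-era API and the `karpenter.k8s.aws/instance-category` requirement key used by those releases; treat the resource name as a placeholder.

```bash
# Pin the instance-category constraint in the Provisioner rather than relying
# on removed in-code defaults. API version and key reflect the v1alpha5 era.
kubectl apply -f - <<EOF
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
  name: default
spec:
  requirements:
    - key: karpenter.k8s.aws/instance-category
      operator: In
      values: ["c", "m", "r"]
EOF
```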
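Finally, for the scripted-install adjustments listed under `Upgrading to v0.17.0+` above, a minimal sketch of the adjusted commands is shown below: no `helm repo add` step, the full OCI URL on the command line, and a `v`-prefixed chart version for those releases. The values file name is hypothetical; substitute your existing configuration.

```bash
# Install straight from the OCI registry. The chart URL and v-prefixed version
# follow the v0.17.0+ notes above; karpenter-values.yaml is a placeholder.
helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter \
  --version v0.17.0 \
  --namespace karpenter --create-namespace \
  --values karpenter-values.yaml \
  --wait
```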