From 57d33eede83e53998c648ac8f3cb21d72d3a5b56 Mon Sep 17 00:00:00 2001 From: Steve Hipwell Date: Fri, 16 Feb 2024 16:59:25 +0000 Subject: [PATCH 1/3] feat: Changed OCI tag format Signed-off-by: Steve Hipwell --- .ko.yaml | 3 + Makefile | 2 +- hack/release/common.sh | 267 +++++++++--------- hack/release/prepare-website.sh | 16 +- hack/release/release-crd.sh | 18 +- hack/release/release.sh | 22 +- hack/release/snapshot.sh | 12 +- hack/release/stable-pr.sh | 37 +-- website/content/en/preview/faq.md | 2 +- .../content/en/preview/reference/settings.md | 10 +- .../en/preview/reference/threat-model.md | 6 +- website/content/en/preview/troubleshooting.md | 24 +- .../en/preview/upgrading/compatibility.md | 15 +- .../en/preview/upgrading/upgrade-guide.md | 166 ++++++----- 14 files changed, 307 insertions(+), 293 deletions(-) diff --git a/.ko.yaml b/.ko.yaml index aafa60d0e357..7a663228cd39 100644 --- a/.ko.yaml +++ b/.ko.yaml @@ -1 +1,4 @@ defaultBaseImage: public.ecr.aws/eks-distro-build-tooling/eks-distro-minimal-base +defaultPlatforms: + - linux/arm64 + - linux/amd64 diff --git a/Makefile b/Makefile index 2457789ec704..1c9440021654 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ CLUSTER_NAME ?= $(shell kubectl config view --minify -o jsonpath='{.clusters[].name}' | rev | cut -d"/" -f1 | rev | cut -d"." -f1) ## Inject the app version into operator.Version -LDFLAGS ?= -ldflags=-X=sigs.k8s.io/karpenter/pkg/operator.Version=$(shell git describe --tags --always) +LDFLAGS ?= -ldflags=-X=sigs.k8s.io/karpenter/pkg/operator.Version=$(shell git describe --tags --always | cut -d"v" -f2) GOFLAGS ?= $(LDFLAGS) WITH_GOFLAGS = GOFLAGS="$(GOFLAGS)" diff --git a/hack/release/common.sh b/hack/release/common.sh index 04c8e3e9e9e0..1f94dff5bd8c 100644 --- a/hack/release/common.sh +++ b/hack/release/common.sh @@ -1,61 +1,48 @@ #!/usr/bin/env bash set -euo pipefail -config(){ - GITHUB_ACCOUNT="aws" - ECR_GALLERY_NAME="karpenter" - RELEASE_REPO_ECR=${RELEASE_REPO_ECR:-public.ecr.aws/${ECR_GALLERY_NAME}/} - RELEASE_REPO_GH=${RELEASE_REPO_GH:-ghcr.io/${GITHUB_ACCOUNT}/karpenter} +GITHUB_ACCOUNT="aws" +ECR_GALLERY_NAME="karpenter" +RELEASE_REPO_ECR="${RELEASE_REPO_ECR:-public.ecr.aws/${ECR_GALLERY_NAME}/}" +RELEASE_REPO_GH="${RELEASE_REPO_GH:-ghcr.io/${GITHUB_ACCOUNT}/karpenter}" - SNAPSHOT_ECR="021119463062.dkr.ecr.us-east-1.amazonaws.com" - SNAPSHOT_REPO_ECR=${SNAPSHOT_REPO_ECR:-${SNAPSHOT_ECR}/karpenter/snapshot/} +SNAPSHOT_ECR="021119463062.dkr.ecr.us-east-1.amazonaws.com" +SNAPSHOT_REPO_ECR="${SNAPSHOT_REPO_ECR:-${SNAPSHOT_ECR}/karpenter/snapshot/}" - CURRENT_MAJOR_VERSION="0" - RELEASE_PLATFORM="--platform=linux/amd64,linux/arm64" +CURRENT_MAJOR_VERSION="0" - MAIN_GITHUB_ACCOUNT="aws" - RELEASE_TYPE_STABLE="stable" - RELEASE_TYPE_SNAPSHOT="snapshot" -} - -# versionData sets all the version properties for the passed release version. 
It sets the values -# RELEASE_VERSION_MAJOR, RELEASE_VERSION_MINOR, and RELEASE_VERSION_PATCH to be used by other scripts -versionData(){ - local VERSION="$1" - local VERSION="${VERSION#[vV]}" - RELEASE_VERSION_MAJOR="${VERSION%%\.*}" - RELEASE_VERSION_MINOR="${VERSION#*.}" - RELEASE_VERSION_MINOR="${RELEASE_VERSION_MINOR%.*}" - RELEASE_VERSION_PATCH="${VERSION##*.}" - RELEASE_MINOR_VERSION="v${RELEASE_VERSION_MAJOR}.${RELEASE_VERSION_MINOR}" -} +MAIN_GITHUB_ACCOUNT="aws" snapshot() { - RELEASE_VERSION=$1 + local commit_sha version helm_chart_version + + commit_sha="${1}" + version="${commit_sha}" + helm_chart_version="${CURRENT_MAJOR_VERSION}-${commit_sha}" + echo "Release Type: snapshot -Release Version: ${RELEASE_VERSION} -Commit: $(git rev-parse HEAD) -Helm Chart Version $(helmChartVersion "${RELEASE_VERSION}")" +Release Version: ${version} +Commit: ${commit_sha} +Helm Chart Version ${helm_chart_version}" authenticatePrivateRepo - buildImages "${SNAPSHOT_REPO_ECR}" - cosignImages - publishHelmChart "karpenter" "${RELEASE_VERSION}" "${SNAPSHOT_REPO_ECR}" - publishHelmChart "karpenter-crd" "${RELEASE_VERSION}" "${SNAPSHOT_REPO_ECR}" + build "${SNAPSHOT_REPO_ECR}" "${version}" "${helm_chart_version}" "${commit_sha}" } release() { - RELEASE_VERSION=$1 + local commit_sha version helm_chart_version + + commit_sha="${1}" + version="${2}" + helm_chart_version="${version}" + echo "Release Type: stable -Release Version: ${RELEASE_VERSION} -Commit: $(git rev-parse HEAD) -Helm Chart Version $(helmChartVersion "${RELEASE_VERSION}")" +Release Version: ${version} +Commit: ${commit_sha} +Helm Chart Version ${helm_chart_version}" authenticate - buildImages "${RELEASE_REPO_ECR}" - cosignImages - publishHelmChart "karpenter" "${RELEASE_VERSION}" "${RELEASE_REPO_ECR}" - publishHelmChart "karpenter-crd" "${RELEASE_VERSION}" "${RELEASE_REPO_ECR}" + build "${RELEASE_REPO_ECR}" "${version}" "${helm_chart_version}" "${commit_sha}" } authenticate() { @@ -66,136 +53,158 @@ authenticatePrivateRepo() { aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin "${SNAPSHOT_ECR}" } -buildImages() { - RELEASE_REPO=$1 - # Set the SOURCE_DATE_EPOCH and KO_DATA_DATE_EPOCH values for reproducable builds with timestamps - # https://ko.build/advanced/faq/ - CONTROLLER_IMG=$(GOFLAGS=${GOFLAGS} SOURCE_DATE_EPOCH=$(git log -1 --format='%ct') KO_DATA_DATE_EPOCH=$(git log -1 --format='%ct') KO_DOCKER_REPO=${RELEASE_REPO} ko publish -B -t "${RELEASE_VERSION}" "${RELEASE_PLATFORM}" ./cmd/controller) - HELM_CHART_VERSION=$(helmChartVersion "$RELEASE_VERSION") - IMG_REPOSITORY=$(echo "$CONTROLLER_IMG" | cut -d "@" -f 1 | cut -d ":" -f 1) - IMG_TAG=$(echo "$CONTROLLER_IMG" | cut -d "@" -f 1 | cut -d ":" -f 2 -s) - IMG_DIGEST=$(echo "$CONTROLLER_IMG" | cut -d "@" -f 2) - yq e -i ".controller.image.repository = \"${IMG_REPOSITORY}\"" charts/karpenter/values.yaml - yq e -i ".controller.image.tag = \"${IMG_TAG}\"" charts/karpenter/values.yaml - yq e -i ".controller.image.digest = \"${IMG_DIGEST}\"" charts/karpenter/values.yaml - yq e -i ".appVersion = \"${RELEASE_VERSION#v}\"" charts/karpenter/Chart.yaml - yq e -i ".version = \"${HELM_CHART_VERSION#v}\"" charts/karpenter/Chart.yaml - yq e -i ".appVersion = \"${RELEASE_VERSION#v}\"" charts/karpenter-crd/Chart.yaml - yq e -i ".version = \"${HELM_CHART_VERSION#v}\"" charts/karpenter-crd/Chart.yaml -} +build() { + local oci_repo version helm_chart_version commit_sha date_epoch build_date img img_repo img_tag img_digest -releaseType(){ - RELEASE_VERSION=$1 + 
oci_repo="${1}"
+  version="${2}"
+  helm_chart_version="${3}"
+  commit_sha="${4}"
 
-  if [[ "${RELEASE_VERSION}" == v* ]]; then
-    echo "${RELEASE_TYPE_STABLE}"
-  else
-    echo "${RELEASE_TYPE_SNAPSHOT}"
-  fi
+  date_epoch="$(dateEpoch)"
+  build_date="$(buildDate "${date_epoch}")"
+
+  # Set SOURCE_DATE_EPOCH and KO_DATA_DATE_EPOCH for reproducible builds with timestamps (https://ko.build/advanced/faq/)
+  img="$(GOFLAGS=${GOFLAGS:-} SOURCE_DATE_EPOCH="${date_epoch}" KO_DATA_DATE_EPOCH="${date_epoch}" KO_DOCKER_REPO="${oci_repo}" ko publish -B -t "${version}" ./cmd/controller)"
+  img_repo="$(echo "${img}" | cut -d "@" -f 1 | cut -d ":" -f 1)"
+  img_tag="$(echo "${img}" | cut -d "@" -f 1 | cut -d ":" -f 2 -s)"
+  img_digest="$(echo "${img}" | cut -d "@" -f 2)"
+
+  cosignOciArtifact "${version}" "${commit_sha}" "${build_date}" "${img}"
+
+  yq e -i ".controller.image.repository = \"${img_repo}\"" charts/karpenter/values.yaml
+  yq e -i ".controller.image.tag = \"${img_tag}\"" charts/karpenter/values.yaml
+  yq e -i ".controller.image.digest = \"${img_digest}\"" charts/karpenter/values.yaml
+
+  publishHelmChart "${oci_repo}" "karpenter" "${helm_chart_version}" "${commit_sha}" "${build_date}"
+  publishHelmChart "${oci_repo}" "karpenter-crd" "${helm_chart_version}" "${commit_sha}" "${build_date}"
 }
 
-helmChartVersion(){
-  RELEASE_VERSION=$1
-  if [[ $(releaseType "$RELEASE_VERSION") == "$RELEASE_TYPE_STABLE" ]]; then
-    echo "${RELEASE_VERSION#v}"
-  fi
+# publishHelmChart packages and pushes the named chart to the OCI repository, then signs it with cosign.
+publishHelmChart() {
+  local oci_repo helm_chart version commit_sha build_date ah_config_file_name helm_chart_artifact helm_chart_digest
+
+  oci_repo="${1}"
+  helm_chart="${2}"
+  version="${3}"
+  commit_sha="${4}"
+  build_date="${5}"
+
+  ah_config_file_name="${helm_chart}/artifacthub-repo.yaml"
+  helm_chart_artifact="${helm_chart}-${version}.tgz"
 
-  if [[ $(releaseType "$RELEASE_VERSION") == "$RELEASE_TYPE_SNAPSHOT" ]]; then
-    echo "${CURRENT_MAJOR_VERSION}-${RELEASE_VERSION}"
-  fi
+  yq e -i ".appVersion = \"${version}\"" "charts/${helm_chart}/Chart.yaml"
+  yq e -i ".version = \"${version}\"" "charts/${helm_chart}/Chart.yaml"
+
+  cd charts
+  [[ -s "${ah_config_file_name}" ]] && oras push "${oci_repo}:artifacthub.io" --config /dev/null:application/vnd.cncf.artifacthub.config.v1+yaml "${ah_config_file_name}:application/vnd.cncf.artifacthub.repository-metadata.layer.v1.yaml"
+  helm dependency update "${helm_chart}"
+  helm lint "${helm_chart}"
+  helm package "${helm_chart}" --version "${version}"
+  helm push "${helm_chart_artifact}" "oci://${oci_repo}"
+  rm "${helm_chart_artifact}"
+  cd ..
+
+  helm_chart_digest="$(crane digest "${oci_repo}:${version}")"
+  cosignOciArtifact "${version}" "${commit_sha}" "${build_date}" "${oci_repo}:${version}@${helm_chart_digest}"
 }
 
-buildDate(){
-  # Set the SOURCE_DATE_EPOCH and KO_DATA_DATE_EPOCH values for reproducable builds with timestamps
-  # https://ko.build/advanced/faq/
-  DATE_FMT="+%Y-%m-%dT%H:%M:%SZ"
-  SOURCE_DATE_EPOCH=$(git log -1 --format='%ct')
-  echo "$(date -u -r "${SOURCE_DATE_EPOCH}" $DATE_FMT 2>/dev/null)"
+cosignOciArtifact() {
+  local version commit_sha build_date artifact
+
+  version="${1}"
+  commit_sha="${2}"
+  build_date="${3}"
+  artifact="${4}"
+
+  cosign sign --yes -a version="${version}" -a commitSha="${commit_sha}" -a buildDate="${build_date}" "${artifact}"
 }
 
-cosignImages() {
-  cosign sign --yes \
-    -a GIT_HASH="$(git rev-parse HEAD)" \
-    -a GIT_VERSION="${RELEASE_VERSION}" \
-    -a BUILD_DATE="$(buildDate)" \
-    "${CONTROLLER_IMG}"
+dateEpoch() {
+  git log -1 --format='%ct'
 }
 
-publishHelmChart() {
-  CHART_NAME=$1
-  RELEASE_VERSION=$2
-  RELEASE_REPO=$3
-  HELM_CHART_VERSION=$(helmChartVersion "$RELEASE_VERSION")
-  HELM_CHART_FILE_NAME="${CHART_NAME}-${HELM_CHART_VERSION}.tgz"
-  AH_CONFIG_FILE_NAME="${CHART_NAME}/artifacthub-repo.yaml"
-
-  cd charts
-  [[ -s "${AH_CONFIG_FILE_NAME}" ]] && oras push "${RELEASE_REPO}:artifacthub.io" --config /dev/null:application/vnd.cncf.artifacthub.config.v1+yaml "${AH_CONFIG_FILE_NAME}:application/vnd.cncf.artifacthub.repository-metadata.layer.v1.yaml"
-  helm dependency update "${CHART_NAME}"
-  helm lint "${CHART_NAME}"
-  helm package "${CHART_NAME}" --version "${HELM_CHART_VERSION}"
-  helm push "${HELM_CHART_FILE_NAME}" "oci://${RELEASE_REPO}"
-  rm "${HELM_CHART_FILE_NAME}"
-  cd ..
-
-  cosignHelmChart "${RELEASE_REPO}${CHART_NAME}" "${HELM_CHART_VERSION}"
+buildDate() {
+  local date_epoch
+
+  date_epoch="${1}"
+
+  date -u -r "${date_epoch}" "+%Y-%m-%dT%H:%M:%SZ" 2>/dev/null
 }
 
-cosignHelmChart() {
-  RELEASE_REPO=$1
-  HELM_CHART_VERSION=$2
-  digest="$(crane digest "${RELEASE_REPO}:${HELM_CHART_VERSION}")"
-  cosign sign --yes "${RELEASE_REPO}:${HELM_CHART_VERSION}@${digest}"
+# prepareWebsite regenerates the versioned website content, the docs directory, and the version menu for the given release.
+prepareWebsite() {
+  local version version_parts short_version
+
+  version="${1}"
+  # shellcheck disable=SC2206
+  version_parts=(${version//./ })
+  short_version="${version_parts[0]}.${version_parts[1]}"
+
+  createNewWebsiteDirectory "${short_version}" "${version}"
+  removeOldWebsiteDirectories
+  editWebsiteConfig "${version}"
+  editWebsiteVersionsMenu
 }
 
 createNewWebsiteDirectory() {
-  RELEASE_VERSION=$1
-  versionData "${RELEASE_VERSION}"
+  local short_version version
 
-  mkdir -p "website/content/en/${RELEASE_MINOR_VERSION}"
-  cp -r website/content/en/preview/* "website/content/en/${RELEASE_MINOR_VERSION}/"
+  short_version="${1}"
+  version="${2}"
 
-  # Update parameterized variables in the preview documentation to be statically set in the versioned documentation
-  find "website/content/en/${RELEASE_MINOR_VERSION}/" -type f | xargs perl -i -p -e "s/{{< param \"latest_release_version\" >}}/${RELEASE_VERSION}/g;"
-  find "website/content/en/${RELEASE_MINOR_VERSION}/" -type f | xargs perl -i -p -e "s/{{< param \"latest_k8s_version\" >}}/$(yq .params.latest_k8s_version website/hugo.yaml)/g;"
-  find website/content/en/${RELEASE_MINOR_VERSION}/*/*/*.yaml -type f | xargs perl -i -p -e "s/preview/${RELEASE_MINOR_VERSION}/g;"
-  find "website/content/en/${RELEASE_MINOR_VERSION}/" -type f | xargs perl -i -p -e "s/{{< githubRelRef >}}/\/${RELEASE_VERSION}\//g;"
+  mkdir -p "website/content/en/v${short_version}"
+  cp -r 
website/content/en/preview/* "website/content/en/v${short_version}/" - rm -rf website/content/en/docs - mkdir -p website/content/en/docs - cp -r website/content/en/${RELEASE_MINOR_VERSION}/* website/content/en/docs/ + # Update parameterized variables in the preview documentation to be statically set in the versioned documentation + # shellcheck disable=SC2038 + find "website/content/en/v${short_version}/" -type f -print | xargs perl -i -p -e "s/{{< param \"latest_release_version\" >}}/${version}/g;" + # shellcheck disable=SC2038 + find "website/content/en/v${short_version}/" -type f | xargs perl -i -p -e "s/{{< param \"latest_k8s_version\" >}}/$(yq .params.latest_k8s_version website/hugo.yaml)/g;" + # shellcheck disable=SC2038 + find "website/content/en/v${short_version}/"*/*/*.yaml -type f | xargs perl -i -p -e "s/preview/v${short_version}/g;" + # shellcheck disable=SC2038 + find "website/content/en/v${short_version}/" -type f | xargs perl -i -p -e "s/{{< githubRelRef >}}/\/v${version}\//g;" + + rm -rf website/content/en/docs + mkdir -p website/content/en/docs + cp -r "website/content/en/v${short_version}/"* website/content/en/docs/ } removeOldWebsiteDirectories() { - local n=3 + local n=3 last_n_versions all + # Get all the directories except the last n directories sorted from earliest to latest version # preview, docs, and v0.32 are special directories that we always propagate into the set of directory options # Keep the v0.32 version around while we are supporting v1beta1 migration # Drop it once we no longer want to maintain the v0.32 version in the docs - last_n_versions=$(find website/content/en/* -maxdepth 0 -type d -name "*" | grep -v "preview\|docs\|v0.32" | sort | tail -n "$n") + last_n_versions=$(find website/content/en/* -maxdepth 0 -type d -name "*" | grep -v "preview\|docs\|v0.32" | sort | tail -n "${n}") last_n_versions+=$(echo -e "\nwebsite/content/en/preview") last_n_versions+=$(echo -e "\nwebsite/content/en/docs") last_n_versions+=$(echo -e "\nwebsite/content/en/v0.32") all=$(find website/content/en/* -maxdepth 0 -type d -name "*") + ## symmetric difference - comm -3 <(sort <<< $last_n_versions) <(sort <<< $all) | tr -d '\t' | xargs -r -n 1 rm -r + # shellcheck disable=SC2086 + comm -3 <(sort <<< ${last_n_versions}) <(sort <<< ${all}) | tr -d '\t' | xargs -r -n 1 rm -r } editWebsiteConfig() { - RELEASE_VERSION=$1 - yq -i ".params.latest_release_version = \"${RELEASE_VERSION}\"" website/hugo.yaml + local version="${1}" + + yq -i ".params.latest_release_version = \"v${version}\"" website/hugo.yaml } # editWebsiteVersionsMenu sets relevant releases in the version dropdown menu of the website # without increasing the size of the set. 
# It uses the current version directories (ignoring the docs directory) to generate this list editWebsiteVersionsMenu() { - VERSIONS=($(find website/content/en/* -maxdepth 0 -type d -name "*" | xargs -r -n 1 basename | grep -v "docs\|preview")) - VERSIONS+=('preview') + local versions version + + # shellcheck disable=SC2207 + versions=($(find website/content/en/* -maxdepth 0 -type d -name "*" -print0 | xargs -r -n 1 basename | grep -v "docs\|preview")) + versions+=('preview') yq -i '.params.versions = []' website/hugo.yaml - for VERSION in "${VERSIONS[@]}"; do - yq -i ".params.versions += \"${VERSION}\"" website/hugo.yaml + for version in "${versions[@]}"; do + yq -i ".params.versions += \"${version}\"" website/hugo.yaml done } diff --git a/hack/release/prepare-website.sh b/hack/release/prepare-website.sh index 96b396b2bc84..80bfb8e41672 100755 --- a/hack/release/prepare-website.sh +++ b/hack/release/prepare-website.sh @@ -1,19 +1,15 @@ #!/usr/bin/env bash set -euo pipefail -SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) +SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" +# shellcheck source=hack/release/common.sh source "${SCRIPT_DIR}/common.sh" -config - -GIT_TAG=${GIT_TAG:-$(git describe --exact-match --tags || echo "none")} -if [[ $(releaseType "$GIT_TAG") != $RELEASE_TYPE_STABLE ]]; then +git_tag="${GIT_TAG:-$(git describe --exact-match --tags || echo "none")}" +if [[ "${git_tag}" != v* ]]; then echo "Not a stable release. Missing required git tag." exit 1 fi -echo "RenderingPrep website files for ${GIT_TAG}" +echo "RenderingPrep website files for ${git_tag}" -createNewWebsiteDirectory "$GIT_TAG" -removeOldWebsiteDirectories -editWebsiteConfig "$GIT_TAG" -editWebsiteVersionsMenu +prepareWebsite "${git_tag#v}" diff --git a/hack/release/release-crd.sh b/hack/release/release-crd.sh index fb3ab0a074ef..cced2a5d79a0 100755 --- a/hack/release/release-crd.sh +++ b/hack/release/release-crd.sh @@ -1,15 +1,17 @@ #!/usr/bin/env bash set -euo pipefail -HEAD_HASH=$(git rev-parse HEAD) -GIT_TAG=$(git describe --exact-match --tags || echo "no tag") - -SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) +SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" +# shellcheck source=hack/release/common.sh source "${SCRIPT_DIR}/common.sh" -config -publishHelmChart "karpenter-crd" "${HEAD_HASH}" "${RELEASE_REPO_GH}" +commit_sha="$(git rev-parse HEAD)" +git_tag="$(git describe --exact-match --tags || echo "no tag")" + +BUILD_DATE="$(buildDate "$(dateEpoch)")" + +publishHelmChart "${RELEASE_REPO_GH}" "karpenter-crd" "${commit_sha}" "${commit_sha}" "${BUILD_DATE}" -if [[ $(releaseType $GIT_TAG) == $RELEASE_TYPE_STABLE ]]; then - publishHelmChart "karpenter-crd" "${GIT_TAG}" "${RELEASE_REPO_GH}" +if [[ "${git_tag}" == v* ]]; then + publishHelmChart "${RELEASE_REPO_GH}" "karpenter-crd" "${git_tag#v}" "${commit_sha}" "${BUILD_DATE}" fi diff --git a/hack/release/release.sh b/hack/release/release.sh index 2775e026fb4c..d058460eef9e 100755 --- a/hack/release/release.sh +++ b/hack/release/release.sh @@ -1,20 +1,20 @@ #!/usr/bin/env bash set -euo pipefail -GIT_TAG=$(git describe --exact-match --tags || echo "no tag") -if [[ "$GIT_TAG" == "no tag" ]]; then - echo "Failed to release: commit is untagged" - exit 1 -fi -HEAD_HASH=$(git rev-parse HEAD) - -SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) +SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" +# shellcheck 
source=hack/release/common.sh source "${SCRIPT_DIR}/common.sh" -config + +git_tag="$(git describe --exact-match --tags || echo "no tag")" +if [[ "${git_tag}" == "no tag" ]]; then + echo "Failed to release: commit is untagged" + exit 1 +fi +commit_sha="$(git rev-parse HEAD)" # Don't release with a dirty commit! if [[ "$(git status --porcelain)" != "" ]]; then - exit 1 + exit 1 fi -release "$GIT_TAG" +release "${commit_sha}" "${git_tag#v}" diff --git a/hack/release/snapshot.sh b/hack/release/snapshot.sh index 3213f5bf3bb2..14626271950b 100755 --- a/hack/release/snapshot.sh +++ b/hack/release/snapshot.sh @@ -1,15 +1,15 @@ #!/usr/bin/env bash set -euo pipefail -HEAD_HASH=$(git rev-parse HEAD) - -SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) +SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" +# shellcheck source=hack/release/common.sh source "${SCRIPT_DIR}/common.sh" -config + +commit_sha="$(git rev-parse HEAD)" # Don't release with a dirty commit! if [[ "$(git status --porcelain)" != "" ]]; then - exit 1 + exit 1 fi -snapshot "$HEAD_HASH" +snapshot "${commit_sha}" diff --git a/hack/release/stable-pr.sh b/hack/release/stable-pr.sh index 2be70caf724d..eee4a5d69282 100755 --- a/hack/release/stable-pr.sh +++ b/hack/release/stable-pr.sh @@ -1,42 +1,23 @@ #!/usr/bin/env bash set -euo pipefail -# updateKarpenterCoreGoMod bumps the karpenter-core go.mod to the release version so that the -# karpenter and karpenter-core release versions match -updateKarpenterCoreGoMod(){ - RELEASE_VERSION=$1 - if [[ $GITHUB_ACCOUNT != $MAIN_GITHUB_ACCOUNT ]]; then - echo "not updating go mod for a repo other than the main repo" - return - fi - go get -u "sigs.k8s.io/karpenter@${RELEASE_VERSION}" - cd test - go get -u "sigs.k8s.io/karpenter@${RELEASE_VERSION}" - cd .. - make tidy -} - -SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) +SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" +# shellcheck source=hack/release/common.sh source "${SCRIPT_DIR}/common.sh" -config - -GIT_TAG=$(git describe --exact-match --tags || echo "none") -if [[ $(releaseType $GIT_TAG) != $RELEASE_TYPE_STABLE ]]; then +git_tag="$(git describe --exact-match --tags || echo "none")" +if [[ "${git_tag}" != v* ]]; then echo "Not a stable release. Missing required git tag." exit 1 fi -versionData "$GIT_TAG" -updateKarpenterCoreGoMod "$GIT_TAG" - git config user.name "StableRelease" git config user.email "StableRelease@users.noreply.github.com" -git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/${GITHUB_REPO} +git remote set-url origin "https://x-access-token:${GITHUB_TOKEN}@github.com/${GITHUB_REPO}" git config pull.rebase false -BRANCH_NAME="release-${GIT_TAG}" -git checkout -b "${BRANCH_NAME}" +branch_name="release-${git_tag}" +git checkout -b "${branch_name}" git add go.mod git add go.sum git add hack/docs @@ -46,5 +27,5 @@ git add charts/karpenter/Chart.yaml git add charts/karpenter/Chart.lock git add charts/karpenter/values.yaml git add charts/karpenter/README.md -git commit -m "Stable Release updates Release ${GIT_TAG}." -git push --set-upstream origin "${BRANCH_NAME}" +git commit -m "Stable Release updates Release ${git_tag}." 
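+# Push the release branch so the stable-release pull request can be opened from it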
+git push --set-upstream origin "${branch_name}" diff --git a/website/content/en/preview/faq.md b/website/content/en/preview/faq.md index e8c6ad455bbf..4549a26bef94 100644 --- a/website/content/en/preview/faq.md +++ b/website/content/en/preview/faq.md @@ -211,7 +211,7 @@ For information on upgrading Karpenter, see the [Upgrade Guide]({{< ref "./upgra ### How do I upgrade an EKS Cluster with Karpenter? -When upgrading an Amazon EKS cluster, [Karpenter's Drift feature]({{}}) can automatically upgrade the Karpenter-provisioned nodes to stay in-sync with the EKS control plane. Karpenter Drift is enabled by default starting v0.33.x. +When upgrading an Amazon EKS cluster, [Karpenter's Drift feature]({{}}) can automatically upgrade the Karpenter-provisioned nodes to stay in-sync with the EKS control plane. Karpenter Drift is enabled by default starting `0.33.0`. {{% alert title="Note" color="primary" %}} Karpenter's default [EC2NodeClass `amiFamily` configuration]({{}}) uses the latest EKS Optimized AL2 AMI for the same major and minor version as the EKS cluster's control plane, meaning that an upgrade of the control plane will cause Karpenter to auto-discover the new AMIs for that version. diff --git a/website/content/en/preview/reference/settings.md b/website/content/en/preview/reference/settings.md index 4150586483ea..8f916edc0eb1 100644 --- a/website/content/en/preview/reference/settings.md +++ b/website/content/en/preview/reference/settings.md @@ -43,11 +43,11 @@ Karpenter surfaces environment variables and CLI parameters to allow you to conf Karpenter uses [feature gates](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features) You can enable the feature gates through the `--feature-gates` CLI environment variable or the `FEATURE_GATES` environment variable in the Karpenter deployment. For example, you can configure drift, spotToSpotConsolidation by setting the CLI argument: `--feature-gates Drift=true,SpotToSpotConsolidation=true`. -| Feature | Default | Stage | Since | Until | -|-------------------------|---------|-------|---------|---------| -| Drift | false | Alpha | v0.21.x | v0.32.x | -| Drift | true | Beta | v0.33.x | | -| SpotToSpotConsolidation | false | Beta | v0.34.x | | +| Feature | Default | Stage | Since | Until | +|-------------------------|---------|-------|----------|----------| +| Drift | false | Alpha | `0.21.0` | `0.32.x` | +| Drift | true | Beta | `0.33.0` | | +| SpotToSpotConsolidation | false | Beta | `0.34.0` | | ### Batching Parameters diff --git a/website/content/en/preview/reference/threat-model.md b/website/content/en/preview/reference/threat-model.md index 316ca5cff181..e316ac40788d 100644 --- a/website/content/en/preview/reference/threat-model.md +++ b/website/content/en/preview/reference/threat-model.md @@ -63,16 +63,16 @@ Karpenter has permissions to create and manage cloud instances. Karpenter has Ku ### Threat: Using EC2 CreateTag/DeleteTag Permissions to Orchestrate Instance Creation/Deletion -**Background**: As of v0.28.0, Karpenter creates a mapping between CloudProvider instances and CustomResources in the cluster for capacity tracking. To ensure this mapping is consistent, Karpenter utilizes the following tag keys: +**Background**: As of `0.28.0`, Karpenter creates a mapping between CloudProvider instances and CustomResources in the cluster for capacity tracking. 
To ensure this mapping is consistent, Karpenter utilizes the following tag keys:
 
 * `karpenter.sh/managed-by`
 * `karpenter.sh/nodepool`
 * `kubernetes.io/cluster/${CLUSTER_NAME}`
-* `karpenter.sh/provisioner-name` (prior to `v0.32.0`)
+* `karpenter.sh/provisioner-name` (prior to `0.32.0`)
 
 Any user that has the ability to Create/Delete tags on CloudProvider instances will have the ability to orchestrate Karpenter to Create/Delete CloudProvider instances as a side effect.
 
-In addition, as of v0.29.0, Karpenter will Drift on Security Groups and Subnets. If a user has the Create/Delete tags permission for either of resources, they can orchestrate Karpenter to Create/Delete CloudProvider instances as a side effect.
+In addition, as of `0.29.0`, Karpenter will Drift on Security Groups and Subnets. If a user has the Create/Delete tags permission for either of these resources, they can orchestrate Karpenter to Create/Delete CloudProvider instances as a side effect.
 
 **Threat:** A Cluster Operator attempts to create or delete a tag on a resource discovered by Karpenter. If it has the ability to create a tag it can effectively create or delete CloudProvider instances associated with the tagged resources.
 
diff --git a/website/content/en/preview/troubleshooting.md b/website/content/en/preview/troubleshooting.md
index 7824b1f02b59..9bab69ef22ff 100644
--- a/website/content/en/preview/troubleshooting.md
+++ b/website/content/en/preview/troubleshooting.md
@@ -77,7 +77,7 @@ Info on whether there has been a change to the CRD between versions of Karpenter
 
 ### Unable to schedule pod due to insufficient node group instances
 
-v0.16.0 changed the default replicas from 1 to 2.
+`0.16.0` changed the default replicas from 1 to 2.
 
 Karpenter won't launch capacity to run itself (log related to the `karpenter.sh/provisioner-name DoesNotExist requirement`)
 so it can't provision for the second Karpenter pod.
@@ -91,16 +91,16 @@ To do so on AWS increase the `minimum` and `desired` parameters on the node grou
 
 If Helm is showing an error when trying to install Karpenter Helm charts:
 
-- Ensure you are using a newer Helm version, Helm started supporting OCI images since v3.8.0.
-- Helm does not have an `helm repo add` concept in OCI, so to install Karpenter you no longer need this
+- Ensure you are using a newer Helm version; Helm has supported OCI images since `3.8.0`.
+- Helm does not have a `helm repo add` concept in OCI, so to install Karpenter you no longer need this step.
+- If you get an error like `Error: public.ecr.aws/karpenter/karpenter:0.34.0: not found`, make sure you're adding a `v` prefix for Karpenter versions between `0.17.0` & `0.34.x`.
 - Verify that the image you are trying to pull actually exists in [gallery.ecr.aws/karpenter](https://gallery.ecr.aws/karpenter/karpenter)
 - Sometimes Helm generates a generic error, you can add the --debug switch to any of the Helm commands in this doc for more verbose error messages
-- If you are getting a 403 forbidden error, you can try `docker logout public.ecr.aws` as explained [here](https://docs.aws.amazon.com/AmazonECR/latest/public/public-troubleshooting.html)
-- If you are receiving this error: `Error: failed to download "oci://public.ecr.aws/karpenter/karpenter" at version "0.17.0"`, then you need to prepend a `v` to the version number: `v0.17.0`. Before Karpenter moved to OCI Helm charts (pre-v0.17.0), both `v0.16.0` and `0.16.0` would work, but OCI charts require an exact version match. 
+- If you are getting a 403 forbidden error, you can try `docker logout public.ecr.aws` as explained [here](https://docs.aws.amazon.com/AmazonECR/latest/public/public-troubleshooting.html). ### Helm Error when installing the `karpenter-crd` chart -Karpenter v0.26.1+ introduced the `karpenter-crd` Helm chart. When installing this chart on your cluster, if you have previously added the Karpenter CRDs to your cluster through the `karpenter` controller chart or through `kubectl replace`, Helm will reject the install of the chart due to `invalid ownership metadata`. +Karpenter `0.26.1` introduced the `karpenter-crd` Helm chart. When installing this chart on your cluster, if you have previously added the Karpenter CRDs to your cluster through the `karpenter` controller chart or through `kubectl replace`, Helm will reject the install of the chart due to `invalid ownership metadata`. - In the case of `invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"` run: @@ -137,7 +137,7 @@ kubectl get nodes -ojsonpath='{range .items[*].metadata}{@.name}:{@.finalizers}{ If you are not able to create a provisioner due to `Internal error occurred: failed calling webhook "validation.webhook.provisioners.karpenter.sh":` -Webhooks were renamed in `v0.19.0`. There's a bug in ArgoCD's upgrade workflow where webhooks are leaked. This results in Provisioner's failing to be validated, since the validation server no longer corresponds to the webhook definition. +Webhooks were renamed in `0.19.0`. There's a bug in ArgoCD's upgrade workflow where webhooks are leaked. This results in Provisioner's failing to be validated, since the validation server no longer corresponds to the webhook definition. Delete the stale webhooks. @@ -148,7 +148,7 @@ kubectl delete validatingwebhookconfiguration validation.webhook.provisioners.ka ### Failed calling webhook "defaulting.webhook.karpenter.sh" -The `defaulting.webhook.karpenter.sh` mutating webhook was removed in `v0.27.3`. If you are coming from an older version of Karpenter where this webhook existed and the webhook was not managed by Helm, you may need to delete the stale webhook. +The `defaulting.webhook.karpenter.sh` mutating webhook was removed in `0.27.3`. If you are coming from an older version of Karpenter where this webhook existed and the webhook was not managed by Helm, you may need to delete the stale webhook. ```text kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh @@ -192,11 +192,11 @@ Disabling swap will allow kubelet to join the cluster successfully, however user ### DaemonSets can result in deployment failures -For Karpenter versions 0.5.3 and earlier, DaemonSets were not properly considered when provisioning nodes. +For Karpenter versions `0.5.3` and earlier, DaemonSets were not properly considered when provisioning nodes. This sometimes caused nodes to be deployed that could not meet the needs of the requested DaemonSets and workloads. -This issue no longer occurs after Karpenter version 0.5.3 (see [PR #1155](https://github.com/aws/karpenter/pull/1155)). +This issue no longer occurs after Karpenter version `0.5.3` (see [PR #1155](https://github.com/aws/karpenter/pull/1155)). -If you are using a pre-0.5.3 version of Karpenter, one workaround is to set your provisioner to only use larger instance types that you know will be big enough for the DaemonSet and the workload. 
+If you are using a pre `0.5.3` version of Karpenter, one workaround is to set your provisioner to only use larger instance types that you know will be big enough for the DaemonSet and the workload. For more information, see [Issue #1084](https://github.com/aws/karpenter/issues/1084). Examples of this behavior are included in [Issue #1180](https://github.com/aws/karpenter/issues/1180). @@ -213,7 +213,7 @@ See the Karpenter [Best Practices Guide](https://aws.github.io/aws-eks-best-prac ### Missing subnetSelector and securityGroupSelector tags causes provisioning failures -Starting with Karpenter v0.5.5, if you are using Karpenter-generated launch template, provisioners require that [subnetSelector]({{}}) and [securityGroupSelector]({{}}) tags be set to match your cluster. +Starting with Karpenter `0.5.5`, if you are using Karpenter-generated launch template, provisioners require that [subnetSelector]({{}}) and [securityGroupSelector]({{}}) tags be set to match your cluster. The [Provisioner]({{}}) section in the Karpenter Getting Started Guide uses the following example: ```text diff --git a/website/content/en/preview/upgrading/compatibility.md b/website/content/en/preview/upgrading/compatibility.md index e2e8c55a3700..6c3adf8d4f1d 100644 --- a/website/content/en/preview/upgrading/compatibility.md +++ b/website/content/en/preview/upgrading/compatibility.md @@ -15,9 +15,9 @@ Before you begin upgrading Karpenter, consider Karpenter compatibility issues re [comment]: <> (the content below is generated from hack/docs/compataiblitymetrix_gen_docs.go) -| KUBERNETES | 1.23 | 1.24 | 1.25 | 1.26 | 1.27 | 1.28 | 1.29 | -|------------|---------|---------|---------|---------|---------|---------|--------| -| karpenter | 0.21.x+ | 0.21.x+ | 0.25.x+ | 0.28.x+ | 0.28.x+ | 0.31.x+ | 0.34.0 | +| KUBERNETES | 1.23 | 1.24 | 1.25 | 1.26 | 1.27 | 1.28 | 1.29 | +| ---------- | --------- | --------- | --------- | --------- | --------- | --------- | --------- | +| karpenter | `0.21.0`+ | `0.21.0`+ | `0.25.0`+ | `0.28.0`+ | `0.28.0`+ | `0.31.0`+ | `0.34.0`+ | [comment]: <> (end docs generated content from hack/docs/compataiblitymetrix_gen_docs.go) @@ -43,7 +43,7 @@ Karpenter supports using [Kubernetes Common Expression Language](https://kuberne When we introduce a breaking change, we do so only as described in this document. Karpenter follows [Semantic Versioning 2.0.0](https://semver.org/) in its stable release versions, while in -major version zero (v0.y.z) [anything may change at any time](https://semver.org/#spec-item-4). +major version zero (`0.y.z`) [anything may change at any time](https://semver.org/#spec-item-4). However, to further protect users during this phase we will only introduce breaking changes in minor releases (releases that increment y in x.y.z). Note this does not mean every minor upgrade has a breaking change as we will also increment the minor version when we release a new feature. 
@@ -55,7 +55,7 @@ Users should therefore check to see if there is a breaking change every time the When there is a breaking change we will: * Increment the minor version when in major version 0 -* Add a permanent separate section named `upgrading to vx.y.z+` under [release upgrade notes](#release-upgrade-notes) +* Add a permanent separate section named `upgrading to x.y.z+` under [release upgrade notes](#release-upgrade-notes) clearly explaining the breaking change and what needs to be done on the user side to ensure a safe upgrade * Add the sentence “This is a breaking change, please refer to the above link for upgrade instructions” to the top of the release notes and in all our announcements @@ -81,7 +81,7 @@ Karpenter offers three types of releases. This section explains the purpose of e Stable releases are the most reliable releases that are released with weekly cadence. Stable releases are our only recommended versions for production environments. Sometimes we skip a stable release because we find instability or problems that need to be fixed before having a stable release. -Stable releases are tagged with Semantic Versioning. For example `v0.13.0`. +Stable releases are tagged with a semantic version prefixed by a `v`. For example `v0.13.0`. ### Release Candidates @@ -93,7 +93,7 @@ By adopting this practice we allow our users who are early adopters to test out We release a snapshot release for every commit that gets merged into [`aws/karpenter-provider-aws`](https://www.github.com/aws/karpenter-provider-aws). This enables users to immediately try a new feature or fix right after it's merged rather than waiting days or weeks for release. Snapshot releases are not made available in the same public ECR repository as other release types, they are instead published to a separate private ECR repository. -Helm charts are published to `oci://{{< param "snapshot_repo.account_id" >}}.dkr.ecr.{{< param "snapshot_repo.region" >}}.amazonaws.com/karpenter/snapshot/karpenter` and are tagged with the git commit hash prefixed by the Karpenter major version (e.g. `v0-fc17bfc89ebb30a3b102a86012b3e3992ec08adf`). +Helm charts are published to `oci://{{< param "snapshot_repo.account_id" >}}.dkr.ecr.{{< param "snapshot_repo.region" >}}.amazonaws.com/karpenter/snapshot/karpenter` and are tagged with the git commit hash prefixed by the Karpenter major version (e.g. `0-fc17bfc89ebb30a3b102a86012b3e3992ec08adf`). Anyone with an AWS account can pull from this repository, but must first authenticate: ```bash @@ -103,4 +103,3 @@ aws ecr get-login-password --region {{< param "snapshot_repo.region" >}} | docke {{% alert title="Note" color="warning" %}} Snapshot releases are suitable for testing, and troubleshooting but they should not be used in production environments. Snapshot releases are ephemeral and will be removed 90 days after they were published. 
{{% /alert %}}
-
diff --git a/website/content/en/preview/upgrading/upgrade-guide.md b/website/content/en/preview/upgrading/upgrade-guide.md
index df25afe28976..2c6f86930005 100644
--- a/website/content/en/preview/upgrading/upgrade-guide.md
+++ b/website/content/en/preview/upgrading/upgrade-guide.md
@@ -33,12 +33,22 @@ kubectl apply -f https://raw.githubusercontent.com/aws/karpenter{{< githubRelRef
 kubectl apply -f https://raw.githubusercontent.com/aws/karpenter{{< githubRelRef >}}pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml
 ```
 
-### Upgrading to v0.34.1+
+
 
-[comment]: <> (WHEN CREATING A NEW SECTION OF THE UPGRADE GUIDANCE FOR NEWER VERSIONS, ENSURE THAT YOU COPY THE ALERT SECTION BELOW TO PROPERLY WARN USERS OF THE RISK OF UPGRADING WITHOUT GOING TO v0.32 FIRST)
+### Upgrading to `0.35.0`+
 
 {{% alert title="Warning" color="warning" %}}
-v0.33.0+ _only_ supports Karpenter v1beta1 APIs and will not work with existing Provisioner, AWSNodeTemplate or Machine alpha APIs. Do not upgrade to v0.33.0+ without first [upgrading to v0.32.x]({{}}). This version supports both the alpha and beta APIs, allowing you to migrate all of your existing APIs to beta APIs without experiencing downtime.
+`0.33.0`+ _only_ supports Karpenter v1beta1 APIs and will not work with existing Provisioner, AWSNodeTemplate or Machine alpha APIs. Do not upgrade to `0.35.0`+ without first [upgrading to `0.32.x`]({{}}). This version supports both the alpha and beta APIs, allowing you to migrate all of your existing APIs to beta APIs without experiencing downtime.
+{{% /alert %}}
+
+* Karpenter OCI tags and Helm chart versions are now valid semantic versions, meaning that the `v` prefix from the git tag has been removed and they now follow the `x.y.z` pattern.
+
+### Upgrading to `0.34.0`+
+
+{{% alert title="Warning" color="warning" %}}
+`0.33.0`+ _only_ supports Karpenter v1beta1 APIs and will not work with existing Provisioner, AWSNodeTemplate or Machine alpha APIs. Do not upgrade to `0.34.0`+ without first [upgrading to `0.32.x`]({{}}). This version supports both the alpha and beta APIs, allowing you to migrate all of your existing APIs to beta APIs without experiencing downtime.
 {{% /alert %}}
 
 {{% alert title="Warning" color="warning" %}}
@@ -46,55 +56,53 @@ The Ubuntu EKS optimized AMI has moved from 20.04 to 22.04 for Kubernetes 1.29+.
 {{% /alert %}}
 
 * Karpenter now supports `nodepool.spec.disruption.budgets`, which allows users to control the speed of disruption in the cluster. Since this requires an update to the Custom Resource, before upgrading, you should re-apply the new updates to the CRDs. Check out [Disruption Budgets]({{}}) for more.
-* With Disruption Budgets, Karpenter will disrupt multiple batches of nodes simultaneously, which can result in overall quicker scale-down of your cluster. Before v0.34, Karpenter had a hard-coded parallelism limit for each type of disruption. In v0.34, Karpenter will now disrupt at most 10% of nodes for a given NodePool. There is no setting that will be perfectly equivalent with the behavior prior to v0.34. When considering how to configure your budgets, please refer to the following limits for versions prior to v0.34:
+* With Disruption Budgets, Karpenter will disrupt multiple batches of nodes simultaneously, which can result in overall quicker scale-down of your cluster. Before `0.34.0`, Karpenter had a hard-coded parallelism limit for each type of disruption. In `0.34.0`+, Karpenter will now disrupt at most 10% of nodes for a given NodePool. 
There is no setting that will be perfectly equivalent with the behavior prior to `0.34.0`. When considering how to configure your budgets, please refer to the following limits for versions prior to `0.34.0`: * `Empty Expiration / Empty Drift / Empty Consolidation`: infinite parallelism * `Non-Empty Expiration / Non-Empty Drift / Single-Node Consolidation`: one node at a time * `Multi-Node Consolidation`: max 100 nodes -* To support Disruption Budgets, v0.34+ includes critical changes to Karpenter's core controllers, which allows Karpenter to consider multiple batches of disrupting nodes simultaneously. This increases Karpenter's performance with the potential downside of higher CPU and memory utilization from the Karpenter pod. While the magnitude of this difference varies on a case-by-case basis, when upgrading to Karpenter v0.34+, please note that you may need to increase the resources allocated to the Karpenter controller pods. +* To support Disruption Budgets, `0.34.0`+ includes critical changes to Karpenter's core controllers, which allows Karpenter to consider multiple batches of disrupting nodes simultaneously. This increases Karpenter's performance with the potential downside of higher CPU and memory utilization from the Karpenter pod. While the magnitude of this difference varies on a case-by-case basis, when upgrading to Karpenter `0.34.0`+, please note that you may need to increase the resources allocated to the Karpenter controller pods. * Karpenter now adds a default `podSecurityContext` that configures the `fsgroup: 65536` of volumes in the pod. If you are using sidecar containers, you should review if this configuration is compatible for them. You can disable this default `podSecurityContext` through helm by performing `--set podSecurityContext=null` when installing/upgrading the chart. * The `dnsPolicy` for the Karpenter controller pod has been changed back to the Kubernetes cluster default of `ClusterFirst`. Setting our `dnsPolicy` to `Default` (confusingly, this is not the Kubernetes cluster default) caused more confusion for any users running IPv6 clusters with dual-stack nodes or anyone running Karpenter with dependencies on cluster services (like clusters running service meshes). If you still want the old behavior here, you can change the `dnsPolicy` to point to use `Default` by setting the helm value on install/upgrade with `--set dnsPolicy=Default`. More details on this issue can be found in the following Github issues: [#2186](https://github.com/aws/karpenter-provider-aws/issues/2186) and [#4947](https://github.com/aws/karpenter-provider-aws/issues/4947). * Karpenter now disallows `nodepool.spec.template.spec.resources` to be set. The webhook validation never allowed `nodepool.spec.template.spec.resources`. We are now ensuring that CEL validation also disallows `nodepool.spec.template.spec.resources` to be set. If you were previously setting the resources field on your NodePool, ensure that you remove this field before upgrading to the newest version of Karpenter or else updates to the resource may fail on the new version. -### Upgrading to v0.33.0+ - -[comment]: <> (WHEN CREATING A NEW SECTION OF THE UPGRADE GUIDANCE FOR NEWER VERSIONS, ENSURE THAT YOU COPY THE ALERT SECTION BELOW TO PROPERLY WARN USERS OF THE RISK OF UPGRADING WITHOUT GOING TO v0.32 FIRST) +### Upgrading to `0.33.0`+ {{% alert title="Warning" color="warning" %}} -v0.33.0+ _only_ supports Karpenter v1beta1 APIs and will not work with existing Provisioner, AWSNodeTemplate or Machine alpha APIs. 
**Do not** upgrade to v0.33.0+ without first [upgrading to v0.32.x]({{}}). This version supports both the alpha and beta APIs, allowing you to migrate all of your existing APIs to beta APIs without experiencing downtime. +`0.33.0`+ _only_ supports Karpenter v1beta1 APIs and will not work with existing Provisioner, AWSNodeTemplate or Machine alpha APIs. **Do not** upgrade to `0.33.0`+ without first [upgrading to `0.32.x`]({{}}). This version supports both the alpha and beta APIs, allowing you to migrate all of your existing APIs to beta APIs without experiencing downtime. {{% /alert %}} * Karpenter no longer supports using the `karpenter.sh/provisioner-name` label in NodePool labels and requirements or in application node selectors, affinities, or topologySpreadConstraints. If you were previously using this label to target applications to specific Provisioners, you should update your applications to use the `karpenter.sh/nodepool` label instead before upgrading. If you upgrade without changing these labels, you may begin to see pod scheduling failures for these applications. * Karpenter now tags `spot-instances-request` with the same tags that it tags instances, volumes, and primary ENIs. This means that you will now need to add `ec2:CreateTags` permission for `spot-instances-request`. You can also further scope your controller policy for the `ec2:RunInstances` action to require that it launches the `spot-instances-request` with these specific tags. You can view an example of scoping these actions in the [Getting Started Guide's default CloudFormation controller policy](https://github.com/aws/karpenter/blob/v0.33.0/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml#L61). * We now recommend that you set the installation namespace for your Karpenter controllers to `kube-system` to denote Karpenter as a critical cluster component. This ensures that requests from the Karpenter controllers are treated with higher priority by assigning them to a different [PriorityLevelConfiguration](https://kubernetes.io/docs/concepts/cluster-administration/flow-control/#prioritylevelconfiguration) than generic requests from other namespaces. For more details on API Priority and Fairness, read the [Kubernetes API Priority and Fairness Conceptual Docs](https://kubernetes.io/docs/concepts/cluster-administration/flow-control/). Note: Changing the namespace for your Karpenter release will cause the service account namespace to change. If you are using IRSA for authentication with AWS, you will need to change scoping set in the controller's trust policy from `karpenter:karpenter` to `kube-system:karpenter`. -* `v0.33.x` disables mutating and validating webhooks by default in favor of using [Common Expression Language for CRD validation](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation). The Common Expression Language Validation Feature [is enabled by default on EKS 1.25](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-rules). If you are using Kubernetes version >= 1.25, no further action is required. If you are using a Kubernetes version below 1.25, you now need to set `DISABLE_WEBHOOK=false` in your container environment variables or `--set webhook.enabled=true` if using Helm. View the [Webhook Support Deprecated in Favor of CEL Section of the v1beta1 Migration Guide]({{}}). 
-* `v0.33.x` drops support for passing settings through the `karpenter-global-settings` ConfigMap. You should pass settings through the container environment variables in the Karpenter deployment manifest. View the [Global Settings Section of the v1beta1 Migration Guide]({{}}) for more details. -* `v0.33.x` enables `Drift=true` by default in the `FEATURE_GATES`. If you previously didn't enable the feature gate, Karpenter will now check if there is a difference between the desired state of your nodes declared in your NodePool and the actual state of your nodes. View the [Drift Section of Disruption Conceptual Docs]({{}}) for more details. -* `v0.33.x` drops looking up the `zap-logger-config` through ConfigMap discovery. Instead, Karpenter now expects the logging config to be mounted on the filesystem if you are using this to configure Zap logging. This is not enabled by default, but can be enabled through `--set logConfig.enabled=true` in the Helm values. If you are setting any values in the `logConfig` from the `v0.32.x` upgrade, such as `logConfig.logEncoding`, note that you will have to explicitly set `logConfig.enabled=true` alongside it. Also, note that setting the Zap logging config is a deprecated feature in beta and is planned to be dropped at v1. View the [Logging Configuration Section of the v1beta1 Migration Guide]({{}}) for more details. -* `v0.33.x` change the default `LOG_LEVEL` from `debug` to `info` by default. If you are still enabling logging configuration through the `zap-logger-config`, no action is required. -* `v0.33.x` drops support for comma delimited lists on tags for `SubnetSelectorTerm`, `SecurityGroupsSelectorTerm`, and `AMISelectorTerm`. Karpenter now supports multiple terms for each of the selectors which means that we can specify a more explicit OR-based constraint through separate terms rather than a comma-delimited list of values. +* `0.33.0` disables mutating and validating webhooks by default in favor of using [Common Expression Language for CRD validation](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation). The Common Expression Language Validation Feature [is enabled by default on EKS 1.25](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-rules). If you are using Kubernetes version >= 1.25, no further action is required. If you are using a Kubernetes version below 1.25, you now need to set `DISABLE_WEBHOOK=false` in your container environment variables or `--set webhook.enabled=true` if using Helm. View the [Webhook Support Deprecated in Favor of CEL Section of the v1beta1 Migration Guide]({{}}). +* `0.33.0` drops support for passing settings through the `karpenter-global-settings` ConfigMap. You should pass settings through the container environment variables in the Karpenter deployment manifest. View the [Global Settings Section of the v1beta1 Migration Guide]({{}}) for more details. +* `0.33.0` enables `Drift=true` by default in the `FEATURE_GATES`. If you previously didn't enable the feature gate, Karpenter will now check if there is a difference between the desired state of your nodes declared in your NodePool and the actual state of your nodes. View the [Drift Section of Disruption Conceptual Docs]({{}}) for more details. +* `0.33.0` drops looking up the `zap-logger-config` through ConfigMap discovery. Instead, Karpenter now expects the logging config to be mounted on the filesystem if you are using this to configure Zap logging. 
This is not enabled by default, but can be enabled through `--set logConfig.enabled=true` in the Helm values. If you are setting any values in the `logConfig` from the `0.32.x` upgrade, such as `logConfig.logEncoding`, note that you will have to explicitly set `logConfig.enabled=true` alongside it. Also, note that setting the Zap logging config is a deprecated feature in beta and is planned to be dropped at v1. View the [Logging Configuration Section of the v1beta1 Migration Guide]({{}}) for more details. +* `0.33.0` change the default `LOG_LEVEL` from `debug` to `info` by default. If you are still enabling logging configuration through the `zap-logger-config`, no action is required. +* `0.33.0` drops support for comma delimited lists on tags for `SubnetSelectorTerm`, `SecurityGroupsSelectorTerm`, and `AMISelectorTerm`. Karpenter now supports multiple terms for each of the selectors which means that we can specify a more explicit OR-based constraint through separate terms rather than a comma-delimited list of values. -### Upgrading to v0.32.0+ +### Upgrading to `0.32.0`+ {{% alert title="Warning" color="warning" %}} -Karpenter v0.32.0 introduces v1beta1 APIs, including _significant_ changes to the API and installation procedures for the Karpenter controllers. **Do not** upgrade to v0.32.0+ without referencing the [v1beta1 Migration Upgrade Procedure]({{}}). +Karpenter `0.32.0` introduces v1beta1 APIs, including _significant_ changes to the API and installation procedures for the Karpenter controllers. **Do not** upgrade to `0.32.0`+ without referencing the [v1beta1 Migration Upgrade Procedure]({{}}). This version includes **dual support** for both alpha and beta APIs to ensure that you can slowly migrate your existing Provisioner, AWSNodeTemplate, and Machine alpha APIs to the newer NodePool, EC2NodeClass, and NodeClaim beta APIs. -Note that if you are rolling back after upgrading to v0.32.0, note that v0.31.4 is the only version that supports handling rollback after you have deployed the v1beta1 APIs to your cluster. +Note that if you are rolling back after upgrading to `0.32.0`, note that `0.31.4` is the only version that supports handling rollback after you have deployed the v1beta1 APIs to your cluster. {{% /alert %}} * Karpenter now serves the webhook prometheus metrics server on port `8001`. If this port is already in-use on the pod or you are running in `hostNetworking` mode, you may need to change this port value. You can configure this port value through the `WEBHOOK_METRICS_PORT` environment variable or the `webhook.metrics.port` value if installing via Helm. * Karpenter now exposes the ability to disable webhooks through the `webhook.enabled=false` value. This value will disable the webhook server and will prevent any permissions, mutating or validating webhook configurations from being deployed to the cluster. * Karpenter now moves all logging configuration for the Zap logger into the `logConfig` values block. Configuring Karpenter logging with this mechanism _is_ deprecated and will be dropped at v1. Karpenter now only surfaces logLevel through the `logLevel` helm value. If you need more advanced configuration due to log parsing constraints, we recommend configuring your log parser to handle Karpenter's Zap JSON logging. -* The default log encoding changed from `console` to `json`. If you were previously not setting the type of log encoding, this default will change with the Helm chart. 
If you were setting the value through `logEncoding`, this value will continue to work until v0.33.x but it is deprecated in favor of `logConfig.logEncoding` +* The default log encoding changed from `console` to `json`. If you were previously not setting the type of log encoding, this default will change with the Helm chart. If you were setting the value through `logEncoding`, this value will continue to work until `0.33.x` but it is deprecated in favor of `logConfig.logEncoding` * Karpenter now uses the `karpenter.sh/disruption:NoSchedule=disrupting` taint instead of the upstream `node.kubernetes.io/unschedulable` taint for nodes spawned with a NodePool to prevent pods from scheduling to nodes being disrupted. Pods that previously tolerated the `node.kubernetes.io/unschedulable` taint that previously weren't evicted during termination will now be evicted. This most notably affects DaemonSets, which have the `node.kubernetes.io/unschedulable` toleration by default, where Karpenter will now remove these pods during termination. If you want your specific pods to not be evicted when nodes are scaled down, you should add a toleration to the pods with the following: `Key=karpenter.sh/disruption, Effect=NoSchedule, Operator=Equals, Values=disrupting`. * Note: Karpenter will continue to use the old `node.kubernetes.io/unschedulable` taint for nodes spawned with a Provisioner. -### Upgrading to v0.31.0+ +### Upgrading to `0.31.0`+ * Karpenter moved its `securityContext` constraints from pod-wide to only applying to the Karpenter container exclusively. If you were previously relying on the pod-wide `securityContext` for your sidecar containers, you will now need to set these values explicitly in your sidecar container configuration. -### Upgrading to v0.30.0+ +### Upgrading to `0.30.0`+ * Karpenter will now [statically drift]({{}}) on both Provisioner and AWSNodeTemplate Fields. For Provisioner Static Drift, the `karpenter.sh/provisioner-hash` annotation must be present on both the Provisioner and Machine. For AWSNodeTemplate drift, the `karpenter.k8s.aws/nodetemplate-hash` annotation must be present on the AWSNodeTemplate and Machine. Karpenter will not add these annotations to pre-existing nodes, so each of these nodes will need to be recycled one time for the annotations to be added. * Karpenter will now fail validation on AWSNodeTemplates and Provisioner `spec.provider` that have `amiSelectors`, `subnetSelectors`, or `securityGroupSelectors` set with a combination of id selectors (`aws-ids`, `aws::ids`) and other selectors. @@ -104,21 +112,21 @@ Note that if you are rolling back after upgrading to v0.32.0, note that v0.31.4 If you have sidecar containers configured to run alongside Karpenter that cannot tolerate the [pod-wide `securityContext` constraints](https://github.com/aws/karpenter/blob/v0.30.0/charts/karpenter/templates/deployment.yaml#L40), you will need to specify overrides to the sidecar `securityContext` in your deployment. {{% /alert %}} -### Upgrading to v0.29.0+ +### Upgrading to `0.29.0`+ {{% alert title="Warning" color="warning" %}} -Karpenter `v0.29.1` contains a [file descriptor and memory leak bug](https://github.com/aws/karpenter/issues/4296) that leads to Karpenter getting OOMKilled and restarting at the point that it hits its memory or file descriptor limit. Karpenter `>v0.29.2` fixes this leak. 
+Karpenter `0.29.1` contains a [file descriptor and memory leak bug](https://github.com/aws/karpenter/issues/4296) that leads to Karpenter getting OOMKilled and restarting at the point that it hits its memory or file descriptor limit. Karpenter `0.29.2`+ fixes this leak.
{{% /alert %}}

-* Karpenter has changed the default metrics service port from 8080 to 8000 and the default webhook service port from 443 to 8443. In `v0.28.0`, the Karpenter pod port was changed to 8000, but referenced the service by name, allowing users to scrape the service at port 8080 for metrics. `v0.29.0` aligns the two ports so that service and pod metrics ports are the same. These ports are set by the `controller.metrics.port` and `webhook.port` Helm chart values, so if you have previously set these to non-default values, you may need to update your Prometheus scraper to match these new values.
+* Karpenter has changed the default metrics service port from 8080 to 8000 and the default webhook service port from 443 to 8443. In `0.28.0`, the Karpenter pod port was changed to 8000, but referenced the service by name, allowing users to scrape the service at port 8080 for metrics. `0.29.0` aligns the two ports so that service and pod metrics ports are the same. These ports are set by the `controller.metrics.port` and `webhook.port` Helm chart values, so if you have previously set these to non-default values, you may need to update your Prometheus scraper to match these new values.
* Karpenter will now reconcile nodes that are drifted due to their Security Groups or their Subnets. If your AWSNodeTemplate's Security Groups differ from the Security Groups used for an instance, Karpenter will consider it drifted. If the Subnet used by an instance is not contained in the allowed list of Subnets for an AWSNodeTemplate, Karpenter will also consider it drifted.
  * Since Karpenter uses tags for discovery of Subnets and SecurityGroups, check the [Threat Model]({{}}) to see how to manage this IAM Permission.

-### Upgrading to v0.28.0+
+### Upgrading to `0.28.0`+

{{% alert title="Warning" color="warning" %}}
-Karpenter `v0.28.0` is incompatible with Kubernetes version 1.26+, which can result in additional node scale outs when using `--cloudprovider=external`, which is the default for the EKS Optimized AMI. See: https://github.com/aws/karpenter-core/pull/375. Karpenter `>v0.28.1` fixes this issue and is compatible with Kubernetes version 1.26+.
+Karpenter `0.28.0` is incompatible with Kubernetes version 1.26+, which can result in additional node scale outs when using `--cloudprovider=external`, which is the default for the EKS Optimized AMI. See: https://github.com/aws/karpenter-core/pull/375. Karpenter `0.28.1`+ fixes this issue and is compatible with Kubernetes version 1.26+.
{{% /alert %}}

* The `extraObjects` value is now removed from the Helm chart. Having this value in the chart proved not to work in the majority of Karpenter installs and often led to anti-patterns, where the Karpenter resources installed to manage Karpenter's capacity were directly tied to the install of the Karpenter controller deployments. The Karpenter team recommends that, if you want to install Karpenter manifests alongside the Karpenter Helm chart, you do so by creating a separate chart for the manifests that declares a dependency on the controller chart.
@@ -133,7 +141,7 @@ Karpenter `v0.28.0` is incompatible with Kubernetes version 1.26+, which can res * `karpenter_nodes_terminated`: Use `karpenter_machines_terminated` if you are interested in the reason why a Karpenter machine was deleted. `karpenter_nodes_terminated` now only tracks the count of terminated nodes without any additional labels. * `karpenter_nodes_created`: Use `karpenter_machines_created` if you are interested in the reason why a Karpenter machine was created. `karpenter_nodes_created` now only tracks the count of created nodes without any additional labels. * `karpenter_deprovisioning_replacement_node_initialized_seconds`: This metric has been replaced in favor of `karpenter_deprovisioning_replacement_machine_initialized_seconds`. -* `v0.28.0` introduces the Machine CustomResource into the `karpenter.sh` API Group and requires this CustomResourceDefinition to run properly. Karpenter now orchestrates its CloudProvider capacity through these in-cluster Machine CustomResources. When performing a scheduling decision, Karpenter will create a Machine, resulting in launching CloudProvider capacity. The kubelet running on the new capacity will then register the node to the cluster shortly after launch. +* `0.28.0` introduces the Machine CustomResource into the `karpenter.sh` API Group and requires this CustomResourceDefinition to run properly. Karpenter now orchestrates its CloudProvider capacity through these in-cluster Machine CustomResources. When performing a scheduling decision, Karpenter will create a Machine, resulting in launching CloudProvider capacity. The kubelet running on the new capacity will then register the node to the cluster shortly after launch. * If you are using Helm to upgrade between versions of Karpenter, note that [Helm does not automate the process of upgrading or install the new CRDs into your cluster](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations). To install or upgrade the existing CRDs, follow the guidance under the [Custom Resource Definition (CRD) Upgrades]({{< relref "#custom-resource-definition-crd-upgrades" >}}) section of the upgrade guide. * Karpenter will hydrate Machines on startup for existing capacity managed by Karpenter into the cluster. Existing capacity launched by an older version of Karpenter is discovered by finding CloudProvider capacity with the `karpenter.sh/provisioner-name` tag or the `karpenter.sh/provisioner-name` label on nodes. * The metrics port for the Karpenter deployment was changed from 8080 to 8000. Users who scrape the pod directly for metrics rather than the service will need to adjust the commands they use to reference port 8000. Any users who scrape metrics from the service should be unaffected. @@ -149,19 +157,21 @@ Because Karpenter takes this dependency, any user that has the ability to Create {{% /alert %}} {{% alert title="Rolling Back" color="warning" %}} -If, after upgrading to `v0.28.0+`, a rollback to an older version of Karpenter needs to be performed, Karpenter will continue to function normally, though you will still have the Machine CustomResources on your cluster. You will need to manually delete the Machines and patch out the finalizers to fully complete the rollback. +If, after upgrading to `0.28.0`+, a rollback to an older version of Karpenter needs to be performed, Karpenter will continue to function normally, though you will still have the Machine CustomResources on your cluster. 
You will need to manually delete the Machines and patch out the finalizers to fully complete the rollback. -Karpenter marks CloudProvider capacity as "managed by" a Machine using the `karpenter-sh/managed-by` tag on the CloudProvider machine. It uses this tag to ensure that the Machine CustomResources in the cluster match the CloudProvider capacity managed by Karpenter. If these states don't match, Karpenter will garbage collect the capacity. Because of this, if performing an upgrade, followed by a rollback, followed by another upgrade to `v0.28.0+`, ensure you remove the `karpenter.sh/managed-by` tags from existing capacity; otherwise, Karpenter will deprovision the capacity without a Machine CR counterpart. +Karpenter marks CloudProvider capacity as "managed by" a Machine using the `karpenter-sh/managed-by` tag on the CloudProvider machine. It uses this tag to ensure that the Machine CustomResources in the cluster match the CloudProvider capacity managed by Karpenter. If these states don't match, Karpenter will garbage collect the capacity. Because of this, if performing an upgrade, followed by a rollback, followed by another upgrade to `0.28.0`+, ensure you remove the `karpenter.sh/managed-by` tags from existing capacity; otherwise, Karpenter will deprovision the capacity without a Machine CR counterpart. {{% /alert %}} -### Upgrading to v0.27.3+ -* The `defaulting.webhook.karpenter.sh` mutating webhook was removed in `v0.27.3`. If you are coming from an older version of Karpenter where this webhook existed and the webhook was not managed by Helm, you may need to delete the stale webhook. +### Upgrading to `0.27.3`+ + +* The `defaulting.webhook.karpenter.sh` mutating webhook was removed in `0.27.3`. If you are coming from an older version of Karpenter where this webhook existed and the webhook was not managed by Helm, you may need to delete the stale webhook. ```bash kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh ``` -### Upgrading to v0.27.0+ +### Upgrading to `0.27.0`+ + * The Karpenter controller pods now deploy with `kubernetes.io/hostname` self anti-affinity by default. If you are running Karpenter in HA (high-availability) mode and you do not have enough nodes to match the number of pod replicas you are deploying with, you will need to scale-out your nodes for Karpenter. * The following controller metrics changed and moved under the `controller_runtime` metrics namespace: * `karpenter_metricscraper_...` @@ -178,26 +188,32 @@ kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh * `provisioner-state` -> `provisioner_state` * The `karpenter_allocation_controller_scheduling_duration_seconds` metric name changed to `karpenter_provisioner_scheduling_duration_seconds` -### Upgrading to v0.26.0+ +### Upgrading to `0.26.0`+ + * The `karpenter.sh/do-not-evict` annotation no longer blocks node termination when running `kubectl delete node`. This annotation on pods will only block automatic deprovisioning that is considered "voluntary," that is, disruptions that can be avoided. Disruptions that Karpenter deems as "involuntary" and will ignore the `karpenter.sh/do-not-evict` annotation include spot interruption and manual deletion of the node. See [Disabling Deprovisioning]({{}}) for more details. * Default resources `requests` and `limits` are removed from the Karpenter's controller deployment through the Helm chart. 
If you have not set custom resource `requests` or `limits` in your Helm values and are using Karpenter's defaults, you will now need to set these values in your Helm chart deployment.
* The `controller.image` value in the Helm chart has been broken out to a map consisting of `controller.image.repository`, `controller.image.tag`, and `controller.image.digest`. If manually overriding the `controller.image`, you will need to update your values to the new design.

-### Upgrading to v0.25.0+
+### Upgrading to `0.25.0`+
+
* Cluster Endpoint can now be automatically discovered. If you are using Amazon Elastic Kubernetes Service (EKS), you can now omit the `clusterEndpoint` field in your configuration. To allow this automatic discovery, you have to add the `eks:DescribeCluster` permission to the Karpenter Controller IAM role.

-### Upgrading to v0.24.0+
+### Upgrading to `0.24.0`+
+
* Settings are no longer updated dynamically while Karpenter is running. If you manually make a change to the [`karpenter-global-settings`]({{}}) ConfigMap, you will need to reload the containers by restarting the deployment with `kubectl rollout restart -n karpenter deploy/karpenter`.
* Karpenter no longer filters out instance types internally. Previously, `g2` (not supported by the NVIDIA device plugin) and FPGA instance types were filtered. The only way to filter instance types now is to set requirements on your provisioner or pods using well-known node labels described [here]({{}}). If you are currently using overly broad requirements that allow all of the `g` instance-category, you will want to tighten the requirement, or add an instance-generation requirement.
* `aws.tags` in [`karpenter-global-settings`]({{}}) ConfigMap is now a top-level field and expects the value associated with this key to be a JSON object of string to string. This is a change from previous versions where keys were given implicitly by providing the key-value pair `aws.tags.: value` in the ConfigMap.

-### Upgrading to v0.22.0+
+### Upgrading to `0.22.0`+
+
* Do not upgrade to this version unless you are on Kubernetes >= v1.21. Karpenter no longer supports Kubernetes v1.20, but now supports Kubernetes v1.25. This change is due to the v1 PDB API, which was introduced in K8s v1.20, and the subsequent removal of the v1beta1 API in K8s v1.25.

-### Upgrading to v0.20.0+
-* Prior to v0.20.0, Karpenter would prioritize certain instance type categories absent of any requirements in the Provisioner. v0.20.0+ removes prioritizing these instance type categories ("m", "c", "r", "a", "t", "i") in code. Bare Metal and GPU instance types are still deprioritized and only used if no other instance types are compatible with the node requirements. Since Karpenter does not prioritize any instance types, if you do not want exotic instance types and are not using the runtime Provisioner defaults, you will need to specify this in the Provisioner.
+### Upgrading to `0.20.0`+
+
+* Prior to `0.20.0`, Karpenter would prioritize certain instance type categories absent of any requirements in the Provisioner. `0.20.0`+ removes prioritizing these instance type categories ("m", "c", "r", "a", "t", "i") in code. Bare Metal and GPU instance types are still deprioritized and only used if no other instance types are compatible with the node requirements. Since Karpenter does not prioritize any instance types, if you do not want exotic instance types and are not using the runtime Provisioner defaults, you will need to specify this in the Provisioner.
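As an illustrative sketch (not part of the upstream guide), an explicit requirement on the well-known `karpenter.k8s.aws/instance-category` label added to your Provisioner's `spec.requirements` keeps launches within the general-purpose, compute-optimized, and memory-optimized categories:

```yaml
# Sketch only: add this entry under spec.requirements in your existing Provisioner
# to constrain launches to the "c", "m", and "r" instance categories now that the
# in-code prioritization has been removed.
- key: karpenter.k8s.aws/instance-category
  operator: In
  values: ["c", "m", "r"]
```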
+ +### Upgrading to `0.19.0`+ -### Upgrading to v0.19.0+ * The karpenter webhook and controller containers are combined into a single binary, which requires changes to the Helm chart. If your Karpenter installation (Helm or otherwise) currently customizes the karpenter webhook, your deployment tooling may require minor changes. * Karpenter now supports native interruption handling. If you were previously using Node Termination Handler for spot interruption handling and health events, you will need to remove the component from your cluster before enabling `aws.interruptionQueueName`. For more details on Karpenter's interruption handling, see the [Interruption Handling Docs]({{< ref "../concepts/disruption/#interruption" >}}). * Instance category defaults are now explicitly persisted in the Provisioner, rather than handled implicitly in memory. By default, Provisioners will limit instance category to c,m,r. If any instance type constraints are applied, it will override this default. If you have created Provisioners in the past with unconstrained instance type, family, or category, Karpenter will now more flexibly use instance types than before. If you would like to apply these constraints, they must be included in the Provisioner CRD. @@ -214,38 +230,44 @@ kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh * `AWS_NODE_NAME_CONVENTION` -> `settings.aws.nodeNameConvention` * `VM_MEMORY_OVERHEAD` -> `settings.aws.vmMemoryOverheadPercent` -### Upgrading to v0.18.0+ -* v0.18.0 removes the `karpenter_consolidation_nodes_created` and `karpenter_consolidation_nodes_terminated` prometheus metrics in favor of the more generic `karpenter_nodes_created` and `karpenter_nodes_terminated` metrics. You can still see nodes created and terminated by consolidation by checking the `reason` label on the metrics. Check out all the metrics published by Karpenter [here]({{}}). +### Upgrading to `0.18.0`+ + +* `0.18.0` removes the `karpenter_consolidation_nodes_created` and `karpenter_consolidation_nodes_terminated` prometheus metrics in favor of the more generic `karpenter_nodes_created` and `karpenter_nodes_terminated` metrics. You can still see nodes created and terminated by consolidation by checking the `reason` label on the metrics. Check out all the metrics published by Karpenter [here]({{}}). + +### Upgrading to `0.17.0`+ -### Upgrading to v0.17.0+ Karpenter's Helm chart package is now stored in [Karpenter's OCI (Open Container Initiative) registry](https://gallery.ecr.aws/karpenter/karpenter). The Helm CLI supports the new format since [v3.8.0+](https://helm.sh/docs/topics/registries/). With this change [charts.karpenter.sh](https://charts.karpenter.sh/) is no longer updated but preserved to allow using older Karpenter versions. For examples on working with the Karpenter Helm charts look at [Install Karpenter Helm Chart]({{< ref "../getting-started/getting-started-with-karpenter/#install-karpenter-helm-chart" >}}). Users who have scripted the installation or upgrading of Karpenter need to adjust their scripts with the following changes: 1. There is no longer a need to add the Karpenter Helm repo with `helm repo add` 2. The full URL of the Helm chart needs to be present when using the `helm` CLI -3. If you were not prepending a `v` to the version (i.e. `0.17.0`), you will need to do so with the OCI chart, `v0.17.0`. +3. If you were not prepending a `v` to the version (i.e. `0.17.0`), you will need to do so with the OCI chart (i.e `v0.17.0`). 
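A minimal sketch of what an adjusted install script might look like against the OCI registry follows; the chart URL matches the public Karpenter gallery, while the namespace and version shown are illustrative assumptions:

```bash
# Illustrative only: reference the full OCI chart URL (no `helm repo add` needed)
# and prepend "v" to the chart version when pulling from the registry.
helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter \
  --version v0.17.0 \
  --namespace karpenter --create-namespace
```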
+ +### Upgrading to `0.16.2`+ -### Upgrading to v0.16.2+ -* v0.16.2 adds new kubeletConfiguration fields to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters: +* `0.16.2` adds new kubeletConfiguration fields to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters: ```bash kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.16.2/charts/karpenter/crds/karpenter.sh_provisioners.yaml ``` -### Upgrading to v0.16.0+ -* v0.16.0 adds a new weight field to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters: +### Upgrading to `0.16.0`+ + +* `0.16.0` adds a new weight field to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters: ```bash kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.16.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml ``` -### Upgrading to v0.15.0+ -* v0.15.0 adds a new consolidation field to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters: +### Upgrading to `0.15.0`+ + +* `0.15.0` adds a new consolidation field to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters: ```bash kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.15.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml ``` -### Upgrading to v0.14.0+ -* v0.14.0 adds new fields to the `provisioners.karpenter.sh` v1alpha5 and `awsnodetemplates.karpenter.k8s.aws` v1alpha1 CRDs. The CRDs will need to be updated to use the new parameters: +### Upgrading to `0.14.0`+ + +* `0.14.0` adds new fields to the `provisioners.karpenter.sh` v1alpha5 and `awsnodetemplates.karpenter.k8s.aws` v1alpha1 CRDs. The CRDs will need to be updated to use the new parameters: ```bash kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.14.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml @@ -253,7 +275,7 @@ kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/ kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.14.0/charts/karpenter/crds/karpenter.k8s.aws_awsnodetemplates.yaml ``` -* v0.14.0 changes the way Karpenter discovers its dynamically generated AWS launch templates to use a tag rather than a Name scheme. The previous name scheme was `Karpenter-${CLUSTER_NAME}-*` which could collide with user created launch templates that Karpenter should not manage. The new scheme uses a tag on the launch template `karpenter.k8s.aws/cluster: ${CLUSTER_NAME}`. As a result, Karpenter will not clean-up dynamically generated launch templates using the old name scheme. You can manually clean these up with the following commands: +* `0.14.0` changes the way Karpenter discovers its dynamically generated AWS launch templates to use a tag rather than a Name scheme. The previous name scheme was `Karpenter-${CLUSTER_NAME}-*` which could collide with user created launch templates that Karpenter should not manage. The new scheme uses a tag on the launch template `karpenter.k8s.aws/cluster: ${CLUSTER_NAME}`. As a result, Karpenter will not clean-up dynamically generated launch templates using the old name scheme. 
You can manually clean these up with the following commands: ```bash ## Find launch templates that match the naming pattern and you do not want to keep @@ -263,52 +285,54 @@ aws ec2 describe-launch-templates --filters="Name=launch-template-name,Values=Ka aws ec2 delete-launch-template --launch-template-id ``` -* v0.14.0 introduces additional instance type filtering if there are no `node.kubernetes.io/instance-type` or `karpenter.k8s.aws/instance-family` or `karpenter.k8s.aws/instance-category` requirements that restrict instance types specified on the provisioner. This prevents Karpenter from launching bare metal and some older non-current generation instance types unless the provisioner has been explicitly configured to allow them. If you specify an instance type or family requirement that supplies a list of instance-types or families, that list will be used regardless of filtering. The filtering can also be completely eliminated by adding an `Exists` requirement for instance type or family. +* `0.14.0` introduces additional instance type filtering if there are no `node.kubernetes.io/instance-type` or `karpenter.k8s.aws/instance-family` or `karpenter.k8s.aws/instance-category` requirements that restrict instance types specified on the provisioner. This prevents Karpenter from launching bare metal and some older non-current generation instance types unless the provisioner has been explicitly configured to allow them. If you specify an instance type or family requirement that supplies a list of instance-types or families, that list will be used regardless of filtering. The filtering can also be completely eliminated by adding an `Exists` requirement for instance type or family. ```yaml - key: node.kubernetes.io/instance-type operator: Exists ``` -* v0.14.0 introduces support for custom AMIs without the need for an entire launch template. You must add the `ec2:DescribeImages` permission to the Karpenter Controller Role for this feature to work. This permission is needed for Karpenter to discover custom images specified. Read the [Custom AMI documentation here]({{}}) to get started -* v0.14.0 adds an an additional default toleration (CriticalAddonOnly=Exists) to the Karpenter Helm chart. This may cause Karpenter to run on nodes with that use this Taint which previously would not have been schedulable. This can be overridden by using `--set tolerations[0]=null`. +* `0.14.0` introduces support for custom AMIs without the need for an entire launch template. You must add the `ec2:DescribeImages` permission to the Karpenter Controller Role for this feature to work. This permission is needed for Karpenter to discover custom images specified. Read the [Custom AMI documentation here]({{}}) to get started +* `0.14.0` adds an an additional default toleration (CriticalAddonOnly=Exists) to the Karpenter Helm chart. This may cause Karpenter to run on nodes with that use this Taint which previously would not have been schedulable. This can be overridden by using `--set tolerations[0]=null`. -* v0.14.0 deprecates the `AWS_ENI_LIMITED_POD_DENSITY` environment variable in-favor of specifying `spec.kubeletConfiguration.maxPods` on the Provisioner. `AWS_ENI_LIMITED_POD_DENSITY` will continue to work when `maxPods` is not set on the Provisioner. If `maxPods` is set, it will override `AWS_ENI_LIMITED_POD_DENSITY` on that specific Provisioner. +* `0.14.0` deprecates the `AWS_ENI_LIMITED_POD_DENSITY` environment variable in-favor of specifying `spec.kubeletConfiguration.maxPods` on the Provisioner. 
`AWS_ENI_LIMITED_POD_DENSITY` will continue to work when `maxPods` is not set on the Provisioner. If `maxPods` is set, it will override `AWS_ENI_LIMITED_POD_DENSITY` on that specific Provisioner. -### Upgrading to v0.13.0+ -* v0.13.0 introduces a new CRD named `AWSNodeTemplate` which can be used to specify AWS Cloud Provider parameters. Everything that was previously specified under `spec.provider` in the Provisioner resource, can now be specified in the spec of the new resource. The use of `spec.provider` is deprecated but will continue to function to maintain backwards compatibility for the current API version (v1alpha5) of the Provisioner resource. v0.13.0 also introduces support for custom user data that doesn't require the use of a custom launch template. The user data can be specified in-line in the AWSNodeTemplate resource. +### Upgrading to `0.13.0`+ - If you are upgrading from v0.10.1 - v0.11.1, a new CRD `awsnodetemplate` was added. In v0.12.0, this crd was renamed to `awsnodetemplates`. Since Helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade: +* `0.13.0` introduces a new CRD named `AWSNodeTemplate` which can be used to specify AWS Cloud Provider parameters. Everything that was previously specified under `spec.provider` in the Provisioner resource, can now be specified in the spec of the new resource. The use of `spec.provider` is deprecated but will continue to function to maintain backwards compatibility for the current API version (v1alpha5) of the Provisioner resource. `0.13.0` also introduces support for custom user data that doesn't require the use of a custom launch template. The user data can be specified in-line in the AWSNodeTemplate resource. + + If you are upgrading from `0.10.1` - `0.11.1`, a new CRD `awsnodetemplate` was added. In `0.12.0`, this crd was renamed to `awsnodetemplates`. Since Helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade: 1. Make sure any `awsnodetemplate` manifests are saved somewhere so that they can be reapplied to the cluster. 2. `kubectl delete crd awsnodetemplate` 3. `kubectl apply -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.13.2/charts/karpenter/crds/karpenter.k8s.aws_awsnodetemplates.yaml` - 4. Perform the Karpenter upgrade to v0.13.x, which will install the new `awsnodetemplates` CRD. + 4. Perform the Karpenter upgrade to `0.13.0`+, which will install the new `awsnodetemplates` CRD. 5. Reapply the `awsnodetemplate` manifests you saved from step 1, if applicable. -* v0.13.0 also adds EC2/spot price fetching to Karpenter to allow making more accurate decisions regarding node deployments. Our [getting started guide]({{< ref "../getting-started/getting-started-with-karpenter" >}}) documents this, but if you are upgrading Karpenter you will need to modify your Karpenter controller policy to add the `pricing:GetProducts` and `ec2:DescribeSpotPriceHistory` permissions. +* `0.13.0` also adds EC2/spot price fetching to Karpenter to allow making more accurate decisions regarding node deployments. Our [getting started guide]({{< ref "../getting-started/getting-started-with-karpenter" >}}) documents this, but if you are upgrading Karpenter you will need to modify your Karpenter controller policy to add the `pricing:GetProducts` and `ec2:DescribeSpotPriceHistory` permissions. + +### Upgrading to `0.12.0`+ -### Upgrading to v0.12.0+ -* v0.12.0 adds an OwnerReference to each Node created by a provisioner. 
Previously, deleting a provisioner would orphan nodes. Now, deleting a provisioner will cause Kubernetes [cascading delete](https://kubernetes.io/docs/concepts/architecture/garbage-collection/#cascading-deletion) logic to gracefully terminate the nodes using the Karpenter node finalizer. You may still orphan nodes by removing the owner reference. -* If you are upgrading from v0.10.1 - v0.11.1, a new CRD `awsnodetemplate` was added. In v0.12.0, this crd was renamed to `awsnodetemplates`. Since Helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade: +* `0.12.0` adds an OwnerReference to each Node created by a provisioner. Previously, deleting a provisioner would orphan nodes. Now, deleting a provisioner will cause Kubernetes [cascading delete](https://kubernetes.io/docs/concepts/architecture/garbage-collection/#cascading-deletion) logic to gracefully terminate the nodes using the Karpenter node finalizer. You may still orphan nodes by removing the owner reference. +* If you are upgrading from `0.10.1` - `0.11.1`, a new CRD `awsnodetemplate` was added. In `0.12.0`, this crd was renamed to `awsnodetemplates`. Since Helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade: 1. Make sure any `awsnodetemplate` manifests are saved somewhere so that they can be reapplied to the cluster. 2. `kubectl delete crd awsnodetemplate` 3. `kubectl apply -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.12.1/charts/karpenter/crds/karpenter.k8s.aws_awsnodetemplates.yaml` - 4. Perform the Karpenter upgrade to v0.12.x, which will install the new `awsnodetemplates` CRD. + 4. Perform the Karpenter upgrade to `0.12.0`+, which will install the new `awsnodetemplates` CRD. 5. Reapply the `awsnodetemplate` manifests you saved from step 1, if applicable. -### Upgrading to v0.11.0+ +### Upgrading to `0.11.0`+ -v0.11.0 changes the way that the `vpc.amazonaws.com/pod-eni` resource is reported. Instead of being reported for all nodes that could support the resources regardless of if the cluster is configured to support it, it is now controlled by a command line flag or environment variable. The parameter defaults to false and must be set if your cluster uses [security groups for pods](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html). This can be enabled by setting the environment variable `AWS_ENABLE_POD_ENI` to true via the helm value `controller.env`. +`0.11.0` changes the way that the `vpc.amazonaws.com/pod-eni` resource is reported. Instead of being reported for all nodes that could support the resources regardless of if the cluster is configured to support it, it is now controlled by a command line flag or environment variable. The parameter defaults to false and must be set if your cluster uses [security groups for pods](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html). This can be enabled by setting the environment variable `AWS_ENABLE_POD_ENI` to true via the helm value `controller.env`. Other extended resources must be registered on nodes by their respective device plugins which are typically installed as DaemonSets (e.g. the `nvidia.com/gpu` resource will be registered by the [NVIDIA device plugin](https://github.com/NVIDIA/k8s-device-plugin). Previously, Karpenter would register these resources on nodes at creation and they would be zeroed out by `kubelet` at startup. 
By allowing the device plugins to register the resources, pods will not bind to the nodes before any device plugin initialization has occurred. -v0.11.0 adds a `providerRef` field in the Provisioner CRD. To use this new field you will need to replace the Provisioner CRD manually: +`0.11.0` adds a `providerRef` field in the Provisioner CRD. To use this new field you will need to replace the Provisioner CRD manually: ```shell kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.11.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml ``` -### Upgrading to v0.10.0+ +### Upgrading to `0.10.0`+ -v0.10.0 adds a new field, `startupTaints` to the provisioner spec. Standard Helm upgrades [do not upgrade CRDs](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations) so the field will not be available unless the CRD is manually updated. This can be performed prior to the standard upgrade by applying the new CRD manually: +`0.10.0` adds a new field, `startupTaints` to the provisioner spec. Standard Helm upgrades [do not upgrade CRDs](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations) so the field will not be available unless the CRD is manually updated. This can be performed prior to the standard upgrade by applying the new CRD manually: ```shell kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.10.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml @@ -316,7 +340,7 @@ kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/ 📝 If you don't perform this manual CRD update, Karpenter will work correctly except for rejecting the creation/update of provisioners that use `startupTaints`. -### Upgrading to v0.6.2+ +### Upgrading to `0.6.2`+ If using Helm, the variable names have changed for the cluster's name and endpoint. You may need to update any configuration that sets the old variable names. 
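As a purely hypothetical illustration of the kind of change this implies, the Helm values for the cluster name and endpoint would be re-set under whatever keys your chart version expects; the keys below are placeholders, not the actual chart values, so consult the chart's values.yaml for the real names:

```bash
# Hypothetical example only: the value keys shown here are assumptions.
helm upgrade --install karpenter karpenter/karpenter \
  --namespace karpenter \
  --set clusterName="${CLUSTER_NAME}" \
  --set clusterEndpoint="${CLUSTER_ENDPOINT}"
```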
From f34358018cf841d34198e227f10e8912206bf0b1 Mon Sep 17 00:00:00 2001 From: Jonathan Innis Date: Fri, 23 Feb 2024 17:28:14 -0800 Subject: [PATCH 2/3] Fix prepareWebsite --- charts/karpenter/README.md | 2 +- hack/docs/compatibility-karpenter.yaml | 3 +++ hack/release/common.sh | 4 ++-- tools/kompat/pkg/kompat/kompat.go | 4 ++-- website/content/en/preview/reference/settings.md | 10 +++++----- website/content/en/preview/upgrading/compatibility.md | 6 +++--- 6 files changed, 16 insertions(+), 13 deletions(-) diff --git a/charts/karpenter/README.md b/charts/karpenter/README.md index 54ee4ff8aea8..5bd21268cc0b 100644 --- a/charts/karpenter/README.md +++ b/charts/karpenter/README.md @@ -15,7 +15,7 @@ You can follow the detailed installation instruction in the [documentation](http ```bash helm upgrade --install --namespace karpenter --create-namespace \ karpenter oci://public.ecr.aws/karpenter/karpenter \ - --version v0.34.0 \ + --version 0.34.0 \ --set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}" \ --set settings.clusterName=${CLUSTER_NAME} \ --set settings.interruptionQueue=${CLUSTER_NAME} \ diff --git a/hack/docs/compatibility-karpenter.yaml b/hack/docs/compatibility-karpenter.yaml index 3a857e77925a..8c2e678c5def 100644 --- a/hack/docs/compatibility-karpenter.yaml +++ b/hack/docs/compatibility-karpenter.yaml @@ -43,5 +43,8 @@ compatibility: minK8sVersion: 1.23 maxK8sVersion: 1.28 - appVersion: 0.34.0 + minK8sVersion: 1.23 + maxK8sVersion: 1.29 + - appVersion: 0.34.1 minK8sVersion: 1.23 maxK8sVersion: 1.29 \ No newline at end of file diff --git a/hack/release/common.sh b/hack/release/common.sh index 1f94dff5bd8c..7c1644a39e3d 100644 --- a/hack/release/common.sh +++ b/hack/release/common.sh @@ -138,7 +138,7 @@ prepareWebsite() { version_parts=(${version//./ }) short_version="${version_parts[0]}.${version_parts[1]}" - createNewWebsiteDirectory "${short_version}" + createNewWebsiteDirectory "${short_version}" "${version}" removeOldWebsiteDirectories editWebsiteConfig "${version}" editWebsiteVersionsMenu @@ -199,7 +199,7 @@ editWebsiteVersionsMenu() { local versions version # shellcheck disable=SC2207 - versions=($(find website/content/en/* -maxdepth 0 -type d -name "*" -print0 | xargs -r -n 1 basename | grep -v "docs\|preview")) + versions=($(find website/content/en/* -maxdepth 0 -type d -name "*" -print0 | xargs -0 -r -n 1 basename | grep -v "docs\|preview")) versions+=('preview') yq -i '.params.versions = []' website/hugo.yaml diff --git a/tools/kompat/pkg/kompat/kompat.go b/tools/kompat/pkg/kompat/kompat.go index 47492cabd828..ef8775ed91eb 100644 --- a/tools/kompat/pkg/kompat/kompat.go +++ b/tools/kompat/pkg/kompat/kompat.go @@ -193,9 +193,9 @@ func (k Kompat) Markdown(_ ...Options) string { data := []string{k.Name} for _, c := range k.Compatibility { if c.MaxK8sVersion == "" || c.MinK8sVersion == c.MaxK8sVersion { - headers = append(headers, fmt.Sprintf("%s+", c.MinK8sVersion)) + headers = append(headers, fmt.Sprintf("`%s`+", c.MinK8sVersion)) } else { - headers = append(headers, fmt.Sprintf("%s - %s", c.MinK8sVersion, c.MaxK8sVersion)) + headers = append(headers, fmt.Sprintf("`%s` - `%s`", c.MinK8sVersion, c.MaxK8sVersion)) } data = append(data, c.AppVersion) } diff --git a/website/content/en/preview/reference/settings.md b/website/content/en/preview/reference/settings.md index 8f916edc0eb1..4150586483ea 100644 --- a/website/content/en/preview/reference/settings.md +++ b/website/content/en/preview/reference/settings.md @@ -43,11 +43,11 @@ Karpenter 
surfaces environment variables and CLI parameters to allow you to conf Karpenter uses [feature gates](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features) You can enable the feature gates through the `--feature-gates` CLI environment variable or the `FEATURE_GATES` environment variable in the Karpenter deployment. For example, you can configure drift, spotToSpotConsolidation by setting the CLI argument: `--feature-gates Drift=true,SpotToSpotConsolidation=true`. -| Feature | Default | Stage | Since | Until | -|-------------------------|---------|-------|----------|----------| -| Drift | false | Alpha | `0.21.0` | `0.32.x` | -| Drift | true | Beta | `0.33.0` | | -| SpotToSpotConsolidation | false | Beta | `0.34.0` | | +| Feature | Default | Stage | Since | Until | +|-------------------------|---------|-------|---------|---------| +| Drift | false | Alpha | v0.21.x | v0.32.x | +| Drift | true | Beta | v0.33.x | | +| SpotToSpotConsolidation | false | Beta | v0.34.x | | ### Batching Parameters diff --git a/website/content/en/preview/upgrading/compatibility.md b/website/content/en/preview/upgrading/compatibility.md index 6c3adf8d4f1d..88507e3c7b8f 100644 --- a/website/content/en/preview/upgrading/compatibility.md +++ b/website/content/en/preview/upgrading/compatibility.md @@ -15,9 +15,9 @@ Before you begin upgrading Karpenter, consider Karpenter compatibility issues re [comment]: <> (the content below is generated from hack/docs/compataiblitymetrix_gen_docs.go) -| KUBERNETES | 1.23 | 1.24 | 1.25 | 1.26 | 1.27 | 1.28 | 1.29 | -| ---------- | --------- | --------- | --------- | --------- | --------- | --------- | --------- | -| karpenter | `0.21.0`+ | `0.21.0`+ | `0.25.0`+ | `0.28.0`+ | `0.28.0`+ | `0.31.0`+ | `0.34.0`+ | +| KUBERNETES | 1.23 | 1.24 | 1.25 | 1.26 | 1.27 | 1.28 | 1.29 | +|------------|---------|---------|---------|---------|---------|---------|---------| +| karpenter | 0.21.x+ | 0.21.x+ | 0.25.x+ | 0.28.x+ | 0.28.x+ | 0.31.x+ | 0.34.0+ | [comment]: <> (end docs generated content from hack/docs/compataiblitymetrix_gen_docs.go) From c9a36c279999eac5548a446ecbe7ef036f9e2a92 Mon Sep 17 00:00:00 2001 From: Jonathan Innis Date: Fri, 23 Feb 2024 18:05:19 -0800 Subject: [PATCH 3/3] Small script misses --- hack/release/common.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hack/release/common.sh b/hack/release/common.sh index 7c1644a39e3d..87a0f8602d71 100644 --- a/hack/release/common.sh +++ b/hack/release/common.sh @@ -88,14 +88,14 @@ publishHelmChart() { commit_sha="${4}" build_date="${5}" - ah_config_file_name"${helm_chart}/artifacthub-repo.yaml" + ah_config_file_name="${helm_chart}/artifacthub-repo.yaml" helm_chart_artifact="${helm_chart}-${version}.tgz" yq e -i ".appVersion = \"${version}\"" "charts/${helm_chart}/Chart.yaml" yq e -i ".version = \"${version}\"" "charts/${helm_chart}/Chart.yaml" cd charts - [[ -s "${ah_config_file_name}" ]] && oras push "${oci_repo}:artifacthub.io" --config /dev/null:application/vnd.cncf.artifacthub.config.v1+yaml "${ah_config_file_name}:application/vnd.cncf.artifacthub.repository-metadata.layer.v1.yaml" + [[ -s "${ah_config_file_name}" ]] && oras push "${oci_repo}/${helm_chart}:artifacthub.io" --config /dev/null:application/vnd.cncf.artifacthub.config.v1+yaml "${ah_config_file_name}:application/vnd.cncf.artifacthub.repository-metadata.layer.v1.yaml" helm dependency update "${helm_chart}" helm lint "${helm_chart}" helm package "${helm_chart}" --version 
"${version}" @@ -103,8 +103,8 @@ publishHelmChart() { rm "${helm_chart_artifact}" cd .. - helm_chart_digest="$(crane digest "${oci_repo}:${version}")" - cosignOciArtifact "${version}" "${commit_sha}" "${build_date}" "${oci_repo}:${version}@${helm_chart_digest}" + helm_chart_digest="$(crane digest "${oci_repo}/${helm_chart}:${version}")" + cosignOciArtifact "${version}" "${commit_sha}" "${build_date}" "${oci_repo}/${helm_chart}:${version}@${helm_chart_digest}" } cosignOciArtifact() {