Skip to content

Commit

Permalink
docs changes in getting-started-with-karpenter doc to fix #5677 (#5989)
Browse files Browse the repository at this point in the history
  • Loading branch information
santosh-at-github authored Apr 8, 2024
1 parent 7713f4f commit 0af6b01
Show file tree
Hide file tree
Showing 16 changed files with 270 additions and 110 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,15 @@ The following cluster configuration will:
* Create a role to allow spot instances.
* Run Helm to install Karpenter

{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh" language="bash"%}}
{{< tabpane text=true right=false >}}
{{% tab header="**Create cluster command**:" disabled=true /%}}
{{% tab header="Managed NodeGroups" %}}
{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh" language="bash"%}}
{{% /tab %}}
{{% tab header="Fargate" %}}
{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh" language="bash"%}}
{{% /tab %}}
{{< /tabpane >}}

{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step06-add-spot-role.sh" language="bash"%}}

Expand All @@ -88,7 +96,15 @@ See [Enabling Windows support](https://docs.aws.amazon.com/eks/latest/userguide/

### 4. Install Karpenter

{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh" language="bash"%}}
{{< tabpane text=true right=false >}}
{{% tab header="**Karpenter installation command**:" disabled=true /%}}
{{% tab header="Managed NodeGroups" %}}
{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh" language="bash"%}}
{{% /tab %}}
{{% tab header="Fargate" %}}
{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart-fargate.sh" language="bash"%}}
{{% /tab %}}
{{< /tabpane >}}

{{% alert title="DNS Policy Notice" color="warning" %}}
Karpenter uses the `ClusterFirst` pod DNS policy by default. This is the Kubernetes cluster default, and it ensures that Karpenter can reach out to internal Kubernetes services during its lifetime. There may be cases where the DNS service you are using on your cluster is not up and running before Karpenter starts up. The most common case of this is when you want Karpenter to manage the node capacity where your DNS service pods are running.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,11 @@
eksctl create cluster -f - << EOF
curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
&& aws cloudformation deploy \
--stack-name "Karpenter-${CLUSTER_NAME}" \
--template-file "${TEMPOUT}" \
--capabilities CAPABILITY_NAMED_IAM \
--parameter-overrides "ClusterName=${CLUSTER_NAME}"

eksctl create cluster -f - <<EOF
---
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
Expand All @@ -8,10 +15,35 @@ metadata:
version: "${K8S_VERSION}"
tags:
karpenter.sh/discovery: ${CLUSTER_NAME}
fargateProfiles:
- name: karpenter
selectors:
- namespace: "${KARPENTER_NAMESPACE}"
iam:
withOIDC: true
serviceAccounts:
- metadata:
name: karpenter
namespace: "${KARPENTER_NAMESPACE}"
roleName: ${CLUSTER_NAME}-karpenter
attachPolicyARNs:
- arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME}
roleOnly: true
iamIdentityMappings:
- arn: "arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/KarpenterNodeRole-${CLUSTER_NAME}"
username: system:node:{{EC2PrivateDNSName}}
groups:
- system:bootstrappers
- system:nodes
## If you intend to run Windows workloads, the kube-proxy group should be specified.
# For more information, see https://github.com/aws/karpenter/issues/5099.
# - eks:kube-proxy-windows
fargateProfiles:
- name: karpenter
selectors:
- namespace: "${KARPENTER_NAMESPACE}"
EOF

export CLUSTER_ENDPOINT="$(aws eks describe-cluster --name ${CLUSTER_NAME} --query "cluster.endpoint" --output text)"
export KARPENTER_IAM_ROLE_ARN="arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAME}-karpenter"

echo $CLUSTER_ENDPOINT $KARPENTER_IAM_ROLE_ARN
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \
curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
&& aws cloudformation deploy \
--stack-name "Karpenter-${CLUSTER_NAME}" \
--template-file "${TEMPOUT}" \
Expand All @@ -25,20 +25,6 @@ iam:
permissionPolicyARNs:
- arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME}
## Optionally run on fargate or on k8s 1.23
# Pod Identity is not available on fargate
# https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html
# iam:
# withOIDC: true
# serviceAccounts:
# - metadata:
# name: karpenter
# namespace: "${KARPENTER_NAMESPACE}"
# roleName: ${CLUSTER_NAME}-karpenter
# attachPolicyARNs:
# - arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME}
# roleOnly: true
iamIdentityMappings:
- arn: "arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/KarpenterNodeRole-${CLUSTER_NAME}"
username: system:node:{{EC2PrivateDNSName}}
Expand All @@ -59,12 +45,6 @@ managedNodeGroups:
addons:
- name: eks-pod-identity-agent
## Optionally run on fargate
# fargateProfiles:
# - name: karpenter
# selectors:
# - namespace: "${KARPENTER_NAMESPACE}"
EOF

export CLUSTER_ENDPOINT="$(aws eks describe-cluster --name "${CLUSTER_NAME}" --query "cluster.endpoint" --output text)"
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Logout of helm registry to perform an unauthenticated pull against the public ECR
helm registry logout public.ecr.aws

# Install (or upgrade in place) the Karpenter controller from the public ECR OCI chart.
# Controller requests/limits are pinned to 1 CPU / 1Gi so the pod gets a fixed-size
# Fargate compute allocation. The serviceAccount annotation binds the IAM role via
# the eks.amazonaws.com/role-arn (IRSA) mechanism — note the escaped dots, which keep
# Helm from treating the annotation key as a nested path. --wait blocks until the
# release's resources are ready before the script continues.
helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version "${KARPENTER_VERSION}" --namespace "${KARPENTER_NAMESPACE}" --create-namespace \
--set "settings.clusterName=${CLUSTER_NAME}" \
--set "settings.interruptionQueue=${CLUSTER_NAME}" \
--set controller.resources.requests.cpu=1 \
--set controller.resources.requests.memory=1Gi \
--set controller.resources.limits.cpu=1 \
--set controller.resources.limits.memory=1Gi \
--set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAME}-karpenter" \
--wait
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,15 @@ The following cluster configuration will:
* Create a role to allow spot instances.
* Run Helm to install Karpenter

{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh" language="bash"%}}
{{< tabpane text=true right=false >}}
{{% tab header="**Create cluster command**:" disabled=true /%}}
{{% tab header="Managed NodeGroups" %}}
{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh" language="bash"%}}
{{% /tab %}}
{{% tab header="Fargate" %}}
{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh" language="bash"%}}
{{% /tab %}}
{{< /tabpane >}}

{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step06-add-spot-role.sh" language="bash"%}}

Expand All @@ -88,7 +96,15 @@ See [Enabling Windows support](https://docs.aws.amazon.com/eks/latest/userguide/

### 4. Install Karpenter

{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh" language="bash"%}}
{{< tabpane text=true right=false >}}
{{% tab header="**Karpenter installation command**:" disabled=true /%}}
{{% tab header="Managed NodeGroups" %}}
{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh" language="bash"%}}
{{% /tab %}}
{{% tab header="Fargate" %}}
{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart-fargate.sh" language="bash"%}}
{{% /tab %}}
{{< /tabpane >}}

As the OCI Helm chart is signed by [Cosign](https://github.com/sigstore/cosign) as part of the release process you can verify the chart before installing it by running the following command.

Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,11 @@
eksctl create cluster -f - << EOF
curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
&& aws cloudformation deploy \
--stack-name "Karpenter-${CLUSTER_NAME}" \
--template-file "${TEMPOUT}" \
--capabilities CAPABILITY_NAMED_IAM \
--parameter-overrides "ClusterName=${CLUSTER_NAME}"

eksctl create cluster -f - <<EOF
---
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
Expand All @@ -8,10 +15,35 @@ metadata:
version: "${K8S_VERSION}"
tags:
karpenter.sh/discovery: ${CLUSTER_NAME}
fargateProfiles:
- name: karpenter
selectors:
- namespace: "${KARPENTER_NAMESPACE}"
iam:
withOIDC: true
serviceAccounts:
- metadata:
name: karpenter
namespace: "${KARPENTER_NAMESPACE}"
roleName: ${CLUSTER_NAME}-karpenter
attachPolicyARNs:
- arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME}
roleOnly: true
iamIdentityMappings:
- arn: "arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/KarpenterNodeRole-${CLUSTER_NAME}"
username: system:node:{{EC2PrivateDNSName}}
groups:
- system:bootstrappers
- system:nodes
## If you intend to run Windows workloads, the kube-proxy group should be specified.
# For more information, see https://github.com/aws/karpenter/issues/5099.
# - eks:kube-proxy-windows
fargateProfiles:
- name: karpenter
selectors:
- namespace: "${KARPENTER_NAMESPACE}"
EOF

export CLUSTER_ENDPOINT="$(aws eks describe-cluster --name ${CLUSTER_NAME} --query "cluster.endpoint" --output text)"
export KARPENTER_IAM_ROLE_ARN="arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAME}-karpenter"

echo $CLUSTER_ENDPOINT $KARPENTER_IAM_ROLE_ARN
Original file line number Diff line number Diff line change
Expand Up @@ -25,20 +25,6 @@ iam:
permissionPolicyARNs:
- arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME}
## Optionally run on fargate or on k8s 1.23
# Pod Identity is not available on fargate
# https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html
# iam:
# withOIDC: true
# serviceAccounts:
# - metadata:
# name: karpenter
# namespace: "${KARPENTER_NAMESPACE}"
# roleName: ${CLUSTER_NAME}-karpenter
# attachPolicyARNs:
# - arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME}
# roleOnly: true
iamIdentityMappings:
- arn: "arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/KarpenterNodeRole-${CLUSTER_NAME}"
username: system:node:{{EC2PrivateDNSName}}
Expand All @@ -59,12 +45,6 @@ managedNodeGroups:
addons:
- name: eks-pod-identity-agent
## Optionally run on fargate
# fargateProfiles:
# - name: karpenter
# selectors:
# - namespace: "${KARPENTER_NAMESPACE}"
EOF

export CLUSTER_ENDPOINT="$(aws eks describe-cluster --name "${CLUSTER_NAME}" --query "cluster.endpoint" --output text)"
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Logout of helm registry to perform an unauthenticated pull against the public ECR
helm registry logout public.ecr.aws

# Install (or upgrade in place) the Karpenter controller from the public ECR OCI chart.
# Controller requests/limits are pinned to 1 CPU / 1Gi so the pod gets a fixed-size
# Fargate compute allocation. The serviceAccount annotation binds the IAM role via
# the eks.amazonaws.com/role-arn (IRSA) mechanism — note the escaped dots, which keep
# Helm from treating the annotation key as a nested path. --wait blocks until the
# release's resources are ready before the script continues.
helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version "${KARPENTER_VERSION}" --namespace "${KARPENTER_NAMESPACE}" --create-namespace \
--set "settings.clusterName=${CLUSTER_NAME}" \
--set "settings.interruptionQueue=${CLUSTER_NAME}" \
--set controller.resources.requests.cpu=1 \
--set controller.resources.requests.memory=1Gi \
--set controller.resources.limits.cpu=1 \
--set controller.resources.limits.memory=1Gi \
--set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAME}-karpenter" \
--wait
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,15 @@ The following cluster configuration will:
* Create a role to allow spot instances.
* Run Helm to install Karpenter

{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh" language="bash"%}}
{{< tabpane text=true right=false >}}
{{% tab header="**Create cluster command**:" disabled=true /%}}
{{% tab header="Managed NodeGroups" %}}
{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh" language="bash"%}}
{{% /tab %}}
{{% tab header="Fargate" %}}
{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh" language="bash"%}}
{{% /tab %}}
{{< /tabpane >}}

{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step06-add-spot-role.sh" language="bash"%}}

Expand All @@ -88,7 +96,15 @@ See [Enabling Windows support](https://docs.aws.amazon.com/eks/latest/userguide/

### 4. Install Karpenter

{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh" language="bash"%}}
{{< tabpane text=true right=false >}}
{{% tab header="**Karpenter installation command**:" disabled=true /%}}
{{% tab header="Managed NodeGroups" %}}
{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh" language="bash"%}}
{{% /tab %}}
{{% tab header="Fargate" %}}
{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart-fargate.sh" language="bash"%}}
{{% /tab %}}
{{< /tabpane >}}

{{% alert title="DNS Policy Notice" color="warning" %}}
Karpenter uses the `ClusterFirst` pod DNS policy by default. This is the Kubernetes cluster default, and it ensures that Karpenter can reach out to internal Kubernetes services during its lifetime. There may be cases where the DNS service you are using on your cluster is not up and running before Karpenter starts up. The most common case of this is when you want Karpenter to manage the node capacity where your DNS service pods are running.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,11 @@
eksctl create cluster -f - << EOF
curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
&& aws cloudformation deploy \
--stack-name "Karpenter-${CLUSTER_NAME}" \
--template-file "${TEMPOUT}" \
--capabilities CAPABILITY_NAMED_IAM \
--parameter-overrides "ClusterName=${CLUSTER_NAME}"

eksctl create cluster -f - <<EOF
---
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
Expand All @@ -8,10 +15,35 @@ metadata:
version: "${K8S_VERSION}"
tags:
karpenter.sh/discovery: ${CLUSTER_NAME}
fargateProfiles:
- name: karpenter
selectors:
- namespace: "${KARPENTER_NAMESPACE}"
iam:
withOIDC: true
serviceAccounts:
- metadata:
name: karpenter
namespace: "${KARPENTER_NAMESPACE}"
roleName: ${CLUSTER_NAME}-karpenter
attachPolicyARNs:
- arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME}
roleOnly: true
iamIdentityMappings:
- arn: "arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/KarpenterNodeRole-${CLUSTER_NAME}"
username: system:node:{{EC2PrivateDNSName}}
groups:
- system:bootstrappers
- system:nodes
## If you intend to run Windows workloads, the kube-proxy group should be specified.
# For more information, see https://github.com/aws/karpenter/issues/5099.
# - eks:kube-proxy-windows
fargateProfiles:
- name: karpenter
selectors:
- namespace: "${KARPENTER_NAMESPACE}"
EOF

export CLUSTER_ENDPOINT="$(aws eks describe-cluster --name ${CLUSTER_NAME} --query "cluster.endpoint" --output text)"
export KARPENTER_IAM_ROLE_ARN="arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAME}-karpenter"

echo $CLUSTER_ENDPOINT $KARPENTER_IAM_ROLE_ARN
Original file line number Diff line number Diff line change
Expand Up @@ -25,20 +25,6 @@ iam:
permissionPolicyARNs:
- arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME}
## Optionally run on fargate or on k8s 1.23
# Pod Identity is not available on fargate
# https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html
# iam:
# withOIDC: true
# serviceAccounts:
# - metadata:
# name: karpenter
# namespace: "${KARPENTER_NAMESPACE}"
# roleName: ${CLUSTER_NAME}-karpenter
# attachPolicyARNs:
# - arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME}
# roleOnly: true
iamIdentityMappings:
- arn: "arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/KarpenterNodeRole-${CLUSTER_NAME}"
username: system:node:{{EC2PrivateDNSName}}
Expand All @@ -59,12 +45,6 @@ managedNodeGroups:
addons:
- name: eks-pod-identity-agent
## Optionally run on fargate
# fargateProfiles:
# - name: karpenter
# selectors:
# - namespace: "${KARPENTER_NAMESPACE}"
EOF

export CLUSTER_ENDPOINT="$(aws eks describe-cluster --name ${CLUSTER_NAME} --query "cluster.endpoint" --output text)"
Expand Down
Loading

0 comments on commit 0af6b01

Please sign in to comment.