From 889aa85275be13bd5b71f4145353beeb86e79a20 Mon Sep 17 00:00:00 2001
From: Amanuel Engeda <74629455+engedaam@users.noreply.github.com>
Date: Mon, 11 Dec 2023 10:18:32 -0800
Subject: [PATCH 1/2] docs: Use IRSA for k8s 1.23 (#5296)

---
 .../scripts/step02-create-cluster.sh   | 2 +-
 .../scripts/step08-apply-helm-chart.sh | 2 +-
 .../scripts/step02-create-cluster.sh   | 2 +-
 .../scripts/step08-apply-helm-chart.sh | 2 +-
 .../scripts/step02-create-cluster.sh   | 2 +-
 .../scripts/step08-apply-helm-chart.sh | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh
index ac47cff29115..105affa1d517 100755
--- a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh
+++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh
@@ -25,7 +25,7 @@ iam:
       permissionPolicyARNs:
       - arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME}

-## Optionally run on fargate
+## Optionally run on fargate or on k8s 1.23
 # Pod Identity is not available on fargate
 # https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html
 # iam:
diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh
index c8e9b73b304a..135a036dc5c1 100755
--- a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh
+++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh
@@ -2,7 +2,7 @@ helm registry logout public.ecr.aws

 helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version "${KARPENTER_VERSION}" --namespace "${KARPENTER_NAMESPACE}" --create-namespace \
-  # Optionally run on fargate
+  # Optionally run on fargate or on k8s 1.23
   # --set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}" \
   --set "settings.clusterName=${CLUSTER_NAME}" \
   --set "settings.interruptionQueue=${CLUSTER_NAME}" \
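Editor's note: for anyone following the getting-started guide by hand, the hunk above amounts to uncommenting the IRSA annotation when Karpenter runs on Fargate or on a Kubernetes 1.23 cluster. A minimal sketch of the resulting install command, assuming the guide's environment variables (`KARPENTER_VERSION`, `KARPENTER_NAMESPACE`, `CLUSTER_NAME`, `KARPENTER_IAM_ROLE_ARN`) are already exported and omitting any flags that appear later in the full script:

```bash
# Sketch only: step08 with the IRSA annotation from the hunk above uncommented.
# Flags from the full script that are not shown in this hunk are omitted;
# --wait is added here only so the command is complete on its own.
helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter \
  --version "${KARPENTER_VERSION}" \
  --namespace "${KARPENTER_NAMESPACE}" --create-namespace \
  --set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}" \
  --set "settings.clusterName=${CLUSTER_NAME}" \
  --set "settings.interruptionQueue=${CLUSTER_NAME}" \
  --wait
```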
diff --git a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh
index fa7119331533..7a7b57c05a4c 100755
--- a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh
+++ b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh
@@ -25,7 +25,7 @@ iam:
       permissionPolicyARNs:
       - arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME}

-## Optionally run on fargate
+## Optionally run on fargate or on k8s 1.23
 # Pod Identity is not available on fargate
 # https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html
 # iam:
diff --git a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh
index c8e9b73b304a..135a036dc5c1 100755
--- a/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh
+++ b/website/content/en/preview/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh
@@ -2,7 +2,7 @@ helm registry logout public.ecr.aws

 helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version "${KARPENTER_VERSION}" --namespace "${KARPENTER_NAMESPACE}" --create-namespace \
-  # Optionally run on fargate
+  # Optionally run on fargate or on k8s 1.23
   # --set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}" \
   --set "settings.clusterName=${CLUSTER_NAME}" \
   --set "settings.interruptionQueue=${CLUSTER_NAME}" \
diff --git a/website/content/en/v0.33/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh b/website/content/en/v0.33/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh
index ac47cff29115..105affa1d517 100755
--- a/website/content/en/v0.33/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh
+++ b/website/content/en/v0.33/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh
@@ -25,7 +25,7 @@ iam:
       permissionPolicyARNs:
       - arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME}

-## Optionally run on fargate
+## Optionally run on fargate or on k8s 1.23
 # Pod Identity is not available on fargate
 # https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html
 # iam:
diff --git a/website/content/en/v0.33/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh b/website/content/en/v0.33/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh
index c8e9b73b304a..135a036dc5c1 100755
--- a/website/content/en/v0.33/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh
+++ b/website/content/en/v0.33/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh
@@ -2,7 +2,7 @@ helm registry logout public.ecr.aws

 helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version "${KARPENTER_VERSION}" --namespace "${KARPENTER_NAMESPACE}" --create-namespace \
-  # Optionally run on fargate
+  # Optionally run on fargate or on k8s 1.23
   # --set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}" \
   --set "settings.clusterName=${CLUSTER_NAME}" \
   --set "settings.interruptionQueue=${CLUSTER_NAME}" \
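Editor's note: the step02-create-cluster.sh hunks in this patch only retitle the commented-out alternative; the IRSA flavor of that `# iam:` block is not spelled out here. A rough sketch of what it could look like in eksctl's ClusterConfig schema, reusing the guide's variables; this is an assumption, and the exact block in the published docs may differ:

```bash
# Hypothetical sketch of the IRSA alternative hinted at by the "# iam:" comment
# in step02-create-cluster.sh. Field names follow eksctl's ClusterConfig schema;
# node groups and other cluster settings are omitted for brevity.
cat <<EOF | eksctl create cluster -f -
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
  name: "${CLUSTER_NAME}"
  region: "${AWS_DEFAULT_REGION}"
  version: "1.23"
iam:
  withOIDC: true
  serviceAccounts:
    - metadata:
        name: karpenter
        namespace: "${KARPENTER_NAMESPACE}"
      roleName: "${CLUSTER_NAME}-karpenter"
      attachPolicyARNs:
        - arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME}
      roleOnly: true
EOF
```

With `roleOnly: true`, eksctl only creates the IAM role; the Helm chart still creates the service account, which is why the role ARN is passed to Helm via the annotation shown in step08 above.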
From aecf4dce0dcf78988a2eac7d0b17260251a899af Mon Sep 17 00:00:00 2001
From: Jason Deal
Date: Mon, 11 Dec 2023 10:45:50 -0800
Subject: [PATCH 2/2] docs: add faq for pod autoscaling (#5298)

---
 website/content/en/docs/faq.md    | 11 ++++++++---
 website/content/en/preview/faq.md | 11 ++++++++---
 website/content/en/v0.33/faq.md   | 11 ++++++++---
 3 files changed, 24 insertions(+), 9 deletions(-)

diff --git a/website/content/en/docs/faq.md b/website/content/en/docs/faq.md
index 1ff02ac57fc8..74186909e176 100644
--- a/website/content/en/docs/faq.md
+++ b/website/content/en/docs/faq.md
@@ -92,9 +92,9 @@ Yes, Karpenter supports provisioning metal instance types when a NodePool's `nod

 ### How does Karpenter dynamically select instance types?

-Karpenter batches pending pods and then binpacks them based on CPU, memory, and GPUs required, taking into account node overhead, VPC CNI resources required, and daemonsets that will be packed when bringing up a new node. Karpenter [recommends the use of C, M, and R >= Gen 3 instance types]({{< ref "./concepts/nodepools#spectemplatespecrequirements" >}}) for most generic workloads, but it can be constrained in the NodePool spec with the [instance-type](https://kubernetes.io/docs/reference/labels-annotations-taints/#nodekubernetesioinstance-type) well-known label in the requirements section.
+Karpenter batches pending pods and then binpacks them based on CPU, memory, and GPUs required, taking into account node overhead, VPC CNI resources required, and daemonsets that will be packed when bringing up a new node. Karpenter [recommends the use of C, M, and R >= Gen 3 instance types]({{< ref "./concepts/nodepools#spectemplatespecrequirements" >}}) for most generic workloads, but it can be constrained in the NodePool spec with the [instance-type](https://kubernetes.io/docs/reference/labels-annotations-taints/#nodekubernetesioinstance-type) well-known label in the requirements section.

-After the pods are binpacked on the most efficient instance type (i.e. the smallest instance type that can fit the pod batch), Karpenter takes 59 other instance types that are larger than the most efficient packing, and passes all 60 instance type options to an API called Amazon EC2 Fleet.
+After the pods are binpacked on the most efficient instance type (i.e. the smallest instance type that can fit the pod batch), Karpenter takes 59 other instance types that are larger than the most efficient packing, and passes all 60 instance type options to an API called Amazon EC2 Fleet.

 The EC2 fleet API attempts to provision the instance type based on the [Price Capacity Optimized allocation strategy](https://aws.amazon.com/blogs/compute/introducing-price-capacity-optimized-allocation-strategy-for-ec2-spot-instances/). For the on-demand capacity type, this is effectively equivalent to the `lowest-price` allocation strategy. For the spot capacity type, Fleet will determine an instance type that has both the lowest price combined with the lowest chance of being interrupted. Note that this may not give you the instance type with the strictly lowest price for spot.
@@ -185,6 +185,11 @@ amiSelectorTerms:
 - name: Windows_Server-2022-English-Full-EKS_Optimized-{{< param "latest_k8s_version" >}}*
 ```

+### Can I use Karpenter to scale my workload's pods?
+Karpenter is a node autoscaler which will create new nodes in response to unschedulable pods. Scaling the pods themselves is outside of its scope.
+This is the realm of pod autoscalers such as the [Vertical Pod Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler) (for scaling an individual pod's resources) or the [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (for scaling replicas).
+We also recommend taking a look at [Keda](https://keda.sh/) if you're looking for more advanced autoscaling capabilities for pods.
+
 ## Deprovisioning
 ### How does Karpenter deprovision nodes?
 See [Deprovisioning nodes]({{< ref "./concepts/disruption" >}}) for information on how Karpenter deprovisions nodes.
@@ -206,7 +211,7 @@ For information on upgrading Karpenter, see the [Upgrade Guide]({{< ref "./upgra

 ### How do I upgrade an EKS Cluster with Karpenter?

-When upgrading an Amazon EKS cluster, [Karpenter's Drift feature]({{}}) can automatically upgrade the Karpenter-provisioned nodes to stay in-sync with the EKS control plane. Karpenter Drift currently needs to be enabled using a [feature gate]({{}}).
+When upgrading an Amazon EKS cluster, [Karpenter's Drift feature]({{}}) can automatically upgrade the Karpenter-provisioned nodes to stay in-sync with the EKS control plane. Karpenter Drift currently needs to be enabled using a [feature gate]({{}}).

 {{% alert title="Note" color="primary" %}}
 Karpenter's default [EC2NodeClass `amiFamily` configuration]({{}}) uses the latest EKS Optimized AL2 AMI for the same major and minor version as the EKS cluster's control plane, meaning that an upgrade of the control plane will cause Karpenter to auto-discover the new AMIs for that version.
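Editor's note: the new FAQ entry stops at naming the HPA, VPA, and Keda. If an inline example were wanted alongside it, a minimal sketch using the standard `autoscaling/v2` API could look like the following; the target Deployment name `inflate` is a placeholder, and this block is illustrative rather than part of the patch:

```bash
# Illustrative only (not part of this patch): an HPA scales the pods, and
# Karpenter then adds or removes nodes to fit the resulting pod count.
# The Deployment name "inflate" is a placeholder.
cat <<EOF | kubectl apply -f -
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: inflate
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: inflate
  minReplicas: 1
  maxReplicas: 20
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70
EOF
```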
diff --git a/website/content/en/preview/faq.md b/website/content/en/preview/faq.md
index 5f821b3d5fff..5edef1245150 100644
--- a/website/content/en/preview/faq.md
+++ b/website/content/en/preview/faq.md
@@ -92,9 +92,9 @@ Yes, Karpenter supports provisioning metal instance types when a NodePool's `nod

 ### How does Karpenter dynamically select instance types?

-Karpenter batches pending pods and then binpacks them based on CPU, memory, and GPUs required, taking into account node overhead, VPC CNI resources required, and daemonsets that will be packed when bringing up a new node. Karpenter [recommends the use of C, M, and R >= Gen 3 instance types]({{< ref "./concepts/nodepools#spectemplatespecrequirements" >}}) for most generic workloads, but it can be constrained in the NodePool spec with the [instance-type](https://kubernetes.io/docs/reference/labels-annotations-taints/#nodekubernetesioinstance-type) well-known label in the requirements section.
+Karpenter batches pending pods and then binpacks them based on CPU, memory, and GPUs required, taking into account node overhead, VPC CNI resources required, and daemonsets that will be packed when bringing up a new node. Karpenter [recommends the use of C, M, and R >= Gen 3 instance types]({{< ref "./concepts/nodepools#spectemplatespecrequirements" >}}) for most generic workloads, but it can be constrained in the NodePool spec with the [instance-type](https://kubernetes.io/docs/reference/labels-annotations-taints/#nodekubernetesioinstance-type) well-known label in the requirements section.

-After the pods are binpacked on the most efficient instance type (i.e. the smallest instance type that can fit the pod batch), Karpenter takes 59 other instance types that are larger than the most efficient packing, and passes all 60 instance type options to an API called Amazon EC2 Fleet.
+After the pods are binpacked on the most efficient instance type (i.e. the smallest instance type that can fit the pod batch), Karpenter takes 59 other instance types that are larger than the most efficient packing, and passes all 60 instance type options to an API called Amazon EC2 Fleet.

 The EC2 fleet API attempts to provision the instance type based on the [Price Capacity Optimized allocation strategy](https://aws.amazon.com/blogs/compute/introducing-price-capacity-optimized-allocation-strategy-for-ec2-spot-instances/). For the on-demand capacity type, this is effectively equivalent to the `lowest-price` allocation strategy. For the spot capacity type, Fleet will determine an instance type that has both the lowest price combined with the lowest chance of being interrupted. Note that this may not give you the instance type with the strictly lowest price for spot.
@@ -185,6 +185,11 @@ amiSelectorTerms:
 - name: Windows_Server-2022-English-Full-EKS_Optimized-{{< param "latest_k8s_version" >}}*
 ```

+### Can I use Karpenter to scale my workload's pods?
+Karpenter is a node autoscaler which will create new nodes in response to unschedulable pods. Scaling the pods themselves is outside of its scope.
+This is the realm of pod autoscalers such as the [Vertical Pod Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler) (for scaling an individual pod's resources) or the [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (for scaling replicas).
+We also recommend taking a look at [Keda](https://keda.sh/) if you're looking for more advanced autoscaling capabilities for pods.
+
 ## Deprovisioning
 ### How does Karpenter deprovision nodes?
 See [Deprovisioning nodes]({{< ref "./concepts/disruption" >}}) for information on how Karpenter deprovisions nodes.
@@ -206,7 +211,7 @@ For information on upgrading Karpenter, see the [Upgrade Guide]({{< ref "./upgra

 ### How do I upgrade an EKS Cluster with Karpenter?

-When upgrading an Amazon EKS cluster, [Karpenter's Drift feature]({{}}) can automatically upgrade the Karpenter-provisioned nodes to stay in-sync with the EKS control plane. Karpenter Drift currently needs to be enabled using a [feature gate]({{}}).
+When upgrading an Amazon EKS cluster, [Karpenter's Drift feature]({{}}) can automatically upgrade the Karpenter-provisioned nodes to stay in-sync with the EKS control plane. Karpenter Drift currently needs to be enabled using a [feature gate]({{}}).

 {{% alert title="Note" color="primary" %}}
 Karpenter's default [EC2NodeClass `amiFamily` configuration]({{}}) uses the latest EKS Optimized AL2 AMI for the same major and minor version as the EKS cluster's control plane, meaning that an upgrade of the control plane will cause Karpenter to auto-discover the new AMIs for that version.
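Editor's note: the instance-type FAQ above points at the NodePool requirements section without showing one. A sketch of the kind of requirements block it means, written against the `karpenter.sh/v1beta1` NodePool API that ships with v0.33; the resource names ("default") are placeholders and the referenced EC2NodeClass is assumed to exist:

```bash
# Sketch only: constrain Karpenter to C/M/R instance families of generation 3+,
# as the FAQ recommends. An explicit allow-list could instead be expressed with
# the node.kubernetes.io/instance-type label. Names ("default") are placeholders.
cat <<EOF | kubectl apply -f -
apiVersion: karpenter.sh/v1beta1
kind: NodePool
metadata:
  name: default
spec:
  template:
    spec:
      nodeClassRef:
        name: default
      requirements:
        - key: karpenter.k8s.aws/instance-category
          operator: In
          values: ["c", "m", "r"]
        - key: karpenter.k8s.aws/instance-generation
          operator: Gt
          values: ["2"]
EOF
```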
diff --git a/website/content/en/v0.33/faq.md b/website/content/en/v0.33/faq.md
index 1ff02ac57fc8..74186909e176 100644
--- a/website/content/en/v0.33/faq.md
+++ b/website/content/en/v0.33/faq.md
@@ -92,9 +92,9 @@ Yes, Karpenter supports provisioning metal instance types when a NodePool's `nod

 ### How does Karpenter dynamically select instance types?

-Karpenter batches pending pods and then binpacks them based on CPU, memory, and GPUs required, taking into account node overhead, VPC CNI resources required, and daemonsets that will be packed when bringing up a new node. Karpenter [recommends the use of C, M, and R >= Gen 3 instance types]({{< ref "./concepts/nodepools#spectemplatespecrequirements" >}}) for most generic workloads, but it can be constrained in the NodePool spec with the [instance-type](https://kubernetes.io/docs/reference/labels-annotations-taints/#nodekubernetesioinstance-type) well-known label in the requirements section.
+Karpenter batches pending pods and then binpacks them based on CPU, memory, and GPUs required, taking into account node overhead, VPC CNI resources required, and daemonsets that will be packed when bringing up a new node. Karpenter [recommends the use of C, M, and R >= Gen 3 instance types]({{< ref "./concepts/nodepools#spectemplatespecrequirements" >}}) for most generic workloads, but it can be constrained in the NodePool spec with the [instance-type](https://kubernetes.io/docs/reference/labels-annotations-taints/#nodekubernetesioinstance-type) well-known label in the requirements section.

-After the pods are binpacked on the most efficient instance type (i.e. the smallest instance type that can fit the pod batch), Karpenter takes 59 other instance types that are larger than the most efficient packing, and passes all 60 instance type options to an API called Amazon EC2 Fleet.
+After the pods are binpacked on the most efficient instance type (i.e. the smallest instance type that can fit the pod batch), Karpenter takes 59 other instance types that are larger than the most efficient packing, and passes all 60 instance type options to an API called Amazon EC2 Fleet.

 The EC2 fleet API attempts to provision the instance type based on the [Price Capacity Optimized allocation strategy](https://aws.amazon.com/blogs/compute/introducing-price-capacity-optimized-allocation-strategy-for-ec2-spot-instances/). For the on-demand capacity type, this is effectively equivalent to the `lowest-price` allocation strategy. For the spot capacity type, Fleet will determine an instance type that has both the lowest price combined with the lowest chance of being interrupted. Note that this may not give you the instance type with the strictly lowest price for spot.
@@ -185,6 +185,11 @@ amiSelectorTerms:
 - name: Windows_Server-2022-English-Full-EKS_Optimized-{{< param "latest_k8s_version" >}}*
 ```

+### Can I use Karpenter to scale my workload's pods?
+Karpenter is a node autoscaler which will create new nodes in response to unschedulable pods. Scaling the pods themselves is outside of its scope.
+This is the realm of pod autoscalers such as the [Vertical Pod Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler) (for scaling an individual pod's resources) or the [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (for scaling replicas).
+We also recommend taking a look at [Keda](https://keda.sh/) if you're looking for more advanced autoscaling capabilities for pods.
+
 ## Deprovisioning
 ### How does Karpenter deprovision nodes?
 See [Deprovisioning nodes]({{< ref "./concepts/disruption" >}}) for information on how Karpenter deprovisions nodes.
@@ -206,7 +211,7 @@ For information on upgrading Karpenter, see the [Upgrade Guide]({{< ref "./upgra

 ### How do I upgrade an EKS Cluster with Karpenter?

-When upgrading an Amazon EKS cluster, [Karpenter's Drift feature]({{}}) can automatically upgrade the Karpenter-provisioned nodes to stay in-sync with the EKS control plane. Karpenter Drift currently needs to be enabled using a [feature gate]({{}}).
+When upgrading an Amazon EKS cluster, [Karpenter's Drift feature]({{}}) can automatically upgrade the Karpenter-provisioned nodes to stay in-sync with the EKS control plane. Karpenter Drift currently needs to be enabled using a [feature gate]({{}}).

 {{% alert title="Note" color="primary" %}}
 Karpenter's default [EC2NodeClass `amiFamily` configuration]({{}}) uses the latest EKS Optimized AL2 AMI for the same major and minor version as the EKS cluster's control plane, meaning that an upgrade of the control plane will cause Karpenter to auto-discover the new AMIs for that version.
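Editor's note: the upgrade FAQ says Drift needs to be enabled via a feature gate but, with the ref shortcodes stripped in this copy, does not show how. A hedged sketch of doing it through the Helm chart; the values key `settings.featureGates.drift` is an assumption based on the v1beta1-era chart and should be verified against the values.yaml of the chart version actually installed:

```bash
# Hedged sketch: turn on the Drift feature gate via the Helm chart values.
# The key name (settings.featureGates.drift) is an assumption for the
# v1beta1-era chart; confirm it against the chart's values.yaml.
helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter \
  --version "${KARPENTER_VERSION}" \
  --namespace "${KARPENTER_NAMESPACE}" --create-namespace \
  --set "settings.featureGates.drift=true" \
  --set "settings.clusterName=${CLUSTER_NAME}" \
  --set "settings.interruptionQueue=${CLUSTER_NAME}" \
  --wait
```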