diff --git a/charts/karpenter-crd/Chart.yaml b/charts/karpenter-crd/Chart.yaml index bad3fdb20d54..ff9651e1558c 100644 --- a/charts/karpenter-crd/Chart.yaml +++ b/charts/karpenter-crd/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: karpenter-crd description: A Helm chart for Karpenter Custom Resource Definitions (CRDs) type: application -version: 0.34.0 -appVersion: 0.34.0 +version: 0.35.0 +appVersion: 0.35.0 keywords: - cluster - node diff --git a/charts/karpenter/Chart.yaml b/charts/karpenter/Chart.yaml index 60da1f8e84c1..536ebabb3b64 100644 --- a/charts/karpenter/Chart.yaml +++ b/charts/karpenter/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: karpenter description: A Helm chart for Karpenter, an open-source node provisioning project built for Kubernetes. type: application -version: 0.34.0 -appVersion: 0.34.0 +version: 0.35.0 +appVersion: 0.35.0 keywords: - cluster - node diff --git a/charts/karpenter/README.md b/charts/karpenter/README.md index 5bd21268cc0b..8d1aec3043eb 100644 --- a/charts/karpenter/README.md +++ b/charts/karpenter/README.md @@ -2,7 +2,7 @@ A Helm chart for Karpenter, an open-source node provisioning project built for Kubernetes. -![Version: 0.34.0](https://img.shields.io/badge/Version-0.34.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.34.0](https://img.shields.io/badge/AppVersion-0.34.0-informational?style=flat-square) +![Version: 0.35.0](https://img.shields.io/badge/Version-0.35.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.35.0](https://img.shields.io/badge/AppVersion-0.35.0-informational?style=flat-square) ## Documentation @@ -15,7 +15,7 @@ You can follow the detailed installation instruction in the [documentation](http ```bash helm upgrade --install --namespace karpenter --create-namespace \ karpenter oci://public.ecr.aws/karpenter/karpenter \ - --version 0.34.0 \ + --version 0.35.0 \ --set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}" \ --set settings.clusterName=${CLUSTER_NAME} \ --set settings.interruptionQueue=${CLUSTER_NAME} \ @@ -34,9 +34,9 @@ helm upgrade --install --namespace karpenter --create-namespace \ | controller.envFrom | list | `[]` | | | controller.extraVolumeMounts | list | `[]` | Additional volumeMounts for the controller pod. | | controller.healthProbe.port | int | `8081` | The container port to use for http health probe. | -| controller.image.digest | string | `"sha256:1ec788c4358106d728a352426462014b7ee4734e9d5ec932d2f37a7b15f9be65"` | SHA256 digest of the controller image. | +| controller.image.digest | string | `"sha256:48d1246f6b2066404e300cbf3e26d0bcdc57a76531dcb634d571f4f0e050cb57"` | SHA256 digest of the controller image. | | controller.image.repository | string | `"public.ecr.aws/karpenter/controller"` | Repository path to the controller image. | -| controller.image.tag | string | `"v0.34.0"` | Tag of the controller image. | +| controller.image.tag | string | `"0.35.0"` | Tag of the controller image. | | controller.metrics.port | int | `8000` | The container port to use for metrics. | | controller.resources | object | `{}` | Resources for the controller pod. 
| | controller.sidecarContainer | list | `[]` | Additional sidecarContainer config | diff --git a/charts/karpenter/values.yaml b/charts/karpenter/values.yaml index 4fddc9d413b9..c9ee81887887 100644 --- a/charts/karpenter/values.yaml +++ b/charts/karpenter/values.yaml @@ -45,7 +45,7 @@ podDisruptionBudget: name: karpenter maxUnavailable: 1 # -- SecurityContext for the pod. -podSecurityContext: +podSecurityContext: fsGroup: 65536 # -- PriorityClass name for the pod. priorityClassName: system-cluster-critical @@ -99,9 +99,9 @@ controller: # -- Repository path to the controller image. repository: public.ecr.aws/karpenter/controller # -- Tag of the controller image. - tag: v0.34.0 + tag: 0.35.0 # -- SHA256 digest of the controller image. - digest: sha256:1ec788c4358106d728a352426462014b7ee4734e9d5ec932d2f37a7b15f9be65 + digest: sha256:48d1246f6b2066404e300cbf3e26d0bcdc57a76531dcb634d571f4f0e050cb57 # -- Additional environment variables for the controller pod. env: [] # - name: AWS_REGION diff --git a/hack/docs/compatibility-karpenter.yaml b/hack/docs/compatibility-karpenter.yaml index 8c2e678c5def..edd7bea872ee 100644 --- a/hack/docs/compatibility-karpenter.yaml +++ b/hack/docs/compatibility-karpenter.yaml @@ -46,5 +46,8 @@ compatibility: minK8sVersion: 1.23 maxK8sVersion: 1.29 - appVersion: 0.34.1 + minK8sVersion: 1.23 + maxK8sVersion: 1.29 + - appVersion: 0.35.0 minK8sVersion: 1.23 maxK8sVersion: 1.29 \ No newline at end of file diff --git a/website/content/en/docs/concepts/nodeclasses.md b/website/content/en/docs/concepts/nodeclasses.md index bc139771dcb7..e95f576cef11 100644 --- a/website/content/en/docs/concepts/nodeclasses.md +++ b/website/content/en/docs/concepts/nodeclasses.md @@ -35,7 +35,7 @@ spec: # Each term in the array of subnetSelectorTerms is ORed together # Within a single term, all conditions are ANDed subnetSelectorTerms: - # Select on any subnet that has the "karpenter.sh/discovery: ${CLUSTER_NAME}" + # Select on any subnet that has the "karpenter.sh/discovery: ${CLUSTER_NAME}" # AND the "environment: test" tag OR any subnet with ID "subnet-09fa4a0a8f233a921" - tags: karpenter.sh/discovery: "${CLUSTER_NAME}" @@ -46,8 +46,8 @@ spec: # Each term in the array of securityGroupSelectorTerms is ORed together # Within a single term, all conditions are ANDed securityGroupSelectorTerms: - # Select on any security group that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag - # AND the "environment: test" tag OR any security group with the "my-security-group" name + # Select on any security group that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag + # AND the "environment: test" tag OR any security group with the "my-security-group" name # OR any security group with ID "sg-063d7acfb4b06c82c" - tags: karpenter.sh/discovery: "${CLUSTER_NAME}" @@ -70,15 +70,15 @@ spec: # Each term in the array of amiSelectorTerms is ORed together # Within a single term, all conditions are ANDed amiSelectorTerms: - # Select on any AMI that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag - # AND the "environment: test" tag OR any AMI with the "my-ami" name + # Select on any AMI that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag + # AND the "environment: test" tag OR any AMI with the "my-ami" name # OR any AMI with ID "ami-123" - tags: karpenter.sh/discovery: "${CLUSTER_NAME}" environment: test - name: my-ami - id: ami-123 - + # Optional, use instance-store volumes for node 
ephemeral-storage instanceStorePolicy: RAID0 @@ -113,6 +113,10 @@ spec: # Optional, configures detailed monitoring for the instance detailedMonitoring: true + + # Optional, configures whether the instance should be launched with an associated public IP address. + # If not specified, the default value depends on the subnet's public IP auto-assign setting. + associatePublicIPAddress: true status: # Resolved subnets subnets: @@ -160,7 +164,7 @@ Refer to the [NodePool docs]({{}}) for settings applicable t ## spec.amiFamily -AMIFamily is a required field, dictating both the default bootstrapping logic for nodes provisioned through this `EC2NodeClass` but also selecting a group of recommended, latest AMIs by default. Currently, Karpenter supports `amiFamily` values `AL2`, `Bottlerocket`, `Ubuntu`, `Windows2019`, `Windows2022` and `Custom`. GPUs are only supported by default with `AL2` and `Bottlerocket`. The `AL2` amiFamily does not support ARM64 GPU instance types unless you specify custom [`amiSelectorTerms`]({{}}). Default bootstrapping logic is shown below for each of the supported families. +AMIFamily is a required field that dictates both the default bootstrapping logic for nodes provisioned through this `EC2NodeClass` and the group of recommended, latest AMIs that is selected by default. Currently, Karpenter supports `amiFamily` values `AL2`, `AL2023`, `Bottlerocket`, `Ubuntu`, `Windows2019`, `Windows2022` and `Custom`. GPUs are only supported by default with `AL2` and `Bottlerocket`. The `AL2` amiFamily does not support ARM64 GPU instance types unless you specify custom [`amiSelectorTerms`]({{}}). Default bootstrapping logic is shown below for each of the supported families. ### AL2 @@ -180,6 +184,34 @@ exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 --//-- ``` +### AL2023 + +```text +MIME-Version: 1.0 +Content-Type: multipart/mixed; boundary="//" + +--// +Content-Type: application/node.eks.aws + +# Karpenter Generated NodeConfig +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + cluster: + name: test-cluster + apiServerEndpoint: https://example.com + certificateAuthority: ca-bundle + cidr: 10.100.0.0/16 + kubelet: + config: + maxPods: 110 + flags: + - --node-labels=karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=test + +--//-- + +``` + ### Bottlerocket ```toml @@ -248,7 +280,7 @@ This selection logic is modeled as terms, where each term contains multiple cond ```yaml subnetSelectorTerms: - # Select on any subnet that has the "karpenter.sh/discovery: ${CLUSTER_NAME}" + # Select on any subnet that has the "karpenter.sh/discovery: ${CLUSTER_NAME}" # AND the "environment: test" tag OR any subnet with ID "subnet-09fa4a0a8f233a921" - tags: karpenter.sh/discovery: "${CLUSTER_NAME}" @@ -306,6 +338,7 @@ spec: - id: "subnet-0471ca205b8a129ae" ``` + ## spec.securityGroupSelectorTerms Security Group Selector Terms allow you to specify selection logic for all security groups that will be attached to an instance launched from the `EC2NodeClass`. The security group of an instance is comparable to a set of firewall rules. 
@@ -315,8 +348,8 @@ This selection logic is modeled as terms, where each term contains multiple cond ```yaml securityGroupSelectorTerms: - # Select on any security group that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag - # AND the "environment: test" tag OR any security group with the "my-security-group" name + # Select on any security group that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag + # AND the "environment: test" tag OR any security group with the "my-security-group" name # OR any security group with ID "sg-063d7acfb4b06c82c" - tags: karpenter.sh/discovery: "${CLUSTER_NAME}" @@ -402,8 +435,8 @@ This selection logic is modeled as terms, where each term contains multiple cond ```yaml amiSelectorTerms: - # Select on any AMI that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag - # AND the "environment: test" tag OR any AMI with the "my-ami" name + # Select on any AMI that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag + # AND the "environment: test" tag OR any AMI with the "my-ami" name # OR any AMI with ID "ami-123" - tags: karpenter.sh/discovery: "${CLUSTER_NAME}" @@ -576,6 +609,17 @@ spec: encrypted: true ``` +### AL2023 +```yaml +spec: + blockDeviceMappings: + - deviceName: /dev/xvda + ebs: + volumeSize: 20Gi + volumeType: gp3 + encrypted: true +``` + ### Bottlerocket ```yaml spec: @@ -628,25 +672,29 @@ The `instanceStorePolicy` field controls how [instance-store](https://docs.aws.a If you intend to use these volumes for faster node ephemeral-storage, set `instanceStorePolicy` to `RAID0`: -```yaml -spec: +```yaml +spec: instanceStorePolicy: RAID0 ``` -This will set the allocatable ephemeral-storage of each node to the total size of the instance-store volume(s). +This will set the allocatable ephemeral-storage of each node to the total size of the instance-store volume(s). + +The disks must be formatted & mounted in a RAID0 and be the underlying filesystem for the Kubelet & Containerd. Instructions for each AMI family are listed below: -The disks must be formatted & mounted in a RAID0 and be the underlying filesystem for the Kubelet & Containerd. Instructions for each AMI family are listed below: #### AL2 -#### AL2 +On AL2, Karpenter automatically configures the disks through an additional bootstrap argument (`--local-disks raid0`). The device name is `/dev/md/0` and its mount point is `/mnt/k8s-disks/0`. You should ensure any additional disk setup does not interfere with these. -On AL2, Karpenter automatically configures the disks through an additional boostrap argument (`--local-disks raid0`). The device name is `/dev/md/0` and its mount point is `/mnt/k8s-disks/0`. You should ensure any additional disk setup does not interfere with these. #### AL2023 -#### Others +On AL2023, Karpenter automatically configures the disks via the generated `NodeConfig` object. Like AL2, the device name is `/dev/md/0` and its mount point is `/mnt/k8s-disks/0`. You should ensure any additional disk setup does not interfere with these. -For all other AMI families, you must configure the disks yourself. Check out the [`setup-local-disks`](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bin/setup-local-disks) script in [amazon-eks-ami](https://github.com/awslabs/amazon-eks-ami) to see how this is done for AL2. -{{% alert title="Tip" color="secondary" %}} -Since the Kubelet & Containerd will be using the instance-store filesystem, you may consider using a more minimal root volume size. 
+For all other AMI families, you must configure the disks yourself. Check out the [`setup-local-disks`](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bin/setup-local-disks) script in [amazon-eks-ami](https://github.com/awslabs/amazon-eks-ami) to see how this is done for AL2. + +{{% alert title="Tip" color="secondary" %}} +Since the Kubelet & Containerd will be using the instance-store filesystem, you may consider using a more minimal root volume size. {{% /alert %}} ## spec.userData @@ -712,7 +760,30 @@ Consider the following example to understand how your custom UserData will be me ```bash #!/bin/bash -echo "Running custom user data script" +echo "Running custom user data script (bash)" ``` + +#### Merged UserData (bash) + +```bash +MIME-Version: 1.0 +Content-Type: multipart/mixed; boundary="//" + +--// +Content-Type: text/x-shellscript; charset="us-ascii" + +#!/bin/bash +echo "Running custom user data script (bash)" + +--// +Content-Type: text/x-shellscript; charset="us-ascii" + +#!/bin/bash -xe +exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 +/etc/eks/bootstrap.sh 'test-cluster' --apiserver-endpoint 'https://test-cluster' --b64-cluster-ca 'ca-bundle' \ +--use-max-pods false \ +--kubelet-extra-args '--node-labels=karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=test --max-pods=110' +--//-- ``` #### Passed-in UserData (MIME) @@ -725,12 +796,12 @@ Content-Type: multipart/mixed; boundary="BOUNDARY" Content-Type: text/x-shellscript; charset="us-ascii" #!/bin/bash -echo "Running custom user data script" +echo "Running custom user data script (mime)" --BOUNDARY-- ``` -#### Merged UserData +#### Merged UserData (MIME) ```bash MIME-Version: 1.0 @@ -740,7 +811,7 @@ Content-Type: multipart/mixed; boundary="//" Content-Type: text/x-shellscript; charset="us-ascii" #!/bin/bash -echo "Running custom user data script" +echo "Running custom user data script (mime)" --// Content-Type: text/x-shellscript; charset="us-ascii" @@ -770,6 +841,169 @@ spec: ``` {{% /alert %}} +### AL2023 + +* Your UserData may be in one of three formats: a [MIME multi part archive](https://cloudinit.readthedocs.io/en/latest/topics/format.html#mime-multi-part-archive), a NodeConfig YAML / JSON string, or a shell script. +* Karpenter will transform your custom UserData into a MIME part, if necessary, and then create a MIME multi-part archive. This archive will consist of a generated NodeConfig, containing Karpenter's default values, followed by the transformed custom UserData. For more information on the NodeConfig spec, refer to the [AL2023 EKS Optimized AMI docs](https://awslabs.github.io/amazon-eks-ami/nodeadm/doc/examples/). +* If a value is specified in both the Karpenter-generated NodeConfig and the custom UserData, the value in the custom UserData takes precedence. 
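+
+For orientation, the custom UserData shown in the examples below is simply the string value of the `spec.userData` field on your `EC2NodeClass`. A minimal, illustrative sketch (the resource name and `maxPods` value are assumptions, and other required fields are omitted):
+
+```yaml
+apiVersion: karpenter.k8s.aws/v1beta1
+kind: EC2NodeClass
+metadata:
+  name: al2023-example   # hypothetical name
+spec:
+  amiFamily: AL2023
+  # subnetSelectorTerms, securityGroupSelectorTerms, and role omitted for brevity
+  userData: |
+    apiVersion: node.eks.aws/v1alpha1
+    kind: NodeConfig
+    spec:
+      kubelet:
+        config:
+          maxPods: 42
+```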
+ +#### Passed-in UserData (NodeConfig) + +```yaml +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + kubelet: + config: + maxPods: 42 +``` + +#### Merged UserData (NodeConfig) + +```text +MIME-Version: 1.0 +Content-Type: multipart/mixed; boundary="//" + +--// +Content-Type: application/node.eks.aws + +# Karpenter Generated NodeConfig +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + cluster: + apiServerEndpoint: https://test-cluster + certificateAuthority: cluster-ca + cidr: 10.100.0.0/16 + name: test-cluster + kubelet: + config: + clusterDNS: + - 10.100.0.10 + maxPods: 118 + flags: + - --node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=default" + +--// +Content-Type: application/node.eks.aws + +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + kubelet: + config: + maxPods: 42 +--//-- +``` + +#### Passed-in UserData (bash) + +```shell +#!/bin/bash +echo "Hello, AL2023!" +``` + +#### Merged UserData (bash) + +```text +MIME-Version: 1.0 +Content-Type: multipart/mixed; boundary="//" + +--// +Content-Type: application/node.eks.aws + +# Karpenter Generated NodeConfig +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + cluster: + apiServerEndpoint: https://test-cluster + certificateAuthority: cluster-ca + cidr: 10.100.0.0/16 + name: test-cluster + kubelet: + config: + clusterDNS: + - 10.100.0.10 + maxPods: 118 + flags: + - --node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=default" + +--// +Content-Type: text/x-shellscript; charset="us-ascii" + +#!/bin/bash +echo "Hello, AL2023!" +--//-- +``` + +#### Passed-in UserData (MIME) + +```text +MIME-Version: 1.0 +Content-Type: multipart/mixed; boundary="//" + +--// +Content-Type: application/node.eks.aws + +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + kubelet: + config: + maxPods: 42 +--// +Content-Type: text/x-shellscript; charset="us-ascii" + +#!/bin/bash +echo "Hello, AL2023!" +--// +``` + +#### Merged UserData (MIME) + +```text +MIME-Version: 1.0 +Content-Type: multipart/mixed; boundary="//" + +--// +Content-Type: application/node.eks.aws + +# Karpenter Generated NodeConfig +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + cluster: + apiServerEndpoint: https://test-cluster + certificateAuthority: cluster-ca + cidr: 10.100.0.0/16 + name: test-cluster + kubelet: + config: + clusterDNS: + - 10.100.0.10 + maxPods: 118 + flags: + - --node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=default" + +--// +Content-Type: application/node.eks.aws + +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + kubelet: + config: + maxPods: 42 +--// +Content-Type: text/x-shellscript; charset="us-ascii" + +#!/bin/bash +echo "Hello, AL2023!" +--//-- +``` + ### Bottlerocket * Your UserData must be valid TOML. @@ -862,6 +1096,15 @@ spec: detailedMonitoring: true ``` + +## spec.associatePublicIPAddress + +A boolean field that controls whether instances created by Karpenter for this EC2NodeClass will have an associated public IP address. This overrides the `MapPublicIpOnLaunch` setting applied to the subnet the node is launched in. If this field is not set, the `MapPublicIpOnLaunch` field will be respected. + +{{% alert title="Note" color="warning" %}} +If a `NodeClaim` requests `vpc.amazonaws.com/efa` resources, `spec.associatePublicIPAddress` is respected. 
However, if this `NodeClaim` requests **multiple** EFA resources and the value for `spec.associatePublicIPAddress` is true, the instance will fail to launch. This is due to an EC2 restriction that +requires the field to be set to true only when an instance is configured with a single ENI at launch. When using this field, it is advised that users segregate their EFA workload to use a separate `NodePool` / `EC2NodeClass` pair. +{{% /alert %}} + ## status.subnets [`status.subnets`]({{< ref "#statussubnets" >}}) contains the resolved `id` and `zone` of the subnets that were selected by the [`spec.subnetSelectorTerms`]({{< ref "#specsubnetselectorterms" >}}) for the node class. The subnets will be sorted by the available IP address count in decreasing order. diff --git a/website/content/en/docs/contributing/development-guide.md b/website/content/en/docs/contributing/development-guide.md index e803169d6c5a..2afbb4d2ead7 100644 --- a/website/content/en/docs/contributing/development-guide.md +++ b/website/content/en/docs/contributing/development-guide.md @@ -73,7 +73,8 @@ make test # E2E correctness tests ### Change Log Level -By default, `make apply` will set the log level to debug. You can change the log level by setting the log level in your helm values. +By default, `make apply` will set the log level to debug. You can change it by setting `logLevel` in your Helm values. + ```bash --set logLevel=debug ``` diff --git a/website/content/en/docs/faq.md b/website/content/en/docs/faq.md index d6bffd44a86e..f068290c7626 100644 --- a/website/content/en/docs/faq.md +++ b/website/content/en/docs/faq.md @@ -14,7 +14,7 @@ See [Configuring NodePools]({{< ref "./concepts/#configuring-nodepools" >}}) for AWS is the first cloud provider supported by Karpenter, although it is designed to be used with other cloud providers as well. ### Can I write my own cloud provider for Karpenter? -Yes, but there is no documentation yet for it. Start with Karpenter's GitHub [cloudprovider](https://github.com/aws/karpenter-core/tree/v0.34.1/pkg/cloudprovider) documentation to see how the AWS provider is built, but there are other sections of the code that will require changes too. +Yes, but there is no documentation yet for it. Start with Karpenter's GitHub [cloudprovider](https://github.com/aws/karpenter-core/tree/v0.35.0/pkg/cloudprovider) documentation to see how the AWS provider is built, but there are other sections of the code that will require changes too. ### What operating system nodes does Karpenter deploy? Karpenter uses the OS defined by the [AMI Family in your EC2NodeClass]({{< ref "./concepts/nodeclasses#specamifamily" >}}). @@ -26,7 +26,7 @@ Karpenter has multiple mechanisms for configuring the [operating system]({{< ref Karpenter is flexible to multi-architecture configurations using [well known labels]({{< ref "./concepts/scheduling/#supported-labels">}}). ### What RBAC access is required? -All the required RBAC rules can be found in the helm chart template. See [clusterrole-core.yaml](https://github.com/aws/karpenter/blob/v0.34.1/charts/karpenter/templates/clusterrole-core.yaml), [clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.34.1/charts/karpenter/templates/clusterrole.yaml), [rolebinding.yaml](https://github.com/aws/karpenter/blob/v0.34.1/charts/karpenter/templates/rolebinding.yaml), and [role.yaml](https://github.com/aws/karpenter/blob/v0.34.1/charts/karpenter/templates/role.yaml) files for details. +All the required RBAC rules can be found in the Helm chart template. 
See [clusterrole-core.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/clusterrole-core.yaml), [clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/clusterrole.yaml), [rolebinding.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/rolebinding.yaml), and [role.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/role.yaml) files for details. ### Can I run Karpenter outside of a Kubernetes cluster? Yes, as long as the controller has network and IAM/RBAC access to the Kubernetes API and your provider API. @@ -202,7 +202,7 @@ Use your existing upgrade mechanisms to upgrade your core add-ons in Kubernetes Karpenter requires proper permissions in the `KarpenterNode IAM Role` and the `KarpenterController IAM Role`. To upgrade Karpenter to version `$VERSION`, make sure that the `KarpenterNode IAM Role` and the `KarpenterController IAM Role` have the right permission described in `https://karpenter.sh/$VERSION/getting-started/getting-started-with-karpenter/cloudformation.yaml`. -Next, locate `KarpenterController IAM Role` ARN (i.e., ARN of the resource created in [Create the KarpenterController IAM Role](../getting-started/getting-started-with-karpenter/#create-the-karpentercontroller-iam-role)) and pass them to the helm upgrade command. +Next, locate the `KarpenterController IAM Role` ARN (i.e., ARN of the resource created in [Create the KarpenterController IAM Role](../getting-started/getting-started-with-karpenter/#create-the-karpentercontroller-iam-role)) and pass it to the Helm upgrade command. {{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh" language="bash"%}} For information on upgrading Karpenter, see the [Upgrade Guide]({{< ref "./upgrading/upgrade-guide/" >}}). @@ -211,7 +211,7 @@ ### How do I upgrade an EKS Cluster with Karpenter? -When upgrading an Amazon EKS cluster, [Karpenter's Drift feature]({{}}) can automatically upgrade the Karpenter-provisioned nodes to stay in-sync with the EKS control plane. Karpenter Drift is enabled by default starting v0.33.x. +When upgrading an Amazon EKS cluster, [Karpenter's Drift feature]({{}}) can automatically upgrade the Karpenter-provisioned nodes to stay in-sync with the EKS control plane. Karpenter Drift is enabled by default starting `0.33.0`. {{% alert title="Note" color="primary" %}} Karpenter's default [EC2NodeClass `amiFamily` configuration]({{}}) uses the latest EKS Optimized AL2 AMI for the same major and minor version as the EKS cluster's control plane, meaning that an upgrade of the control plane will cause Karpenter to auto-discover the new AMIs for that version. diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/_index.md b/website/content/en/docs/getting-started/getting-started-with-karpenter/_index.md index 80d1650764a2..49439c562074 100644 --- a/website/content/en/docs/getting-started/getting-started-with-karpenter/_index.md +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/_index.md @@ -44,9 +44,9 @@ authenticate properly by running `aws sts get-caller-identity`. 
After setting up the tools, set the Karpenter and Kubernetes version: ```bash -export KARPENTER_NAMESPACE=kube-system -export KARPENTER_VERSION=v0.34.1 -export K8S_VERSION=1.29 +export KARPENTER_NAMESPACE="kube-system" +export KARPENTER_VERSION="0.35.0" +export K8S_VERSION="1.29" ``` Then set the following environment variable: @@ -58,7 +58,7 @@ If you open a new shell to run steps in this procedure, you need to set some or To remind yourself of these values, type: ```bash -echo $KARPENTER_NAMESPACE $KARPENTER_VERSION $K8S_VERSION $CLUSTER_NAME $AWS_DEFAULT_REGION $AWS_ACCOUNT_ID $TEMPOUT +echo "${KARPENTER_NAMESPACE}" "${KARPENTER_VERSION}" "${K8S_VERSION}" "${CLUSTER_NAME}" "${AWS_DEFAULT_REGION}" "${AWS_ACCOUNT_ID}" "${TEMPOUT}" ``` {{% /alert %}} @@ -75,7 +75,7 @@ The following cluster configuration will: * Use [AWS EKS managed node groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for the kube-system and karpenter namespaces. Uncomment fargateProfiles settings (and comment out managedNodeGroups settings) to use Fargate for both namespaces instead. * Set KARPENTER_IAM_ROLE_ARN variables. * Create a role to allow spot instances. -* Run helm to install karpenter +* Run Helm to install Karpenter {{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh" language="bash"%}} @@ -97,11 +97,11 @@ If you need Karpenter to manage the DNS service pods' capacity, this means that {{% /alert %}} {{% alert title="Common Expression Language/Webhooks Notice" color="warning" %}} -Karpenter supports using [Kubernetes Common Expression Language](https://kubernetes.io/docs/reference/using-api/cel/) for validating its Custom Resource Definitions out-of-the-box; however, this feature is not supported on versions of Kubernetes < 1.25. If you are running an earlier version of Kubernetes, you will need to use the Karpenter admission webhooks for validation instead. You can enable these webhooks with `--set webhook.enabled=true` when applying the Karpenter helm chart. +Karpenter supports using [Kubernetes Common Expression Language](https://kubernetes.io/docs/reference/using-api/cel/) for validating its Custom Resource Definitions out-of-the-box; however, this feature is not supported on versions of Kubernetes < 1.25. If you are running an earlier version of Kubernetes, you will need to use the Karpenter admission webhooks for validation instead. You can enable these webhooks with `--set webhook.enabled=true` when applying the Karpenter Helm chart. {{% /alert %}} {{% alert title="Pod Identity Supports Notice" color="warning" %}} -Karpenter now supports using [Pod Identity](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html) to authenticate AWS SDK to make API requests to AWS services using AWS Identity and Access Management (IAM) permissions. This feature not supported on versions of Kubernetes < 1.24. If you are running an earlier version of Kubernetes, you will need to use the [IAM Roles for Service Accounts(IRSA)](https://docs.aws.amazon.com/emr/latest/EMR-on-EKS-DevelopmentGuide/setting-up-enable-IAM.html) for pod authentication instead. You can enable these IRSA with `--set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}"` when applying the Karpenter helm chart. 
+Karpenter now supports using [Pod Identity](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html) to authenticate the AWS SDK when making API requests to AWS services with AWS Identity and Access Management (IAM) permissions. This feature is not supported on versions of Kubernetes < 1.24. If you are running an earlier version of Kubernetes, you will need to use [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/emr/latest/EMR-on-EKS-DevelopmentGuide/setting-up-enable-IAM.html) for pod authentication instead. You can enable IRSA with `--set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}"` when applying the Karpenter Helm chart. {{% /alert %}} {{% alert title="Warning" color="warning" %}} @@ -177,7 +177,7 @@ The section below covers advanced installation techniques for installing Karpent ### Private Clusters -You can optionally install Karpenter on a [private cluster](https://docs.aws.amazon.com/eks/latest/userguide/private-clusters.html#private-cluster-requirements) using the `eksctl` installation by setting `privateCluster.enabled` to true in your [ClusterConfig](https://eksctl.io/usage/eks-private-cluster/#eks-fully-private-cluster) and by setting `--set settings.isolatedVPC=true` when installing the `karpenter` helm chart. +You can optionally install Karpenter on a [private cluster](https://docs.aws.amazon.com/eks/latest/userguide/private-clusters.html#private-cluster-requirements) using the `eksctl` installation by setting `privateCluster.enabled` to true in your [ClusterConfig](https://eksctl.io/usage/eks-private-cluster/#eks-fully-private-cluster) and by setting `--set settings.isolatedVPC=true` when installing the `karpenter` Helm chart. ```bash privateCluster: diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/grafana-values.yaml b/website/content/en/docs/getting-started/getting-started-with-karpenter/grafana-values.yaml index b68a986cf99b..67d28f71217c 100644 --- a/website/content/en/docs/getting-started/getting-started-with-karpenter/grafana-values.yaml +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/grafana-values.yaml @@ -22,6 +22,6 @@ dashboardProviders: dashboards: default: capacity-dashboard: - url: https://karpenter.sh/v0.34/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json + url: https://karpenter.sh/v0.35/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json performance-dashboard: - url: https://karpenter.sh/v0.34/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json + url: https://karpenter.sh/v0.35/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step01-config.sh b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step01-config.sh index a3af512d02ac..bbaa418df349 100755 --- a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step01-config.sh +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step01-config.sh @@ -2,4 +2,4 @@ export AWS_PARTITION="aws" # if you are not using standard partitions, you may n export CLUSTER_NAME="${USER}-karpenter-demo" export AWS_DEFAULT_REGION="us-west-2" export AWS_ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)" -export TEMPOUT=$(mktemp) +export TEMPOUT="$(mktemp)" diff --git 
a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh index 7a7b57c05a4c..cdad96eeca23 100755 --- a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh @@ -1,4 +1,4 @@ -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \ +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \ && aws cloudformation deploy \ --stack-name "Karpenter-${CLUSTER_NAME}" \ --template-file "${TEMPOUT}" \ @@ -26,7 +26,7 @@ iam: - arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME} ## Optionally run on fargate or on k8s 1.23 -# Pod Identity is not available on fargate +# Pod Identity is not available on fargate # https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html # iam: # withOIDC: true @@ -67,7 +67,7 @@ addons: # - namespace: "${KARPENTER_NAMESPACE}" EOF -export CLUSTER_ENDPOINT="$(aws eks describe-cluster --name ${CLUSTER_NAME} --query "cluster.endpoint" --output text)" +export CLUSTER_ENDPOINT="$(aws eks describe-cluster --name "${CLUSTER_NAME}" --query "cluster.endpoint" --output text)" export KARPENTER_IAM_ROLE_ARN="arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAME}-karpenter" -echo $CLUSTER_ENDPOINT $KARPENTER_IAM_ROLE_ARN +echo "${CLUSTER_ENDPOINT} ${KARPENTER_IAM_ROLE_ARN}" diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh index b8e610f7bee6..54e826db269b 100755 --- a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh @@ -1,6 +1,6 @@ -TEMPOUT=$(mktemp) +TEMPOUT="$(mktemp)" -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \ +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \ && aws cloudformation deploy \ --stack-name "Karpenter-${CLUSTER_NAME}" \ --template-file "${TEMPOUT}" \ diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh index 951813a865cc..1107c2ec1d24 100755 --- a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh @@ -4,8 +4,8 @@ helm repo update kubectl create namespace monitoring -curl -fsSL 
https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/prometheus-values.yaml | envsubst | tee prometheus-values.yaml +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/prometheus-values.yaml | envsubst | tee prometheus-values.yaml helm install --namespace monitoring prometheus prometheus-community/prometheus --values prometheus-values.yaml -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/grafana-values.yaml | tee grafana-values.yaml +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/grafana-values.yaml | tee grafana-values.yaml helm install --namespace monitoring grafana grafana-charts/grafana --values grafana-values.yaml diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step16-delete-node.sh b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step16-delete-node.sh index 9d431160dda0..93b1ac792ade 100755 --- a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step16-delete-node.sh +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step16-delete-node.sh @@ -1 +1 @@ -kubectl delete node $NODE_NAME +kubectl delete node "${NODE_NAME}" diff --git a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step17-cleanup.sh b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step17-cleanup.sh index dc84b673389f..f694ac3797f4 100755 --- a/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step17-cleanup.sh +++ b/website/content/en/docs/getting-started/getting-started-with-karpenter/scripts/step17-cleanup.sh @@ -1,6 +1,6 @@ helm uninstall karpenter --namespace "${KARPENTER_NAMESPACE}" aws cloudformation delete-stack --stack-name "Karpenter-${CLUSTER_NAME}" -aws ec2 describe-launch-templates --filters Name=tag:karpenter.k8s.aws/cluster,Values=${CLUSTER_NAME} | +aws ec2 describe-launch-templates --filters "Name=tag:karpenter.k8s.aws/cluster,Values=${CLUSTER_NAME}" | jq -r ".LaunchTemplates[].LaunchTemplateName" | xargs -I{} aws ec2 delete-launch-template --launch-template-name {} eksctl delete cluster --name "${CLUSTER_NAME}" diff --git a/website/content/en/docs/getting-started/migrating-from-cas/_index.md b/website/content/en/docs/getting-started/migrating-from-cas/_index.md index 0932063757e8..3c13b29f841f 100644 --- a/website/content/en/docs/getting-started/migrating-from-cas/_index.md +++ b/website/content/en/docs/getting-started/migrating-from-cas/_index.md @@ -92,10 +92,10 @@ One for your Karpenter node role and one for your existing node group. First set the Karpenter release you want to deploy. ```bash -export KARPENTER_VERSION=v0.34.1 +export KARPENTER_VERSION="0.35.0" ``` -We can now generate a full Karpenter deployment yaml from the helm chart. +We can now generate a full Karpenter deployment yaml from the Helm chart. 
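+
+For readers without the rendered script include below, the generation step amounts to a `helm template` invocation along these lines (a sketch only; the authoritative flags live in `step08-generate-chart.sh`, and the role name follows the convention used earlier in this guide):
+
+```bash
+helm template karpenter oci://public.ecr.aws/karpenter/karpenter --version "${KARPENTER_VERSION}" --namespace "${KARPENTER_NAMESPACE}" \
+    --set "settings.clusterName=${CLUSTER_NAME}" \
+    --set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/KarpenterControllerRole-${CLUSTER_NAME}" \
+    > karpenter.yaml
+```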
{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step08-generate-chart.sh" language="bash" %}} @@ -133,7 +133,7 @@ Now that our deployment is ready we can create the karpenter namespace, create t ## Create default NodePool -We need to create a default NodePool so Karpenter knows what types of nodes we want for unscheduled workloads. You can refer to some of the [example NodePool](https://github.com/aws/karpenter/tree/v0.34.1/examples/v1beta1) for specific needs. +We need to create a default NodePool so Karpenter knows what types of nodes we want for unscheduled workloads. You can refer to some of the [example NodePool](https://github.com/aws/karpenter/tree/v0.35.0/examples/v1beta1) for specific needs. {{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step10-create-nodepool.sh" language="bash" %}} diff --git a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step01-env.sh b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step01-env.sh index 20645685137b..f456eddc75ee 100644 --- a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step01-env.sh +++ b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step01-env.sh @@ -1,6 +1,6 @@ AWS_PARTITION="aws" # if you are not using standard partitions, you may need to configure to aws-cn / aws-us-gov AWS_REGION="$(aws configure list | grep region | tr -s " " | cut -d" " -f3)" -OIDC_ENDPOINT="$(aws eks describe-cluster --name ${CLUSTER_NAME} \ +OIDC_ENDPOINT="$(aws eks describe-cluster --name "${CLUSTER_NAME}" \ --query "cluster.identity.oidc.issuer" --output text)" AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' \ --output text) diff --git a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step03-node-policies.sh b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step03-node-policies.sh index d57f79039d04..fbc5455e541b 100644 --- a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step03-node-policies.sh +++ b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step03-node-policies.sh @@ -1,11 +1,11 @@ aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \ - --policy-arn arn:${AWS_PARTITION}:iam::aws:policy/AmazonEKSWorkerNodePolicy + --policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonEKSWorkerNodePolicy" aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \ - --policy-arn arn:${AWS_PARTITION}:iam::aws:policy/AmazonEKS_CNI_Policy + --policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonEKS_CNI_Policy" aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \ - --policy-arn arn:${AWS_PARTITION}:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly + --policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \ - --policy-arn arn:${AWS_PARTITION}:iam::aws:policy/AmazonSSMManagedInstanceCore + --policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonSSMManagedInstanceCore" diff --git a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step04-controller-iam.sh b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step04-controller-iam.sh index 70c59e826393..cc3d7f929986 100644 --- a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step04-controller-iam.sh +++ 
b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step04-controller-iam.sh @@ -19,7 +19,7 @@ cat << EOF > controller-trust-policy.json } EOF -aws iam create-role --role-name KarpenterControllerRole-${CLUSTER_NAME} \ +aws iam create-role --role-name "KarpenterControllerRole-${CLUSTER_NAME}" \ --assume-role-policy-document file://controller-trust-policy.json cat << EOF > controller-policy.json @@ -138,6 +138,6 @@ cat << EOF > controller-policy.json } EOF -aws iam put-role-policy --role-name KarpenterControllerRole-${CLUSTER_NAME} \ - --policy-name KarpenterControllerPolicy-${CLUSTER_NAME} \ +aws iam put-role-policy --role-name "KarpenterControllerRole-${CLUSTER_NAME}" \ + --policy-name "KarpenterControllerPolicy-${CLUSTER_NAME}" \ --policy-document file://controller-policy.json diff --git a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step05-tag-subnets.sh b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step05-tag-subnets.sh index de972ea2bddd..47df188dc87d 100644 --- a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step05-tag-subnets.sh +++ b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step05-tag-subnets.sh @@ -1,6 +1,6 @@ -for NODEGROUP in $(aws eks list-nodegroups --cluster-name ${CLUSTER_NAME} \ - --query 'nodegroups' --output text); do aws ec2 create-tags \ +for NODEGROUP in $(aws eks list-nodegroups --cluster-name "${CLUSTER_NAME}" --query 'nodegroups' --output text); do + aws ec2 create-tags \ --tags "Key=karpenter.sh/discovery,Value=${CLUSTER_NAME}" \ - --resources $(aws eks describe-nodegroup --cluster-name ${CLUSTER_NAME} \ - --nodegroup-name $NODEGROUP --query 'nodegroup.subnets' --output text ) + --resources "$(aws eks describe-nodegroup --cluster-name "${CLUSTER_NAME}" \ + --nodegroup-name "${NODEGROUP}" --query 'nodegroup.subnets' --output text )" done diff --git a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step06-tag-security-groups.sh b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step06-tag-security-groups.sh index 397e40904cee..c63bde3b78dc 100644 --- a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step06-tag-security-groups.sh +++ b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step06-tag-security-groups.sh @@ -1,22 +1,22 @@ -NODEGROUP=$(aws eks list-nodegroups --cluster-name ${CLUSTER_NAME} \ +NODEGROUP=$(aws eks list-nodegroups --cluster-name "${CLUSTER_NAME}" \ --query 'nodegroups[0]' --output text) -LAUNCH_TEMPLATE=$(aws eks describe-nodegroup --cluster-name ${CLUSTER_NAME} \ - --nodegroup-name ${NODEGROUP} --query 'nodegroup.launchTemplate.{id:id,version:version}' \ +LAUNCH_TEMPLATE=$(aws eks describe-nodegroup --cluster-name "${CLUSTER_NAME}" \ + --nodegroup-name "${NODEGROUP}" --query 'nodegroup.launchTemplate.{id:id,version:version}' \ --output text | tr -s "\t" ",") # If your EKS setup is configured to use only Cluster security group, then please execute - SECURITY_GROUPS=$(aws eks describe-cluster \ - --name ${CLUSTER_NAME} --query "cluster.resourcesVpcConfig.clusterSecurityGroupId" --output text) + --name "${CLUSTER_NAME}" --query "cluster.resourcesVpcConfig.clusterSecurityGroupId" --output text) # If your setup uses the security groups in the Launch template of a managed node group, then : -SECURITY_GROUPS=$(aws ec2 describe-launch-template-versions \ - --launch-template-id ${LAUNCH_TEMPLATE%,*} --versions ${LAUNCH_TEMPLATE#*,} \ +SECURITY_GROUPS="$(aws ec2 
describe-launch-template-versions \ + --launch-template-id "${LAUNCH_TEMPLATE%,*}" --versions "${LAUNCH_TEMPLATE#*,}" \ --query 'LaunchTemplateVersions[0].LaunchTemplateData.[NetworkInterfaces[0].Groups||SecurityGroupIds]' \ - --output text) + --output text)" aws ec2 create-tags \ --tags "Key=karpenter.sh/discovery,Value=${CLUSTER_NAME}" \ - --resources ${SECURITY_GROUPS} + --resources "${SECURITY_GROUPS}" diff --git a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step09-deploy.sh b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step09-deploy.sh index 51714d78f6dd..e46742fd22ea 100644 --- a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step09-deploy.sh +++ b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step09-deploy.sh @@ -1,8 +1,8 @@ kubectl create namespace "${KARPENTER_NAMESPACE}" || true kubectl create -f \ - https://raw.githubusercontent.com/aws/karpenter-provider-aws/${KARPENTER_VERSION}/pkg/apis/crds/karpenter.sh_nodepools.yaml + "https://raw.githubusercontent.com/aws/karpenter-provider-aws/v${KARPENTER_VERSION}/pkg/apis/crds/karpenter.sh_nodepools.yaml" kubectl create -f \ - https://raw.githubusercontent.com/aws/karpenter-provider-aws/${KARPENTER_VERSION}/pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml + "https://raw.githubusercontent.com/aws/karpenter-provider-aws/v${KARPENTER_VERSION}/pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml" kubectl create -f \ - https://raw.githubusercontent.com/aws/karpenter-provider-aws/${KARPENTER_VERSION}/pkg/apis/crds/karpenter.sh_nodeclaims.yaml + "https://raw.githubusercontent.com/aws/karpenter-provider-aws/v${KARPENTER_VERSION}/pkg/apis/crds/karpenter.sh_nodeclaims.yaml" kubectl apply -f karpenter.yaml diff --git a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step12-scale-multiple-ng.sh b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step12-scale-multiple-ng.sh index d88a6d5c7236..a28c9759bcdb 100644 --- a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step12-scale-multiple-ng.sh +++ b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step12-scale-multiple-ng.sh @@ -1,5 +1,5 @@ -for NODEGROUP in $(aws eks list-nodegroups --cluster-name ${CLUSTER_NAME} \ - --query 'nodegroups' --output text); do aws eks update-nodegroup-config --cluster-name ${CLUSTER_NAME} \ - --nodegroup-name ${NODEGROUP} \ +for NODEGROUP in $(aws eks list-nodegroups --cluster-name "${CLUSTER_NAME}" \ + --query 'nodegroups' --output text); do aws eks update-nodegroup-config --cluster-name "${CLUSTER_NAME}" \ + --nodegroup-name "${NODEGROUP}" \ --scaling-config "minSize=1,maxSize=1,desiredSize=1" done diff --git a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step12-scale-single-ng.sh b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step12-scale-single-ng.sh index 51ad964c28a7..efce68394779 100644 --- a/website/content/en/docs/getting-started/migrating-from-cas/scripts/step12-scale-single-ng.sh +++ b/website/content/en/docs/getting-started/migrating-from-cas/scripts/step12-scale-single-ng.sh @@ -1,3 +1,3 @@ -aws eks update-nodegroup-config --cluster-name ${CLUSTER_NAME} \ - --nodegroup-name ${NODEGROUP} \ +aws eks update-nodegroup-config --cluster-name "${CLUSTER_NAME}" \ + --nodegroup-name "${NODEGROUP}" \ --scaling-config "minSize=2,maxSize=2,desiredSize=2" diff --git a/website/content/en/docs/reference/cloudformation.md 
b/website/content/en/docs/reference/cloudformation.md index 1ad28b7bd617..791008d60c5e 100644 --- a/website/content/en/docs/reference/cloudformation.md +++ b/website/content/en/docs/reference/cloudformation.md @@ -17,8 +17,8 @@ These descriptions should allow you to understand: To download a particular version of `cloudformation.yaml`, set the version and use `curl` to pull the file to your local system: ```bash -export KARPENTER_VERSION=v0.34.1 -curl https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > cloudformation.yaml +export KARPENTER_VERSION="0.35.0" +curl https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > cloudformation.yaml ``` Following some header information, the rest of the `cloudformation.yaml` file describes the resources that CloudFormation deploys. diff --git a/website/content/en/docs/reference/instance-types.md b/website/content/en/docs/reference/instance-types.md index 64aeaf1f1174..6978c20ef258 100644 --- a/website/content/en/docs/reference/instance-types.md +++ b/website/content/en/docs/reference/instance-types.md @@ -11411,6 +11411,30 @@ below are the resources available with some assumptions and after the instance o |pods|737| |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| +### `m7gd.metal` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|m| + |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|m7gd| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|| + |karpenter.k8s.aws/instance-local-nvme|3800| + |karpenter.k8s.aws/instance-memory|262144| + |karpenter.k8s.aws/instance-size|metal| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|m7gd.metal| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|63770m| + |ephemeral-storage|17Gi| + |memory|233962Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| ## m7i Family ### `m7i.large` #### Labels diff --git a/website/content/en/docs/reference/metrics.md b/website/content/en/docs/reference/metrics.md index 0f42331c1b95..b7cf1366f50f 100644 --- a/website/content/en/docs/reference/metrics.md +++ b/website/content/en/docs/reference/metrics.md @@ -115,11 +115,17 @@ The number of times that Karpenter failed to launch a replacement node for disru ### `karpenter_disruption_queue_depth` The number of commands currently being waited on in the disruption orchestration queue. +### `karpenter_disruption_pods_disrupted_total` +Total number of reschedulable pods disrupted on nodes. Labeled by NodePool, disruption action, method, and consolidation type. + +### `karpenter_disruption_nodes_disrupted_total` +Total number of nodes disrupted. Labeled by NodePool, disruption action, method, and consolidation type. + ### `karpenter_disruption_evaluation_duration_seconds` -Duration of the disruption evaluation process in seconds. +Duration of the disruption evaluation process in seconds. Labeled by method and consolidation type. ### `karpenter_disruption_eligible_nodes` -Number of nodes eligible for disruption by Karpenter. Labeled by disruption method. +Number of nodes eligible for disruption by Karpenter. Labeled by disruption method and consolidation type. 
### `karpenter_disruption_consolidation_timeouts_total` Number of times the Consolidation algorithm has reached a timeout. Labeled by consolidation type. @@ -128,13 +134,21 @@ Number of times the Consolidation algorithm has reached a timeout. Labeled by co The number of nodes for a given NodePool that can be disrupted at a point in time. Labeled by NodePool. Note that allowed disruptions can change very rapidly, as new nodes may be created and others may be deleted at any point. ### `karpenter_disruption_actions_performed_total` -Number of disruption actions performed. Labeled by disruption method. +Number of disruption actions performed. Labeled by disruption action, method, and consolidation type. ## Consistency Metrics ### `karpenter_consistency_errors` Number of consistency checks that have failed. +## Cluster State Metrics + +### `karpenter_cluster_state_synced` +Returns 1 if cluster state is synced and 0 otherwise. Synced checks that the nodeclaims and nodes stored in the APIServer have the same representation as Karpenter's cluster state. + +### `karpenter_cluster_state_node_count` +Current count of nodes in cluster state. + ## Cloudprovider Metrics ### `karpenter_cloudprovider_instance_type_price_estimate` @@ -176,3 +190,4 @@ Maximum number of concurrent reconciles per controller ### `controller_runtime_active_workers` Number of currently used workers per controller + diff --git a/website/content/en/docs/reference/threat-model.md b/website/content/en/docs/reference/threat-model.md index b85189627387..8bac8d7bc5a4 100644 --- a/website/content/en/docs/reference/threat-model.md +++ b/website/content/en/docs/reference/threat-model.md @@ -31,11 +31,11 @@ A Cluster Developer has the ability to create pods via `Deployments`, `ReplicaSe Karpenter has permissions to create and manage cloud instances. Karpenter has Kubernetes API permissions to create, update, and remove nodes, as well as evict pods. For a full list of the permissions, see the RBAC rules in the helm chart template. Karpenter also has AWS IAM permissions to create instances with IAM roles. -* [aggregate-clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.34.1/charts/karpenter/templates/aggregate-clusterrole.yaml) -* [clusterrole-core.yaml](https://github.com/aws/karpenter/blob/v0.34.1/charts/karpenter/templates/clusterrole-core.yaml) -* [clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.34.1/charts/karpenter/templates/clusterrole.yaml) -* [rolebinding.yaml](https://github.com/aws/karpenter/blob/v0.34.1/charts/karpenter/templates/rolebinding.yaml) -* [role.yaml](https://github.com/aws/karpenter/blob/v0.34.1/charts/karpenter/templates/role.yaml) +* [aggregate-clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/aggregate-clusterrole.yaml) +* [clusterrole-core.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/clusterrole-core.yaml) +* [clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/clusterrole.yaml) +* [rolebinding.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/rolebinding.yaml) +* [role.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/role.yaml) ## Assumptions @@ -63,16 +63,16 @@ Karpenter has permissions to create and manage cloud instances. 
Karpenter has Ku ### Threat: Using EC2 CreateTag/DeleteTag Permissions to Orchestrate Instance Creation/Deletion -**Background**: As of v0.28.0, Karpenter creates a mapping between CloudProvider instances and CustomResources in the cluster for capacity tracking. To ensure this mapping is consistent, Karpenter utilizes the following tag keys: +**Background**: As of `0.28.0`, Karpenter creates a mapping between CloudProvider instances and CustomResources in the cluster for capacity tracking. To ensure this mapping is consistent, Karpenter utilizes the following tag keys: * `karpenter.sh/managed-by` * `karpenter.sh/nodepool` * `kubernetes.io/cluster/${CLUSTER_NAME}` -* `karpenter.sh/provisioner-name` (prior to `v0.32.0`) +* `karpenter.sh/provisioner-name` (prior to `0.32.0`) Any user that has the ability to Create/Delete tags on CloudProvider instances will have the ability to orchestrate Karpenter to Create/Delete CloudProvider instances as a side effect. -In addition, as of v0.29.0, Karpenter will Drift on Security Groups and Subnets. If a user has the Create/Delete tags permission for either of resources, they can orchestrate Karpenter to Create/Delete CloudProvider instances as a side effect. +In addition, as of `0.29.0`, Karpenter will Drift on Security Groups and Subnets. If a user has the Create/Delete tags permission for either of these resources, they can orchestrate Karpenter to Create/Delete CloudProvider instances as a side effect. **Threat:** A Cluster Operator attempts to create or delete a tag on a resource discovered by Karpenter. If it has the ability to create a tag, it can effectively create or delete CloudProvider instances associated with the tagged resources. diff --git a/website/content/en/docs/tasks/_index.md b/website/content/en/docs/tasks/_index.md new file mode 100644 index 000000000000..7d4ac8605f4e --- /dev/null +++ b/website/content/en/docs/tasks/_index.md @@ -0,0 +1,10 @@ +--- +title: "Tasks" +linkTitle: "Tasks" +weight: 25 +description: > + Tasks to run with Karpenter +cascade: + type: docs +--- + diff --git a/website/content/en/docs/tasks/amitasks.md b/website/content/en/docs/tasks/amitasks.md new file mode 100644 index 000000000000..94235e0da878 --- /dev/null +++ b/website/content/en/docs/tasks/amitasks.md @@ -0,0 +1,176 @@ +--- +title: "Managing AMIs" +linkTitle: "Managing AMIs" +weight: 10 +description: > + Tasks for managing AMIs in Karpenter +--- + +Understanding how Karpenter assigns AMIs to nodes can help ensure that your workloads will run successfully on those nodes and continue to run if the nodes are upgraded to newer AMIs. +Below we describe how Karpenter assigns AMIs to nodes when they are first deployed and how newer AMIs are assigned later when nodes are spun up to replace old ones. +Later, there are tasks that describe the ways that you can intervene to assert control over how AMIs are used by Karpenter for your clusters. + +Features for managing AMIs described here should be considered as part of the larger upgrade policies that you have for your clusters. +See [How do I upgrade an EKS Cluster with Karpenter]({{< relref "../faq/#how-do-i-upgrade-an-eks-cluster-with-karpenter" >}}) for details on this process. + +## How Karpenter assigns AMIs to nodes by default + +If you do nothing to modify how Karpenter handles AMIs, here is how Karpenter assigns AMIs to nodes: + +* When you create an `EC2NodeClass`, you are required to set the family of AMIs to use. For example, for the AL2 family, you would set `amiFamily: AL2`.
+* With that `amiFamily` set, any time Karpenter needs to spin up a new node, it uses the latest AMI in the AL2 family. +* Later, if an existing node needs to be replaced, Karpenter checks to see if a newer AMI in the AL2 family is available and automatically uses it instead to spin up the new node. In other words, you may automatically get an AMI that you have not tested with your workloads. + +You can manually delete a node managed by Karpenter, which will cause the default behavior just described to take effect. +However, there are situations that will cause node replacements with newer AMIs to happen automatically. +These include: + +* **Expiration**: If node expiry is set for a node, the node is marked for deletion after a certain time. +* [**Consolidation**]({{< relref "../concepts/disruption/#consolidation" >}}): If a node is empty of workloads, or deemed to be inefficiently running workloads, nodes can be deleted and more appropriately featured nodes are brought up to consolidate workloads. +* [**Drift**]({{< relref "../concepts/disruption/#drift" >}}): Nodes are set for deletion when they drift from the desired state of the `NodeClaim`s and new nodes are brought up to replace them. +* [**Interruption**]({{< relref "../concepts/disruption/#interruption" >}}): Nodes are sometimes involuntarily disrupted by things like Spot interruption, health changes, and instance events, requiring new nodes to be deployed. + +See [**Automated Methods**]({{< relref "../concepts/disruption/#automated-methods" >}}) for details on how Karpenter uses these automated actions to replace nodes. + +With these types of automated updates in place, there is some risk that a new AMI will introduce incompatibilities or bugs that cause your workloads to be degraded or fail altogether. +The tasks described below tell you how to take more control over the ways in which Karpenter handles AMI assignments to nodes. + +{{% alert title="Important" color="warning" %}} +If you are new to Karpenter, you should know that the behavior described here is different from what you get with Managed Node Groups (MNG). MNG will always use the assigned AMI when it creates a new node and will never automatically upgrade to a new AMI when a new node is required. See [Updating a Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/update-managed-node-group.html) to see how you would manually update MNG to use new AMIs. +{{% /alert %}} + +## Choosing AMI tasks +One of Karpenter's greatest assets is its ability to provide the right node at the right time, with little intervention from the person managing the cluster. +Its default behavior of using a later AMI if one becomes available in the selected family means you automatically get the latest security fixes and features. +However, with this comes the risk that the new AMI could break or degrade your workloads. + +As the Karpenter team looks for new ways to manage AMIs, the tasks below offer some means of reducing these risks, based on your own security and ease-of-use requirements. +Here are the advantages and challenges of each of the tasks described below: + +* Task 1 (Test AMIs): The safest way, and the one we recommend, to ensure that a new AMI doesn't break your workloads is to test it before putting it into production. This takes the most effort on your part, but can reduce the risk of failed workloads in production.
Note that you can sometimes get different results from your test environment when you roll a new AMI into production, since factors like scale can surface problems you might not see in test. Combining this with other tasks that slow rollouts can allow you to catch problems before they impact your whole cluster. +* Task 2 (Lock down AMIs): If workloads require a particular AMI, this task can make sure that it is the only AMI used by Karpenter. This can be used in combination with Task 1, where you lock down the AMI in production, but allow the newest AMIs in a test cluster while you test your workloads before upgrading production. Keep in mind that this makes upgrades a manual process for you. +* Task 3 (Disruption budgets): This task can be used as a way of preventing a major problem if a new AMI causes problems with your workloads. With Disruption Budgets you can slow the pace of upgrades to nodes with new AMIs or make sure that upgrades only happen during selected dates and times (using crontab). This doesn't prevent a bad AMI from being deployed, but it does give you time to respond if a few upgraded nodes at a time show some distress. +* Task 4 (Do not interrupt): While this task doesn't represent a larger solution to the problem, it gives you the opportunity to prevent either all nodes, or a node running a particular workload, from being upgraded. Note that these settings have no impact in cases where the removal of the node is involuntary (such as when the instance it is running on crashes or a Spot instance is reclaimed). + +## Tasks + +The following tasks let you have an impact on Karpenter’s behavior as it relates to how nodes are created and AMIs are consumed. + +### Task 1: Manage how AMIs are tested and rolled out + +Instead of just avoiding AMI upgrades, you can set up test clusters where you can try out new AMI releases before they are put into production. +For example, you could have: + +* **Test clusters**: On these private clusters, you can run the latest AMIs for your workloads in a safe environment. +* **Production clusters**: When you feel that everything is working properly, you can set the latest AMIs to be deployed in your production clusters so they are now upgraded. + +Remember that it is still best practice to gradually roll new AMIs into your cluster, even if they have been tested. + +### Task 2: Lock down which AMIs are selected + +Instead of letting Karpenter always run the latest AMI, you can change Karpenter’s default behavior. +When you configure the [**EC2NodeClass**]({{< relref "../concepts/nodeclasses" >}}), you can set a specific AMI that you want Karpenter to always choose, using the `amiSelectorTerms` field. +This prevents a new and potentially untested AMI from replacing existing nodes when those nodes are terminated. + +With the `amiSelectorTerms` field in an `EC2NodeClass`, you can set a specific AMI for Karpenter to use, based on AMI name or id (only one is required). +These examples show two different ways to identify the same AMI: + +```yaml +amiSelectorTerms: + - tags: + karpenter.sh/discovery: "${CLUSTER_NAME}" + environment: prod + - name: al2023-ami-2023.3.20240219.0-kernel-6.1-x86_64 +``` + +```yaml +amiSelectorTerms: + - tags: + karpenter.sh/discovery: "${CLUSTER_NAME}" + environment: prod + - id: ami-052c9ea013e6e3567 +``` + +See the [**spec.amiSelectorTerms**]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}) section of the NodeClasses page for details.
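For context, here is a minimal sketch of a complete `EC2NodeClass` that pins the AMI by ID, reusing the example ID from above; the resource name and role are placeholders, not values from this guide:

```yaml
apiVersion: karpenter.k8s.aws/v1beta1
kind: EC2NodeClass
metadata:
  name: default # placeholder name
spec:
  amiFamily: AL2
  role: "KarpenterNodeRole-${CLUSTER_NAME}" # placeholder role
  subnetSelectorTerms:
    - tags:
        karpenter.sh/discovery: "${CLUSTER_NAME}"
  securityGroupSelectorTerms:
    - tags:
        karpenter.sh/discovery: "${CLUSTER_NAME}"
  amiSelectorTerms:
    # Pin to a single tested AMI; Karpenter will not move nodes to a
    # newer AMI until this ID is changed.
    - id: ami-052c9ea013e6e3567
```

With an `id` term in place, adopting a newer AMI becomes a deliberate edit to this field rather than an automatic side effect of node replacement.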
+Keep in mind that this could prevent you from getting critical security patches when new AMIs are available, but it does give you control over exactly which AMI is running. + +### Task 3: Control the pace of node disruptions + +To help prevent the possibility of a new AMI being deployed to all your nodes and breaking all of your workloads, you can enable Karpenter [**Disruption Budgets**]({{< relref "../concepts/disruption/#disruption-budgets" >}}). +Disruption Budgets limit when and to what extent nodes can be disrupted. +You can prevent disruption based on nodes (a percentage or number of nodes that can be disrupted at a time) and schedule (blocking disruption during certain times). +You can set Disruption Budgets in a `NodePool` spec. +Here is an example: + +```yaml +disruption: + consolidationPolicy: WhenEmpty + expireAfter: 1440h + budgets: + - nodes: 15% + - nodes: "3" + - nodes: "0" + schedule: "0 7 * * sat-sun" + duration: 12h +``` + +The `disruption` settings define a few fields that indicate the state of a node that should be disrupted. +The `consolidationPolicy` field indicates that a node should be disrupted if the node is either underutilized (`WhenUnderutilized`) or not running any pods (`WhenEmpty`). +With `expireAfter` set to `1440` hours, the node expires after 60 days. +Raising those values results in longer periods without disruption. + +Settings for budgets in the above example include the following: + +* **Percentage of nodes**: From the first `nodes` setting, only `15%` of the NodePool’s nodes can be disrupted at a time. +* **Number of nodes**: The second `nodes` setting limits the number of nodes that can be disrupted at a time to `3`. +* **Schedule**: The third `nodes` setting uses `schedule` to say that zero disruptions (`0`) are allowed starting at 7am on Saturday and Sunday, continuing for 12 hours. +The format of the schedule follows the `crontab` format for identifying dates and times. +See the [crontab](https://man7.org/linux/man-pages/man5/crontab.5.html) page for information on the supported values for these fields. + +As with all disruption settings, keep in mind that avoiding updated AMIs for your nodes can result in not getting fixes for known security risks and bugs. +You need to balance that with your desire to not risk breaking the workloads on your cluster. + +### Task 4: Prevent Karpenter from disrupting nodes + +There are several ways you can prevent Karpenter from disrupting nodes that it manages, to mitigate the risk of an untested AMI being deployed. + +* **Set Pods to not allow disruption**: When you run pods from a Deployment spec, you can set `karpenter.sh/do-not-disrupt` to true on that Deployment. +This will prevent the node that the pod is running on from being disrupted while the pod is running (see [**Pod-level Controls**]({{< relref "../concepts/disruption/#pod-level-controls" >}}) for details). +This can be useful for things like batch jobs, which you want to run to completion and never be moved. +For example: + + +```yaml + apiVersion: apps/v1 + kind: Deployment + spec: + template: + metadata: + annotations: + karpenter.sh/do-not-disrupt: "true" +``` + +* **Set nodes to not allow disruption**: In the NodePool spec, you can set `karpenter.sh/do-not-disrupt` to true. +This prevents any nodes created from the NodePool from being considered for disruption (see [**Example: Disable Disruption on a NodePool**]({{< relref "../concepts/disruption/#example-disable-disruption-on-a-nodepool" >}}) for details).
+For example: + +```yaml + apiVersion: karpenter.sh/v1beta1 + kind: NodePool + metadata: + name: default + spec: + template: + metadata: + annotations: # will be applied to all nodes + karpenter.sh/do-not-disrupt: "true" +``` + +Keep in mind that these are not permanent solutions and cannot prevent all node disruptions, such as disruptions resulting from failed node health checks or the instance running the node going down. +Using only the disruption-prevention methods described here, you will not prevent new AMIs from being used if an unintended disruption of a node occurs, unless you have already locked down specific AMIs to use. +## Follow-up + +The Karpenter project continues to add features to give you greater control over AMI upgrades on your clusters. +If you have opinions about features you would like to see to manage AMIs with Karpenter, feel free to open a Karpenter [New Issue](https://github.com/aws/karpenter-provider-aws/issues/new/choose). + diff --git a/website/content/en/docs/troubleshooting.md b/website/content/en/docs/troubleshooting.md index 680acd95cf6c..9bab69ef22ff 100644 --- a/website/content/en/docs/troubleshooting.md +++ b/website/content/en/docs/troubleshooting.md @@ -29,7 +29,7 @@ Update the zap-logger-config "level" and restart the Karpenter pod(s) to enable #### Debug logging via Helm -You can enable debug logging during installation with helm by setting the option `logLevel`. +You can enable debug logging during installation with Helm by setting the option `logLevel`. ``` helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter \ @@ -77,7 +77,7 @@ Info on whether there has been a change to the CRD between versions of Karpenter ### Unable to schedule pod due to insufficient node group instances -v0.16.0 changed the default replicas from 1 to 2. +`0.16.0` changed the default replicas from 1 to 2. Karpenter won't launch capacity to run itself (log related to the `karpenter.sh/provisioner-name DoesNotExist requirement`), so it can't provision for the second Karpenter pod. @@ -89,18 +89,18 @@ To do so on AWS increase the `minimum` and `desired` parameters on the node grou ### Helm Error When Pulling the Chart -If Helm is showing an error when trying to install Karpenter helm charts: +If Helm is showing an error when trying to install Karpenter Helm charts: -- Ensure you are using a newer Helm version, Helm started supporting OCI images since v3.8.0. -- Helm does not have an `helm repo add` concept in OCI, so to install Karpenter you no longer need this +- Ensure you are using a newer Helm version; Helm has supported OCI images since `3.8.0`. +- Helm does not have a `helm repo add` concept in OCI, so to install Karpenter you no longer need this step. +- If you get an error like `Error: public.ecr.aws/karpenter/karpenter:0.34.0: not found`, make sure you're adding a `v` prefix for Karpenter versions between `0.17.0` and `0.34.x`.
- Verify that the image you are trying to pull actually exists in [gallery.ecr.aws/karpenter](https://gallery.ecr.aws/karpenter/karpenter) -- Sometimes Helm generates a generic error, you can add the --debug switch to any of the helm commands in this doc for more verbose error messages -- If you are getting a 403 forbidden error, you can try `docker logout public.ecr.aws` as explained [here](https://docs.aws.amazon.com/AmazonECR/latest/public/public-troubleshooting.html) -- If you are receiving this error: `Error: failed to download "oci://public.ecr.aws/karpenter/karpenter" at version "0.17.0"`, then you need to prepend a `v` to the version number: `v0.17.0`. Before Karpenter moved to OCI helm charts (pre-v0.17.0), both `v0.16.0` and `0.16.0` would work, but OCI charts require an exact version match. +- Sometimes Helm generates a generic error; you can add the `--debug` switch to any of the Helm commands in this doc for more verbose error messages. +- If you are getting a 403 forbidden error, you can try `docker logout public.ecr.aws` as explained [here](https://docs.aws.amazon.com/AmazonECR/latest/public/public-troubleshooting.html). ### Helm Error when installing the `karpenter-crd` chart -Karpenter v0.26.1+ introduced the `karpenter-crd` helm chart. When installing this chart on your cluster, if you have previously added the Karpenter CRDs to your cluster through the `karpenter` controller chart or through `kubectl replace`, Helm will reject the install of the chart due to `invalid ownership metadata`. +Karpenter `0.26.1`+ introduced the `karpenter-crd` Helm chart. When installing this chart on your cluster, if you have previously added the Karpenter CRDs to your cluster through the `karpenter` controller chart or through `kubectl replace`, Helm will reject the install of the chart due to `invalid ownership metadata`. - In the case of `invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"`, run: @@ -137,7 +137,7 @@ kubectl get nodes -ojsonpath='{range .items[*].metadata}{@.name}:{@.finalizers}{ If you are not able to create a provisioner due to `Internal error occurred: failed calling webhook "validation.webhook.provisioners.karpenter.sh":` -Webhooks were renamed in `v0.19.0`. There's a bug in ArgoCD's upgrade workflow where webhooks are leaked. This results in Provisioner's failing to be validated, since the validation server no longer corresponds to the webhook definition. +Webhooks were renamed in `0.19.0`. There's a bug in ArgoCD's upgrade workflow where webhooks are leaked. This results in Provisioners failing to be validated, since the validation server no longer corresponds to the webhook definition. Delete the stale webhooks. @@ -148,7 +148,7 @@ kubectl delete validatingwebhookconfiguration validation.webhook.provisioners.ka ### Failed calling webhook "defaulting.webhook.karpenter.sh" -The `defaulting.webhook.karpenter.sh` mutating webhook was removed in `v0.27.3`. If you are coming from an older version of Karpenter where this webhook existed and the webhook was not managed by Helm, you may need to delete the stale webhook. +The `defaulting.webhook.karpenter.sh` mutating webhook was removed in `0.27.3`. If you are coming from an older version of Karpenter where this webhook existed and the webhook was not managed by Helm, you may need to delete the stale webhook.
```bash kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh @@ -192,11 +192,11 @@ Disabling swap will allow kubelet to join the cluster successfully, however user ### DaemonSets can result in deployment failures -For Karpenter versions 0.5.3 and earlier, DaemonSets were not properly considered when provisioning nodes. +For Karpenter versions `0.5.3` and earlier, DaemonSets were not properly considered when provisioning nodes. This sometimes caused nodes to be deployed that could not meet the needs of the requested DaemonSets and workloads. -This issue no longer occurs after Karpenter version 0.5.3 (see [PR #1155](https://github.com/aws/karpenter/pull/1155)). +This issue no longer occurs after Karpenter version `0.5.3` (see [PR #1155](https://github.com/aws/karpenter/pull/1155)). -If you are using a pre-0.5.3 version of Karpenter, one workaround is to set your provisioner to only use larger instance types that you know will be big enough for the DaemonSet and the workload. +If you are using a pre-`0.5.3` version of Karpenter, one workaround is to set your provisioner to only use larger instance types that you know will be big enough for the DaemonSet and the workload. For more information, see [Issue #1084](https://github.com/aws/karpenter/issues/1084). Examples of this behavior are included in [Issue #1180](https://github.com/aws/karpenter/issues/1180). @@ -213,7 +213,7 @@ See the Karpenter [Best Practices Guide](https://aws.github.io/aws-eks-best-prac ### Missing subnetSelector and securityGroupSelector tags causes provisioning failures -Starting with Karpenter v0.5.5, if you are using Karpenter-generated launch template, provisioners require that [subnetSelector]({{}}) and [securityGroupSelector]({{}}) tags be set to match your cluster. +Starting with Karpenter `0.5.5`, if you are using a Karpenter-generated launch template, provisioners require that [subnetSelector]({{}}) and [securityGroupSelector]({{}}) tags be set to match your cluster. The [Provisioner]({{}}) section in the Karpenter Getting Started Guide uses the following example: ```text diff --git a/website/content/en/docs/upgrading/compatibility.md b/website/content/en/docs/upgrading/compatibility.md index 296fd9475134..88507e3c7b8f 100644 --- a/website/content/en/docs/upgrading/compatibility.md +++ b/website/content/en/docs/upgrading/compatibility.md @@ -15,9 +15,9 @@ Before you begin upgrading Karpenter, consider Karpenter compatibility issues re [comment]: <> (the content below is generated from hack/docs/compataiblitymetrix_gen_docs.go) -| KUBERNETES | 1.23 | 1.24 | 1.25 | 1.26 | 1.27 | 1.28 | 1.29 | -|------------|---------|---------|---------|---------|---------|---------|--------| -| karpenter | 0.21.x+ | 0.21.x+ | 0.25.x+ | 0.28.x+ | 0.28.x+ | 0.31.x+ | 0.34.0 | +| KUBERNETES | 1.23 | 1.24 | 1.25 | 1.26 | 1.27 | 1.28 | 1.29 | +|------------|---------|---------|---------|---------|---------|---------|---------| +| karpenter | 0.21.x+ | 0.21.x+ | 0.25.x+ | 0.28.x+ | 0.28.x+ | 0.31.x+ | 0.34.0+ | [comment]: <> (end docs generated content from hack/docs/compataiblitymetrix_gen_docs.go) @@ -35,7 +35,7 @@ For more information on Karpenter's support for these keys, view [this tracking {{% /alert %}} {{% alert title="Note" color="warning" %}} -Karpenter supports using [Kubernetes Common Expression Language](https://kubernetes.io/docs/reference/using-api/cel/) for validating its Custom Resource Definitions out-of-the-box; however, this feature is not supported on versions of Kubernetes < 1.25.
If you are running an earlier version of Kubernetes, you will need to use the Karpenter admission webhooks for validation instead. You can enable these webhooks with `--set webhook.enabled=true` when applying the Karpenter helm chart. +Karpenter supports using [Kubernetes Common Expression Language](https://kubernetes.io/docs/reference/using-api/cel/) for validating its Custom Resource Definitions out-of-the-box; however, this feature is not supported on versions of Kubernetes < 1.25. If you are running an earlier version of Kubernetes, you will need to use the Karpenter admission webhooks for validation instead. You can enable these webhooks with `--set webhook.enabled=true` when applying the Karpenter Helm chart. {{% /alert %}} ## Compatibility issues @@ -43,7 +43,7 @@ When we introduce a breaking change, we do so only as described in this document. Karpenter follows [Semantic Versioning 2.0.0](https://semver.org/) in its stable release versions, while in -major version zero (v0.y.z) [anything may change at any time](https://semver.org/#spec-item-4). +major version zero (`0.y.z`) [anything may change at any time](https://semver.org/#spec-item-4). However, to further protect users during this phase we will only introduce breaking changes in minor releases (releases that increment y in x.y.z). Note that this does not mean every minor upgrade has a breaking change, as we will also increment the minor version when we release a new feature. Users should therefore check to see if there is a breaking change every time the @@ -55,7 +55,7 @@ When there is a breaking change we will: * Increment the minor version when in major version 0 -* Add a permanent separate section named `upgrading to vx.y.z+` under [release upgrade notes](#release-upgrade-notes) +* Add a permanent separate section named `upgrading to x.y.z+` under [release upgrade notes](#release-upgrade-notes) clearly explaining the breaking change and what needs to be done on the user side to ensure a safe upgrade * Add the sentence “This is a breaking change, please refer to the above link for upgrade instructions” to the top of the release notes and in all our announcements @@ -81,7 +81,7 @@ Karpenter offers three types of releases. This section explains the purpose of e Stable releases are the most reliable releases that are released on a weekly cadence. Stable releases are our only recommended versions for production environments. Sometimes we skip a stable release because we find instability or problems that need to be fixed before having a stable release. -Stable releases are tagged with Semantic Versioning. For example `v0.13.0`. +Stable releases are tagged with a semantic version prefixed by a `v`. For example, `v0.13.0`. ### Release Candidates @@ -93,7 +93,7 @@ By adopting this practice we allow our users who are early adopters to test out We release a snapshot release for every commit that gets merged into [`aws/karpenter-provider-aws`](https://www.github.com/aws/karpenter-provider-aws). This enables users to immediately try a new feature or fix right after it's merged rather than waiting days or weeks for a release. Snapshot releases are not made available in the same public ECR repository as other release types; they are instead published to a separate private ECR repository.
-Helm charts are published to `oci://{{< param "snapshot_repo.account_id" >}}.dkr.ecr.{{< param "snapshot_repo.region" >}}.amazonaws.com/karpenter/snapshot/karpenter` and are tagged with the git commit hash prefixed by the Karpenter major version (e.g. `v0-fc17bfc89ebb30a3b102a86012b3e3992ec08adf`). +Helm charts are published to `oci://{{< param "snapshot_repo.account_id" >}}.dkr.ecr.{{< param "snapshot_repo.region" >}}.amazonaws.com/karpenter/snapshot/karpenter` and are tagged with the git commit hash prefixed by the Karpenter major version (e.g. `0-fc17bfc89ebb30a3b102a86012b3e3992ec08adf`). Anyone with an AWS account can pull from this repository, but must first authenticate: ```bash @@ -103,4 +103,3 @@ aws ecr get-login-password --region {{< param "snapshot_repo.region" >}} | docke {{% alert title="Note" color="warning" %}} Snapshot releases are suitable for testing and troubleshooting, but they should not be used in production environments. Snapshot releases are ephemeral and will be removed 90 days after they were published. {{% /alert %}} - diff --git a/website/content/en/docs/upgrading/upgrade-guide.md b/website/content/en/docs/upgrading/upgrade-guide.md index 12f0811d7021..2bf2e17e603b 100644 --- a/website/content/en/docs/upgrading/upgrade-guide.md +++ b/website/content/en/docs/upgrading/upgrade-guide.md @@ -13,32 +13,42 @@ This guide contains information needed to upgrade to the latest release of Karpe ### CRD Upgrades Karpenter ships with a few Custom Resource Definitions (CRDs). These CRDs are published: -* As an independent helm chart [karpenter-crd](https://gallery.ecr.aws/karpenter/karpenter-crd) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter-crd) that can be used by Helm to manage the lifecycle of these CRDs. To upgrade or install `karpenter-crd` run: +* As an independent Helm chart [karpenter-crd](https://gallery.ecr.aws/karpenter/karpenter-crd) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter-crd) that can be used by Helm to manage the lifecycle of these CRDs. To upgrade or install `karpenter-crd`, run: ```bash KARPENTER_NAMESPACE=kube-system - helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version vx.y.z --namespace "${KARPENTER_NAMESPACE}" --create-namespace + helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version x.y.z --namespace "${KARPENTER_NAMESPACE}" --create-namespace ``` {{% alert title="Note" color="warning" %}} If you get the error `invalid ownership metadata; label validation error:` while installing the `karpenter-crd` chart from an older version of Karpenter, follow the [Troubleshooting Guide]({{}}) for details on how to resolve these errors. {{% /alert %}} -* As part of the helm chart [karpenter](https://gallery.ecr.aws/karpenter/karpenter) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter/crds). Helm [does not manage the lifecycle of CRDs using this method](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/), the tool will only install the CRD during the first installation of the helm chart. Subsequent chart upgrades will not add or remove CRDs, even if the CRDs have changed. When CRDs are changed, we will make a note in the version's upgrade guide. +* As part of the Helm chart [karpenter](https://gallery.ecr.aws/karpenter/karpenter) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter/crds).
Helm [does not manage the lifecycle of CRDs using this method](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/); the tool will only install the CRDs during the first installation of the Helm chart. Subsequent chart upgrades will not add or remove CRDs, even if the CRDs have changed. When CRDs are changed, we will make a note in the version's upgrade guide. -In general, you can reapply the CRDs in the `crds` directory of the Karpenter helm chart: +In general, you can reapply the CRDs in the `crds` directory of the Karpenter Helm chart: ```shell -kubectl apply -f https://raw.githubusercontent.com/aws/karpenter/v0.34.1/pkg/apis/crds/karpenter.sh_nodepools.yaml -kubectl apply -f https://raw.githubusercontent.com/aws/karpenter/v0.34.1/pkg/apis/crds/karpenter.sh_nodeclaims.yaml -kubectl apply -f https://raw.githubusercontent.com/aws/karpenter/v0.34.1/pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml +kubectl apply -f https://raw.githubusercontent.com/aws/karpenter/v0.35.0/pkg/apis/crds/karpenter.sh_nodepools.yaml +kubectl apply -f https://raw.githubusercontent.com/aws/karpenter/v0.35.0/pkg/apis/crds/karpenter.sh_nodeclaims.yaml +kubectl apply -f https://raw.githubusercontent.com/aws/karpenter/v0.35.0/pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml ``` -### Upgrading to v0.34.1+ + -[comment]: <> (WHEN CREATING A NEW SECTION OF THE UPGRADE GUIDANCE FOR NEWER VERSIONS, ENSURE THAT YOU COPY THE ALERT SECTION BELOW TO PROPERLY WARN USERS OF THE RISK OF UPGRADING WITHOUT GOING TO v0.32 FIRST) +### Upgrading to `0.35.0`+ {{% alert title="Warning" color="warning" %}} -v0.33.0+ _only_ supports Karpenter v1beta1 APIs and will not work with existing Provisioner, AWSNodeTemplate or Machine alpha APIs. Do not upgrade to v0.33.0+ without first [upgrading to v0.32.x]({{}}). This version supports both the alpha and beta APIs, allowing you to migrate all of your existing APIs to beta APIs without experiencing downtime. +`0.33.0`+ _only_ supports Karpenter v1beta1 APIs and will not work with existing Provisioner, AWSNodeTemplate or Machine alpha APIs. Do not upgrade to `0.35.0`+ without first [upgrading to `0.32.x`]({{}}). This version supports both the alpha and beta APIs, allowing you to migrate all of your existing APIs to beta APIs without experiencing downtime. {{% /alert %}} + +* Karpenter OCI tags and Helm chart versions are now valid semantic versions, meaning that the `v` prefix carried over from the git tag has been removed and they now follow the `x.y.z` pattern. + +### Upgrading to `0.34.0`+ + +{{% alert title="Warning" color="warning" %}} +`0.33.0`+ _only_ supports Karpenter v1beta1 APIs and will not work with existing Provisioner, AWSNodeTemplate or Machine alpha APIs. Do not upgrade to `0.34.0`+ without first [upgrading to `0.32.x`]({{}}). This version supports both the alpha and beta APIs, allowing you to migrate all of your existing APIs to beta APIs without experiencing downtime. {{% /alert %}} {{% alert title="Warning" color="warning" %}} @@ -46,82 +56,80 @@ The Ubuntu EKS optimized AMI has moved from 20.04 to 22.04 for Kubernetes 1.29+. {{% /alert %}} * Karpenter now supports `nodepool.spec.disruption.budgets`, which allows users to control the speed of disruption in the cluster. Since this requires an update to the Custom Resource, before upgrading, you should re-apply the new updates to the CRDs. Check out [Disruption Budgets]({{}}) for more.
-* With Disruption Budgets, Karpenter will disrupt multiple batches of nodes simultaneously, which can result in overall quicker scale-down of your cluster. Before v0.34, Karpenter had a hard-coded parallelism limit for each type of disruption. In v0.34, Karpenter will now disrupt at most 10% of nodes for a given NodePool. There is no setting that will be perfectly equivalent with the behavior prior to v0.34. When considering how to configure your budgets, please refer to the following limits for versions prior to v0.34: +* With Disruption Budgets, Karpenter will disrupt multiple batches of nodes simultaneously, which can result in overall quicker scale-down of your cluster. Before `0.34.0`, Karpenter had a hard-coded parallelism limit for each type of disruption. In `0.34.0`+, Karpenter will now disrupt at most 10% of nodes for a given NodePool. There is no setting that will be perfectly equivalent with the behavior prior to `0.34.0`. When considering how to configure your budgets, please refer to the following limits for versions prior to `0.34.0`: * `Empty Expiration / Empty Drift / Empty Consolidation`: infinite parallelism * `Non-Empty Expiration / Non-Empty Drift / Single-Node Consolidation`: one node at a time * `Multi-Node Consolidation`: max 100 nodes -* To support Disruption Budgets, v0.34+ includes critical changes to Karpenter's core controllers, which allows Karpenter to consider multiple batches of disrupting nodes simultaneously. This increases Karpenter's performance with the potential downside of higher CPU and memory utilization from the Karpenter pod. While the magnitude of this difference varies on a case-by-case basis, when upgrading to Karpenter v0.34+, please note that you may need to increase the resources allocated to the Karpenter controller pods. +* To support Disruption Budgets, `0.34.0`+ includes critical changes to Karpenter's core controllers, which allows Karpenter to consider multiple batches of disrupting nodes simultaneously. This increases Karpenter's performance with the potential downside of higher CPU and memory utilization from the Karpenter pod. While the magnitude of this difference varies on a case-by-case basis, when upgrading to Karpenter `0.34.0`+, please note that you may need to increase the resources allocated to the Karpenter controller pods. * Karpenter now adds a default `podSecurityContext` that configures the `fsGroup: 65536` of volumes in the pod. If you are using sidecar containers, you should review if this configuration is compatible with them. You can disable this default `podSecurityContext` through Helm by passing `--set podSecurityContext=null` when installing/upgrading the chart. * The `dnsPolicy` for the Karpenter controller pod has been changed back to the Kubernetes cluster default of `ClusterFirst`. Setting our `dnsPolicy` to `Default` (confusingly, this is not the Kubernetes cluster default) caused more confusion for any users running IPv6 clusters with dual-stack nodes or anyone running Karpenter with dependencies on cluster services (like clusters running service meshes). If you still want the old behavior here, you can change the `dnsPolicy` back to `Default` by setting the Helm value on install/upgrade with `--set dnsPolicy=Default`. More details on this issue can be found in the following GitHub issues: [#2186](https://github.com/aws/karpenter-provider-aws/issues/2186) and [#4947](https://github.com/aws/karpenter-provider-aws/issues/4947).
* Karpenter now disallows `nodepool.spec.template.spec.resources` from being set. The webhook validation never allowed `nodepool.spec.template.spec.resources`. We are now ensuring that CEL validation also disallows `nodepool.spec.template.spec.resources` from being set. If you were previously setting the resources field on your NodePool, ensure that you remove this field before upgrading to the newest version of Karpenter, or else updates to the resource may fail on the new version. -### Upgrading to v0.33.0+ - -[comment]: <> (WHEN CREATING A NEW SECTION OF THE UPGRADE GUIDANCE FOR NEWER VERSIONS, ENSURE THAT YOU COPY THE ALERT SECTION BELOW TO PROPERLY WARN USERS OF THE RISK OF UPGRADING WITHOUT GOING TO v0.32 FIRST) +### Upgrading to `0.33.0`+ {{% alert title="Warning" color="warning" %}} -v0.33.0+ _only_ supports Karpenter v1beta1 APIs and will not work with existing Provisioner, AWSNodeTemplate or Machine alpha APIs. **Do not** upgrade to v0.33.0+ without first [upgrading to v0.32.x]({{}}). This version supports both the alpha and beta APIs, allowing you to migrate all of your existing APIs to beta APIs without experiencing downtime. +`0.33.0`+ _only_ supports Karpenter v1beta1 APIs and will not work with existing Provisioner, AWSNodeTemplate or Machine alpha APIs. **Do not** upgrade to `0.33.0`+ without first [upgrading to `0.32.x`]({{}}). This version supports both the alpha and beta APIs, allowing you to migrate all of your existing APIs to beta APIs without experiencing downtime. {{% /alert %}} * Karpenter no longer supports using the `karpenter.sh/provisioner-name` label in NodePool labels and requirements or in application node selectors, affinities, or topologySpreadConstraints. If you were previously using this label to target applications to specific Provisioners, you should update your applications to use the `karpenter.sh/nodepool` label instead before upgrading. If you upgrade without changing these labels, you may begin to see pod scheduling failures for these applications. * Karpenter now tags `spot-instances-request` with the same tags that it applies to instances, volumes, and primary ENIs. This means that you will now need to add the `ec2:CreateTags` permission for `spot-instances-request`. You can also further scope your controller policy for the `ec2:RunInstances` action to require that it launches the `spot-instances-request` with these specific tags. You can view an example of scoping these actions in the [Getting Started Guide's default CloudFormation controller policy](https://github.com/aws/karpenter/blob/v0.33.0/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml#L61). * We now recommend that you set the installation namespace for your Karpenter controllers to `kube-system` to denote Karpenter as a critical cluster component. This ensures that requests from the Karpenter controllers are treated with higher priority by assigning them to a different [PriorityLevelConfiguration](https://kubernetes.io/docs/concepts/cluster-administration/flow-control/#prioritylevelconfiguration) than generic requests from other namespaces. For more details on API Priority and Fairness, read the [Kubernetes API Priority and Fairness Conceptual Docs](https://kubernetes.io/docs/concepts/cluster-administration/flow-control/). Note: Changing the namespace for your Karpenter release will cause the service account namespace to change.
If you are using IRSA for authentication with AWS, you will need to change the scoping set in the controller's trust policy from `karpenter:karpenter` to `kube-system:karpenter`. -* `v0.33.x` disables mutating and validating webhooks by default in favor of using [Common Expression Language for CRD validation](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation). The Common Expression Language Validation Feature [is enabled by default on EKS 1.25](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-rules). If you are using Kubernetes version >= 1.25, no further action is required. If you are using a Kubernetes version below 1.25, you now need to set `DISABLE_WEBHOOK=false` in your container environment variables or `--set webhook.enabled=true` if using Helm. View the [Webhook Support Deprecated in Favor of CEL Section of the v1beta1 Migration Guide]({{}}). -* `v0.33.x` drops support for passing settings through the `karpenter-global-settings` ConfigMap. You should pass settings through the container environment variables in the Karpenter deployment manifest. View the [Global Settings Section of the v1beta1 Migration Guide]({{}}) for more details. -* `v0.33.x` enables `Drift=true` by default in the `FEATURE_GATES`. If you previously didn't enable the feature gate, Karpenter will now check if there is a difference between the desired state of your nodes declared in your NodePool and the actual state of your nodes. View the [Drift Section of Disruption Conceptual Docs]({{}}) for more details. -* `v0.33.x` drops looking up the `zap-logger-config` through ConfigMap discovery. Instead, Karpenter now expects the logging config to be mounted on the filesystem if you are using this to configure Zap logging. This is not enabled by default, but can be enabled through `--set logConfig.enabled=true` in the helm values. If you are setting any values in the `logConfig` from the `v0.32.x` upgrade, such as `logConfig.logEncoding`, note that you will have to explicitly set `logConfig.enabled=true` alongside it. Also, note that setting the Zap logging config is a deprecated feature in beta and is planned to be dropped at v1. View the [Logging Configuration Section of the v1beta1 Migration Guide]({{}}) for more details. -* `v0.33.x` change the default `LOG_LEVEL` from `debug` to `info` by default. If you are still enabling logging configuration through the `zap-logger-config`, no action is required. -* `v0.33.x` drops support for comma delimited lists on tags for `SubnetSelectorTerm`, `SecurityGroupsSelectorTerm`, and `AMISelectorTerm`. Karpenter now supports multiple terms for each of the selectors which means that we can specify a more explicit OR-based constraint through separate terms rather than a comma-delimited list of values. +* `0.33.0` disables mutating and validating webhooks by default in favor of using [Common Expression Language for CRD validation](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation). The Common Expression Language Validation Feature [is enabled by default on EKS 1.25](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-rules). If you are using Kubernetes version >= 1.25, no further action is required.
If you are using a Kubernetes version below 1.25, you now need to set `DISABLE_WEBHOOK=false` in your container environment variables or `--set webhook.enabled=true` if using Helm. View the [Webhook Support Deprecated in Favor of CEL Section of the v1beta1 Migration Guide]({{}}). +* `0.33.0` drops support for passing settings through the `karpenter-global-settings` ConfigMap. You should pass settings through the container environment variables in the Karpenter deployment manifest. View the [Global Settings Section of the v1beta1 Migration Guide]({{}}) for more details. +* `0.33.0` enables `Drift=true` by default in the `FEATURE_GATES`. If you previously didn't enable the feature gate, Karpenter will now check if there is a difference between the desired state of your nodes declared in your NodePool and the actual state of your nodes. View the [Drift Section of Disruption Conceptual Docs]({{}}) for more details. +* `0.33.0` drops looking up the `zap-logger-config` through ConfigMap discovery. Instead, Karpenter now expects the logging config to be mounted on the filesystem if you are using this to configure Zap logging. This is not enabled by default, but can be enabled through `--set logConfig.enabled=true` in the Helm values. If you are setting any values in the `logConfig` from the `0.32.x` upgrade, such as `logConfig.logEncoding`, note that you will have to explicitly set `logConfig.enabled=true` alongside it. Also, note that setting the Zap logging config is a deprecated feature in beta and is planned to be dropped at v1. View the [Logging Configuration Section of the v1beta1 Migration Guide]({{}}) for more details. +* `0.33.0` changes the default `LOG_LEVEL` from `debug` to `info`. If you are still enabling logging configuration through the `zap-logger-config`, no action is required. +* `0.33.0` drops support for comma-delimited lists on tags for `SubnetSelectorTerm`, `SecurityGroupsSelectorTerm`, and `AMISelectorTerm`. Karpenter now supports multiple terms for each of the selectors, which means that we can specify a more explicit OR-based constraint through separate terms rather than a comma-delimited list of values. -### Upgrading to v0.32.0+ +### Upgrading to `0.32.0`+ {{% alert title="Warning" color="warning" %}} -Karpenter v0.32.0 introduces v1beta1 APIs, including _significant_ changes to the API and installation procedures for the Karpenter controllers. **Do not** upgrade to v0.32.0+ without referencing the [v1beta1 Migration Upgrade Procedure]({{}}). +Karpenter `0.32.0` introduces v1beta1 APIs, including _significant_ changes to the API and installation procedures for the Karpenter controllers. **Do not** upgrade to `0.32.0`+ without referencing the [v1beta1 Migration Upgrade Procedure]({{}}). This version includes **dual support** for both alpha and beta APIs to ensure that you can slowly migrate your existing Provisioner, AWSNodeTemplate, and Machine alpha APIs to the newer NodePool, EC2NodeClass, and NodeClaim beta APIs. -Note that if you are rolling back after upgrading to v0.32.0, note that v0.31.4 is the only version that supports handling rollback after you have deployed the v1beta1 APIs to your cluster. +If you are rolling back after upgrading to `0.32.0`, note that `0.31.4` is the only version that supports handling rollback after you have deployed the v1beta1 APIs to your cluster. {{% /alert %}} * Karpenter now serves the webhook Prometheus metrics server on port `8001`.
If this port is already in-use on the pod or you are running in `hostNetworking` mode, you may need to change this port value. You can configure this port value through the `WEBHOOK_METRICS_PORT` environment variable or the `webhook.metrics.port` value if installing via Helm. * Karpenter now exposes the ability to disable webhooks through the `webhook.enabled=false` value. This value will disable the webhook server and will prevent any permissions, mutating or validating webhook configurations from being deployed to the cluster. * Karpenter now moves all logging configuration for the Zap logger into the `logConfig` values block. Configuring Karpenter logging with this mechanism _is_ deprecated and will be dropped at v1. Karpenter now only surfaces logLevel through the `logLevel` Helm value. If you need more advanced configuration due to log parsing constraints, we recommend configuring your log parser to handle Karpenter's Zap JSON logging. -* The default log encoding changed from `console` to `json`. If you were previously not setting the type of log encoding, this default will change with the helm chart. If you were setting the value through `logEncoding`, this value will continue to work until v0.33.x but it is deprecated in favor of `logConfig.logEncoding` +* The default log encoding changed from `console` to `json`. If you were previously not setting the type of log encoding, this default will change with the Helm chart. If you were setting the value through `logEncoding`, this value will continue to work until `0.33.x`, but it is deprecated in favor of `logConfig.logEncoding`. * Karpenter now uses the `karpenter.sh/disruption:NoSchedule=disrupting` taint instead of the upstream `node.kubernetes.io/unschedulable` taint for nodes spawned with a NodePool to prevent pods from scheduling to nodes being disrupted. Pods that tolerated the `node.kubernetes.io/unschedulable` taint and previously weren't evicted during termination will now be evicted. This most notably affects DaemonSets, which have the `node.kubernetes.io/unschedulable` toleration by default; Karpenter will now remove these pods during termination. If you want your specific pods to not be evicted when nodes are scaled down, you should add a toleration to the pods with the following: `Key=karpenter.sh/disruption, Effect=NoSchedule, Operator=Equals, Values=disrupting`. * Note: Karpenter will continue to use the old `node.kubernetes.io/unschedulable` taint for nodes spawned with a Provisioner. -### Upgrading to v0.31.0+ +### Upgrading to `0.31.0`+ * Karpenter moved its `securityContext` constraints from pod-wide to applying to the Karpenter container exclusively. If you were previously relying on the pod-wide `securityContext` for your sidecar containers, you will now need to set these values explicitly in your sidecar container configuration. -### Upgrading to v0.30.0+ +### Upgrading to `0.30.0`+ * Karpenter will now [statically drift]({{}}) on both Provisioner and AWSNodeTemplate Fields. For Provisioner Static Drift, the `karpenter.sh/provisioner-hash` annotation must be present on both the Provisioner and Machine. For AWSNodeTemplate drift, the `karpenter.k8s.aws/nodetemplate-hash` annotation must be present on the AWSNodeTemplate and Machine. Karpenter will not add these annotations to pre-existing nodes, so each of these nodes will need to be recycled one time for the annotations to be added.
* Karpenter will now fail validation on AWSNodeTemplates and Provisioner `spec.provider` that have `amiSelectors`, `subnetSelectors`, or `securityGroupSelectors` set with a combination of id selectors (`aws-ids`, `aws::ids`) and other selectors. -* Karpenter now statically sets the `securityContext` at both the pod and container-levels and doesn't allow override values to be passed through the helm chart. This change was made to adhere to [Restricted Pod Security Standard](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted), which follows pod hardening best practices. +* Karpenter now statically sets the `securityContext` at both the pod and container-levels and doesn't allow override values to be passed through the Helm chart. This change was made to adhere to [Restricted Pod Security Standard](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted), which follows pod hardening best practices. {{% alert title="Note" color="primary" %}} If you have sidecar containers configured to run alongside Karpenter that cannot tolerate the [pod-wide `securityContext` constraints](https://github.com/aws/karpenter/blob/v0.30.0/charts/karpenter/templates/deployment.yaml#L40), you will need to specify overrides to the sidecar `securityContext` in your deployment. {{% /alert %}} -### Upgrading to v0.29.0+ +### Upgrading to `0.29.0`+ {{% alert title="Warning" color="warning" %}} -Karpenter `v0.29.1` contains a [file descriptor and memory leak bug](https://github.com/aws/karpenter/issues/4296) that leads to Karpenter getting OOMKilled and restarting at the point that it hits its memory or file descriptor limit. Karpenter `>v0.29.2` fixes this leak. +Karpenter `0.29.1` contains a [file descriptor and memory leak bug](https://github.com/aws/karpenter/issues/4296) that leads to Karpenter getting OOMKilled and restarting at the point that it hits its memory or file descriptor limit. Karpenter `0.29.2`+ fixes this leak. {{% /alert %}} -* Karpenter has changed the default metrics service port from 8080 to 8000 and the default webhook service port from 443 to 8443. In `v0.28.0`, the Karpenter pod port was changed to 8000, but referenced the service by name, allowing users to scrape the service at port 8080 for metrics. `v0.29.0` aligns the two ports so that service and pod metrics ports are the same. These ports are set by the `controller.metrics.port` and `webhook.port` helm chart values, so if you have previously set these to non-default values, you may need to update your Prometheus scraper to match these new values. +* Karpenter has changed the default metrics service port from 8080 to 8000 and the default webhook service port from 443 to 8443. In `0.28.0`, the Karpenter pod port was changed to 8000, but referenced the service by name, allowing users to scrape the service at port 8080 for metrics. `0.29.0` aligns the two ports so that service and pod metrics ports are the same. These ports are set by the `controller.metrics.port` and `webhook.port` Helm chart values, so if you have previously set these to non-default values, you may need to update your Prometheus scraper to match these new values. * Karpenter will now reconcile nodes that are drifted due to their Security Groups or their Subnets. If your AWSNodeTemplate's Security Groups differ from the Security Groups used for an instance, Karpenter will consider it drifted. 
If the Subnet used by an instance is not contained in the allowed list of Subnets for an AWSNodeTemplate, Karpenter will also consider it drifted. * Since Karpenter uses tags for discovery of Subnets and SecurityGroups, check the [Threat Model]({{}}) to see how to manage this IAM Permission. -### Upgrading to v0.28.0+ +### Upgrading to `0.28.0`+ {{% alert title="Warning" color="warning" %}} -Karpenter `v0.28.0` is incompatible with Kubernetes version 1.26+, which can result in additional node scale outs when using `--cloudprovider=external`, which is the default for the EKS Optimized AMI. See: https://github.com/aws/karpenter-core/pull/375. Karpenter `>v0.28.1` fixes this issue and is compatible with Kubernetes version 1.26+. +Karpenter `0.28.0` is incompatible with Kubernetes version 1.26+, which can result in additional node scale outs when using `--cloudprovider=external`, which is the default for the EKS Optimized AMI. See: https://github.com/aws/karpenter-core/pull/375. Karpenter `0.28.1`+ fixes this issue and is compatible with Kubernetes version 1.26+. {{% /alert %}} -* The `extraObjects` value is now removed from the Helm chart. Having this value in the chart proved to not work in the majority of Karpenter installs and often led to anti-patterns, where the Karpenter resources installed to manage Karpenter's capacity were directly tied to the install of the Karpenter controller deployments. The Karpenter team recommends that, if you want to install Karpenter manifests alongside the Karpenter helm chart, to do so by creating a separate chart for the manifests, creating a dependency on the controller chart. +* The `extraObjects` value is now removed from the Helm chart. Having this value in the chart proved to not work in the majority of Karpenter installs and often led to anti-patterns, where the Karpenter resources installed to manage Karpenter's capacity were directly tied to the install of the Karpenter controller deployments. The Karpenter team recommends that, if you want to install Karpenter manifests alongside the Karpenter Helm chart, you do so by creating a separate chart for the manifests that declares a dependency on the controller chart. * The `aws.nodeNameConvention` setting is now removed from the [`karpenter-global-settings`]({{}}) ConfigMap. Because Karpenter is now driving its orchestration of capacity through Machines, it no longer needs to know the node name, making this setting obsolete. Karpenter ignores configuration that it doesn't recognize in the [`karpenter-global-settings`]({{}}) ConfigMap, so leaving the `aws.nodeNameConvention` in the ConfigMap will simply cause this setting to be ignored. * Karpenter now defines a set of "restricted tags" which can't be overridden with custom tagging in the AWSNodeTemplate or in the [`karpenter-global-settings`]({{}}) ConfigMap. If you are currently using any of these tag overrides when tagging your instances, webhook validation will now fail. These tags include: @@ -133,7 +141,7 @@ Karpenter `v0.28.0` is incompatible with Kubernetes version 1.26+, which can res * `karpenter_nodes_terminated`: Use `karpenter_machines_terminated` if you are interested in the reason why a Karpenter machine was deleted. `karpenter_nodes_terminated` now only tracks the count of terminated nodes without any additional labels. * `karpenter_nodes_created`: Use `karpenter_machines_created` if you are interested in the reason why a Karpenter machine was created.
`karpenter_nodes_created` now only tracks the count of created nodes without any additional labels.
* `karpenter_deprovisioning_replacement_node_initialized_seconds`: This metric has been replaced in favor of `karpenter_deprovisioning_replacement_machine_initialized_seconds`.
-* `v0.28.0` introduces the Machine CustomResource into the `karpenter.sh` API Group and requires this CustomResourceDefinition to run properly. Karpenter now orchestrates its CloudProvider capacity through these in-cluster Machine CustomResources. When performing a scheduling decision, Karpenter will create a Machine, resulting in launching CloudProvider capacity. The kubelet running on the new capacity will then register the node to the cluster shortly after launch.
+* `0.28.0` introduces the Machine CustomResource into the `karpenter.sh` API Group and requires this CustomResourceDefinition to run properly. Karpenter now orchestrates its CloudProvider capacity through these in-cluster Machine CustomResources. When making a scheduling decision, Karpenter will create a Machine, which results in launching CloudProvider capacity. The kubelet running on the new capacity will then register the node to the cluster shortly after launch.
* If you are using Helm to upgrade between versions of Karpenter, note that [Helm does not automate the process of upgrading or installing the new CRDs into your cluster](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations). To install or upgrade the existing CRDs, follow the guidance under the [Custom Resource Definition (CRD) Upgrades]({{< relref "#custom-resource-definition-crd-upgrades" >}}) section of the upgrade guide.
* On startup, Karpenter will hydrate Machines into the cluster for existing capacity that it manages. Existing capacity launched by an older version of Karpenter is discovered by finding CloudProvider capacity with the `karpenter.sh/provisioner-name` tag or the `karpenter.sh/provisioner-name` label on nodes.
* The metrics port for the Karpenter deployment was changed from 8080 to 8000. Users who scrape the pod directly for metrics rather than the service will need to adjust the commands they use to reference port 8000. Any users who scrape metrics from the service should be unaffected.
@@ -149,19 +157,21 @@ Because Karpenter takes this dependency, any user that has the ability to Create
{{% /alert %}}
{{% alert title="Rolling Back" color="warning" %}}
-If, after upgrading to `v0.28.0+`, a rollback to an older version of Karpenter needs to be performed, Karpenter will continue to function normally, though you will still have the Machine CustomResources on your cluster. You will need to manually delete the Machines and patch out the finalizers to fully complete the rollback.
+If, after upgrading to `0.28.0`+, you need to roll back to an older version of Karpenter, Karpenter will continue to function normally, though you will still have the Machine CustomResources on your cluster. You will need to manually delete the Machines and patch out the finalizers to fully complete the rollback.
-Karpenter marks CloudProvider capacity as "managed by" a Machine using the `karpenter-sh/managed-by` tag on the CloudProvider machine. It uses this tag to ensure that the Machine CustomResources in the cluster match the CloudProvider capacity managed by Karpenter. If these states don't match, Karpenter will garbage collect the capacity.
Because of this, if performing an upgrade, followed by a rollback, followed by another upgrade to `v0.28.0+`, ensure you remove the `karpenter.sh/managed-by` tags from existing capacity; otherwise, Karpenter will deprovision the capacity without a Machine CR counterpart.
+Karpenter marks CloudProvider capacity as "managed by" a Machine using the `karpenter.sh/managed-by` tag on the CloudProvider machine. It uses this tag to ensure that the Machine CustomResources in the cluster match the CloudProvider capacity managed by Karpenter. If these states don't match, Karpenter will garbage collect the capacity. Because of this, if performing an upgrade, followed by a rollback, followed by another upgrade to `0.28.0`+, ensure you remove the `karpenter.sh/managed-by` tags from existing capacity; otherwise, Karpenter will deprovision the capacity without a Machine CR counterpart.
{{% /alert %}}
-### Upgrading to v0.27.3+
-* The `defaulting.webhook.karpenter.sh` mutating webhook was removed in `v0.27.3`. If you are coming from an older version of Karpenter where this webhook existed and the webhook was not managed by Helm, you may need to delete the stale webhook.
+### Upgrading to `0.27.3`+
+
+* The `defaulting.webhook.karpenter.sh` mutating webhook was removed in `0.27.3`. If you are coming from an older version of Karpenter where this webhook existed and the webhook was not managed by Helm, you may need to delete the stale webhook.
```bash
kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh
```
-### Upgrading to v0.27.0+
+### Upgrading to `0.27.0`+
+
* The Karpenter controller pods now deploy with `kubernetes.io/hostname` self anti-affinity by default. If you are running Karpenter in HA (high-availability) mode and you do not have enough nodes to match the number of pod replicas you are deploying with, you will need to scale out your nodes for Karpenter.
* The following controller metrics changed and moved under the `controller_runtime` metrics namespace:
* `karpenter_metricscraper_...`
@@ -178,27 +188,33 @@ kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh
* `provisioner-state` -> `provisioner_state`
* The `karpenter_allocation_controller_scheduling_duration_seconds` metric name changed to `karpenter_provisioner_scheduling_duration_seconds`
-### Upgrading to v0.26.0+
+### Upgrading to `0.26.0`+
+
* The `karpenter.sh/do-not-evict` annotation no longer blocks node termination when running `kubectl delete node`. This annotation on pods will only block automatic deprovisioning that is considered "voluntary," that is, disruptions that can be avoided. Disruptions that Karpenter deems "involuntary," and for which it will ignore the `karpenter.sh/do-not-evict` annotation, include spot interruption and manual deletion of the node. See [Disabling Deprovisioning]({{}}) for more details.
-* Default resources `requests` and `limits` are removed from the Karpenter's controller deployment through the Helm chart. If you have not set custom resource `requests` or `limits` in your helm values and are using Karpenter's defaults, you will now need to set these values in your helm chart deployment.
-* The `controller.image` value in the helm chart has been broken out to a map consisting of `controller.image.repository`, `controller.image.tag`, and `controller.image.digest`. If manually overriding the `controller.image`, you will need to update your values to the new design.
+* Default resources `requests` and `limits` are removed from Karpenter's controller deployment through the Helm chart. If you have not set custom resource `requests` or `limits` in your Helm values and are using Karpenter's defaults, you will now need to set these values in your Helm chart deployment.
+* The `controller.image` value in the Helm chart has been broken out to a map consisting of `controller.image.repository`, `controller.image.tag`, and `controller.image.digest`. If manually overriding the `controller.image`, you will need to update your values to the new design.
+
+### Upgrading to `0.25.0`+
-### Upgrading to v0.25.0+
* Cluster Endpoint can now be automatically discovered. If you are using Amazon Elastic Kubernetes Service (EKS), you can now omit the `clusterEndpoint` field in your configuration. To allow this discovery, you must add the `eks:DescribeCluster` permission to the Karpenter Controller IAM role.
-### Upgrading to v0.24.0+
+### Upgrading to `0.24.0`+
+
* Settings are no longer updated dynamically while Karpenter is running. If you manually make a change to the [`karpenter-global-settings`]({{}}) ConfigMap, you will need to reload the containers by restarting the deployment with `kubectl rollout restart -n karpenter deploy/karpenter`
* Karpenter no longer filters out instance types internally. Previously, `g2` (not supported by the NVIDIA device plugin) and FPGA instance types were filtered. The only way to filter instance types now is to set requirements on your provisioner or pods using well-known node labels described [here]({{}}). If you are currently using overly broad requirements that allow all of the `g` instance category, you will want to tighten the requirement, or add an instance-generation requirement.
* `aws.tags` in [`karpenter-global-settings`]({{}}) ConfigMap is now a top-level field and expects the value associated with this key to be a JSON object of string to string. This is a change from previous versions, where keys were given implicitly by providing the key-value pair `aws.tags.<key>: value` in the ConfigMap.
-### Upgrading to v0.22.0+
+### Upgrading to `0.22.0`+
+
* Do not upgrade to this version unless you are on Kubernetes >= v1.21. Karpenter no longer supports Kubernetes v1.20, but now supports Kubernetes v1.25. This change is due to the v1 PDB API, which was introduced in K8s v1.21, and the subsequent removal of the v1beta1 API in K8s v1.25.
-### Upgrading to v0.20.0+
-* Prior to v0.20.0, Karpenter would prioritize certain instance type categories absent of any requirements in the Provisioner. v0.20.0+ removes prioritizing these instance type categories ("m", "c", "r", "a", "t", "i") in code. Bare Metal and GPU instance types are still deprioritized and only used if no other instance types are compatible with the node requirements. Since Karpenter does not prioritize any instance types, if you do not want exotic instance types and are not using the runtime Provisioner defaults, you will need to specify this in the Provisioner.
+### Upgrading to `0.20.0`+
+
+* Prior to `0.20.0`, Karpenter would prioritize certain instance type categories absent of any requirements in the Provisioner. `0.20.0`+ removes prioritizing these instance type categories ("m", "c", "r", "a", "t", "i") in code. Bare Metal and GPU instance types are still deprioritized and only used if no other instance types are compatible with the node requirements.
Since Karpenter does not prioritize any instance types, if you do not want exotic instance types and are not using the runtime Provisioner defaults, you will need to specify this in the Provisioner.
+
+### Upgrading to `0.19.0`+
-### Upgrading to v0.19.0+
-* The karpenter webhook and controller containers are combined into a single binary, which requires changes to the helm chart. If your Karpenter installation (helm or otherwise) currently customizes the karpenter webhook, your deployment tooling may require minor changes.
+* The Karpenter webhook and controller containers are combined into a single binary, which requires changes to the Helm chart. If your Karpenter installation (Helm or otherwise) currently customizes the Karpenter webhook, your deployment tooling may require minor changes.
* Karpenter now supports native interruption handling. If you were previously using Node Termination Handler for spot interruption handling and health events, you will need to remove the component from your cluster before enabling `aws.interruptionQueueName`. For more details on Karpenter's interruption handling, see the [Interruption Handling Docs]({{< ref "../concepts/disruption/#interruption" >}}).
* Instance category defaults are now explicitly persisted in the Provisioner, rather than handled implicitly in memory. By default, Provisioners will limit instance category to c, m, r. If any instance type constraints are applied, they will override this default. If you have created Provisioners in the past with unconstrained instance type, family, or category, Karpenter will now more flexibly use instance types than before. If you would like to apply these constraints, they must be included in the Provisioner CRD.
* Karpenter CRD raw YAML URLs have migrated from `https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.19.3/charts/karpenter/crds/...` to `https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.19.3/pkg/apis/crds/...`. If you reference static Karpenter CRDs or rely on `kubectl replace -f` to apply these CRDs from their remote location, you will need to migrate to the new location.
@@ -214,38 +230,44 @@ kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh
* `AWS_NODE_NAME_CONVENTION` -> `settings.aws.nodeNameConvention`
* `VM_MEMORY_OVERHEAD` -> `settings.aws.vmMemoryOverheadPercent`
-### Upgrading to v0.18.0+
-* v0.18.0 removes the `karpenter_consolidation_nodes_created` and `karpenter_consolidation_nodes_terminated` prometheus metrics in favor of the more generic `karpenter_nodes_created` and `karpenter_nodes_terminated` metrics. You can still see nodes created and terminated by consolidation by checking the `reason` label on the metrics. Check out all the metrics published by Karpenter [here]({{}}).
+### Upgrading to `0.18.0`+
+
+* `0.18.0` removes the `karpenter_consolidation_nodes_created` and `karpenter_consolidation_nodes_terminated` Prometheus metrics in favor of the more generic `karpenter_nodes_created` and `karpenter_nodes_terminated` metrics. You can still see nodes created and terminated by consolidation by checking the `reason` label on the metrics. Check out all the metrics published by Karpenter [here]({{}}).
+
+### Upgrading to `0.17.0`+
-### Upgrading to v0.17.0+
Karpenter's Helm chart package is now stored in [Karpenter's OCI (Open Container Initiative) registry](https://gallery.ecr.aws/karpenter/karpenter). The Helm CLI supports the new format since [v3.8.0+](https://helm.sh/docs/topics/registries/).
-With this change [charts.karpenter.sh](https://charts.karpenter.sh/) is no longer updated but preserved to allow using older Karpenter versions. For examples on working with the Karpenter helm charts look at [Install Karpenter Helm Chart]({{< ref "../getting-started/getting-started-with-karpenter/#install-karpenter-helm-chart" >}}).
+With this change, [charts.karpenter.sh](https://charts.karpenter.sh/) is no longer updated but is preserved to allow using older Karpenter versions. For examples of working with the Karpenter Helm charts, see [Install Karpenter Helm Chart]({{< ref "../getting-started/getting-started-with-karpenter/#install-karpenter-helm-chart" >}}).
Users who have scripted the installation or upgrading of Karpenter need to adjust their scripts with the following changes:
-1. There is no longer a need to add the Karpenter helm repo to helm
-2. The full URL of the Helm chart needs to be present when using the helm commands
-3. If you were not prepending a `v` to the version (i.e. `0.17.0`), you will need to do so with the OCI chart, `v0.17.0`.
+1. There is no longer a need to add the Karpenter Helm repo with `helm repo add`
+2. The full URL of the Helm chart needs to be present when using the `helm` CLI
+3. If you were not prepending a `v` to the version (e.g. `0.17.0`), you will need to do so with the OCI chart (e.g. `v0.17.0`).
+
+### Upgrading to `0.16.2`+
-### Upgrading to v0.16.2+
-* v0.16.2 adds new kubeletConfiguration fields to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters:
+* `0.16.2` adds new kubeletConfiguration fields to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters:
```bash
kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.16.2/charts/karpenter/crds/karpenter.sh_provisioners.yaml
```
-### Upgrading to v0.16.0+
-* v0.16.0 adds a new weight field to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters:
+### Upgrading to `0.16.0`+
+
+* `0.16.0` adds a new weight field to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters:
```bash
kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.16.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml
```
-### Upgrading to v0.15.0+
-* v0.15.0 adds a new consolidation field to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters:
+### Upgrading to `0.15.0`+
+
+* `0.15.0` adds a new consolidation field to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters:
```bash
kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.15.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml
```
-### Upgrading to v0.14.0+
-* v0.14.0 adds new fields to the `provisioners.karpenter.sh` v1alpha5 and `awsnodetemplates.karpenter.k8s.aws` v1alpha1 CRDs. The CRDs will need to be updated to use the new parameters:
+### Upgrading to `0.14.0`+
+
+* `0.14.0` adds new fields to the `provisioners.karpenter.sh` v1alpha5 and `awsnodetemplates.karpenter.k8s.aws` v1alpha1 CRDs.
The CRDs will need to be updated to use the new parameters: ```bash kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.14.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml @@ -253,7 +275,7 @@ kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/ kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.14.0/charts/karpenter/crds/karpenter.k8s.aws_awsnodetemplates.yaml ``` -* v0.14.0 changes the way Karpenter discovers its dynamically generated AWS launch templates to use a tag rather than a Name scheme. The previous name scheme was `Karpenter-${CLUSTER_NAME}-*` which could collide with user created launch templates that Karpenter should not manage. The new scheme uses a tag on the launch template `karpenter.k8s.aws/cluster: ${CLUSTER_NAME}`. As a result, Karpenter will not clean-up dynamically generated launch templates using the old name scheme. You can manually clean these up with the following commands: +* `0.14.0` changes the way Karpenter discovers its dynamically generated AWS launch templates to use a tag rather than a Name scheme. The previous name scheme was `Karpenter-${CLUSTER_NAME}-*` which could collide with user created launch templates that Karpenter should not manage. The new scheme uses a tag on the launch template `karpenter.k8s.aws/cluster: ${CLUSTER_NAME}`. As a result, Karpenter will not clean-up dynamically generated launch templates using the old name scheme. You can manually clean these up with the following commands: ```bash ## Find launch templates that match the naming pattern and you do not want to keep @@ -263,52 +285,54 @@ aws ec2 describe-launch-templates --filters="Name=launch-template-name,Values=Ka aws ec2 delete-launch-template --launch-template-id ``` -* v0.14.0 introduces additional instance type filtering if there are no `node.kubernetes.io/instance-type` or `karpenter.k8s.aws/instance-family` or `karpenter.k8s.aws/instance-category` requirements that restrict instance types specified on the provisioner. This prevents Karpenter from launching bare metal and some older non-current generation instance types unless the provisioner has been explicitly configured to allow them. If you specify an instance type or family requirement that supplies a list of instance-types or families, that list will be used regardless of filtering. The filtering can also be completely eliminated by adding an `Exists` requirement for instance type or family. +* `0.14.0` introduces additional instance type filtering if there are no `node.kubernetes.io/instance-type` or `karpenter.k8s.aws/instance-family` or `karpenter.k8s.aws/instance-category` requirements that restrict instance types specified on the provisioner. This prevents Karpenter from launching bare metal and some older non-current generation instance types unless the provisioner has been explicitly configured to allow them. If you specify an instance type or family requirement that supplies a list of instance-types or families, that list will be used regardless of filtering. The filtering can also be completely eliminated by adding an `Exists` requirement for instance type or family. ```yaml - key: node.kubernetes.io/instance-type operator: Exists ``` -* v0.14.0 introduces support for custom AMIs without the need for an entire launch template. You must add the `ec2:DescribeImages` permission to the Karpenter Controller Role for this feature to work. This permission is needed for Karpenter to discover custom images specified. 
Read the [Custom AMI documentation here]({{}}) to get started
-* v0.14.0 adds an an additional default toleration (CriticalAddonOnly=Exists) to the Karpenter helm chart. This may cause Karpenter to run on nodes with that use this Taint which previously would not have been schedulable. This can be overridden by using `--set tolerations[0]=null`.
+* `0.14.0` introduces support for custom AMIs without the need for an entire launch template. You must add the `ec2:DescribeImages` permission to the Karpenter Controller Role for this feature to work. This permission is needed for Karpenter to discover the custom images specified. Read the [Custom AMI documentation here]({{}}) to get started.
+* `0.14.0` adds an additional default toleration (CriticalAddonsOnly=Exists) to the Karpenter Helm chart. This may cause Karpenter to run on nodes that use this taint, which previously would not have been schedulable. This can be overridden by using `--set tolerations[0]=null`.
-* v0.14.0 deprecates the `AWS_ENI_LIMITED_POD_DENSITY` environment variable in-favor of specifying `spec.kubeletConfiguration.maxPods` on the Provisioner. `AWS_ENI_LIMITED_POD_DENSITY` will continue to work when `maxPods` is not set on the Provisioner. If `maxPods` is set, it will override `AWS_ENI_LIMITED_POD_DENSITY` on that specific Provisioner.
+* `0.14.0` deprecates the `AWS_ENI_LIMITED_POD_DENSITY` environment variable in favor of specifying `spec.kubeletConfiguration.maxPods` on the Provisioner. `AWS_ENI_LIMITED_POD_DENSITY` will continue to work when `maxPods` is not set on the Provisioner. If `maxPods` is set, it will override `AWS_ENI_LIMITED_POD_DENSITY` on that specific Provisioner.
-### Upgrading to v0.13.0+
-* v0.13.0 introduces a new CRD named `AWSNodeTemplate` which can be used to specify AWS Cloud Provider parameters. Everything that was previously specified under `spec.provider` in the Provisioner resource, can now be specified in the spec of the new resource. The use of `spec.provider` is deprecated but will continue to function to maintain backwards compatibility for the current API version (v1alpha5) of the Provisioner resource. v0.13.0 also introduces support for custom user data that doesn't require the use of a custom launch template. The user data can be specified in-line in the AWSNodeTemplate resource.
+### Upgrading to `0.13.0`+
- If you are upgrading from v0.10.1 - v0.11.1, a new CRD `awsnodetemplate` was added. In v0.12.0, this crd was renamed to `awsnodetemplates`. Since helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade:
+* `0.13.0` introduces a new CRD named `AWSNodeTemplate` which can be used to specify AWS Cloud Provider parameters. Everything that was previously specified under `spec.provider` in the Provisioner resource can now be specified in the spec of the new resource. The use of `spec.provider` is deprecated but will continue to function to maintain backwards compatibility for the current API version (v1alpha5) of the Provisioner resource. `0.13.0` also introduces support for custom user data that doesn't require the use of a custom launch template. The user data can be specified in-line in the AWSNodeTemplate resource.
+
  If you are upgrading from `0.10.1` - `0.11.1`, a new CRD `awsnodetemplate` was added. In `0.12.0`, this CRD was renamed to `awsnodetemplates`. Since Helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade:
  1. Make sure any `awsnodetemplate` manifests are saved somewhere so that they can be reapplied to the cluster.
  2. `kubectl delete crd awsnodetemplate`
  3. `kubectl apply -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.13.2/charts/karpenter/crds/karpenter.k8s.aws_awsnodetemplates.yaml`
-  4. Perform the Karpenter upgrade to v0.13.x, which will install the new `awsnodetemplates` CRD.
+  4. Perform the Karpenter upgrade to `0.13.0`+, which will install the new `awsnodetemplates` CRD.
  5. Reapply the `awsnodetemplate` manifests you saved from step 1, if applicable.
-* v0.13.0 also adds EC2/spot price fetching to Karpenter to allow making more accurate decisions regarding node deployments. Our [getting started guide]({{< ref "../getting-started/getting-started-with-karpenter" >}}) documents this, but if you are upgrading Karpenter you will need to modify your Karpenter controller policy to add the `pricing:GetProducts` and `ec2:DescribeSpotPriceHistory` permissions.
+* `0.13.0` also adds EC2/spot price fetching to Karpenter to allow making more accurate decisions regarding node deployments. Our [getting started guide]({{< ref "../getting-started/getting-started-with-karpenter" >}}) documents this, but if you are upgrading Karpenter you will need to modify your Karpenter controller policy to add the `pricing:GetProducts` and `ec2:DescribeSpotPriceHistory` permissions.
+
+### Upgrading to `0.12.0`+
-### Upgrading to v0.12.0+
-* v0.12.0 adds an OwnerReference to each Node created by a provisioner. Previously, deleting a provisioner would orphan nodes. Now, deleting a provisioner will cause Kubernetes [cascading delete](https://kubernetes.io/docs/concepts/architecture/garbage-collection/#cascading-deletion) logic to gracefully terminate the nodes using the Karpenter node finalizer. You may still orphan nodes by removing the owner reference.
-* If you are upgrading from v0.10.1 - v0.11.1, a new CRD `awsnodetemplate` was added. In v0.12.0, this crd was renamed to `awsnodetemplates`. Since helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade:
+* `0.12.0` adds an OwnerReference to each Node created by a provisioner. Previously, deleting a provisioner would orphan nodes. Now, deleting a provisioner will cause Kubernetes [cascading delete](https://kubernetes.io/docs/concepts/architecture/garbage-collection/#cascading-deletion) logic to gracefully terminate the nodes using the Karpenter node finalizer. You may still orphan nodes by removing the owner reference.
+* If you are upgrading from `0.10.1` - `0.11.1`, a new CRD `awsnodetemplate` was added. In `0.12.0`, this CRD was renamed to `awsnodetemplates`. Since Helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade:
  1. Make sure any `awsnodetemplate` manifests are saved somewhere so that they can be reapplied to the cluster.
  2. `kubectl delete crd awsnodetemplate`
  3. `kubectl apply -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.12.1/charts/karpenter/crds/karpenter.k8s.aws_awsnodetemplates.yaml`
-  4. Perform the Karpenter upgrade to v0.12.x, which will install the new `awsnodetemplates` CRD.
+  4. Perform the Karpenter upgrade to `0.12.0`+, which will install the new `awsnodetemplates` CRD.
  5. Reapply the `awsnodetemplate` manifests you saved from step 1, if applicable.
-### Upgrading to v0.11.0+
+### Upgrading to `0.11.0`+
-v0.11.0 changes the way that the `vpc.amazonaws.com/pod-eni` resource is reported.
Instead of being reported for all nodes that could support the resources regardless of if the cluster is configured to support it, it is now controlled by a command line flag or environment variable. The parameter defaults to false and must be set if your cluster uses [security groups for pods](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html). This can be enabled by setting the environment variable `AWS_ENABLE_POD_ENI` to true via the helm value `controller.env`.
+`0.11.0` changes the way that the `vpc.amazonaws.com/pod-eni` resource is reported. Instead of being reported for all nodes that could support the resources regardless of whether the cluster is configured to support it, it is now controlled by a command line flag or environment variable. The parameter defaults to false and must be set if your cluster uses [security groups for pods](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html). This can be enabled by setting the environment variable `AWS_ENABLE_POD_ENI` to true via the Helm value `controller.env` (see the values sketch at the end of this guide).
Other extended resources must be registered on nodes by their respective device plugins, which are typically installed as DaemonSets (e.g. the `nvidia.com/gpu` resource will be registered by the [NVIDIA device plugin](https://github.com/NVIDIA/k8s-device-plugin)). Previously, Karpenter would register these resources on nodes at creation and they would be zeroed out by `kubelet` at startup. By allowing the device plugins to register the resources, pods will not bind to the nodes before any device plugin initialization has occurred.
-v0.11.0 adds a `providerRef` field in the Provisioner CRD. To use this new field you will need to replace the Provisioner CRD manually:
+`0.11.0` adds a `providerRef` field in the Provisioner CRD. To use this new field, you will need to replace the Provisioner CRD manually:
```shell
kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.11.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml
```
-### Upgrading to v0.10.0+
+### Upgrading to `0.10.0`+
-v0.10.0 adds a new field, `startupTaints` to the provisioner spec. Standard Helm upgrades [do not upgrade CRDs](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations) so the field will not be available unless the CRD is manually updated. This can be performed prior to the standard upgrade by applying the new CRD manually:
+`0.10.0` adds a new field, `startupTaints`, to the provisioner spec. Standard Helm upgrades [do not upgrade CRDs](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations), so the field will not be available unless the CRD is manually updated. This can be performed prior to the standard upgrade by applying the new CRD manually:
```shell
kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.10.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml
@@ -316,7 +340,7 @@ kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/
📝 If you don't perform this manual CRD update, Karpenter will work correctly except for rejecting the creation/update of provisioners that use `startupTaints`.
-### Upgrading to v0.6.2+
+### Upgrading to `0.6.2`+
If using Helm, the variable names have changed for the cluster's name and endpoint. You may need to update any configuration that sets the old variable names.
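As referenced in the `0.11.0` notes above, here is a minimal Helm values sketch for setting `AWS_ENABLE_POD_ENI` through `controller.env` (set this only if your cluster uses security groups for pods; the exact values layout may vary by chart version):

```yaml
# values.yaml sketch: expose the vpc.amazonaws.com/pod-eni extended resource
controller:
  env:
    - name: AWS_ENABLE_POD_ENI
      value: "true"
```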
diff --git a/website/content/en/preview/reference/instance-types.md b/website/content/en/preview/reference/instance-types.md index 64aeaf1f1174..6978c20ef258 100644 --- a/website/content/en/preview/reference/instance-types.md +++ b/website/content/en/preview/reference/instance-types.md @@ -11411,6 +11411,30 @@ below are the resources available with some assumptions and after the instance o |pods|737| |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| +### `m7gd.metal` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|m| + |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|m7gd| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|| + |karpenter.k8s.aws/instance-local-nvme|3800| + |karpenter.k8s.aws/instance-memory|262144| + |karpenter.k8s.aws/instance-size|metal| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|m7gd.metal| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|63770m| + |ephemeral-storage|17Gi| + |memory|233962Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| ## m7i Family ### `m7i.large` #### Labels diff --git a/website/content/en/v0.31/concepts/_index.md b/website/content/en/v0.31/concepts/_index.md deleted file mode 100755 index fec50b86b6d7..000000000000 --- a/website/content/en/v0.31/concepts/_index.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: "Concepts" -linkTitle: "Concepts" -weight: 10 -description: > - Understand key concepts of Karpenter ---- - -Users fall under two basic roles: Kubernetes cluster administrators and application developers. -This document describes Karpenter concepts through the lens of those two types of users. - -## Cluster administrator - -As a Kubernetes cluster administrator, you can engage with Karpenter to: - -* Install Karpenter -* Configure provisioners to set constraints and other features for managing nodes -* Deprovision nodes -* Upgrade nodes - -Concepts associated with this role are described below. - - -### Installing Karpenter - -Karpenter is designed to run on a node in your Kubernetes cluster. -As part of the installation process, you need credentials from the underlying cloud provider to allow nodes to be started up and added to the cluster as they are needed. - -[Getting Started with Karpenter on AWS](../getting-started) -describes the process of installing Karpenter on an AWS cloud provider. -Because requests to add and delete nodes and schedule pods are made through Kubernetes, AWS IAM Roles for Service Accounts (IRSA) are needed by your Kubernetes cluster to make privileged requests to AWS. -For example, Karpenter uses AWS IRSA roles to grant the permissions needed to describe EC2 instance types and create EC2 instances. - -Once privileges are in place, Karpenter is deployed with a Helm chart. - -### Configuring provisioners - -Karpenter's job is to add nodes to handle unschedulable pods, schedule pods on those nodes, and remove the nodes when they are not needed. -To configure Karpenter, you create *provisioners* that define how Karpenter manages unschedulable pods and expires nodes. -Here are some things to know about the Karpenter provisioner: - -* **Unschedulable pods**: Karpenter only attempts to schedule pods that have a status condition `Unschedulable=True`, which the kube scheduler sets when it fails to schedule the pod to existing capacity. 
-
-* **Provisioner CR**: Karpenter defines a Custom Resource called a Provisioner to specify provisioning configuration.
-Each provisioner manages a distinct set of nodes, but pods can be scheduled to any provisioner that supports its scheduling constraints.
-A provisioner contains constraints that impact the nodes that can be provisioned and attributes of those nodes (such as timers for removing nodes).
-See [Provisioning]({{}}) docs for a description of settings and provisioner examples.
-
-* **Well-known labels**: The provisioner can use well-known Kubernetes labels to allow pods to request only certain instance types, architectures, operating systems, or other attributes when creating nodes.
-See [Well-Known Labels, Annotations and Taints](https://kubernetes.io/docs/reference/labels-annotations-taints/) for details.
-Keep in mind that only a subset of these labels is supported in Karpenter, as described later.
-
-* **Deprovisioning nodes**: A provisioner can also include time-to-live values to indicate when nodes should be deprovisioned after a set amount of time from when they were created or after they become empty of deployed pods.
-
-* **Multiple provisioners**: Multiple provisioners can be configured on the same cluster.
-For example, you might want to configure different teams on the same cluster to run on completely separate capacity.
-One team could run on nodes using BottleRocket, while another uses EKSOptimizedAMI.
-
-Although most use cases are addressed with a single provisioner for multiple teams, multiple provisioners are useful to isolate nodes for billing, use different node constraints (such as no GPUs for a team), or use different deprovisioning settings.
-
-### Deprovisioning nodes
-
-Karpenter deletes nodes when they are no longer needed.
-
-* **Finalizer**: Karpenter places a finalizer bit on each node it creates.
-When a request comes in to delete one of those nodes (such as a TTL or a manual `kubectl delete node`), Karpenter will cordon the node, drain all the pods, terminate the EC2 instance, and delete the node object.
-Karpenter handles all clean-up work needed to properly delete the node.
-* **Node Expiry**: If a node expiry time-to-live value (`ttlSecondsUntilExpired`) is reached, that node is drained of pods and deleted (even if it is still running workloads).
-* **Empty nodes**: When the last workload pod running on a Karpenter-managed node is gone, the node is annotated with an emptiness timestamp.
-Once that "node empty" time-to-live (`ttlSecondsAfterEmpty`) is reached, finalization is triggered.
-* **Consolidation**: If enabled, Karpenter will work to actively reduce cluster cost by identifying when nodes can be removed as their workloads will run on other nodes in the cluster and when nodes can be replaced with cheaper variants due to a change in the workloads.
-* **Interruption**: If enabled, Karpenter will watch for upcoming involuntary interruption events that could affect your nodes (health events, spot interruption, etc.) and will cordon, drain, and terminate the node(s) ahead of the event to reduce workload disruption.
-
-For more details on how Karpenter deletes nodes, see [Deprovisioning nodes](./deprovisioning).
-
-### Upgrading nodes
-
-A straightforward way to upgrade nodes is to set `ttlSecondsUntilExpired`.
-Nodes will be terminated after a set period of time and will be replaced with newer nodes using the latest discovered AMI.
-See more in [AWSNodeTemplate](./node-templates).
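A minimal sketch of this expiry-based upgrade approach (the provisioner name `default` is a placeholder and the TTL is illustrative):

```yaml
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
  name: default # placeholder name
spec:
  # Drain and replace nodes after ~30 days so replacements pick up the latest discovered AMI
  ttlSecondsUntilExpired: 2592000
```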
-
-### Constraints
-
-The concept of layered constraints is key to using Karpenter.
-With no constraints defined in provisioners and none requested from pods being deployed, Karpenter chooses from the entire universe of features available to your cloud provider.
-Nodes can be created using any instance type and run in any zone.
-
-An application developer can tighten the constraints that the cluster administrator defined in a provisioner by adding scheduling constraints to their pod spec.
-Refer to the description of Karpenter constraints in the Application Developer section below for details.
-
-### Scheduling
-
-Karpenter launches nodes in response to pods that the Kubernetes scheduler has marked unschedulable. After solving the scheduling constraints, Karpenter launches a machine in your chosen cloud provider.
-Once Karpenter brings up a node, that node is available for the Kubernetes scheduler to schedule pods on as well.
-
-### Cloud provider
-Karpenter makes requests to provision new nodes to the associated cloud provider.
-The first supported cloud provider is AWS, although Karpenter is designed to work with other cloud providers.
-Separating Kubernetes and AWS-specific settings allows Karpenter a clean path to integrating with other cloud providers.
-
-While using Kubernetes well-known labels, the provisioner can set some values that are specific to the cloud provider.
-So, for example, to include a certain instance type, you could use the Kubernetes label `node.kubernetes.io/instance-type`, but set its value to an AWS instance type (such as `m5.large` or `m5.2xlarge`).
-
-### Consolidation
-
-If consolidation is enabled for a provisioner, Karpenter attempts to reduce the overall cost of the nodes launched by that provisioner if workloads have changed in two ways:
-- Node Deletion
-- Node Replacement
-
-To perform these actions, Karpenter simulates all pods being evicted from a candidate node and then looks at the results of the scheduling simulation to determine if those pods can run on a combination of existing nodes in the cluster and a new cheaper node. This operation takes into consideration all scheduling constraints placed on your workloads and provisioners (e.g. taints, tolerations, node selectors, inter-pod affinity, etc.).
-
-If, as a result of the scheduling simulation, all pods can run on existing nodes, the candidate node is simply deleted. If all pods can run on a combination of existing nodes and a cheaper node, we launch the cheaper node and delete the candidate node, which causes the pods to be evicted and re-created by their controllers in order to be rescheduled.
-
-For Node Replacement to work well, your provisioner must allow selecting from a variety of instance types with varying amounts of allocatable resources. Consolidation will only consider launching nodes using instance types which are allowed by your provisioner.
-
-### Interruption
-
-If interruption-handling is enabled for the controller, Karpenter will watch for upcoming involuntary interruption events that would cause disruption to your workloads.
These interruption events include: - -* Spot Interruption Warnings -* Scheduled Change Health Events (Maintenance Events) -* Instance Terminating Events -* Instance Stopping Events - -When Karpenter detects one of these events will occur to your nodes, it automatically cordons, drains, and terminates the node(s) ahead of the interruption event to give the maximum amount of time for workload cleanup prior to compute disruption. This enables scenarios where the `terminationGracePeriod` for your workloads may be long or cleanup for your workloads is critical, and you want enough time to be able to gracefully clean-up your pods. - -{{% alert title="Note" color="warning" %}} -Karpenter publishes Kubernetes events to the node for all events listed above in addition to [__Spot Rebalance Recommendations__](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/rebalance-recommendations.html). Karpenter does not currently support taint, drain, and terminate logic for Spot Rebalance Recommendations. - -If you require handling for Spot Rebalance Recommendations, you can use the [AWS Node Termination Handler (NTH)](https://github.com/aws/aws-node-termination-handler) alongside Karpenter; however, note that the AWS Node Termination Handler cordons and drains nodes on rebalance recommendations, potentially causing more node churn in the cluster than with interruptions alone. Further information can be found in the [Troubleshooting Guide]({{< ref "../troubleshooting#aws-node-termination-handler-nth-interactions" >}}). -{{% /alert %}} - -### Kubernetes cluster autoscaler -Like Karpenter, [Kubernetes Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) is -designed to add nodes when requests come in to run pods that cannot be met by current capacity. -Cluster autoscaler is part of the Kubernetes project, with implementations by most major Kubernetes cloud providers. -By taking a fresh look at provisioning, Karpenter offers the following improvements: - -* **Designed to handle the full flexibility of the cloud**: -Karpenter has the ability to efficiently address the full range of instance types available through AWS. -Cluster autoscaler was not originally built with the flexibility to handle hundreds of instance types, zones, and purchase options. - -* **Group-less node provisioning**: Karpenter manages each instance directly, without use of additional orchestration mechanisms like node groups. -This enables it to retry in milliseconds instead of minutes when capacity is unavailable. -It also allows Karpenter to leverage diverse instance types, availability zones, and purchase options without the creation of hundreds of node groups. - -## Application developer - -As someone deploying pods that might be evaluated by Karpenter, you should know how to request the properties that your pods need of its compute resources. -Karpenter's job is to efficiently assess and choose compute assets based on requests from pod deployments. -These can include basic Kubernetes features or features that are specific to the cloud provider (such as AWS). - -Layered *constraints* are applied when a pod makes requests for compute resources that cannot be met by current capacity. -A pod can specify `nodeAffinity` (to run in a particular zone or instance type) or a `topologySpreadConstraints` spread (to cause a set of pods to be balanced across multiple nodes). 
-The pod can specify a `nodeSelector` to run only on nodes with a particular label and `resource.requests` to ensure that the node has enough available memory. - -The Kubernetes scheduler tries to match those constraints with available nodes. -If the pod is unschedulable, Karpenter creates compute resources that match its needs. -When Karpenter tries to provision a node, it analyzes scheduling constraints before choosing the node to create. - -As long as the requests are not outside of the provisioner's constraints, -Karpenter will look to best match the request, comparing the same well-known labels defined by the pod's scheduling constraints. -Note that if the constraints are such that a match is not possible, the pod will remain unscheduled. - -So, what constraints can you use as an application developer deploying pods that could be managed by Karpenter? - -Kubernetes features that Karpenter supports for scheduling pods include nodeAffinity and [nodeSelector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector). -It also supports [PodDisruptionBudget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/), [topologySpreadConstraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/), and [inter-pod affinity and anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity). - -From the Kubernetes [Well-Known Labels, Annotations and Taints](https://kubernetes.io/docs/reference/labels-annotations-taints/) page, -you can see a full list of Kubernetes labels, annotations and taints that determine scheduling. -Those that are implemented in Karpenter include: - -* **kubernetes.io/arch**: For example, kubernetes.io/arch=amd64 -* **node.kubernetes.io/instance-type**: For example, node.kubernetes.io/instance-type=m3.medium -* **topology.kubernetes.io/zone**: For example, topology.kubernetes.io/zone=us-east-1c - -For more on how, as a developer, you can add constraints to your pod deployment, see [Scheduling](./scheduling/) for details. diff --git a/website/content/en/v0.31/concepts/deprovisioning.md b/website/content/en/v0.31/concepts/deprovisioning.md deleted file mode 100644 index b203f4c420cf..000000000000 --- a/website/content/en/v0.31/concepts/deprovisioning.md +++ /dev/null @@ -1,240 +0,0 @@ ---- -title: "Deprovisioning" -linkTitle: "Deprovisioning" -weight: 4 -description: > - Understand different ways Karpenter deprovisions nodes ---- - -## Control Flow -Karpenter sets a Kubernetes [finalizer](https://kubernetes.io/docs/concepts/overview/working-with-objects/finalizers/) on each node it provisions. -The finalizer blocks deletion of the node object while the Termination Controller cordons and drains the node, before removing the underlying machine. Deprovisioning is triggered by the Deprovisioning Controller, by the user through manual deprovisioning, or through an external system that sends a delete request to the node object. - -### Deprovisioning Controller -Karpenter automatically discovers deprovisionable nodes and spins up replacements when needed. Karpenter deprovisions nodes by executing one [automatic method](#methods) at a time, in order of Expiration, Drift, Emptiness, and then Consolidation. Each method varies slightly but they all follow the standard deprovisioning process: -1. Identify a list of prioritized candidates for the deprovisioning method. 
-  * If there are [pods that cannot be evicted](#pod-eviction) on the node, Karpenter will ignore the node and try deprovisioning it later.
-  * If there are no deprovisionable nodes, continue to the next deprovisioning method.
-2. For each deprovisionable node, execute a scheduling simulation with the pods on the node to find if any replacement nodes are needed.
-3. Cordon the node(s) to prevent pods from scheduling to them.
-4. Pre-spin any replacement nodes needed as calculated in Step (2), and wait for them to become ready.
-  * If a replacement node fails to initialize, un-cordon the node(s), and restart from Step (1), starting at the first deprovisioning method again.
-5. Delete the node(s) and wait for the Termination Controller to gracefully shut down the node(s).
-6. Once the Termination Controller terminates the node, go back to Step (1), starting at the first deprovisioning method again.
-
-### Termination Controller
-When a Karpenter node is deleted, the Karpenter finalizer will block deletion and the APIServer will set the `DeletionTimestamp` on the node, allowing Karpenter to gracefully shut down the node, modeled after [K8s Graceful Node Shutdown](https://kubernetes.io/docs/concepts/architecture/nodes/#graceful-node-shutdown). Karpenter's graceful shutdown process will:
-1. Cordon the node to prevent pods from scheduling to it.
-2. Begin evicting the pods on the node with the [K8s Eviction API](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/) to respect PDBs, while ignoring all daemonset pods and [static pods](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/). Wait for the node to be fully drained before proceeding to Step (3).
-  * While waiting, if the underlying machine for the node no longer exists, remove the finalizer to allow the APIServer to delete the node, completing termination.
-3. Terminate the machine in the Cloud Provider.
-4. Remove the finalizer from the node to allow the APIServer to delete the node, completing termination.
-
-## Methods
-
-There are both automated and manual ways of deprovisioning nodes provisioned by Karpenter:
-
-### Manual Methods
-* **Node Deletion**: You could use `kubectl` to manually remove a single Karpenter node:
-
-  ```bash
-  # Delete a specific node
-  kubectl delete node $NODE_NAME
-
-  # Delete all nodes owned by any provisioner
-  kubectl delete nodes -l karpenter.sh/provisioner-name
-
-  # Delete all nodes owned by a specific provisioner
-  kubectl delete nodes -l karpenter.sh/provisioner-name=$PROVISIONER_NAME
-  ```
-* **Provisioner Deletion**: Nodes are owned, through an [owner reference](https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/#owner-references-in-object-specifications), by the Provisioner that launched them. Karpenter will gracefully terminate nodes through cascading deletion when the owning provisioner is deleted.
-
-### Automated Methods
-* **Emptiness**: Karpenter notes when the last workload (non-daemonset) pod stops running on a node. From that point, Karpenter waits the number of seconds set by `ttlSecondsAfterEmpty` in the provisioner, then Karpenter requests to delete the node. This feature can keep costs down by removing nodes that are no longer being used for workloads.
-* **Expiration**: Karpenter will annotate nodes as expired and deprovision nodes after they have lived a set number of seconds, based on the provisioner `ttlSecondsUntilExpired` value. One use case for node expiry is to periodically recycle nodes.
Old nodes (with a potentially outdated Kubernetes version or operating system) are deleted, and replaced with nodes on the current version (assuming that you requested the latest version, rather than a specific version). -* **Consolidation**: Karpenter works to actively reduce cluster cost by identifying when: - * Nodes can be removed as their workloads will run on other nodes in the cluster. - * Nodes can be replaced with cheaper variants due to a change in the workloads. -* **Drift**: Karpenter will annotate nodes as drifted and deprovision nodes that have drifted from their desired specification. See [Drift]({{}}) to see which fields are considered. -* **Interruption**: If enabled, Karpenter will watch for upcoming involuntary interruption events that could affect your nodes (health events, spot interruption, etc.) and will cordon, drain, and terminate the node(s) ahead of the event to reduce workload disruption. - -{{% alert title="Note" color="primary" %}} -- Automated deprovisioning is configured through the ProvisionerSpec `.ttlSecondsAfterEmpty`, `.ttlSecondsUntilExpired` and `.consolidation.enabled` fields. If these are not configured, Karpenter will not set default values for them and will not terminate nodes for that purpose. - -- Keep in mind that a small `ttlSecondsUntilExpired` results in a higher churn in cluster activity. For a small enough `ttlSecondsUntilExpired`, nodes may expire faster than Karpenter can safely deprovision them, resulting in constant node deprovisioning. - -- Pods without an ownerRef (also called "controllerless" or "naked" pods) will be evicted during automatic node disruption, besides [Interruption](#interruption). A pod with the annotation `karpenter.sh/do-not-evict: "true"` will cause its node to be opted out from the same deprovisioning methods. - -- Using preferred anti-affinity and topology spreads can reduce the effectiveness of consolidation. At node launch, Karpenter attempts to satisfy affinity and topology spread preferences. In order to reduce node churn, consolidation must also attempt to satisfy these constraints to avoid immediately consolidating nodes after they launch. This means that consolidation may not deprovision nodes in order to avoid violating preferences, even if kube-scheduler can fit the host pods elsewhere. Karpenter reports these pods via logging to bring awareness to the possible issues they can cause (e.g. `pod default/inflate-anti-self-55894c5d8b-522jd has a preferred Anti-Affinity which can prevent consolidation`). - -- By adding the finalizer, Karpenter improves the default Kubernetes process of node deletion. -When you run `kubectl delete node` on a node without a finalizer, the node is deleted without triggering the finalization logic. The machine will continue running in EC2, even though there is no longer a node object for it. -The kubelet isn’t watching for its own existence, so if a node is deleted, the kubelet doesn’t terminate itself. -All the pod objects get deleted by a garbage collection process later, because the pods’ node is gone. - -{{% /alert %}} - -## Consolidation - -Karpenter has two mechanisms for cluster consolidation: -- Deletion - A node is eligible for deletion if all of its pods can run on free capacity of other nodes in the cluster. -- Replace - A node can be replaced if all of its pods can run on a combination of free capacity of other nodes in the cluster and a single cheaper replacement node. 
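A minimal sketch of enabling consolidation on a provisioner, using the `.consolidation.enabled` field noted above (`default` is a placeholder name):

```yaml
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
  name: default # placeholder name
spec:
  consolidation:
    enabled: true # let Karpenter delete or replace underutilized nodes
```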
-
-Consolidation has three mechanisms that are performed in order, attempting to identify a consolidation action:
-1) Empty Node Consolidation - Delete any entirely empty nodes in parallel
-2) Multi-Node Consolidation - Try to delete two or more nodes in parallel, possibly launching a single replacement that is cheaper than the price of all nodes being removed
-3) Single-Node Consolidation - Try to delete any single node, possibly launching a single replacement that is cheaper than the price of that node
-
-It's impractical to examine all possible consolidation options for multi-node consolidation, so Karpenter uses a heuristic to identify a likely set of nodes that can be consolidated. For single-node consolidation, we consider each node in the cluster individually.
-
-When there are multiple nodes that could be potentially deleted or replaced, Karpenter chooses to consolidate the node that overall disrupts your workloads the least by preferring to terminate:
-
-* nodes running fewer pods
-* nodes that will expire soon
-* nodes with lower priority pods
-
-{{% alert title="Note" color="primary" %}}
-For spot nodes, Karpenter only uses the deletion consolidation mechanism. It will not replace a spot node with a cheaper spot node. Spot instance types are selected with the `price-capacity-optimized` strategy and often the cheapest spot instance type is not launched due to the likelihood of interruption. Consolidation would then replace the spot instance with a cheaper instance, negating the `price-capacity-optimized` strategy entirely and increasing the interruption rate.
-{{% /alert %}}
-
-If consolidation is enabled, Karpenter periodically reports events against nodes that indicate why the node can't be consolidated. These events can be used to investigate nodes that you expect to have been consolidated, but still remain in your cluster.
-
-```
-Events:
-  Type    Reason            Age                From       Message
-  ----    ------            ----               ----       -------
-  Normal  Unconsolidatable  66s                karpenter  pdb default/inflate-pdb prevents pod evictions
-  Normal  Unconsolidatable  33s (x3 over 30m)  karpenter  can't replace with a cheaper node
-```
-
-## Interruption
-
-If interruption-handling is enabled, Karpenter will watch for upcoming involuntary interruption events that would cause disruption to your workloads. These interruption events include:
-
-* Spot Interruption Warnings
-* Scheduled Change Health Events (Maintenance Events)
-* Instance Terminating Events
-* Instance Stopping Events
-
-When Karpenter detects one of these events will occur to your nodes, it automatically cordons, drains, and terminates the node(s) ahead of the interruption event to give the maximum amount of time for workload cleanup prior to compute disruption. This enables scenarios where the `terminationGracePeriod` for your workloads may be long or cleanup for your workloads is critical, and you want enough time to be able to gracefully clean up your pods.
-
-For Spot interruptions, the provisioner will start a new machine as soon as it sees the Spot interruption warning. Spot interruptions have a __2 minute notice__ before Amazon EC2 reclaims the instance. Karpenter's average node startup time means that, generally, there is sufficient time for the new node to become ready and to move the pods to the new node before the machine is reclaimed.
-
-{{% alert title="Note" color="primary" %}}
-Karpenter publishes Kubernetes events to the node for all events listed above in addition to __Spot Rebalance Recommendations__.
Karpenter does not currently support cordon, drain, and terminate logic for Spot Rebalance Recommendations.
-{{% /alert %}}
-
-Karpenter enables this feature by watching an SQS queue which receives critical events from AWS services which may affect your nodes. Karpenter requires that an SQS queue be provisioned and EventBridge rules and targets be added that forward interruption events from AWS services to the SQS queue. Karpenter provides details for provisioning this infrastructure in the [CloudFormation template in the Getting Started Guide](../../getting-started/getting-started-with-karpenter/#create-the-karpenter-infrastructure-and-iam-roles).
-
-To enable the interruption handling feature flag, configure the `karpenter-global-settings` ConfigMap with the following value mapped to the name of the interruption queue that handles interruption events.
-
-```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: karpenter-global-settings
-  namespace: karpenter
-data:
-  ...
-  aws.interruptionQueueName: karpenter-cluster
-  ...
-```
-
-### Drift
-Drift handles changes to the NodePool/EC2NodeClass. For Drift, values in the NodePool/EC2NodeClass are reflected in the NodeClaimTemplateSpec/EC2NodeClassSpec in the same way that they’re set. A NodeClaim will be detected as drifted if the values in its owning NodePool/EC2NodeClass do not match the values in the NodeClaim. Similar to the upstream `deployment.spec.template` relationship to pods, Karpenter will annotate the owning NodePool and EC2NodeClass with a hash of the NodeClaimTemplateSpec to check for drift. Some special cases will be discovered either from Karpenter or through the CloudProvider interface, triggered by NodeClaim/Instance/NodePool/EC2NodeClass changes.
-
-#### Special Cases on Drift
-In special cases, drift can correspond to multiple values and must be handled differently. Drift on resolved fields can create cases where drift occurs without changes to CRDs, or where CRD changes do not result in drift. For example, if a NodeClaim has `node.kubernetes.io/instance-type: m5.large`, and requirements change from `node.kubernetes.io/instance-type In [m5.large]` to `node.kubernetes.io/instance-type In [m5.large, m5.2xlarge]`, the NodeClaim will not be drifted because its value is still compatible with the new requirements. Conversely, if a NodeClaim was launched with the image `ami: ami-abc`, but a new image is published, Karpenter's `EC2NodeClass.spec.amiSelectorTerms` will discover that the new correct value is `ami: ami-xyz`, and detect the NodeClaim as drifted.
-
-##### NodePool
-| Fields       |
-|--------------|
-| Requirements |
-
-##### EC2NodeClass
-| Fields                        |
-|-------------------------------|
-| Subnet Selector Terms         |
-| Security Group Selector Terms |
-| AMI Selector Terms            |
-
-#### Behavioral Fields
-Behavioral Fields are treated as over-arching settings on the NodePool to dictate how Karpenter behaves. These fields don’t correspond to settings on the NodeClaim or instance. They’re set by the user to control Karpenter’s provisioning and disruption logic. Since these don’t map to a desired state of NodeClaims, __behavioral fields are not considered for Drift__.
-
-__Behavioral Fields__
-- Weight
-- Limits
-- ConsolidationPolicy
-- ConsolidateAfter
-- ExpireAfter
----
-
-Read the [Drift Design](https://github.com/aws/karpenter-core/blob/main/designs/drift.md) for more.
-
-To enable the drift feature flag, refer to the [Settings Feature Gates]({{<ref "./settings#feature-gates" >}}).
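-
-For example, a minimal sketch of the gate in the same ConfigMap (the `featureGates.driftEnabled` config key is documented in the Settings section later in this document):
-
-```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: karpenter-global-settings
-  namespace: karpenter
-data:
-  featureGates.driftEnabled: "true"
-```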
-
-Karpenter will add the `MachineDrifted` status condition to a machine if the machine is drifted and does not already have the status condition.
-
-Karpenter will remove the `MachineDrifted` status condition in the following scenarios:
-1. If the `featureGates.driftEnabled` feature gate is not enabled but the machine is drifted, Karpenter will remove the status condition.
-2. If the machine isn't drifted but has the status condition, Karpenter will remove it.
-
-If the node is marked as drifted by another controller, Karpenter will do nothing.
-
-## Controls
-
-### Pod-Level Controls
-
-You can block Karpenter from voluntarily choosing to disrupt certain pods by setting the `karpenter.sh/do-not-evict: "true"` annotation on the pod. This is useful for pods that you want to run from start to finish without disruption. By opting pods out of this disruption, you are telling Karpenter that it should not voluntarily remove a node containing this pod.
-
-Examples of pods that you might want to opt out of disruption include an interactive game that you don't want to interrupt or a long batch job (such as you might have with machine learning) that would need to start over if it were interrupted.
-
-```yaml
-apiVersion: apps/v1
-kind: Deployment
-spec:
-  template:
-    metadata:
-      annotations:
-        karpenter.sh/do-not-evict: "true"
-```
-
-{{% alert title="Note" color="primary" %}}
-This annotation will be ignored for [terminating pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase), [terminal pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) (Failed/Succeeded), [DaemonSet pods](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/), or [static pods](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/).
-{{% /alert %}}
-
-Examples of voluntary node removal that will be prevented by this annotation include:
-- [Consolidation]({{<ref "#consolidation" >}})
-- [Drift]({{<ref "#drift" >}})
-- Emptiness
-- Expiration
-
-{{% alert title="Note" color="primary" %}}
-Voluntary node removal does not include [Interruption]({{<ref "#interruption" >}}) or manual deletion initiated through `kubectl delete node`. Both of these are considered involuntary events, since node removal cannot be delayed.
-{{% /alert %}}
-
-### Node-Level Controls
-
-Nodes can be opted out of consolidation deprovisioning by setting the annotation `karpenter.sh/do-not-consolidate: "true"` on the node.
-
-```yaml
-apiVersion: v1
-kind: Node
-metadata:
-  annotations:
-    karpenter.sh/do-not-consolidate: "true"
-```
-
-#### Example: Disable Consolidation on Provisioner
-
-Provisioner `.spec.annotations` allows you to set annotations that will be applied to all nodes launched by this provisioner. By setting the annotation `karpenter.sh/do-not-consolidate: "true"` on the provisioner, you will selectively prevent all nodes launched by this Provisioner from being considered in consolidation calculations.
-
-```yaml
-apiVersion: karpenter.sh/v1alpha5
-kind: Provisioner
-metadata:
-  name: default
-spec:
-  annotations: # will be applied to all nodes
-    karpenter.sh/do-not-consolidate: "true"
-```
diff --git a/website/content/en/v0.31/concepts/node-templates.md b/website/content/en/v0.31/concepts/node-templates.md
deleted file mode 100644
index ecab793e0984..000000000000
--- a/website/content/en/v0.31/concepts/node-templates.md
+++ /dev/null
@@ -1,689 +0,0 @@
----
-title: "Node Templates"
-linkTitle: "Node Templates"
-weight: 2
-description: >
-  Configure AWS specific settings
----
-
-Node Templates enable configuration of AWS specific settings.
-Each provisioner must reference an AWSNodeTemplate using `spec.providerRef`.
-Multiple provisioners may point to the same AWSNodeTemplate.
-
-```yaml
-apiVersion: karpenter.sh/v1alpha5
-kind: Provisioner
-metadata:
-  name: default
-spec:
-  providerRef:
-    name: default
----
-apiVersion: karpenter.k8s.aws/v1alpha1
-kind: AWSNodeTemplate
-metadata:
-  name: default
-spec:
-  subnetSelector: { ... }        # required, discovers tagged subnets to attach to instances
-  securityGroupSelector: { ... } # required, discovers tagged security groups to attach to instances
-  instanceProfile: "..."         # optional, overrides the node's identity from global settings
-  amiFamily: "..."               # optional, resolves a default ami and userdata
-  amiSelector: { ... }           # optional, discovers tagged amis to override the amiFamily's default
-  userData: "..."                # optional, overrides autogenerated userdata with a merge semantic
-  tags: { ... }                  # optional, propagates tags to underlying EC2 resources
-  metadataOptions: { ... }       # optional, configures IMDS for the instance
-  blockDeviceMappings: [ ... ]   # optional, configures storage devices for the instance
-  detailedMonitoring: "..."      # optional, configures detailed monitoring for the instance
-status:
-  subnets: { ... }        # resolved subnets
-  securityGroups: { ... } # resolved security groups
-  amis: { ... }           # resolved AMIs
-```
-Refer to the [Provisioner docs]({{<ref "./provisioners" >}}) for settings applicable to all providers. To explore various `AWSNodeTemplate` configurations, refer to the examples provided [in the Karpenter Github repository](https://github.com/aws/karpenter/blob/main/examples/provisioner/).
-
-See below for other AWS provider-specific parameters.
-
-## spec.subnetSelector
-
-The `AWSNodeTemplate` discovers subnets using [AWS tags](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html).
-Subnets may be specified by any AWS tag, including `Name`. Selecting tag values using wildcards (`*`) is supported.
-Subnet IDs may be specified by using the key `aws-ids` and then passing the IDs as a comma-separated string value.
-When launching nodes, a subnet is automatically chosen that matches the desired zone.
-If multiple subnets exist for a zone, the one with the most available IP addresses will be used.
-
-**Examples**
-
-Select all with a specified tag key:
-```yaml
-spec:
-  subnetSelector:
-    karpenter.sh/discovery/MyClusterName: '*'
-```
-
-Select by name and tag (all criteria must match):
-```yaml
-spec:
-  subnetSelector:
-    Name: my-subnet
-    MyTag: '' # matches all resources with the tag
-```
-
-Select using comma-separated tag values:
-```yaml
-spec:
-  subnetSelector:
-    Name: "my-subnet-1,my-subnet-2"
-```
-
-Select using wildcards:
-```yaml
-spec:
-  subnetSelector:
-    Name: "*Public*"
-```
-
-Select by ID:
-```yaml
-spec:
-  subnetSelector:
-    aws-ids: "subnet-09fa4a0a8f233a921,subnet-0471ca205b8a129ae"
-```
-
-## spec.securityGroupSelector
-
-The security group of an instance is comparable to a set of firewall rules.
-EKS creates at least two security groups by default, [review the documentation](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) for more info.
-Security groups may be specified by any AWS tag, including "Name". Selecting tags using wildcards (`*`) is supported.
-
-{{% alert title="Note" color="primary" %}}
-When launching nodes, Karpenter uses all the security groups that match the selector. If you choose to use the `kubernetes.io/cluster/$CLUSTER_NAME` tag for discovery, note that this may result in failures using the AWS Load Balancer controller. The Load Balancer controller only supports a single security group having that tag key. See [this issue](https://github.com/kubernetes-sigs/aws-load-balancer-controller/issues/2367) for more details.
-{{% /alert %}}
-
-To verify if this restriction affects you, run the following commands.
-```bash
-CLUSTER_VPC_ID="$(aws eks describe-cluster --name $CLUSTER_NAME --query cluster.resourcesVpcConfig.vpcId --output text)"
-
-aws ec2 describe-security-groups --filters Name=vpc-id,Values=$CLUSTER_VPC_ID Name=tag-key,Values=kubernetes.io/cluster/$CLUSTER_NAME --query 'SecurityGroups[].[GroupName]' --output text
-```
-
-If multiple securityGroups are printed, you will need a more specific securityGroupSelector. We generally recommend that you use the `karpenter.sh/discovery: $CLUSTER_NAME` tag selector instead.
-
-**Examples**
-
-Select all assigned to a cluster:
-```yaml
-spec:
-  securityGroupSelector:
-    karpenter.sh/discovery: "${CLUSTER_NAME}"
-```
-
-Select all with a specified tag key:
-```yaml
-spec:
-  securityGroupSelector:
-    MyTag: '*'
-```
-
-Select by name and tag (all criteria must match):
-```yaml
-spec:
-  securityGroupSelector:
-    Name: my-security-group
-    MyTag: '' # matches all resources with the tag
-```
-
-Select by comma-separated tag values:
-```yaml
-spec:
-  securityGroupSelector:
-    Name: "my-security-group-1,my-security-group-2"
-```
-
-Select by name using a wildcard:
-```yaml
-spec:
-  securityGroupSelector:
-    Name: "*Public*"
-```
-
-Select by ID:
-```yaml
-spec:
-  securityGroupSelector:
-    aws-ids: "sg-063d7acfb4b06c82c,sg-06e0cf9c198874591"
-```
-
-## spec.instanceProfile
-
-An `InstanceProfile` is a way to pass a single IAM role to an EC2 instance launched by the provisioner.
-A default profile is configured in global settings, but may be overridden here.
-The `AWSNodeTemplate` will not create an `InstanceProfile` automatically.
-The `InstanceProfile` must refer to a `Role` that has permission to connect to the cluster.
-```yaml
-spec:
-  instanceProfile: MyInstanceProfile
-```
-
-## spec.amiFamily
-
-The AMI used when provisioning nodes can be controlled by the `amiFamily` field.
Based on the value set for `amiFamily`, Karpenter will automatically query for the appropriate [EKS optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-amis.html) via AWS Systems Manager (SSM). When an `amiFamily` of `Custom` is chosen, an `amiSelector` must be specified that informs Karpenter which custom AMIs are to be used.
-
-Currently, Karpenter supports `amiFamily` values `AL2`, `Bottlerocket`, `Ubuntu`, `Windows2019`, `Windows2022` and `Custom`. GPUs are only supported with `AL2` and `Bottlerocket`. The `AL2` amiFamily does not support ARM64 GPU instance types unless you specify a custom amiSelector.
-
-{{% alert title="Defaults" color="secondary" %}}
-If no `amiFamily` is defined, Karpenter will set the default `amiFamily` to AL2:
-
-```yaml
-spec:
-  amiFamily: AL2
-```
-{{% /alert %}}
-
-## spec.amiSelector
-
-AMISelector is used to configure custom AMIs for Karpenter to use, where the AMIs are discovered through `aws::` prefixed filters (`aws::ids`, `aws::owners` and `aws::name`) and [AWS tags](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). This field is optional, and Karpenter will use the latest EKS-optimized AMIs if an amiSelector is not specified.
-
-To select an AMI by name, use `aws::name`. EC2 AMIs may be specified by any AWS tag, including `Name`. Selecting by tag or by name using wildcards (`*`) is supported.
-
-EC2 AMI IDs may be specified by using the key `aws::ids` (`aws-ids` is also supported) and then passing the IDs as a comma-separated string value.
-
-To ensure that AMIs are owned by the expected owner, use `aws::owners`, which expects a comma-separated list of AWS account owners - you can use a combination of account aliases (e.g. `self`, `amazon`, `your-aws-account-name`) and account IDs. If this is not set, *and* `aws::ids`/`aws-ids` are not set, it defaults to `self,amazon`.
-
-{{% alert title="Note" color="primary" %}}
-If you use only `aws::owners`, Karpenter will discover all images that are owned by those specified, selecting the most recently created ones to be used. If you specify `aws::owners`, but nothing else, there is a larger chance that Karpenter could select an image that is not compatible with your instance type. To lower this chance, it is recommended to use `aws::name` or `aws::ids` if you're using `aws::owners` to select a subset of images that you have validated are compatible with your selected instance types.
-{{% /alert %}}
-
-### AMI Selection
-
-If an `amiSelector` matches more than one AMI, Karpenter will automatically determine which AMI best fits the workloads on the launched worker node under the following constraints:
-
-* When launching nodes, Karpenter automatically determines which architecture a custom AMI is compatible with and will use images that match an instanceType's requirements.
-* If multiple AMIs are found that can be used, Karpenter will choose the latest one.
-* If no AMIs are found that can be used, then no nodes will be provisioned.
-
-If you need to express other constraints for an AMI beyond architecture, you can express these constraints as tags on the AMI. For example, if you want to limit an EC2 AMI to only be used with instanceTypes that have an `nvidia` GPU, you can specify an EC2 tag with a key of `karpenter.k8s.aws/instance-gpu-manufacturer` and value `nvidia` on that AMI.
-
-All labels defined [in the scheduling documentation](../scheduling#well-known-labels) can be used as requirements for an EC2 AMI.
-
-```bash
-> aws ec2 describe-images --image-id ami-123 --query Images[0].Tags
-[
-    {
-        "Key": "karpenter.sh/discovery",
-        "Value": "my-cluster"
-    },
-    {
-        "Key": "Name",
-        "Value": "amazon-eks-node-1.21-customized-v0"
-    },
-    {
-        "Key": "karpenter.k8s.aws/instance-gpu-manufacturer",
-        "Value": "nvidia"
-    }
-]
-```
-
-#### Examples
-
-Select all AMIs with a specified tag:
-```yaml
-  amiSelector:
-    karpenter.sh/discovery/MyClusterName: '*'
-```
-
-Select AMIs by the AMI name:
-```yaml
-  amiSelector:
-    aws::name: my-ami
-```
-Select AMIs by the Name tag:
-```yaml
-  amiSelector:
-    Name: my-ami
-```
-
-Select AMIs by name and a specific owner:
-```yaml
-  amiSelector:
-    aws::name: my-ami
-    aws::owners: self,ownerAccountID
-```
-
-Select AMIs by an arbitrary AWS tag key/value pair:
-```yaml
-  amiSelector:
-    MyAMITag: value
-```
-
-Specify AMIs explicitly by ID:
-```yaml
-  amiSelector:
-    aws::ids: "ami-123,ami-456"
-```
-
-## spec.tags
-
-Karpenter adds tags to all resources it creates, including EC2 Instances, EBS volumes, and Launch Templates. The default set of AWS tags is listed below.
-
-```
-Name: karpenter.sh/provisioner-name/<provisioner-name>
-karpenter.sh/provisioner-name: <provisioner-name>
-kubernetes.io/cluster/<cluster-name>: owned
-```
-
-Additional tags can be added in the AWSNodeTemplate tags section, which are merged with global tags in `aws.tags` (located in the karpenter-global-settings ConfigMap).
-```yaml
-spec:
-  tags:
-    InternalAccountingTag: 1234
-    dev.corp.net/app: Calculator
-    dev.corp.net/team: MyTeam
-```
-
-Karpenter allows overrides of the default "Name" tag but does not allow overrides to restricted domains (such as "karpenter.sh", "karpenter.k8s.aws", and "kubernetes.io/cluster"). This ensures that Karpenter is able to correctly auto-discover machines that it owns.
-
-## spec.metadataOptions
-
-Control the exposure of [Instance Metadata Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) on EC2 Instances launched by this provisioner using a generated launch template.
-
-Refer to [recommended, security best practices](https://aws.github.io/aws-eks-best-practices/security/docs/iam/#restrict-access-to-the-instance-profile-assigned-to-the-worker-node) for limiting exposure of Instance Metadata and User Data to pods.
-
-If metadataOptions are omitted from this provisioner, the following default settings will be used.
-
-```yaml
-spec:
-  metadataOptions:
-    httpEndpoint: enabled
-    httpProtocolIPv6: disabled
-    httpPutResponseHopLimit: 2
-    httpTokens: required
-```
-
-## spec.blockDeviceMappings
-
-The `blockDeviceMappings` field in an AWSNodeTemplate can be used to control the Elastic Block Storage (EBS) volumes that Karpenter attaches to provisioned nodes. Karpenter uses default block device mappings for the AMI Family specified. For example, the `Bottlerocket` AMI Family defaults with two block device mappings, one for Bottlerocket's control volume and the other for container resources such as images and logs.
-
-Learn more about [block device mappings](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html).
- -### Examples - -```yaml -apiVersion: karpenter.k8s.aws/v1alpha1 -kind: AWSNodeTemplate -spec: - blockDeviceMappings: - - deviceName: /dev/xvda - ebs: - volumeSize: 100Gi - volumeType: gp3 - iops: 10000 - encrypted: true - kmsKeyID: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" - deleteOnTermination: true - throughput: 125 - snapshotID: snap-0123456789 -``` - -{{% alert title="Defaults" color="secondary" %}} -If no `blockDeviceMappings` is defined, Karpenter will set the default `blockDeviceMappings` to the following for the given AMI family. - -#### AL2 -```yaml -apiVersion: karpenter.k8s.aws/v1alpha1 -kind: AWSNodeTemplate -spec: - blockDeviceMappings: - - deviceName: /dev/xvda - ebs: - volumeSize: 20Gi - volumeType: gp3 - encrypted: true -``` - -#### Bottlerocket -```yaml -apiVersion: karpenter.k8s.aws/v1alpha1 -kind: AWSNodeTemplate -spec: - blockDeviceMappings: - # Root device - - deviceName: /dev/xvda - ebs: - volumeSize: 4Gi - volumeType: gp3 - encrypted: true - # Data device: Container resources such as images and logs - - deviceName: /dev/xvdb - ebs: - volumeSize: 20Gi - volumeType: gp3 - encrypted: true -``` - -#### Ubuntu -```yaml -apiVersion: karpenter.k8s.aws/v1alpha1 -kind: AWSNodeTemplate -spec: - blockDeviceMappings: - - deviceName: /dev/sda1 - ebs: - volumeSize: 20Gi - volumeType: gp3 - encrypted: true -``` - -#### Windows2019, Windows2022 -```yaml -apiVersion: karpenter.k8s.aws/v1alpha1 -kind: AWSNodeTemplate -spec: - blockDeviceMappings: - - deviceName: /dev/sda1 - ebs: - volumeSize: 50Gi - volumeType: gp3 - encrypted: true -``` -{{% /alert %}} - -## spec.userData - -You can control the UserData that is applied to your worker nodes via this field. - -```yaml -apiVersion: karpenter.k8s.aws/v1alpha1 -kind: AWSNodeTemplate -metadata: - name: bottlerocket-example -spec: - amiFamily: Bottlerocket - instanceProfile: MyInstanceProfile - subnetSelector: - karpenter.sh/discovery: my-cluster - securityGroupSelector: - karpenter.sh/discovery: my-cluster - userData: | - [settings.kubernetes] - "kube-api-qps" = 30 - "shutdown-grace-period" = "30s" - "shutdown-grace-period-for-critical-pods" = "30s" - [settings.kubernetes.eviction-hard] - "memory.available" = "20%" - amiSelector: - karpenter.sh/discovery: my-cluster -``` - -This example adds SSH keys to allow remote login to the node (replace *my-authorized_keys* with your key file): - -{{% alert title="Note" color="primary" %}} -Instead of using SSH as set up in this example, you can use Session Manager (SSM) or EC2 Instance Connect to gain shell access to Karpenter nodes. -See [Node NotReady]({{< ref "../troubleshooting/#node-notready" >}}) troubleshooting for an example of starting an SSM session from the command line or [EC2 Instance Connect](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-connect-set-up.html) documentation to connect to nodes using SSH. 
-{{% /alert %}}
-
-```yaml
-apiVersion: karpenter.k8s.aws/v1alpha1
-kind: AWSNodeTemplate
-metadata:
-  name: al2-example
-spec:
-  amiFamily: AL2
-  instanceProfile: MyInstanceProfile
-  subnetSelector:
-    karpenter.sh/discovery: my-cluster
-  securityGroupSelector:
-    karpenter.sh/discovery: my-cluster
-  userData: |
-    #!/bin/bash
-    mkdir -p ~ec2-user/.ssh/
-    touch ~ec2-user/.ssh/authorized_keys
-    cat >> ~ec2-user/.ssh/authorized_keys <<EOF
-    {{ insert your authorized_keys content here }}
-    EOF
-    chmod -R go-w ~ec2-user/.ssh/authorized_keys
-    chown -R ec2-user ~ec2-user/.ssh
-```
-
-#### AL2/Ubuntu
-
-* Your UserData can be in the [MIME multi part archive](https://cloudinit.readthedocs.io/en/latest/topics/format.html#mime-multi-part-archive) format.
-* Karpenter will transform your custom UserData into a MIME part, if necessary, and then merge a final MIME part to the end of your UserData parts which will bootstrap the worker node.
-
-Consider the following example to understand how your custom UserData will be merged in.
-
-Your UserData
-
-```
-#!/bin/bash
-echo "Running custom user data script"
-```
-
-Merged UserData
-
-```
-MIME-Version: 1.0
-Content-Type: multipart/mixed; boundary="//"
-
---//
-Content-Type: text/x-shellscript; charset="us-ascii"
-
-#!/bin/bash
-echo "Running custom user data script"
-
---//
-Content-Type: text/x-shellscript; charset="us-ascii"
-
-#!/bin/bash -xe
-exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
-/etc/eks/bootstrap.sh 'test-cluster' --apiserver-endpoint 'https://test-cluster' --b64-cluster-ca 'ca-bundle' \
---use-max-pods false \
---container-runtime containerd \
---kubelet-extra-args '--node-labels=karpenter.sh/capacity-type=on-demand,karpenter.sh/provisioner-name=test --max-pods=110'
---//--
-```
-
-You can also set kubelet-config properties by modifying the kubelet-config.json file before the EKS bootstrap script starts the kubelet:
-
-```yaml
-apiVersion: karpenter.k8s.aws/v1alpha1
-kind: AWSNodeTemplate
-metadata:
-  name: kubelet-config-example
-spec:
-  subnetSelector:
-    karpenter.sh/discovery: my-cluster
-  securityGroupSelector:
-    karpenter.sh/discovery: my-cluster
-  userData: |
-    #!/bin/bash
-    echo "$(jq '.kubeAPIQPS=50' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json
-```
-
-#### Windows
-
-* Your UserData must be specified as PowerShell commands.
-* The UserData specified will be prepended to a Karpenter-managed section that will bootstrap the kubelet.
-* Karpenter will continue to set ClusterDNS and all other parameters defined in spec.kubeletConfiguration as before.
-
-Consider the following example to understand how your custom UserData settings will be merged in.
-
-Your UserData
-
-```
-Write-Host "Running custom user data script"
-```
-
-Final merged UserData
-
-```
-<powershell>
-Write-Host "Running custom user data script"
-[string]$EKSBootstrapScriptFile = "$env:ProgramFiles\Amazon\EKS\Start-EKSBootstrap.ps1"
-& $EKSBootstrapScriptFile -EKSClusterName 'test-cluster' -APIServerEndpoint 'https://test-cluster' -Base64ClusterCA 'ca-bundle' -KubeletExtraArgs '--node-labels="karpenter.sh/capacity-type=spot,karpenter.sh/provisioner-name=windows2022" --max-pods=110' -DNSClusterIP '10.0.100.10'
-</powershell>
-```
-
-{{% alert title="Windows Support Notice" color="warning" %}}
-Currently, Karpenter does not specify `-ServiceCIDR` to the [EKS Windows AMI Bootstrap script](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-windows-ami.html#bootstrap-script-configuration-parameters).
-Windows worker nodes will use `172.20.0.0/16` or `10.100.0.0/16` for Kubernetes service IP address ranges based on the IP address of the primary interface.
-The effective ServiceCIDR can be verified at `$env:ProgramData\Amazon\EKS\cni\config\vpc-bridge.conf` on the worker node.
-
-Support for the Windows ServiceCIDR argument can be tracked in a [Karpenter Github Issue](https://github.com/aws/karpenter/issues/4088). Currently, if the effective ServiceCIDR is incorrect for your Windows worker nodes, you can add the following userData as a workaround.
-
-```yaml
-spec:
-  userData: |
-    $global:EKSCluster = Get-EKSCluster -Name my-cluster
-```
-{{% /alert %}}
-
-## spec.detailedMonitoring
-
-Enabling detailed monitoring on the node template controls the [EC2 detailed monitoring](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html) feature.
If you enable this option, the Amazon EC2 console displays monitoring graphs with a 1-minute period for the instances that Karpenter launches.
-```yaml
-spec:
-  detailedMonitoring: true
-```
-
-## status.subnets
-`status.subnets` contains the `id` and `zone` of the subnets utilized during node launch. The subnets are sorted by the available IP address count in decreasing order.
-
-**Examples**
-
-```yaml
-status:
-  subnets:
-  - id: subnet-0a462d98193ff9fac
-    zone: us-east-2b
-  - id: subnet-0322dfafd76a609b6
-    zone: us-east-2c
-  - id: subnet-0727ef01daf4ac9fe
-    zone: us-east-2b
-  - id: subnet-00c99aeafe2a70304
-    zone: us-east-2a
-  - id: subnet-023b232fd5eb0028e
-    zone: us-east-2c
-  - id: subnet-03941e7ad6afeaa72
-    zone: us-east-2a
-```
-
-## status.securityGroups
-`status.securityGroups` contains the `id` and `name` of the security groups utilized during node launch.
-
-**Examples**
-
-```yaml
-  status:
-    securityGroups:
-      - id: sg-041513b454818610b
-        name: ClusterSharedNodeSecurityGroup
-      - id: sg-0286715698b894bca
-        name: ControlPlaneSecurityGroup-1AQ073TSAAPW
-```
-
-## status.amis
-`status.amis` contains the `id`, `name`, and `requirements` of the amis utilized during node launch.
-
-**Examples**
-
-```yaml
-  amis:
-    - id: ami-03c3a3dcda64f5b75
-      name: amazon-linux-2-gpu
-      requirements:
-        - key: kubernetes.io/arch
-          operator: In
-          values:
-            - amd64
-        - key: karpenter.k8s.aws/instance-accelerator-manufacturer
-          operator: In
-          values:
-            - aws
-            - nvidia
-    - id: ami-06afb2d101cc4b8bd
-      name: amazon-linux-2-arm64
-      requirements:
-        - key: kubernetes.io/arch
-          operator: In
-          values:
-            - arm64
-        - key: karpenter.k8s.aws/instance-accelerator-manufacturer
-          operator: NotIn
-          values:
-            - aws
-            - nvidia
-    - id: ami-0e28b76d768af234e
-      name: amazon-linux-2
-      requirements:
-        - key: kubernetes.io/arch
-          operator: In
-          values:
-            - amd64
-        - key: karpenter.k8s.aws/instance-accelerator-manufacturer
-          operator: NotIn
-          values:
-            - aws
-            - nvidia
-```
\ No newline at end of file
diff --git a/website/content/en/v0.31/concepts/pod-density.md b/website/content/en/v0.31/concepts/pod-density.md
deleted file mode 100644
index 6a8a41b6af17..000000000000
--- a/website/content/en/v0.31/concepts/pod-density.md
+++ /dev/null
@@ -1,91 +0,0 @@
----
-title: "Control Pod Density"
-linkTitle: "Control Pod Density"
-weight: 6
-description: >
-  Learn ways to specify pod density with Karpenter
----
-
-Pod density is the number of pods per node.
-
-Kubernetes has a default limit of 110 pods per node. If you are using the EKS Optimized AMI on AWS, the [number of pods is limited by instance type](https://github.com/awslabs/amazon-eks-ami/blob/master/files/eni-max-pods.txt) in the default configuration.
-
-## Increase Pod Density
-
-### Networking Limitations
-
-*☁️ AWS Specific*
-
-By default, the number of pods on a node is limited by both the number of networking interfaces (ENIs) that may be attached to an instance type and the number of IP addresses that can be assigned to each ENI. See [IP addresses per network interface per instance type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for more detailed information on these instance types' limits.
-
-Karpenter can be configured to disable nodes' ENI-based pod density. This is especially useful for small to medium instance types, which have a lower ENI-based pod density.
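-
-For example, a sketch of the provisioner-level override described below, which pins a static pod limit in place of the ENI-derived default (the value `110` is illustrative):
-
-```yaml
-spec:
-  ...
-  kubeletConfiguration:
-    # Overrides the ENI-based pod density for nodes from this provisioner
-    maxPods: 110
-```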
- -{{% alert title="Note" color="primary" %}} -When using small instance types, it may be necessary to enable [prefix assignment mode](https://aws.amazon.com/blogs/containers/amazon-vpc-cni-increases-pods-per-node-limits/) in the AWS VPC CNI plugin to more pods per node. Prefix assignment mode was introduced in AWS VPC CNI v1.9 and allows ENIs to manage a broader set of IP addresses. Much higher pod densities are supported as a result. -{{% /alert %}} - -{{% alert title="Windows Support Notice" color="warning" %}} -Presently, Windows worker nodes do not support using more than one ENI. -As a consequence, the number of IP addresses, and subsequently, the number of pods that a Windows worker node can support is limited by the number of IPv4 addresses available on the primary ENI. -At the moment, Karpenter will only consider individual secondary IP addresses when calculating the pod density limit. -{{% /alert %}} - -### Provisioner-Specific Pod Density - -#### Static Pod Density - -Static pod density can be configured at the provisioner level by specifying `maxPods` within the `.spec.kubeletConfiguration`. All nodes spawned by this provisioner will set this `maxPods` value on their kubelet and will account for this value during scheduling. - -See [Provisioner API Kubelet Configuration](../provisioners/#max-pods) for more details. - -#### Dynamic Pod Density - -Dynamic pod density (density that scales with the instance size) can be configured at the provisioner level by specifying `podsPerCore` within the `.spec.kubeletConfiguration`. Karpenter will calculate the expected pod density for each instance based on the instance's number of logical cores (vCPUs) and will account for this during scheduling. - -See [Provisioner API Kubelet Configuration](../provisioners/#pod-density) for more details. - -### Controller-Wide Pod Density - -{{% alert title="Deprecation Warning" color="warning" %}} -`AWS_ENI_LIMITED_POD_DENSITY` is deprecated in favor of the `.spec.kubeletConfiguration.maxPods` set at the Provisioner-level -{{% /alert %}} - -Set the environment variable `AWS_ENI_LIMITED_POD_DENSITY: "false"` (or the argument `--aws-eni-limited-pod-density=false`) in the Karpenter controller to allow nodes to host up to 110 pods by default. - -Environment variables for the Karpenter controller may be specified as [helm chart values](https://github.com/aws/karpenter/blob/c73f425e924bb64c3f898f30ca5035a1d8591183/charts/karpenter/values.yaml#L15). - -### VPC CNI Custom Networking - -By default, the VPC CNI allocates IPs for a node and pods from the same subnet. With [VPC CNI Custom Networking](https://aws.github.io/aws-eks-best-practices/networking/custom-networking), the pods will receive IP addresses from another subnet dedicated to pod IPs. This approach makes it easier to manage IP addresses and allows for separate Network Access Control Lists (NACLs) applied to your pods. VPC CNI Custom Networking reduces the pod density of a node since one of the ENI attachments will be used for the node and cannot share the allocated IPs on the interface to pods. Karpenter supports VPC CNI Custom Networking and similar CNI setups where the primary node interface is separated from the pods interfaces through a global [setting](./settings.md#configmap) within the karpenter-global-settings configmap: `aws.reservedENIs`. In the common case, `aws.reservedENIs` should be set to `"1"` if using Custom Networking. 
- -{{% alert title="Windows Support Notice" color="warning" %}} -It's currently not possible to specify custom networking with Windows nodes. -{{% /alert %}} - -## Limit Pod Density - -Generally, increasing pod density is more efficient. However, some use cases exist for limiting pod density. - -### Topology Spread - -You can use [topology spread]({{< relref "scheduling.md#topology-spread" >}}) features to reduce blast radius. For example, spreading workloads across EC2 Availability Zones. - - -### Restrict Instance Types - -Exclude large instance sizes to reduce the blast radius of an EC2 instance failure. - -Consider setting up upper or lower boundaries on target instance sizes with the node.kubernetes.io/instance-type key. - -The following example shows how to avoid provisioning large Graviton instances in order to reduce the impact of individual instance failures: - -``` --key: node.kubernetes.io/instance-type - operator: NotIn - values: - 'm6g.16xlarge' - 'm6gd.16xlarge' - 'r6g.16xlarge' - 'r6gd.16xlarge' - 'c6g.16xlarge' -``` diff --git a/website/content/en/v0.31/concepts/provisioners.md b/website/content/en/v0.31/concepts/provisioners.md deleted file mode 100644 index 5881eb6046f8..000000000000 --- a/website/content/en/v0.31/concepts/provisioners.md +++ /dev/null @@ -1,484 +0,0 @@ ---- -title: "Provisioners" -linkTitle: "Provisioners" -weight: 1 -description: > - Learn about Karpenter Provisioners ---- - -When you first installed Karpenter, you set up a default Provisioner. -The Provisioner sets constraints on the nodes that can be created by Karpenter and the pods that can run on those nodes. -The Provisioner can be set to do things like: - -* Define taints to limit the pods that can run on nodes Karpenter creates -* Define any startup taints to inform Karpenter that it should taint the node initially, but that the taint is temporary. -* Limit node creation to certain zones, instance types, and computer architectures -* Set defaults for node expiration - -You can change your Provisioner or add other Provisioners to Karpenter. -Here are things you should know about Provisioners: - -* Karpenter won't do anything if there is not at least one Provisioner configured. -* Each Provisioner that is configured is looped through by Karpenter. -* If Karpenter encounters a taint in the Provisioner that is not tolerated by a Pod, Karpenter won't use that Provisioner to provision the pod. -* If Karpenter encounters a startup taint in the Provisioner it will be applied to nodes that are provisioned, but pods do not need to tolerate the taint. Karpenter assumes that the taint is temporary and some other system will remove the taint. -* It is recommended to create Provisioners that are mutually exclusive. So no Pod should match multiple Provisioners. If multiple Provisioners are matched, Karpenter will use the Provisioner with the highest [weight](#specweight). - -For some example `Provisioner` configurations, see the [examples in the Karpenter GitHub repository](https://github.com/aws/karpenter/blob/main/examples/provisioner/). - -```yaml -apiVersion: karpenter.sh/v1alpha5 -kind: Provisioner -metadata: - name: default -spec: - # References cloud provider-specific custom resource, see your cloud provider specific documentation - providerRef: - name: default - - # Provisioned nodes will have these taints - # Taints may prevent pods from scheduling if they are not tolerated by the pod. 
- taints: - - key: example.com/special-taint - effect: NoSchedule - - # Provisioned nodes will have these taints, but pods do not need to tolerate these taints to be provisioned by this - # provisioner. These taints are expected to be temporary and some other entity (e.g. a DaemonSet) is responsible for - # removing the taint after it has finished initializing the node. - startupTaints: - - key: example.com/another-taint - effect: NoSchedule - - # Labels are arbitrary key-values that are applied to all nodes - labels: - billing-team: my-team - - # Annotations are arbitrary key-values that are applied to all nodes - annotations: - example.com/owner: "my-team" - - # Requirements that constrain the parameters of provisioned nodes. - # These requirements are combined with pod.spec.topologySpreadConstraints, pod.spec.affinity.nodeAffinity, pod.spec.affinity.podAffinity, and pod.spec.nodeSelector rules. - # Operators { In, NotIn, Exists, DoesNotExist, Gt, and Lt } are supported. - # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#operators - requirements: - - key: "karpenter.k8s.aws/instance-category" - operator: In - values: ["c", "m", "r"] - - key: "karpenter.k8s.aws/instance-cpu" - operator: In - values: ["4", "8", "16", "32"] - - key: "karpenter.k8s.aws/instance-hypervisor" - operator: In - values: ["nitro"] - - key: "karpenter.k8s.aws/instance-generation" - operator: Gt - values: ["2"] - - key: "topology.kubernetes.io/zone" - operator: In - values: ["us-west-2a", "us-west-2b"] - - key: "kubernetes.io/arch" - operator: In - values: ["arm64", "amd64"] - - key: "karpenter.sh/capacity-type" # If not included, the webhook for the AWS cloud provider will default to on-demand - operator: In - values: ["spot", "on-demand"] - - # Karpenter provides the ability to specify a few additional Kubelet args. - # These are all optional and provide support for additional customization and use cases. - kubeletConfiguration: - clusterDNS: ["10.0.1.100"] - containerRuntime: containerd - systemReserved: - cpu: 100m - memory: 100Mi - ephemeral-storage: 1Gi - kubeReserved: - cpu: 200m - memory: 100Mi - ephemeral-storage: 3Gi - evictionHard: - memory.available: 5% - nodefs.available: 10% - nodefs.inodesFree: 10% - evictionSoft: - memory.available: 500Mi - nodefs.available: 15% - nodefs.inodesFree: 15% - evictionSoftGracePeriod: - memory.available: 1m - nodefs.available: 1m30s - nodefs.inodesFree: 2m - evictionMaxPodGracePeriod: 60 - imageGCHighThresholdPercent: 85 - imageGCLowThresholdPercent: 80 - cpuCFSQuota: true - podsPerCore: 2 - maxPods: 20 - - - # Resource limits constrain the total size of the cluster. - # Limits prevent Karpenter from creating new instances once the limit is exceeded. - limits: - resources: - cpu: "1000" - memory: 1000Gi - - # Enables consolidation which attempts to reduce cluster cost by both removing un-needed nodes and down-sizing those - # that can't be removed. Mutually exclusive with the ttlSecondsAfterEmpty parameter. - consolidation: - enabled: true - - # If omitted, the feature is disabled and nodes will never expire. If set to less time than it requires for a node - # to become ready, the node may expire before any pods successfully start. - ttlSecondsUntilExpired: 2592000 # 30 Days = 60 * 60 * 24 * 30 Seconds; - - # If omitted, the feature is disabled, nodes will never scale down due to low utilization - ttlSecondsAfterEmpty: 30 - - # Priority given to the provisioner when the scheduler considers which provisioner - # to select. 
Higher weights indicate higher priority when comparing provisioners. - # Specifying no weight is equivalent to specifying a weight of 0. - weight: 10 -``` - -## spec.requirements - -Kubernetes defines the following [Well-Known Labels](https://kubernetes.io/docs/reference/labels-annotations-taints/), and cloud providers (e.g., AWS) implement them. They are defined at the "spec.requirements" section of the Provisioner API. - -In addition to the well-known labels from Kubernetes, Karpenter supports AWS-specific labels for more advanced scheduling. See the full list [here](../scheduling/#well-known-labels). - -These well-known labels may be specified at the provisioner level, or in a workload definition (e.g., nodeSelector on a pod.spec). Nodes are chosen using both the provisioner's and pod's requirements. If there is no overlap, nodes will not be launched. In other words, a pod's requirements must be within the provisioner's requirements. If a requirement is not defined for a well known label, any value available to the cloud provider may be chosen. - -For example, an instance type may be specified using a nodeSelector in a pod spec. If the instance type requested is not included in the provisioner list and the provisioner has instance type requirements, Karpenter will not create a node or schedule the pod. - -📝 None of these values are required. - -### Instance Types - -- key: `node.kubernetes.io/instance-type` -- key: `karpenter.k8s.aws/instance-family` -- key: `karpenter.k8s.aws/instance-category` -- key: `karpenter.k8s.aws/instance-generation` - -Generally, instance types should be a list and not a single value. Leaving these requirements undefined is recommended, as it maximizes choices for efficiently placing pods. - -Review [AWS instance types](../instance-types). Most instance types are supported with the exclusion of [non-HVM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/virtualization_types.html). - -{{% alert title="Defaults" color="secondary" %}} -If no instance type constraints are defined, Karpenter will set default instance type constraints on your Provisioner that supports most common user workloads: - -```yaml -requirements: - - key: karpenter.k8s.aws/instance-category - operator: In - values: ["c", "m", "r"] - - key: karpenter.k8s.aws/instance-generation - operator: Gt - values: ["2"] -``` -{{% /alert %}} - -### Availability Zones - -- key: `topology.kubernetes.io/zone` -- value example: `us-east-1c` -- value list: `aws ec2 describe-availability-zones --region ` - -Karpenter can be configured to create nodes in a particular zone. Note that the Availability Zone `us-east-1a` for your AWS account might not have the same location as `us-east-1a` for another AWS account. - -[Learn more about Availability Zone -IDs.](https://docs.aws.amazon.com/ram/latest/userguide/working-with-az-ids.html) - -### Architecture - -- key: `kubernetes.io/arch` -- values - - `amd64` - - `arm64` - -Karpenter supports `amd64` nodes, and `arm64` nodes. - -{{% alert title="Defaults" color="secondary" %}} -If no architecture constraint is defined, Karpenter will set the default architecture constraint on your Provisioner that supports most common user workloads: - -```yaml -requirements: - - key: kubernetes.io/arch - operator: In - values: ["amd64"] -``` -{{% /alert %}} - -### Operating System - - key: `kubernetes.io/os` - - values - - `linux` - - `windows` - -Karpenter supports `linux` and `windows` operating systems. 
- -{{% alert title="Defaults" color="secondary" %}} -If no operating system constraint is defined, Karpenter will set the default operating system constraint on your Provisioner that supports most common user workloads: - -```yaml -requirements: - - key: kubernetes.io/os - operator: In - values: ["linux"] -``` -{{% /alert %}} - -### Capacity Type - -- key: `karpenter.sh/capacity-type` -- values - - `spot` - - `on-demand` - -Karpenter supports specifying capacity type, which is analogous to [EC2 purchase options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-purchasing-options.html). - -Karpenter prioritizes Spot offerings if the provisioner allows Spot and on-demand instances. If the provider API (e.g. EC2 Fleet's API) indicates Spot capacity is unavailable, Karpenter caches that result across all attempts to provision EC2 capacity for that instance type and zone for the next 45 seconds. If there are no other possible offerings available for Spot, Karpenter will attempt to provision on-demand instances, generally within milliseconds. - -Karpenter also allows `karpenter.sh/capacity-type` to be used as a topology key for enforcing topology-spread. - -{{% alert title="Defaults" color="secondary" %}} -If no capacity type constraint is defined, Karpenter will set the default capacity type constraint on your Provisioner that supports most common user workloads: - -```yaml -requirements: - - key: karpenter.sh/capacity-type - operator: In - values: ["on-demand"] -``` -{{% /alert %}} - -## spec.weight - -Karpenter allows you to describe provisioner preferences through a `weight` mechanism similar to how weight is described with [pod and node affinities](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity). - -For more information on weighting Provisioners, see the [Weighting Provisioners section](../scheduling#weighting-provisioners) in the scheduling details. - -## spec.kubeletConfiguration - -Karpenter provides the ability to specify a few additional Kubelet args. These are all optional and provide support for -additional customization and use cases. Adjust these only if you know you need to do so. For more details on kubelet configuration arguments, [see the KubeletConfiguration API specification docs](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/). The implemented fields are a subset of the full list of upstream kubelet configuration arguments. Please cut an issue if you'd like to see another field implemented. - -```yaml -spec: - ... - kubeletConfiguration: - clusterDNS: ["10.0.1.100"] - containerRuntime: containerd - systemReserved: - cpu: 100m - memory: 100Mi - ephemeral-storage: 1Gi - kubeReserved: - cpu: 200m - memory: 100Mi - ephemeral-storage: 3Gi - evictionHard: - memory.available: 5% - nodefs.available: 10% - nodefs.inodesFree: 10% - evictionSoft: - memory.available: 500Mi - nodefs.available: 15% - nodefs.inodesFree: 15% - evictionSoftGracePeriod: - memory.available: 1m - nodefs.available: 1m30s - nodefs.inodesFree: 2m - evictionMaxPodGracePeriod: 60 - imageGCHighThresholdPercent: 85 - imageGCLowThresholdPercent: 80 - cpuCFSQuota: true - podsPerCore: 2 - maxPods: 20 -``` - -You can specify the container runtime to be either `dockerd` or `containerd`. By default, `containerd` is used. - -* `containerd` is the only valid container runtime when using the `Bottlerocket` AMIFamily or when using Kubernetes version 1.24+ and the `AL2`, `Windows2019`, or `Windows2022` AMIFamilies. 
- -### Reserved Resources - -Karpenter will automatically configure the system and kube reserved resource requests on the fly on your behalf. These requests are used to configure your node and to make scheduling decisions for your pods. If you have specific requirements or know that you will have additional capacity requirements, you can optionally override the `--system-reserved` configuration defaults with the `.spec.kubeletConfiguration.systemReserved` values and the `--kube-reserved` configuration defaults with the `.spec.kubeletConfiguration.kubeReserved` values. - -For more information on the default `--system-reserved` and `--kube-reserved` configuration refer to the [Kubelet Docs](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#kube-reserved) - -### Eviction Thresholds - -The kubelet supports eviction thresholds by default. When enough memory or file system pressure is exerted on the node, the kubelet will begin to evict pods to ensure that system daemons and other system processes can continue to run in a healthy manner. - -Kubelet has the notion of [hard evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#hard-eviction-thresholds) and [soft evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#soft-eviction-thresholds). In hard evictions, pods are evicted as soon as a threshold is met, with no grace period to terminate. Soft evictions, on the other hand, provide an opportunity for pods to be terminated gracefully. They do so by sending a termination signal to pods that are planning to be evicted and allowing those pods to terminate up to their grace period. - -Karpenter supports [hard evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#hard-eviction-thresholds) through the `.spec.kubeletConfiguration.evictionHard` field and [soft evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#soft-eviction-thresholds) through the `.spec.kubeletConfiguration.evictionSoft` field. `evictionHard` and `evictionSoft` are configured by listing [signal names](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#eviction-signals) with either percentage values or resource values. - -```yaml -spec: - ... - kubeletConfiguration: - evictionHard: - memory.available: 500Mi - nodefs.available: 10% - nodefs.inodesFree: 10% - imagefs.available: 5% - imagefs.inodesFree: 5% - pid.available: 7% - evictionSoft: - memory.available: 1Gi - nodefs.available: 15% - nodefs.inodesFree: 15% - imagefs.available: 10% - imagefs.inodesFree: 10% - pid.available: 10% -``` - -#### Supported Eviction Signals - -| Eviction Signal | Description | -| --------------- | ----------- | -| memory.available | memory.available := node.status.capacity[memory] - node.stats.memory.workingSet | -| nodefs.available | nodefs.available := node.stats.fs.available | -| nodefs.inodesFree | nodefs.inodesFree := node.stats.fs.inodesFree | -| imagefs.available | imagefs.available := node.stats.runtime.imagefs.available | -| imagefs.inodesFree | imagefs.inodesFree := node.stats.runtime.imagefs.inodesFree | -| pid.available | pid.available := node.stats.rlimit.maxpid - node.stats.rlimit.curproc | - -For more information on eviction thresholds, view the [Node-pressure Eviction](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction) section of the official Kubernetes docs. 
-
-#### Soft Eviction Grace Periods
-
-Soft eviction pairs an eviction threshold with a specified grace period. With soft eviction thresholds, the kubelet will only begin evicting pods when the node exceeds its soft eviction threshold over the entire duration of its grace period. For example, if you specify `evictionSoft[memory.available]` of `500Mi` and an `evictionSoftGracePeriod[memory.available]` of `1m30s`, the node must have less than `500Mi` of available memory over a minute and a half in order for the kubelet to begin evicting pods.
-
-Optionally, you can specify an `evictionMaxPodGracePeriod` which defines the administrator-specified maximum pod termination grace period to use during soft eviction. If a namespace owner has specified `terminationGracePeriodSeconds` on pods in their namespace, the minimum of `evictionMaxPodGracePeriod` and `terminationGracePeriodSeconds` is used.
-
-```yaml
-spec:
-  ...
-  kubeletConfiguration:
-    evictionSoftGracePeriod:
-      memory.available: 1m
-      nodefs.available: 1m30s
-      nodefs.inodesFree: 2m
-      imagefs.available: 1m30s
-      imagefs.inodesFree: 2m
-      pid.available: 2m
-    evictionMaxPodGracePeriod: 60
-```
-
-### Pod Density
-
-#### Max Pods
-
-By default, AWS will configure the maximum density of pods on a node [based on the node instance type](https://github.com/awslabs/amazon-eks-ami/blob/master/files/eni-max-pods.txt). For small instances that require an increased pod density or large instances that require a reduced pod density, you can override this default value with `.spec.kubeletConfiguration.maxPods`. This value will be used during Karpenter pod scheduling and passed through to `--max-pods` on kubelet startup.
-
-{{% alert title="Note" color="primary" %}}
-When using small instance types, it may be necessary to enable [prefix assignment mode](https://aws.amazon.com/blogs/containers/amazon-vpc-cni-increases-pods-per-node-limits/) in the AWS VPC CNI plugin to support a higher pod density per node. Prefix assignment mode was introduced in AWS VPC CNI v1.9 and allows ENIs to manage a broader set of IP addresses. Much higher pod densities are supported as a result.
-{{% /alert %}}
-
-#### Pods Per Core
-
-An alternative way to dynamically set the maximum density of pods on a node is to use the `.spec.kubeletConfiguration.podsPerCore` value. Karpenter will calculate the pod density during scheduling by multiplying this value by the number of logical cores (vCPUs) on an instance type. This value will also be passed through to the `--pods-per-core` value on kubelet startup to configure the number of allocatable pods the kubelet can assign to the node instance.
-
-The value generated from `podsPerCore` cannot exceed `maxPods`; if both are set, the minimum of the `podsPerCore` dynamic pod density and the static `maxPods` value will be used for scheduling.
-
-{{% alert title="Note" color="primary" %}}
-Even if `maxPods` is not set in the `kubeletConfiguration` of a Provisioner, pod density may still be restricted by the `ENI_LIMITED_POD_DENSITY` value. You may want to ensure that the `podsPerCore` value that will be used for instance families associated with the Provisioner will not cause unexpected behavior by exceeding the `maxPods` value.
-{{% /alert %}}
-
-{{% alert title="Pods Per Core on Bottlerocket" color="warning" %}}
-Bottlerocket AMIFamily currently does not support `podsPerCore` configuration.
If a Provisioner contains a `provider` or `providerRef` to a node template that will launch a Bottlerocket instance, the `podsPerCore` value will be ignored for scheduling and for configuring the kubelet. -{{% /alert %}} - -## spec.limits.resources - -The provisioner spec includes a limits section (`spec.limits.resources`), which constrains the maximum amount of resources that the provisioner will manage. - -Karpenter supports limits of any resource type reported by your cloudprovider. It limits instance types when scheduling to those that will not exceed the specified limits. If a limit has been exceeded, nodes provisioning is prevented until some nodes have been terminated. - -```yaml -apiVersion: karpenter.sh/v1alpha5 -kind: Provisioner -metadata: - name: default -spec: - requirements: - - key: karpenter.sh/capacity-type - operator: In - values: ["spot"] - limits: - resources: - cpu: 1000 - memory: 1000Gi - nvidia.com/gpu: 2 -``` - -{{% alert title="Note" color="primary" %}} -Karpenter provisioning is highly parallel. Because of this, limit checking is eventually consistent, which can result in overrun during rapid scale outs. -{{% /alert %}} - -CPU limits are described with a `DecimalSI` value. Note that the Kubernetes API will coerce this into a string, so we recommend against using integers to avoid GitOps skew. - -Memory limits are described with a [`BinarySI` value, such as 1000Gi.](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory) - -You can view the current consumption of cpu and memory on your cluster by running: -``` -kubectl get provisioner -o=jsonpath='{.items[0].status}' -``` - -Review the [Kubernetes core API](https://github.com/kubernetes/api/blob/37748cca582229600a3599b40e9a82a951d8bbbf/core/v1/resource.go#L23) (`k8s.io/api/core/v1`) for more information on `resources`. - -## spec.providerRef - -This field points to the cloud provider-specific custom resource. Learn more about [AWSNodeTemplates](../node-templates/). - -## spec.consolidation - -You can configure Karpenter to deprovision instances through your Provisioner in multiple ways. You can use `spec.ttlSecondsAfterEmpty`, `spec.ttlSecondsUntilExpired` or `spec.consolidation.enabled`. Read [Deprovisioning](../deprovisioning/) for more. - -## Example Use-Cases - -### Isolating Expensive Hardware - -A provisioner can be set up to only provision nodes on particular processor types. -The following example sets a taint that only allows pods with tolerations for Nvidia GPUs to be scheduled: - -```yaml -apiVersion: karpenter.sh/v1alpha5 -kind: Provisioner -metadata: - name: gpu -spec: - consolidation: - enabled: true - requirements: - - key: node.kubernetes.io/instance-type - operator: In - values: ["p3.8xlarge", "p3.16xlarge"] - taints: - - key: nvidia.com/gpu - value: "true" - effect: NoSchedule -``` -In order for a pod to run on a node defined in this provisioner, it must tolerate `nvidia.com/gpu` in its pod spec. - -### Cilium Startup Taint - -Per the Cilium [docs](https://docs.cilium.io/en/stable/installation/taints/#taint-effects), it's recommended to place a taint of `node.cilium.io/agent-not-ready=true:NoExecute` on nodes to allow Cilium to configure networking prior to other pods starting. This can be accomplished via the use of Karpenter `startupTaints`. These taints are placed on the node, but pods aren't required to tolerate these taints to be considered for provisioning. 
- -```yaml -apiVersion: karpenter.sh/v1alpha5 -kind: Provisioner -metadata: - name: cilium-startup -spec: - consolidation: - enabled: true - startupTaints: - - key: node.cilium.io/agent-not-ready - value: "true" - effect: NoExecute -``` diff --git a/website/content/en/v0.31/concepts/settings.md b/website/content/en/v0.31/concepts/settings.md deleted file mode 100644 index cfe4416f652d..000000000000 --- a/website/content/en/v0.31/concepts/settings.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: "Settings" -linkTitle: "Settings" -weight: 5 -description: > - Configure Karpenter ---- - -There are two main configuration mechanisms that can be used to configure Karpenter: Environment Variables / CLI parameters to the controller and webhook binaries and the `karpenter-global-settings` config-map. - -## Environment Variables / CLI Flags - -[comment]: <> (the content below is generated from hack/docs/configuration_gen_docs.go) - -| Environment Variable | CLI Flag | Description | -|--|--|--| -| DISABLE_WEBHOOK | \-\-disable-webhook | Disable the admission and validation webhooks (default = false)| -| ENABLE_PROFILING | \-\-enable-profiling | Enable the profiling on the metric endpoint (default = false)| -| HEALTH_PROBE_PORT | \-\-health-probe-port | The port the health probe endpoint binds to for reporting controller health (default = 8081)| -| KARPENTER_SERVICE | \-\-karpenter-service | The Karpenter Service name for the dynamic webhook certificate| -| KUBE_CLIENT_BURST | \-\-kube-client-burst | The maximum allowed burst of queries to the kube-apiserver (default = 300)| -| KUBE_CLIENT_QPS | \-\-kube-client-qps | The smoothed rate of qps to kube-apiserver (default = 200)| -| LEADER_ELECT | \-\-leader-elect | Start leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability. (default = true)| -| MEMORY_LIMIT | \-\-memory-limit | Memory limit on the container running the controller. The GC soft memory limit is set to 90% of this value. (default = -1)| -| METRICS_PORT | \-\-metrics-port | The port the metric endpoint binds to for operating metrics about the controller itself (default = 8000)| -| WEBHOOK_PORT | \-\-webhook-port | The port the webhook endpoint binds to for validation and mutation of resources (default = 8443)| - -[comment]: <> (end docs generated content from hack/docs/configuration_gen_docs.go) - -## ConfigMap - -Karpenter installs a default configuration via its Helm chart that should work for most. Additional configuration can be performed by editing the `karpenter-global-settings` configmap within the namespace that Karpenter was installed in. - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: karpenter-global-settings - namespace: karpenter -data: - # The maximum length of a batch window. The longer this is, the more pods we can consider for provisioning at one - # time which usually results in fewer but larger nodes. - batchMaxDuration: 10s - # The maximum amount of time with no new pending pods that if exceeded ends the current batching window. If pods arrive - # faster than this time, the batching window will be extended up to the maxDuration. If they arrive slower, the pods - # will be batched separately. - batchIdleDuration: 1s - # Role to assume for calling AWS services. - aws.assumeRoleARN: arn:aws:iam::111222333444:role/examplerole - # Duration of assumed credentials in minutes. Default value is 15 minutes. Not used unless aws.assumeRole set. 
-  aws.assumeRoleDuration: 15m
-  # Cluster CA bundle for nodes to use for TLS connections with the API server. If not set, this is taken from the controller's TLS configuration.
-  aws.clusterCABundle: "LS0tLS1..."
-  # [REQUIRED] The kubernetes cluster name for resource discovery
-  aws.clusterName: karpenter-cluster
-  # The external kubernetes cluster endpoint for new nodes to connect with. If not specified, will discover the cluster endpoint using the DescribeCluster API
-  aws.clusterEndpoint: https://00000000000000000000000000000000.gr7.us-west-2.eks.amazonaws.com
-  # The default instance profile to use when provisioning nodes
-  aws.defaultInstanceProfile: karpenter-instance-profile
-  # If true, then instances that support pod ENI will report a vpc.amazonaws.com/pod-eni resource
-  aws.enablePodENI: "false"
-  # Indicates whether new nodes should use ENI-based pod density. DEPRECATED: Use `.spec.kubeletConfiguration.maxPods` to set pod density on a per-provisioner basis
-  aws.enableENILimitedPodDensity: "true"
-  # If true, then assume we can't reach AWS services which don't have a VPC endpoint
-  # This also has the effect of disabling look-ups to the AWS pricing endpoint
-  aws.isolatedVPC: "false"
-  # The VM memory overhead as a percent that will be subtracted
-  # from the total memory for all instance types
-  aws.vmMemoryOverheadPercent: "0.075"
-  # Interruption handling is disabled if aws.interruptionQueueName is not specified. Enabling interruption handling may
-  # require additional permissions on the controller service account. Additional permissions are outlined in the docs
-  aws.interruptionQueueName: karpenter-cluster
-  # Global tags are specified by including a JSON object of string to string from tag key to tag value
-  aws.tags: '{"custom-tag1-key": "custom-tag-value", "custom-tag2-key": "custom-tag-value"}'
-  # Reserved ENIs are not included in the calculations for max-pods or kube-reserved
-  # This is most often used in the VPC CNI custom networking setup https://docs.aws.amazon.com/eks/latest/userguide/cni-custom-network.html
-  aws.reservedENIs: "1"
-```
-
-### Feature Gates
-Karpenter uses [feature gates](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features). You can add a feature gate's ConfigKey to the `karpenter-global-settings` ConfigMap above with the desired value.
-
-| Feature | Default | Config Key | Stage | Since | Until |
-|---------|---------|---------------------------|-------|---------|-------|
-| Drift | false | featureGates.driftEnabled | Alpha | v0.21.0 | |
-
-
-### Batching Parameters
-
-The batching parameters control how Karpenter batches an incoming stream of pending pods. Reducing these values may trade off a slightly faster time from pending pod to node launch, in exchange for launching smaller nodes. Increasing the values can do the inverse. Karpenter provides reasonable defaults for these values, but if you have specific knowledge about your workloads you can tweak these parameters to match the expected rate of incoming pods.
-
-For a standard deployment scale-up, pods arrive at a rate governed by the QPS setting of the `kube-controller-manager`, and the default values are typically fine. These settings are intended for use cases where other systems may create large numbers of pods over a period of many seconds or minutes and there is a desire to batch them together.
-
-#### `batchIdleDuration`
-
-The `batchIdleDuration` is the period of time that a new pending pod extends the current batching window.
This can be increased to handle scenarios where pods arrive slower than one second apart, but it would be preferable if they were batched together onto a single larger node.
-
-This value is expressed as a string value like `10s`, `1m` or `2h45m`. The valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.
-
-#### `batchMaxDuration`
-
-The `batchMaxDuration` is the maximum period of time a batching window can be extended to. Increasing this value will allow the maximum batch window size to increase to collect more pending pods into a single batch at the expense of a longer delay from when the first pending pod was created.
-
-This value is expressed as a string value like `10s`, `1m` or `2h45m`. The valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.
-
-### AWS Parameters
-
-#### `aws.tags`
-
-Global tags are applied to __all__ AWS infrastructure resources deployed by Karpenter. These resources include:
-
-- Launch Templates
-- Volumes
-- Instances
-
-Tags are specified by including a JSON object of string to string from tag key to tag value.
-
-```yaml
-  aws.tags: '{"custom-tag1-key": "custom-tag-value", "custom-tag2-key": "custom-tag-value"}'
-```
-
-{{% alert title="Note" color="primary" %}}
-Since you can specify tags at the global level and in the `AWSNodeTemplate` resource, if a key is specified in both locations, the `AWSNodeTemplate` tag value will override the global tag.
-{{% /alert %}}
diff --git a/website/content/en/v0.31/concepts/threat-model.md b/website/content/en/v0.31/concepts/threat-model.md
deleted file mode 100644
index 3ae71fb51861..000000000000
--- a/website/content/en/v0.31/concepts/threat-model.md
+++ /dev/null
@@ -1,102 +0,0 @@
----
-title: "Threat Model"
-linkTitle: "Threat Model"
-weight: 999
----
-
-Karpenter observes Kubernetes pods and launches nodes in response to those pods’ scheduling constraints. Karpenter does not perform the actual scheduling and instead waits for [kube-scheduler](https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/) to schedule the pods.
-
-When running in AWS, Karpenter is typically installed onto EC2 instances that run in EKS Clusters. Karpenter relies on public-facing AWS APIs and standard IAM permissions. Karpenter uses AWS-SDK-Go v1, and AWS advises that credentials are provided using [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html).
-
-
-## Architecture & Actors
-
-1. **Cluster Operator**: An identity that installs and configures Karpenter in a Kubernetes cluster, and configures Karpenter's cloud identity and permissions.
-2. **Cluster Developer**: An identity that can create pods, typically through Deployments, DaemonSets, or other pod-controller types.
-3. **Karpenter Controller:** The Karpenter application pod that operates inside a cluster.
-
-![threat-model](/threat-model.png)
-
-## Capabilities
-
-### Cluster Operator
-
-The Cluster Operator has full control over Kubernetes resources to install and configure Karpenter, its CRDs, and Provisioners and NodeTemplates. The Cluster Operator has privileges to manage the cloud identities and permissions for Nodes, and the cloud identity and permissions for Karpenter.
-
-### Cluster Developer
-
-A Cluster Developer has the ability to create pods via Deployments, ReplicaSets, StatefulSets, Jobs, etc. This assumes that the Cluster Developer cannot modify the Karpenter pod or launch pods using Karpenter’s service account and gain access to Karpenter’s IAM role.
- -### Karpenter Controller - -Karpenter has permissions to create and manage cloud instances. Karpenter has Kubernetes API permissions to create, update, and remove nodes, as well as evict pods. For a full list of the permissions, see the RBAC rules in the helm chart template. Karpenter also has AWS IAM permissions to create instances with IAM roles. - -* [aggregate-clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.31.0/charts/karpenter/templates/aggregate-clusterrole.yaml) -* [clusterrole-core.yaml](https://github.com/aws/karpenter/blob/v0.31.0/charts/karpenter/templates/clusterrole-core.yaml) -* [clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.31.0/charts/karpenter/templates/clusterrole.yaml) -* [rolebinding.yaml](https://github.com/aws/karpenter/blob/v0.31.0/charts/karpenter/templates/rolebinding.yaml) -* [role.yaml](https://github.com/aws/karpenter/blob/v0.31.0/charts/karpenter/templates/role.yaml) - -## Assumptions - -| Category | Assumption | Comment | -| --- | --- | --- | -| Generic | The Karpenter pod is operated on a node in the cluster, and uses a Service Account for authentication to the Kubernetes API | Cluster Operators may want to isolate the node running the Karpenter pod to a system-pool of nodes to mitigate the possibility of container breakout with Karpenter’s permissions. | -| Generic | Cluster Developer does not have any Kubernetes permissions to manage Karpenter running in the cluster (The deployment, pods, clusterrole, etc) | | -| Generic | Restrictions on the fields of pods a Cluster Developer can create are out of scope. | Cluster Operators can use policy frameworks to enforce restrictions on Pod capabilities | -| Generic | No sensitive data is included in non-Secret resources in the Kubernetes API. The Karpenter controller has the ability to list all pods, nodes, deployments, and many other pod-controller and storage resource types. | Karpenter does not have permission to list/watch cluster-wide ConfigMaps or Secrets | -| Generic | Karpenter has permissions to create, modify, and delete nodes from the cluster, and evict any pod. | Cluster Operators running applications with varying security profiles in the same cluster may want to configure dedicated nodes and scheduling rules for Karpenter to mitigate potential container escapes from other containers | -| AWS-Specific | The Karpenter IAM policy is encoded in the GitHub repo. Any additional permissions possibly granted to that role by the administrator are out of scope | | -| AWS-Specific | The Karpenter pod uses IRSA for AWS credentials | Setup of IRSA is out of scope for this document | - -## Generic Threats and Mitigations - -### Threat: Cluster Developer can influence creation of an arbitrary number of nodes - -**Background**: Karpenter creates new instances based on the count of pending pods. - -**Threat**: A Cluster Developer attempts to have Karpenter create more instances than intended by creating a large number of pods or by using anti-affinity to schedule one pod per node. - -**Mitigation**: In addition to [Kubernetes resource limits](https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota), Cluster Operators can [configure limits on a Provisioner](https://karpenter.sh/preview/concepts/provisioners/#speclimitsresources) to limit the total amount of memory, CPU, or other resources provisioned across all nodes. 
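A minimal sketch of such a limit, assuming the v1alpha5 `Provisioner` API documented above (the provisioner name, limit values, and `providerRef` target here are illustrative, not prescriptive):

```bash
# Apply a provisioner whose total managed capacity is capped; once the sum of
# launched resources reaches these limits, further provisioning stops.
cat <<EOF | kubectl apply -f -
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
  name: capped
spec:
  providerRef:
    name: default
  limits:
    resources:
      cpu: "100"
      memory: 400Gi
EOF
```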
-
-## AWS-Specific Threats
-
-### Threat: Using EC2 CreateTag/DeleteTag Permissions to Orchestrate Machine Creation/Deletion
-
-**Background**: As of v0.28.0, Karpenter creates a mapping between CloudProvider machines and CustomResources in the cluster for capacity tracking. To ensure this mapping is consistent, Karpenter utilizes the following tag keys:
-
-* `karpenter.sh/managed-by`
-* `karpenter.sh/provisioner-name`
-* `kubernetes.io/cluster/${CLUSTER_NAME}`
-
-Any user that has the ability to Create/Delete tags on CloudProvider machines will have the ability to orchestrate Karpenter to Create/Delete CloudProvider machines as a side effect.
-
-In addition, as of v0.29.0, Karpenter will drift on Security Groups and Subnets. If a user has the Create/Delete tags permission for either of these resources, they can orchestrate Karpenter to Create/Delete CloudProvider machines as a side effect.
-
-**Threat:** A Cluster Operator attempts to create or delete a tag on a resource discovered by Karpenter. An actor with the ability to create or delete tags can effectively create or delete CloudProvider machines associated with the tagged resources.
-
-**Mitigation**: Cluster Operators should [enforce tag-based IAM policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html) on these tags against any EC2 instance resource (`i-*`) for any users that might have [CreateTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html)/[DeleteTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteTags.html) permissions but should not have [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html)/[TerminateInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TerminateInstances.html) permissions.
-
-### Threat: Launching EC2 instances with IAM roles not intended for Karpenter nodes
-
-**Background**: Many IAM roles in an AWS account may trust the EC2 service principal. IAM administrators must grant the `iam:PassRole` permission to IAM principals to allow those principals in the account to launch instances with specific roles.
-
-**Threat:** A Cluster Operator attempts to create a Node Template with an IAM role not intended for Karpenter.
-
-**Mitigation**: Cluster Operators must enumerate the roles in the resource section of the IAM policy granted to the Karpenter role for the `iam:PassRole` action.
-
-### Threat: Karpenter can be used to create or terminate EC2 instances outside of the cluster
-
-**Background**: EC2 instances can exist in an AWS account outside of the Kubernetes cluster.
-
-**Threat**: An actor who obtains control of the Karpenter pod’s IAM role may be able to create or terminate EC2 instances not part of the Kubernetes cluster managed by Karpenter.
-
-**Mitigation**: Karpenter creates instances with tags, several of which can be enforced in the IAM policy granted to the Karpenter IAM role that restrict the instances Karpenter can terminate. One tag can require that the instance was provisioned by a Karpenter controller, another can include a cluster name to mitigate any termination between two clusters with Karpenter in the same account. Cluster Operators also can restrict the region to prevent two clusters in the same account with the same name in different regions.
-
-### Threat: Karpenter launches an EC2 instance using an unintended AMI
-
-**Background**: Cluster Developers can create Node Templates that refer to an AMI by metadata, such as a name rather than an AMI resource ID.
-
-**Threat:** A threat actor creates a public AMI with the same name as a customer’s AMI in an attempt to get Karpenter to select the threat actor’s AMI instead of the intended AMI.
-
-**Mitigation**: When selecting AMIs by name or tags, Karpenter defaults to adding an ownership filter of `self,amazon` so AMIs external to the account are not used.
diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/_index.md b/website/content/en/v0.31/getting-started/getting-started-with-karpenter/_index.md
deleted file mode 100644
index 6d10f6f1d482..000000000000
--- a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/_index.md
+++ /dev/null
@@ -1,171 +0,0 @@
-
----
-title: "Getting Started with Karpenter"
-linkTitle: "Getting Started with Karpenter"
-weight: 10
-description: >
-  Set up a cluster and add Karpenter
----
-
-Karpenter automatically provisions new nodes in response to unschedulable pods. Karpenter does this by observing events within the Kubernetes cluster, and then sending commands to the underlying cloud provider.
-
-This guide shows how to get started with Karpenter by creating a Kubernetes cluster and installing Karpenter.
-To use Karpenter, you must be running a supported Kubernetes cluster on a supported cloud provider.
-Currently, only EKS on AWS is supported.
-
-## Create a cluster and add Karpenter
-
-This guide uses `eksctl` to create the cluster.
-It should take less than 1 hour to complete, and cost less than $0.25.
-Follow the clean-up instructions to reduce any charges.
-
-### 1. Install utilities
-
-Karpenter is installed in clusters with a Helm chart.
-
-Karpenter requires cloud provider permissions to provision nodes; for AWS, IAM
-Roles for Service Accounts (IRSA) should be used. IRSA permits Karpenter
-(within the cluster) to make privileged requests to AWS (as the cloud provider)
-via a ServiceAccount.
-
-Install these tools before proceeding:
-
-1. [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html)
-2. `kubectl` - [the Kubernetes CLI](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/)
-3. `eksctl` - [the CLI for AWS EKS](https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html)
-4. `helm` - [the package manager for Kubernetes](https://helm.sh/docs/intro/install/)
-
-[Configure the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html)
-with a user that has sufficient privileges to create an EKS cluster. Verify that the CLI can
-authenticate properly by running `aws sts get-caller-identity`.
-
-### 2. Set environment variables
-
-After setting up the tools, set the Karpenter version number:
-
-```bash
-export KARPENTER_VERSION=v0.31.4
-```
-
-Then set the following environment variable:
-
-{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step01-config.sh" language="bash"%}}
-
-{{% alert title="Warning" color="warning" %}}
-If you open a new shell to run steps in this procedure, you need to set some or all of the environment variables again.
-To remind yourself of these values, type:
-
-```bash
-echo $KARPENTER_VERSION $CLUSTER_NAME $AWS_DEFAULT_REGION $AWS_ACCOUNT_ID $TEMPOUT
-```
-
-{{% /alert %}}
-
-
-### 3. Create a Cluster
-
-Create a basic cluster with `eksctl`.
-The following cluster configuration will:
-
-* Use CloudFormation to set up the infrastructure needed by the EKS cluster.
-* Create a Kubernetes service account and AWS IAM Role, and associate them using IRSA to let Karpenter launch instances.
-* Add the Karpenter node role to the aws-auth configmap to allow nodes to connect.
-* Use [AWS EKS managed node groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for the kube-system and karpenter namespaces. Uncomment fargateProfiles settings (and comment out managedNodeGroups settings) to use Fargate for both namespaces instead.
-* Set the KARPENTER_IAM_ROLE_ARN variable.
-* Create a role to allow spot instances.
-* Run Helm to install Karpenter.
-
-{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh" language="bash"%}}
-
-{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step06-add-spot-role.sh" language="bash"%}}
-
-{{% alert title="Windows Support Notice" color="warning" %}}
-In order to run Windows workloads, Windows support should be enabled in your EKS Cluster.
-See [Enabling Windows support](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support) to learn more.
-{{% /alert %}}
-
-### 4. Install Karpenter
-
-{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh" language="bash"%}}
-
-{{% alert title="Warning" color="warning" %}}
-Karpenter creates a mapping between CloudProvider machines and CustomResources in the cluster for capacity tracking. To ensure this mapping is consistent, Karpenter utilizes the following tag keys:
-
-* `karpenter.sh/managed-by`
-* `karpenter.sh/provisioner-name`
-* `kubernetes.io/cluster/${CLUSTER_NAME}`
-
-Because Karpenter takes this dependency, any user that has the ability to Create/Delete these tags on CloudProvider machines will have the ability to orchestrate Karpenter to Create/Delete CloudProvider machines as a side effect. We recommend that you [enforce tag-based IAM policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html) on these tags against any EC2 instance resource (`i-*`) for any users that might have [CreateTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html)/[DeleteTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteTags.html) permissions but should not have [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html)/[TerminateInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TerminateInstances.html) permissions.
-{{% /alert %}}
-
-### 5. Create Provisioner
-
-A single Karpenter provisioner is capable of handling many different pod
-shapes. Karpenter makes scheduling and provisioning decisions based on pod
-attributes such as labels and affinity. In other words, Karpenter eliminates
-the need to manage many different node groups.
-
-Create a default provisioner using the command below.
-This provisioner uses `securityGroupSelector` and `subnetSelector` to discover resources used to launch nodes.
-We applied the tag `karpenter.sh/discovery` in the `eksctl` command above.
-Depending on how these resources are shared between clusters, you may need to use different tagging schemes.
-
-The `consolidation` value configures Karpenter to reduce cluster cost by removing and replacing nodes. As a result, consolidation will terminate any empty nodes on the cluster.
This behavior can be disabled by leaving the value undefined or setting `consolidation.enabled` to `false`.
-
-Review the [provisioner CRD]({{}}) for more information. For example,
-`ttlSecondsUntilExpired` configures Karpenter to terminate nodes when a maximum age is reached.
-
-Note: This provisioner will create capacity as long as the sum of all created capacity is less than the specified limit.
-
-{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step12-add-provisioner.sh" language="bash"%}}
-
-Karpenter is now active and ready to begin provisioning nodes.
-
-## First Use
-
-Create some pods using a deployment and watch Karpenter provision nodes in response.
-
-### Scale up deployment
-
-This deployment uses the [pause image](https://www.ianlewis.org/en/almighty-pause-container) and starts with zero replicas.
-
-{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step13-automatic-node-provisioning.sh" language="bash"%}}
-
-### Scale down deployment
-
-Now, delete the deployment. After a short amount of time, Karpenter should terminate the empty nodes due to consolidation.
-
-{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step14-deprovisioning.sh" language="bash"%}}
-
-## Add optional monitoring with Grafana
-
-This section describes optional ways to configure Karpenter to enhance its capabilities.
-In particular, the following commands deploy a Prometheus and Grafana stack that is suitable for this guide but does not include persistent storage or other configurations that would be necessary for monitoring a production deployment of Karpenter.
-This deployment includes two Karpenter dashboards that are automatically onboarded to Grafana. They provide a variety of visualization examples on Karpenter metrics.
-
-{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh" language="bash"%}}
-
-The Grafana instance may be accessed using port forwarding.
-
-{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step10-add-grafana-port-forward.sh" language="bash"%}}
-
-The new stack has only one user, `admin`, and the password is stored in a secret. The following command will retrieve the password.
-
-{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step11-grafana-get-password.sh" language="bash"%}}
-
-## Cleanup
-
-### Delete Karpenter nodes manually
-
-If you delete a node with kubectl, Karpenter will gracefully cordon, drain,
-and shut down the corresponding instance. Under the hood, Karpenter adds a
-finalizer to the node object, which blocks deletion until all pods are
-drained and the instance is terminated. Keep in mind, this only works for
-nodes provisioned by Karpenter.
-
-{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step15-delete-node.sh" language="bash"%}}
-
-### Delete the cluster
-To avoid additional charges, remove the demo infrastructure from your AWS account.
-
-{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step16-cleanup.sh" language="bash"%}}
diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step12-add-provisioner.sh b/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step12-add-provisioner.sh
deleted file mode 100755
index 2d487c0449dc..000000000000
--- a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step12-add-provisioner.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-cat << EOF > controller-trust-policy.json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {
-        "Federated": "arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDC_ENDPOINT#*//}"
-      },
-      "Action": "sts:AssumeRoleWithWebIdentity",
-      "Condition": {
-        "StringEquals": {
-          "${OIDC_ENDPOINT#*//}:aud": "sts.amazonaws.com",
-          "${OIDC_ENDPOINT#*//}:sub": "system:serviceaccount:karpenter:karpenter"
-        }
-      }
-    }
-  ]
-}
-EOF
-
-aws iam create-role --role-name KarpenterControllerRole-${CLUSTER_NAME} \
-    --assume-role-policy-document file://controller-trust-policy.json
-
-cat << EOF > controller-policy.json
-{
-  "Statement": [
-    {
-      "Action": [
-        "ssm:GetParameter",
-        "ec2:DescribeImages",
-        "ec2:RunInstances",
-        "ec2:DescribeSubnets",
-        "ec2:DescribeSecurityGroups",
-        "ec2:DescribeLaunchTemplates",
-        "ec2:DescribeInstances",
-        "ec2:DescribeInstanceTypes",
-        "ec2:DescribeInstanceTypeOfferings",
-        "ec2:DescribeAvailabilityZones",
-        "ec2:DeleteLaunchTemplate",
-        "ec2:CreateTags",
-        "ec2:CreateLaunchTemplate",
-        "ec2:CreateFleet",
-        "ec2:DescribeSpotPriceHistory",
-        "pricing:GetProducts"
-      ],
-      "Effect": "Allow",
-      "Resource": "*",
-      "Sid": "Karpenter"
-    },
-    {
-      "Action": "ec2:TerminateInstances",
-      "Condition": {
-        "StringLike": {
-          "ec2:ResourceTag/karpenter.sh/provisioner-name": "*"
-        }
-      },
-      "Effect": "Allow",
-      "Resource": "*",
-      "Sid": "ConditionalEC2Termination"
-    },
-    {
-      "Effect": "Allow",
-      "Action": "iam:PassRole",
-      "Resource": "arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/KarpenterNodeRole-${CLUSTER_NAME}",
-      "Sid": "PassNodeIAMRole"
-    },
-    {
-      "Effect": "Allow",
-      "Action": "eks:DescribeCluster",
-      "Resource": "arn:${AWS_PARTITION}:eks:${AWS_REGION}:${AWS_ACCOUNT_ID}:cluster/${CLUSTER_NAME}",
-      "Sid": "EKSClusterEndpointLookup"
-    }
-  ],
-  "Version": "2012-10-17"
-}
-EOF
-
-aws iam put-role-policy --role-name KarpenterControllerRole-${CLUSTER_NAME} \
-    --policy-name KarpenterControllerPolicy-${CLUSTER_NAME} \
-    --policy-document file://controller-policy.json
diff --git a/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step06-tag-subnets.sh b/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step06-tag-subnets.sh
deleted file mode 100644
index de972ea2bddd..000000000000
--- a/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step06-tag-subnets.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-for NODEGROUP in $(aws eks list-nodegroups --cluster-name ${CLUSTER_NAME} \
-    --query 'nodegroups' --output text); do aws ec2 create-tags \
-    --tags "Key=karpenter.sh/discovery,Value=${CLUSTER_NAME}" \
-    --resources $(aws eks describe-nodegroup --cluster-name ${CLUSTER_NAME} \
-    --nodegroup-name $NODEGROUP --query 'nodegroup.subnets' --output text )
-done
diff --git a/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step07-tag-security-groups.sh b/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step07-tag-security-groups.sh
deleted file mode 100644
index 397e40904cee..000000000000
--- a/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step07-tag-security-groups.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-NODEGROUP=$(aws eks list-nodegroups --cluster-name ${CLUSTER_NAME} \
-    --query 'nodegroups[0]' --output text)
-
-LAUNCH_TEMPLATE=$(aws eks describe-nodegroup --cluster-name ${CLUSTER_NAME} \
-    --nodegroup-name ${NODEGROUP} --query 'nodegroup.launchTemplate.{id:id,version:version}' \
-    --output text | tr -s "\t" ",")
-
-# If your EKS setup is configured to use only the Cluster security group, then execute:
-
-SECURITY_GROUPS=$(aws eks describe-cluster \
-    --name ${CLUSTER_NAME} --query "cluster.resourcesVpcConfig.clusterSecurityGroupId" --output text)
-
-# If your setup uses the security groups in the Launch template of a managed node group, then:
-
-SECURITY_GROUPS=$(aws ec2 describe-launch-template-versions \
-    --launch-template-id ${LAUNCH_TEMPLATE%,*} --versions ${LAUNCH_TEMPLATE#*,} \
-    --query 'LaunchTemplateVersions[0].LaunchTemplateData.[NetworkInterfaces[0].Groups||SecurityGroupIds]' \
-    --output text)
-
-aws ec2 create-tags \
-    --tags "Key=karpenter.sh/discovery,Value=${CLUSTER_NAME}" \
-    --resources ${SECURITY_GROUPS}
diff --git a/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step09-generate-chart.sh b/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step09-generate-chart.sh
deleted file mode 100644
index f2bc603e0eeb..000000000000
--- a/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step09-generate-chart.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-helm template karpenter oci://public.ecr.aws/karpenter/karpenter --version ${KARPENTER_VERSION} --namespace karpenter \
-    --set settings.aws.defaultInstanceProfile=KarpenterNodeInstanceProfile-${CLUSTER_NAME} \
-    --set settings.aws.clusterName=${CLUSTER_NAME} \
-    --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/KarpenterControllerRole-${CLUSTER_NAME}" \
-    --set controller.resources.requests.cpu=1 \
-    --set controller.resources.requests.memory=1Gi \
-    --set controller.resources.limits.cpu=1 \
-    --set controller.resources.limits.memory=1Gi > karpenter.yaml
diff --git a/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step10-deploy.sh b/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step10-deploy.sh
deleted file mode 100644
index d66941f52ed0..000000000000
--- a/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step10-deploy.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-kubectl create namespace karpenter
-kubectl create -f \
-    https://raw.githubusercontent.com/aws/karpenter-provider-aws/${KARPENTER_VERSION}/pkg/apis/crds/karpenter.sh_provisioners.yaml
-kubectl create -f \
-    https://raw.githubusercontent.com/aws/karpenter-provider-aws/${KARPENTER_VERSION}/pkg/apis/crds/karpenter.k8s.aws_awsnodetemplates.yaml
-kubectl create -f \
-    https://raw.githubusercontent.com/aws/karpenter-provider-aws/${KARPENTER_VERSION}/pkg/apis/crds/karpenter.sh_machines.yaml
-kubectl apply -f karpenter.yaml
diff --git a/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step11-create-provisioner.sh b/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step11-create-provisioner.sh
deleted file mode 100644
index 76642ce51122..000000000000
--- a/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step11-create-provisioner.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-cat <
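# NOTE (editorial): the heredoc body of step11-create-provisioner.sh was truncated
# above ("cat <"), and the verbatim script is not recoverable here. A representative
# sketch of what this step does, based on the selector tagging and consolidation
# behavior described in the surrounding getting-started text (field values are
# illustrative, not the original file contents), would be:
cat <<EOF | kubectl apply -f -
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
  name: default
spec:
  consolidation:
    enabled: true
  providerRef:
    name: default
EOF

cat <<EOF | kubectl apply -f -
apiVersion: karpenter.k8s.aws/v1alpha1
kind: AWSNodeTemplate
metadata:
  name: default
spec:
  subnetSelector:
    karpenter.sh/discovery: "${CLUSTER_NAME}"
  securityGroupSelector:
    karpenter.sh/discovery: "${CLUSTER_NAME}"
EOF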
- Learn about upgrading Karpenter
----
-
-Karpenter is a controller that runs in your cluster, but it is not tied to a specific Kubernetes version, as the Cluster Autoscaler is.
-Use your existing upgrade mechanisms to upgrade your core add-ons in Kubernetes and keep Karpenter up to date on bug fixes and new features.
-
-To make upgrading easier, we aim to minimize the introduction of breaking changes. Start by checking the compatibility matrix below.
-
-## Compatibility Matrix
-
-[comment]: <> (the content below is generated from hack/docs/compataiblitymetrix_gen_docs.go)
-
-| KUBERNETES | 1.23 | 1.24 | 1.25 | 1.26 | 1.27 | 1.28 |
-|------------|---------|---------|---------|---------|---------|--------|
-| karpenter | 0.21.x+ | 0.21.x+ | 0.25.x+ | 0.28.x+ | 0.28.x+ | 0.31.x |
-
-[comment]: <> (end docs generated content from hack/docs/compataiblitymetrix_gen_docs.go)
-
-{{% alert title="Note" color="warning" %}}
-Karpenter currently does not support the following [new `topologySpreadConstraints` keys](https://kubernetes.io/blog/2023/04/17/fine-grained-pod-topology-spread-features-beta/), promoted to beta in Kubernetes 1.27:
-- `matchLabelKeys`
-- `nodeAffinityPolicy`
-- `nodeTaintsPolicy`
-
-For more information on Karpenter's support for these keys, view [this tracking issue](https://github.com/aws/karpenter-core/issues/430).
-{{% /alert %}}
-
-## Compatibility issues
-
-To make upgrading easier, we aim to minimize the introduction of breaking changes with the following components:
-
-* Provisioner API
-* Helm Chart
-
-We try to maintain compatibility with:
-
-* The application itself
-* The documentation of the application
-
-When we introduce a breaking change, we do so only as described in this document.
-
-Karpenter follows [Semantic Versioning 2.0.0](https://semver.org/) in its stable release versions, while in
-major version zero (v0.y.z) [anything may change at any time](https://semver.org/#spec-item-4).
-However, to further protect users during this phase we will only introduce breaking changes in minor releases (releases that increment y in x.y.z).
-Note this does not mean every minor upgrade has a breaking change, as we will also increment the
-minor version when we release a new feature.
-
-Users should therefore check to see if there is a breaking change every time they are upgrading to a new minor version.
-
-### Custom Resource Definition (CRD) Upgrades
-
-Karpenter ships with a few Custom Resource Definitions (CRDs). These CRDs are published:
-* As an independent helm chart [karpenter-crd](https://gallery.ecr.aws/karpenter/karpenter-crd) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter-crd) that can be used by Helm to manage the lifecycle of these CRDs.
-  * To upgrade or install `karpenter-crd` run:
-    ```
-    helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version vx.y.z --namespace karpenter --create-namespace
-    ```
-
-{{% alert title="Note" color="warning" %}}
-If you get the error `invalid ownership metadata; label validation error:` while installing the `karpenter-crd` chart from an older version of Karpenter, follow the [Troubleshooting Guide]({{}}) for details on how to resolve these errors.
-{{% /alert %}}
-
-* As part of the helm chart [karpenter](https://gallery.ecr.aws/karpenter/karpenter) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter/crds).
Helm [does not manage the lifecycle of CRDs using this method](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/); the tool will only install the CRDs during the first installation of the helm chart. Subsequent chart upgrades will not add or remove CRDs, even if the CRDs have changed. When CRDs are changed, we will make a note in the version's upgrade guide.
-
-In general, you can reapply the CRDs in the `crds` directory of the Karpenter helm chart:
-
-```shell
-kubectl apply -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.31.4/pkg/apis/crds/karpenter.sh_provisioners.yaml
-kubectl apply -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.31.4/pkg/apis/crds/karpenter.sh_machines.yaml
-kubectl apply -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.31.4/pkg/apis/crds/karpenter.k8s.aws_awsnodetemplates.yaml
-```
-
-### How Do We Break Incompatibility?
-
-When there is a breaking change we will:
-
-* Increment the minor version when in major version 0
-* Add a permanent separate section named `upgrading to vx.y.z+` under [released upgrade notes](#released-upgrade-notes)
-  clearly explaining the breaking change and what needs to be done on the user side to ensure a safe upgrade
-* Add the sentence “This is a breaking change, please refer to the above link for upgrade instructions” to the top of the release notes and in all our announcements
-
-### How Do We Find Incompatibilities?
-
-Besides the peer review process for all changes to the code base, we also do the following to find
-incompatibilities:
-* (To be implemented) To check the compatibility of the application, we will automate tests for installing, uninstalling, upgrading from an older version, and downgrading to an older version
-* (To be implemented) To check the compatibility of the documentation with the application, we will turn the commands in our documentation into scripts that we can automatically run
-
-### Security Patches
-
-While we are in major version 0 we will not release security patches to older versions.
-Rather, we provide the patches in the latest versions.
-When at major version 1 we will have an EOL (end of life) policy where we provide security patches
-for a subset of older versions and deprecate the others.
-
-## Release Types
-
-Karpenter offers three types of releases. This section explains the purpose of each release type and how the images for each release type are tagged in our [public image repository](https://gallery.ecr.aws/karpenter).
-
-### Stable Releases
-
-Stable releases are the most reliable releases, published on a weekly cadence. Stable releases are our only recommended versions for production environments.
-Sometimes we skip a stable release because we find instability or problems that need to be fixed before having a stable release.
-Stable releases are tagged with Semantic Versioning. For example `v0.13.0`.
-
-### Release Candidates
-
-We consider having release candidates for major and important minor versions. Our release candidates are tagged like `vx.y.z-rc.0`, `vx.y.z-rc.1`. The release candidate will then graduate to `vx.y.z` as a normal stable release.
-By adopting this practice we allow our users who are early adopters to test out new releases before they are available to the wider community, thereby providing us with early feedback resulting in more stable releases.
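If you want to try a release candidate in a test cluster, the command mirrors the `karpenter-crd` install shown earlier. This is a sketch assuming the same OCI registry layout; the exact `-rc.N` tag must come from the release announcement:

```bash
# Install a release-candidate tag into a non-production cluster for early testing.
helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter \
  --version vx.y.z-rc.0 --namespace karpenter --create-namespace
```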
-
-### Snapshot Releases
-
-We release a snapshot release for every commit that gets merged into [`aws/karpenter-provider-aws`](https://www.github.com/aws/karpenter-provider-aws). This enables users to immediately try a new feature or fix right after it's merged rather than waiting days or weeks for a release.
-
-Snapshot releases are not made available in the same public ECR repository as other release types; they are instead published to a separate private ECR repository.
-Helm charts are published to `oci://{{< param "snapshot_repo.account_id" >}}.dkr.ecr.{{< param "snapshot_repo.region" >}}.amazonaws.com/karpenter/snapshot/karpenter` and are tagged with the git commit hash prefixed by the Karpenter major version (e.g. `v0-fc17bfc89ebb30a3b102a86012b3e3992ec08adf`).
-Anyone with an AWS account can pull from this repository, but must first authenticate:
-
-```bash
-aws ecr get-login-password --region {{< param "snapshot_repo.region" >}} | docker login --username AWS --password-stdin {{< param "snapshot_repo.account_id" >}}.dkr.ecr.{{< param "snapshot_repo.region" >}}.amazonaws.com
-```
-
-{{% alert title="Note" color="warning" %}}
-Snapshot releases are suitable for testing and troubleshooting, but they should not be used in production environments. Snapshot releases are ephemeral and will be removed 90 days after they were published.
-{{% /alert %}}
-
-## Released Upgrade Notes
-
-### Upgrading to v0.31.0+
-
-* Karpenter moved its `securityContext` constraints from pod-wide to applying to the Karpenter container exclusively. If you were previously relying on the pod-wide `securityContext` for your sidecar containers, you will now need to set these values explicitly in your sidecar container configuration.
-
-### Upgrading to v0.30.0+
-
-* Karpenter will now [statically drift]({{}}) on both Provisioner and AWSNodeTemplate Fields. For Provisioner Static Drift, the `karpenter.sh/provisioner-hash` annotation must be present on both the Provisioner and Machine. For AWSNodeTemplate drift, the `karpenter.k8s.aws/nodetemplate-hash` annotation must be present on the AWSNodeTemplate and Machine. Karpenter will not add these annotations to pre-existing nodes, so each of these nodes will need to be recycled one time for the annotations to be added.
-* Karpenter will now fail validation on AWSNodeTemplates and Provisioner `spec.provider` that have `amiSelectors`, `subnetSelectors`, or `securityGroupSelectors` set with a combination of id selectors (`aws-ids`, `aws::ids`) and other selectors.
-* Karpenter now statically sets the `securityContext` at both the pod and container levels and doesn't allow override values to be passed through the helm chart. This change was made to adhere to the [Restricted Pod Security Standard](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted), which follows pod hardening best practices.
-
-{{% alert title="Note" color="primary" %}}
-If you have sidecar containers configured to run alongside Karpenter that cannot tolerate the [pod-wide `securityContext` constraints](https://github.com/aws/karpenter/blob/v0.30.0/charts/karpenter/templates/deployment.yaml#L40), you will need to specify overrides to the sidecar `securityContext` in your deployment.
-{{% /alert %}}
-
-### Upgrading to v0.29.0+
-
-{{% alert title="Warning" color="warning" %}}
-Karpenter `v0.29.1` contains a [file descriptor and memory leak bug](https://github.com/aws/karpenter/issues/4296) that leads to Karpenter getting OOMKilled and restarting at the point that it hits its memory or file descriptor limit. Karpenter `>v0.29.2` fixes this leak.
-{{% /alert %}}
-
-* Karpenter has changed the default metrics service port from 8080 to 8000 and the default webhook service port from 443 to 8443. In `v0.28.0`, the Karpenter pod port was changed to 8000, but referenced the service by name, allowing users to scrape the service at port 8080 for metrics. `v0.29.0` aligns the two ports so that service and pod metrics ports are the same. These ports are set by the `controller.metrics.port` and `webhook.port` helm chart values, so if you have previously set these to non-default values, you may need to update your Prometheus scraper to match these new values.
-
-* Karpenter will now reconcile nodes that are drifted due to their Security Groups or their Subnets. If your AWSNodeTemplate's Security Groups differ from the Security Groups used for an instance, Karpenter will consider it drifted. If the Subnet used by an instance is not contained in the allowed list of Subnets for an AWSNodeTemplate, Karpenter will also consider it drifted.
-  * Since Karpenter uses tags for discovery of Subnets and SecurityGroups, check the [Threat Model]({{}}) to see how to manage this IAM Permission.
-
-### Upgrading to v0.28.0+
-
-{{% alert title="Warning" color="warning" %}}
-Karpenter `v0.28.0` is incompatible with Kubernetes version 1.26+, which can result in additional node scale outs when using `--cloudprovider=external`, which is the default for the EKS Optimized AMI. See: https://github.com/aws/karpenter-core/pull/375. Karpenter `>v0.28.1` fixes this issue and is compatible with Kubernetes version 1.26+.
-{{% /alert %}}
-
-* The `extraObjects` value is now removed from the Helm chart. Having this value in the chart proved to not work in the majority of Karpenter installs and often led to anti-patterns, where the Karpenter resources installed to manage Karpenter's capacity were directly tied to the install of the Karpenter controller deployments. The Karpenter team recommends that, if you want to install Karpenter manifests alongside the Karpenter helm chart, you do so by creating a separate chart for the manifests that declares a dependency on the controller chart.
-* The `aws.nodeNameConvention` setting is now removed from the [`karpenter-global-settings`]({{}}) ConfigMap. Because Karpenter is now driving its orchestration of capacity through Machines, it no longer needs to know the node name, making this setting obsolete. Karpenter ignores configuration that it doesn't recognize in the [`karpenter-global-settings`]({{}}) ConfigMap, so leaving the `aws.nodeNameConvention` in the ConfigMap will simply cause this setting to be ignored.
-* Karpenter now defines a set of "restricted tags" which can't be overridden with custom tagging in the AWSNodeTemplate or in the [`karpenter-global-settings`]({{}}) ConfigMap. If you are currently using any of these tag overrides when tagging your instances, webhook validation will now fail.
These tags include:
-
-  * `karpenter.sh/managed-by`
-  * `karpenter.sh/provisioner-name`
-  * `kubernetes.io/cluster/${CLUSTER_NAME}`
-
-* The following metrics changed their meaning, based on the introduction of the Machine resource:
-  * `karpenter_nodes_terminated`: Use `karpenter_machines_terminated` if you are interested in the reason why a Karpenter machine was deleted. `karpenter_nodes_terminated` now only tracks the count of terminated nodes without any additional labels.
-  * `karpenter_nodes_created`: Use `karpenter_machines_created` if you are interested in the reason why a Karpenter machine was created. `karpenter_nodes_created` now only tracks the count of created nodes without any additional labels.
-  * `karpenter_deprovisioning_replacement_node_initialized_seconds`: This metric has been replaced in favor of `karpenter_deprovisioning_replacement_machine_initialized_seconds`.
-* `v0.28.0` introduces the Machine CustomResource into the `karpenter.sh` API Group and requires this CustomResourceDefinition to run properly. Karpenter now orchestrates its CloudProvider capacity through these in-cluster Machine CustomResources. When performing a scheduling decision, Karpenter will create a Machine, resulting in launching CloudProvider capacity. The kubelet running on the new capacity will then register the node to the cluster shortly after launch.
-  * If you are using Helm to upgrade between versions of Karpenter, note that [Helm does not automate the process of upgrading or installing the new CRDs into your cluster](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations). To install or upgrade the existing CRDs, follow the guidance under the [Custom Resource Definition (CRD) Upgrades]({{< relref "#custom-resource-definition-crd-upgrades" >}}) section of the upgrade guide.
-  * On startup, Karpenter will hydrate Machines into the cluster for existing capacity that it manages. Existing capacity launched by an older version of Karpenter is discovered by finding CloudProvider capacity with the `karpenter.sh/provisioner-name` tag or the `karpenter.sh/provisioner-name` label on nodes.
-* The metrics port for the Karpenter deployment was changed from 8080 to 8000. Users who scrape the pod directly for metrics rather than the service will need to adjust the commands they use to reference port 8000. Any users who scrape metrics from the service should be unaffected.
-
-{{% alert title="Warning" color="warning" %}}
-Karpenter creates a mapping between CloudProvider machines and CustomResources in the cluster for capacity tracking. To ensure this mapping is consistent, Karpenter utilizes the following tag keys:
-
-* `karpenter.sh/managed-by`
-* `karpenter.sh/provisioner-name`
-* `kubernetes.io/cluster/${CLUSTER_NAME}`
-
-Because Karpenter takes this dependency, any user that has the ability to Create/Delete these tags on CloudProvider machines will have the ability to orchestrate Karpenter to Create/Delete CloudProvider machines as a side effect. Check the [Threat Model]({{}}) to see how this might affect you, and ways to mitigate this.
-{{% /alert %}}
-
-{{% alert title="Rolling Back" color="warning" %}}
-If, after upgrading to `v0.28.0+`, a rollback to an older version of Karpenter needs to be performed, Karpenter will continue to function normally, though you will still have the Machine CustomResources on your cluster. You will need to manually delete the Machines and patch out the finalizers to fully complete the rollback.
-
-Karpenter marks CloudProvider capacity as "managed by" a Machine using the `karpenter.sh/managed-by` tag on the CloudProvider machine. It uses this tag to ensure that the Machine CustomResources in the cluster match the CloudProvider capacity managed by Karpenter. If these states don't match, Karpenter will garbage collect the capacity. Because of this, if performing an upgrade, followed by a rollback, followed by another upgrade to `v0.28.0+`, ensure you remove the `karpenter.sh/managed-by` tags from existing capacity; otherwise, Karpenter will deprovision the capacity without a Machine CR counterpart.
-{{% /alert %}}
-
-
-### Upgrading to v0.27.3+
-* The `defaulting.webhook.karpenter.sh` mutating webhook was removed in `v0.27.3`. If you are coming from an older version of Karpenter where this webhook existed and the webhook was not managed by Helm, you may need to delete the stale webhook.
-
-```console
-kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh
-```
-
-### Upgrading to v0.27.0+
-* The Karpenter controller pods now deploy with `kubernetes.io/hostname` self anti-affinity by default. If you are running Karpenter in HA (high-availability) mode and you do not have enough nodes to match the number of pod replicas you are deploying with, you will need to scale out your nodes for Karpenter.
-* The following controller metrics changed and moved under the `controller_runtime` metrics namespace:
-  * `karpenter_metricscraper_...`
-  * `karpenter_deprovisioning_...`
-  * `karpenter_provisioner_...`
-  * `karpenter_interruption_...`
-* The following controller metric names changed, affecting the `controller` label value under `controller_runtime_...` metrics. These metrics include:
-  * `podmetrics` -> `pod_metrics`
-  * `provisionermetrics` -> `provisioner_metrics`
-  * `metricscraper` -> `metric_scraper`
-  * `provisioning` -> `provisioner_trigger`
-  * `node-state` -> `node_state`
-  * `pod-state` -> `pod_state`
-  * `provisioner-state` -> `provisioner_state`
-* The `karpenter_allocation_controller_scheduling_duration_seconds` metric name changed to `karpenter_provisioner_scheduling_duration_seconds`
-
-### Upgrading to v0.26.0+
-* The `karpenter.sh/do-not-evict` annotation no longer blocks node termination when running `kubectl delete node`. This annotation on pods will only block automatic deprovisioning that is considered "voluntary," that is, disruptions that can be avoided. Disruptions that Karpenter deems as "involuntary" and will ignore the `karpenter.sh/do-not-evict` annotation include spot interruption and manual deletion of the node. See [Disabling Deprovisioning]({{}}) for more details.
-* Default resources `requests` and `limits` are removed from Karpenter's controller deployment through the Helm chart. If you have not set custom resource `requests` or `limits` in your helm values and are using Karpenter's defaults, you will now need to set these values in your helm chart deployment.
-* The `controller.image` value in the helm chart has been broken out to a map consisting of `controller.image.repository`, `controller.image.tag`, and `controller.image.digest`. If manually overriding the `controller.image`, you will need to update your values to the new design.
-
-### Upgrading to v0.25.0+
-* Cluster Endpoint can now be automatically discovered. If you are using Amazon Elastic Kubernetes Service (EKS), you can now omit the `clusterEndpoint` field in your configuration.
To allow this automatic discovery, you must add the `eks:DescribeCluster` permission to the Karpenter Controller IAM role.
-
-### Upgrading to v0.24.0+
-* Settings are no longer updated dynamically while Karpenter is running. If you manually make a change to the [`karpenter-global-settings`]({{}}) ConfigMap, you will need to reload the containers by restarting the deployment with `kubectl rollout restart -n karpenter deploy/karpenter`
-* Karpenter no longer filters out instance types internally. Previously, `g2` (not supported by the NVIDIA device plugin) and FPGA instance types were filtered. The only way to filter instance types now is to set requirements on your provisioner or pods using well-known node labels described [here]({{}}). If you are currently using overly broad requirements that allow all of the `g` instance-category, you will want to tighten the requirement, or add an instance-generation requirement.
-* `aws.tags` in the [`karpenter-global-settings`]({{}}) ConfigMap is now a top-level field and expects the value associated with this key to be a JSON object of string to string. This is a change from previous versions where keys were given implicitly by providing the key-value pair `aws.tags.: value` in the ConfigMap.
-
-### Upgrading to v0.22.0+
-* Do not upgrade to this version unless you are on Kubernetes >= v1.21. Karpenter no longer supports Kubernetes v1.20, but now supports Kubernetes v1.25. This change is due to the v1 PDB API, which was introduced in K8s v1.20, and the subsequent removal of the v1beta1 API in K8s v1.25.
-
-### Upgrading to v0.20.0+
-* Prior to v0.20.0, Karpenter would prioritize certain instance type categories absent any requirements in the Provisioner. v0.20.0+ removes prioritizing these instance type categories ("m", "c", "r", "a", "t", "i") in code. Bare Metal and GPU instance types are still deprioritized and only used if no other instance types are compatible with the node requirements. Since Karpenter no longer prioritizes any instance types, if you do not want exotic instance types and are not using the runtime Provisioner defaults, you will need to specify this in the Provisioner.
-
-### Upgrading to v0.19.0+
-* The karpenter webhook and controller containers are combined into a single binary, which requires changes to the helm chart. If your Karpenter installation (helm or otherwise) currently customizes the karpenter webhook, your deployment tooling may require minor changes.
-* Karpenter now supports native interruption handling. If you were previously using the Node Termination Handler for spot interruption handling and health events, you will need to remove the component from your cluster before enabling `aws.interruptionQueueName`. For more details on Karpenter's interruption handling, see the [Interruption Handling Docs]({{< ref "./concepts/deprovisioning/#interruption" >}}). For common questions on the migration process, see the [FAQ]({{< ref "./faq/#interruption-handling" >}})
-* Instance category defaults are now explicitly persisted in the Provisioner, rather than handled implicitly in memory. By default, Provisioners will limit instance category to c,m,r. If any instance type constraints are applied, it will override this default. If you have created Provisioners in the past with unconstrained instance type, family, or category, Karpenter will now more flexibly use instance types than before. If you would like to apply these constraints, they must be included in the Provisioner CRD.
-* Karpenter CRD raw YAML URLs have migrated from `https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.19.3/charts/karpenter/crds/...` to `https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.19.3/pkg/apis/crds/...`. If you reference static Karpenter CRDs or rely on `kubectl replace -f` to apply these CRDs from their remote location, you will need to migrate to the new location.
-* Pods without an ownerRef (also called "controllerless" or "naked" pods) will now be evicted by default during node termination and consolidation. Users can prevent controllerless pods from being voluntarily disrupted by applying the `karpenter.sh/do-not-evict: "true"` annotation to the pods in question.
-* The following CLI options/environment variables are now removed and replaced in favor of pulling settings dynamically from the [`karpenter-global-settings`]({{}}) ConfigMap. See the [Settings docs]({{}}) for more details on configuring the new values in the ConfigMap.
-
-  * `CLUSTER_NAME` -> `settings.aws.clusterName`
-  * `CLUSTER_ENDPOINT` -> `settings.aws.clusterEndpoint`
-  * `AWS_DEFAULT_INSTANCE_PROFILE` -> `settings.aws.defaultInstanceProfile`
-  * `AWS_ENABLE_POD_ENI` -> `settings.aws.enablePodENI`
-  * `AWS_ENI_LIMITED_POD_DENSITY` -> `settings.aws.enableENILimitedPodDensity`
-  * `AWS_ISOLATED_VPC` -> `settings.aws.isolatedVPC`
-  * `AWS_NODE_NAME_CONVENTION` -> `settings.aws.nodeNameConvention`
-  * `VM_MEMORY_OVERHEAD` -> `settings.aws.vmMemoryOverheadPercent`
-
-### Upgrading to v0.18.0+
-* v0.18.0 removes the `karpenter_consolidation_nodes_created` and `karpenter_consolidation_nodes_terminated` prometheus metrics in favor of the more generic `karpenter_nodes_created` and `karpenter_nodes_terminated` metrics. You can still see nodes created and terminated by consolidation by checking the `reason` label on the metrics. Check out all the metrics published by Karpenter [here]({{}}).
-
-### Upgrading to v0.17.0+
-Karpenter's Helm chart package is now stored in [Karpenter's OCI (Open Container Initiative) registry](https://gallery.ecr.aws/karpenter/karpenter). The Helm CLI has supported the new format since [v3.8.0](https://helm.sh/docs/topics/registries/).
-With this change, [charts.karpenter.sh](https://charts.karpenter.sh/) is no longer updated but is preserved to allow using older Karpenter versions. For examples on working with the Karpenter helm charts look at [Install Karpenter Helm Chart]({{< ref "./getting-started/getting-started-with-karpenter/#install-karpenter-helm-chart" >}}).
-
-Users who have scripted the installation or upgrading of Karpenter need to adjust their scripts with the following changes:
-1. There is no longer a need to add the Karpenter helm repo to helm
-2. The full URL of the Helm chart needs to be present when using the helm commands
-3. If you were not prepending a `v` to the version (e.g. `0.17.0`), you will need to do so with the OCI chart, e.g. `v0.17.0`.
-
-### Upgrading to v0.16.2+
-* v0.16.2 adds new kubeletConfiguration fields to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters:
-```bash
-kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.16.2/charts/karpenter/crds/karpenter.sh_provisioners.yaml
-```
-
-### Upgrading to v0.16.0+
-* v0.16.0 adds a new weight field to the `provisioners.karpenter.sh` v1alpha5 CRD.
The CRD will need to be updated to use the new parameters:
-```bash
-kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.16.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml
-```
-
-### Upgrading to v0.15.0+
-* v0.15.0 adds a new consolidation field to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters:
-```bash
-kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.15.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml
-```
-
-### Upgrading to v0.14.0+
-* v0.14.0 adds new fields to the `provisioners.karpenter.sh` v1alpha5 and `awsnodetemplates.karpenter.k8s.aws` v1alpha1 CRDs. The CRDs will need to be updated to use the new parameters:
-
-```bash
-kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.14.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml
-
-kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.14.0/charts/karpenter/crds/karpenter.k8s.aws_awsnodetemplates.yaml
-```
-
-* v0.14.0 changes the way Karpenter discovers its dynamically generated AWS launch templates to use a tag rather than a Name scheme. The previous name scheme was `Karpenter-${CLUSTER_NAME}-*`, which could collide with user-created launch templates that Karpenter should not manage. The new scheme uses a tag on the launch template, `karpenter.k8s.aws/cluster: ${CLUSTER_NAME}`. As a result, Karpenter will not clean up dynamically generated launch templates using the old name scheme. You can manually clean these up with the following commands:
-
-```bash
-## Find launch templates that match the naming pattern and that you do not want to keep
-aws ec2 describe-launch-templates --filters="Name=launch-template-name,Values=Karpenter-${CLUSTER_NAME}-*"
-
-## Delete launch template(s) that match the name but do not have the "karpenter.k8s.aws/cluster" tag
-aws ec2 delete-launch-template --launch-template-id <launch-template-id>
-```
-
-* v0.14.0 introduces additional instance type filtering if there are no `node.kubernetes.io/instance-type`, `karpenter.k8s.aws/instance-family`, or `karpenter.k8s.aws/instance-category` requirements that restrict instance types specified on the provisioner. This prevents Karpenter from launching bare metal and some older non-current-generation instance types unless the provisioner has been explicitly configured to allow them. If you specify an instance type or family requirement that supplies a list of instance types or families, that list will be used regardless of filtering. The filtering can also be completely eliminated by adding an `Exists` requirement for instance type or family:
-```yaml
-  - key: node.kubernetes.io/instance-type
-    operator: Exists
-```
-
-* v0.14.0 introduces support for custom AMIs without the need for an entire launch template. You must add the `ec2:DescribeImages` permission to the Karpenter Controller Role for this feature to work. This permission is needed for Karpenter to discover the custom images specified. Read the [Custom AMI documentation here]({{}}) to get started.
-* v0.14.0 adds an additional default toleration (`CriticalAddonsOnly=Exists`) to the Karpenter Helm chart. This may cause Karpenter to run on nodes that use this taint, which previously would not have been schedulable. This can be overridden by using `--set tolerations[0]=null`.
-
-* v0.14.0 deprecates the `AWS_ENI_LIMITED_POD_DENSITY` environment variable in favor of specifying `spec.kubeletConfiguration.maxPods` on the Provisioner.
`AWS_ENI_LIMITED_POD_DENSITY` will continue to work when `maxPods` is not set on the Provisioner. If `maxPods` is set, it will override `AWS_ENI_LIMITED_POD_DENSITY` on that specific Provisioner.
-
-### Upgrading to v0.13.0+
-* v0.13.0 introduces a new CRD named `AWSNodeTemplate`, which can be used to specify AWS Cloud Provider parameters. Everything that was previously specified under `spec.provider` in the Provisioner resource can now be specified in the spec of the new resource. The use of `spec.provider` is deprecated but will continue to function to maintain backwards compatibility for the current API version (v1alpha5) of the Provisioner resource. v0.13.0 also introduces support for custom user data that doesn't require the use of a custom launch template. The user data can be specified in-line in the AWSNodeTemplate resource. Read the [UserData documentation here](../aws/operating-systems) to get started.
-
-  If you are upgrading from v0.10.1 - v0.11.1, a new CRD `awsnodetemplate` was added. In v0.12.0, this CRD was renamed to `awsnodetemplates`. Since Helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade:
-  1. Make sure any `awsnodetemplate` manifests are saved somewhere so that they can be reapplied to the cluster.
-  2. `kubectl delete crd awsnodetemplate`
-  3. `kubectl apply -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.13.2/charts/karpenter/crds/karpenter.k8s.aws_awsnodetemplates.yaml`
-  4. Perform the Karpenter upgrade to v0.13.x, which will install the new `awsnodetemplates` CRD.
-  5. Reapply the `awsnodetemplate` manifests you saved from step 1, if applicable.
-* v0.13.0 also adds EC2/spot price fetching to Karpenter to allow making more accurate decisions regarding node deployments. Our getting started guide documents this, but if you are upgrading Karpenter you will need to modify your Karpenter controller policy to add the `pricing:GetProducts` and `ec2:DescribeSpotPriceHistory` permissions.
-
-### Upgrading to v0.12.0+
-* v0.12.0 adds an OwnerReference to each Node created by a provisioner. Previously, deleting a provisioner would orphan nodes. Now, deleting a provisioner will cause Kubernetes [cascading delete](https://kubernetes.io/docs/concepts/architecture/garbage-collection/#cascading-deletion) logic to gracefully terminate the nodes using the Karpenter node finalizer. You may still orphan nodes by removing the owner reference.
-* If you are upgrading from v0.10.1 - v0.11.1, a new CRD `awsnodetemplate` was added. In v0.12.0, this CRD was renamed to `awsnodetemplates`. Since Helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade:
-  1. Make sure any `awsnodetemplate` manifests are saved somewhere so that they can be reapplied to the cluster.
-  2. `kubectl delete crd awsnodetemplate`
-  3. `kubectl apply -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.12.1/charts/karpenter/crds/karpenter.k8s.aws_awsnodetemplates.yaml`
-  4. Perform the Karpenter upgrade to v0.12.x, which will install the new `awsnodetemplates` CRD.
-  5. Reapply the `awsnodetemplate` manifests you saved from step 1, if applicable.
-
-### Upgrading to v0.11.0+
-
-v0.11.0 changes the way that the `vpc.amazonaws.com/pod-eni` resource is reported. Instead of being reported for all nodes that could support the resource, regardless of whether the cluster is configured to support it, it is now controlled by a command line flag or environment variable.
The parameter defaults to false and must be set if your cluster uses [security groups for pods](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html). This can be enabled by setting the environment variable `AWS_ENABLE_POD_ENI` to true via the Helm value `controller.env`.
-
-Other extended resources must be registered on nodes by their respective device plugins, which are typically installed as DaemonSets (e.g. the `nvidia.com/gpu` resource will be registered by the [NVIDIA device plugin](https://github.com/NVIDIA/k8s-device-plugin)). Previously, Karpenter would register these resources on nodes at creation and they would be zeroed out by `kubelet` at startup. By allowing the device plugins to register the resources, pods will not bind to the nodes before any device plugin initialization has occurred.
-
-v0.11.0 adds a `providerRef` field in the Provisioner CRD. To use this new field you will need to replace the Provisioner CRD manually:
-
-```shell
-kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.11.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml
-```
-
-### Upgrading to v0.10.0+
-
-v0.10.0 adds a new field, `startupTaints`, to the provisioner spec. Standard Helm upgrades [do not upgrade CRDs](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations), so the field will not be available unless the CRD is manually updated. This can be performed prior to the standard upgrade by applying the new CRD manually:
-
-```shell
-kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.10.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml
-```
-
-📝 If you don't perform this manual CRD update, Karpenter will work correctly except for rejecting the creation/update of provisioners that use `startupTaints`.
-
-### Upgrading to v0.6.2+
-
-If using Helm, the variable names have changed for the cluster's name and endpoint. You may need to update any configuration that sets the old variable names.
-
-- `controller.clusterName` is now `clusterName`
-- `controller.clusterEndpoint` is now `clusterEndpoint`
diff --git a/website/content/en/v0.31/_index.md b/website/content/en/v0.35/_index.md
similarity index 81%
rename from website/content/en/v0.31/_index.md
rename to website/content/en/v0.35/_index.md
index 93ddf9368a25..61ab45550ca3 100755
--- a/website/content/en/v0.31/_index.md
+++ b/website/content/en/v0.35/_index.md
@@ -1,4 +1,3 @@
-
---
title: "Documentation"
linkTitle: "Docs"
@@ -8,23 +7,23 @@ cascade:
  tags:
  - preview
---
-Karpenter is an open-source node provisioning project built for Kubernetes.
+Karpenter is an open-source node lifecycle management project built for Kubernetes.
Adding Karpenter to a Kubernetes cluster can dramatically improve the efficiency and cost of running workloads on that cluster.
Karpenter works by:
* **Watching** for pods that the Kubernetes scheduler has marked as unschedulable
* **Evaluating** scheduling constraints (resource requests, nodeselectors, affinities, tolerations, and topology spread constraints) requested by the pods
* **Provisioning** nodes that meet the requirements of the pods
-* **Removing** the nodes when the nodes are no longer needed
+* **Disrupting** the nodes when the nodes are no longer needed

As someone using Karpenter, once your Kubernetes cluster and the Karpenter controller are up and running (see [Getting Started]({{}})), you can:
-* **Set up provisioners**: By applying a provisioner to Karpenter, you can configure constraints on node provisioning and set timeout values for node expiry or Kubelet configuration values.
-Provisioner-level constraints related to Kubernetes and your cloud provider (AWS, for example) include:
+* **Set up NodePools**: By applying a NodePool to Karpenter, you can configure constraints on node provisioning and set values for node expiry, node consolidation, or Kubelet configuration.
+  NodePool-level constraints related to Kubernetes and your cloud provider (AWS, for example) include:
  - Taints (`taints`): Identify taints to add to provisioned nodes. If a pod doesn't have a matching toleration for the taint, the effect set by the taint occurs (NoSchedule, PreferNoSchedule, or NoExecute). See Kubernetes [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) for details.
  - Labels (`labels`): Apply arbitrary key-value pairs to nodes that can be matched by pods.
-  - Requirements (`requirements`): Set acceptable (`In`) and unacceptable (`Out`) Kubernetes and Karpenter values for node provisioning based on [Well-Known Labels](https://kubernetes.io/docs/reference/labels-annotations-taints/) and [cloud-specific settings]({{}}). These can include [instance types](https://kubernetes.io/docs/reference/labels-annotations-taints/#nodekubernetesioinstance-type), [zones](https://kubernetes.io/docs/reference/labels-annotations-taints/#topologykubernetesiozone), [computer architecture](https://kubernetes.io/docs/reference/labels-annotations-taints/#kubernetes-io-arch), and [capacity type]({{}}) (such as AWS spot or on-demand).
+  - Requirements (`requirements`): Set acceptable (`In`) and unacceptable (`Out`) Kubernetes and Karpenter values for node provisioning based on [Well-Known Labels](https://kubernetes.io/docs/reference/labels-annotations-taints/) and [cloud-specific settings]({{}}). These can include [instance types](https://kubernetes.io/docs/reference/labels-annotations-taints/#nodekubernetesioinstance-type), [zones](https://kubernetes.io/docs/reference/labels-annotations-taints/#topologykubernetesiozone), [computer architecture](https://kubernetes.io/docs/reference/labels-annotations-taints/#kubernetes-io-arch), and [capacity type]({{}}) (such as AWS spot or on-demand).
  - Limits (`limits`): Lets you set limits on the total CPU and Memory that can be used by the cluster, effectively stopping further node provisioning when those limits have been reached.
* **Deploy workloads**: When deploying workloads, you can request that scheduling constraints be met to direct which nodes Karpenter provisions for those workloads.
Use any of the following Pod spec constraints when you deploy pods:
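For illustration only — a minimal sketch in which the pod name, image, and requests are placeholders, not Karpenter defaults — a pod can combine `nodeSelector` and resource requests so that Karpenter knows what kind of node to provision:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: inflate   # placeholder name
spec:
  nodeSelector:
    karpenter.sh/capacity-type: on-demand   # well-known Karpenter label
  containers:
    - name: app
      image: public.ecr.aws/eks-distro/kubernetes/pause:3.7   # placeholder image
      resources:
        requests:
          cpu: "1"        # Karpenter sizes the node to fit these requests
          memory: 1Gi
```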
diff --git a/website/content/en/v0.35/concepts/_index.md b/website/content/en/v0.35/concepts/_index.md
new file mode 100755
index 000000000000..67d573135369
--- /dev/null
+++ b/website/content/en/v0.35/concepts/_index.md
@@ -0,0 +1,113 @@
+---
+title: "Concepts"
+linkTitle: "Concepts"
+weight: 20
+description: >
+  Understand key concepts of Karpenter
+---
+
+Users fall under two basic roles: [Kubernetes cluster administrators]({{}}) and [application developers]({{}}). This document describes Karpenter concepts through the lens of those two types of users.
+
+## Cluster Administrator
+
+As a Kubernetes cluster administrator, you can engage with Karpenter to:
+
+* Install Karpenter
+* Configure NodePools to set constraints and other features for managing nodes
+* Disrupt nodes
+
+Concepts associated with this role are described below.
+
+
+### Installing Karpenter
+
+Karpenter is designed to run on a node in your Kubernetes cluster. As part of the installation process, you need credentials from the underlying cloud provider to allow nodes to be started up and added to the cluster as they are needed.
+
+[Getting Started with Karpenter]({{}}) describes the process of installing Karpenter. Because requests to add and delete nodes and schedule pods are made through Kubernetes, AWS IAM Roles for Service Accounts (IRSA) are needed by your Kubernetes cluster to make privileged requests to AWS. For example, Karpenter uses AWS IRSA roles to grant the permissions needed to describe EC2 instance types and create EC2 instances.
+
+Once privileges are in place, Karpenter is deployed with a Helm chart.
+
+### Configuring NodePools
+
+Karpenter's job is to add nodes to handle unschedulable pods, schedule pods on those nodes, and remove the nodes when they are not needed. To configure Karpenter, you create [NodePools]({{}}) that define how Karpenter manages unschedulable pods and configures nodes. You will also define behaviors for your NodePools, capturing details like how Karpenter handles the disruption of nodes and setting limits and weights for each NodePool.
+
+Here are some things to know about Karpenter's NodePools:
+
+* **Unschedulable pods**: Karpenter only attempts to schedule pods that have a status condition `Unschedulable=True`, which the kube-scheduler sets when it fails to schedule the pod to existing capacity.
+
+* [**Defining Constraints**]({{}}): Karpenter defines a Custom Resource called a NodePool to specify configuration. Each NodePool manages a distinct set of nodes, but pods can be scheduled to any NodePool that supports their scheduling constraints. A NodePool contains constraints that impact the nodes that can be provisioned and the attributes of those nodes. See the [NodePools Documentation]({{}}) for a description of configuration and NodePool examples.
+
+* [**Defining Disruption**]({{}}): A NodePool can also include values to indicate when nodes should be disrupted. This includes configuration around concepts like [Consolidation]({{}}), [Drift]({{}}), and [Expiration]({{}}).
+
+* **Well-known labels**: The NodePool can use well-known Kubernetes labels to allow pods to request only certain instance types, architectures, operating systems, or other attributes when creating nodes. See [Well-Known Labels, Annotations and Taints](https://kubernetes.io/docs/reference/labels-annotations-taints/) for details. Keep in mind that only a subset of these labels are supported in Karpenter, as described later.
+
+* **Multiple NodePools**: Multiple NodePools can be configured on the same cluster. For example, you might want to configure different teams on the same cluster to run on completely separate capacity. One team could run on nodes using Bottlerocket, while another uses an EKS-optimized AMI.
+
+Although most use cases are addressed with a single NodePool for multiple teams, multiple NodePools are useful to isolate nodes for billing, use different node constraints (such as no GPUs for a team), or use different disruption settings.
+
+### Disrupting nodes
+
+Karpenter deletes nodes when they are no longer needed.
+
+* [**Finalizer**]({{}}): Karpenter places a finalizer bit on each node it creates.
+When a request comes in to delete one of those nodes (such as a TTL or a manual `kubectl delete node`), Karpenter will cordon the node, drain all the pods, terminate the EC2 instance, and delete the node object.
+Karpenter handles all clean-up work needed to properly delete the node.
+* [**Expiration**]({{}}): Karpenter will mark nodes as expired and disrupt them after they have lived a set number of seconds, based on the NodePool's `spec.disruption.expireAfter` value. You can use node expiry to periodically recycle nodes due to security concerns.
+* [**Consolidation**]({{}}): Karpenter works to actively reduce cluster cost by identifying when:
+  * Nodes can be removed because the node is empty.
+  * Nodes can be removed as their workloads will run on other nodes in the cluster.
+  * Nodes can be replaced with cheaper variants due to a change in the workloads.
+* [**Drift**]({{}}): Karpenter will mark nodes as drifted and disrupt nodes that have drifted from their desired specification. See [Drift]({{}}) to see which fields are considered.
+* [**Interruption**]({{}}): Karpenter will watch for upcoming interruption events that could affect your nodes (health events, spot interruption, etc.) and will cordon, drain, and terminate the node(s) ahead of the event to reduce workload disruption.
+
+For more details on how Karpenter deletes nodes, see the [Disruption Documentation]({{}}).
+
+### Scheduling
+
+Karpenter launches nodes in response to pods that the Kubernetes scheduler has marked unschedulable. After solving scheduling constraints, Karpenter launches a machine in your chosen cloud provider.
+
+Once Karpenter brings up a node, that node is available for the Kubernetes scheduler to schedule pods on as well.
+
+#### Constraints
+
+The concept of layered constraints is key to using Karpenter. With no constraints defined in NodePools and none requested from pods being deployed, Karpenter chooses from the entire universe of features available to your cloud provider. Nodes can be created using any instance type and run in any zone.
+
+An application developer can tighten the constraints defined in a NodePool by the cluster administrator by defining additional scheduling constraints in their pod spec. Refer to the description of Karpenter constraints in the Application Developer section below for details.
+
+### Cloud Provider
+
+Karpenter makes requests to provision new nodes to the associated cloud provider. The first supported cloud provider is AWS, although Karpenter is designed to work with other cloud providers. Separating Kubernetes and AWS-specific settings allows Karpenter a clean path to integrating with other cloud providers.
+
+While using Kubernetes well-known labels, the NodePool can set some values that are specific to the cloud provider. For example, to include a certain instance type, you could use the Kubernetes label `node.kubernetes.io/instance-type`, but set its value to an AWS instance type (such as `m5.large` or `m5.2xlarge`).
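+For illustration, a minimal NodePool sketch of this pattern (the instance types and names are assumptions; substitute your own):
+
+```yaml
+apiVersion: karpenter.sh/v1beta1
+kind: NodePool
+metadata:
+  name: default
+spec:
+  template:
+    spec:
+      nodeClassRef:
+        name: default   # assumed EC2NodeClass name
+      requirements:
+        # Well-known Kubernetes label, populated with AWS-specific values
+        - key: node.kubernetes.io/instance-type
+          operator: In
+          values: ["m5.large", "m5.2xlarge"]
+```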
+
+### Kubernetes Cluster Autoscaler
+
+Like Karpenter, [Kubernetes Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) is designed to add nodes when requests come in to run pods that cannot be met by current capacity. Cluster Autoscaler is part of the Kubernetes project, with implementations by most major Kubernetes cloud providers. By taking a fresh look at provisioning, Karpenter offers the following improvements:
+
+* **Designed to handle the full flexibility of the cloud**: Karpenter has the ability to efficiently address the full range of instance types available through AWS. Cluster Autoscaler was not originally built with the flexibility to handle hundreds of instance types, zones, and purchase options.
+
+* **Quick node provisioning**: Karpenter manages each instance directly, without use of additional orchestration mechanisms like node groups. This enables it to retry in milliseconds instead of minutes when capacity is unavailable. It also allows Karpenter to leverage diverse instance types, availability zones, and purchase options without the creation of hundreds of node groups.
+
+## Application Developer
+
+As someone deploying pods that might be evaluated by Karpenter, you should know how to request the properties that your pods need from their compute resources. Karpenter's job is to efficiently assess and choose compute assets based on requests from pod deployments. These can include basic Kubernetes features or features that are specific to the cloud provider (such as AWS).
+
+Layered *constraints* are applied when a pod makes requests for compute resources that cannot be met by current capacity. A pod can specify `nodeAffinity` (to run in a particular zone or instance type) or `topologySpreadConstraints` (to cause a set of pods to be balanced across multiple nodes).
+The pod can specify a `nodeSelector` to run only on nodes with a particular label and `resource.requests` to ensure that the node has enough available memory.
+
+The Kubernetes scheduler tries to match those constraints with available nodes. If the pod is unschedulable, Karpenter creates compute resources that match its needs. When Karpenter tries to provision a node, it analyzes scheduling constraints before choosing the node to create.
+
+As long as the requests are not outside the NodePool's constraints, Karpenter will look to best match the request, comparing the same well-known labels defined by the pod's scheduling constraints. Note that if the constraints are such that a match is not possible, the pod will remain unscheduled.
+
+So, what constraints can you use as an application developer deploying pods that could be managed by Karpenter?
+
+Kubernetes features that Karpenter supports for scheduling pods include nodeAffinity and [nodeSelector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector).
+It also supports [PodDisruptionBudget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/), [topologySpreadConstraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/), and [inter-pod affinity and anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity).
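+As a hedged sketch of how these features combine (names, replica counts, and the image are placeholders), a deployment might spread its replicas across zones while pinning the architecture:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: web   # placeholder name
+spec:
+  replicas: 6
+  selector:
+    matchLabels:
+      app: web
+  template:
+    metadata:
+      labels:
+        app: web
+    spec:
+      nodeSelector:
+        kubernetes.io/arch: amd64   # well-known label
+      topologySpreadConstraints:
+        # Keep replicas balanced across zones; Karpenter will provision
+        # nodes in the zones needed to satisfy this constraint
+        - maxSkew: 1
+          topologyKey: topology.kubernetes.io/zone
+          whenUnsatisfiable: DoNotSchedule
+          labelSelector:
+            matchLabels:
+              app: web
+      containers:
+        - name: web
+          image: nginx   # placeholder image
+```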
+
+From the Kubernetes [Well-Known Labels, Annotations and Taints](https://kubernetes.io/docs/reference/labels-annotations-taints/) page, you can see a full list of Kubernetes labels, annotations and taints that determine scheduling. Those that are implemented in Karpenter include:
+
+* **kubernetes.io/arch**: For example, kubernetes.io/arch=amd64
+* **node.kubernetes.io/instance-type**: For example, node.kubernetes.io/instance-type=m3.medium
+* **topology.kubernetes.io/zone**: For example, topology.kubernetes.io/zone=us-east-1c
+
+For more on how, as a developer, you can add constraints to your pod deployment, see [Scheduling](./scheduling/).
diff --git a/website/content/en/v0.35/concepts/disruption.md b/website/content/en/v0.35/concepts/disruption.md
new file mode 100644
index 000000000000..36e78543e8e8
--- /dev/null
+++ b/website/content/en/v0.35/concepts/disruption.md
@@ -0,0 +1,306 @@
+---
+title: "Disruption"
+linkTitle: "Disruption"
+weight: 4
+description: >
+  Understand different ways Karpenter disrupts nodes
+---
+
+## Control Flow
+
+Karpenter sets a Kubernetes [finalizer](https://kubernetes.io/docs/concepts/overview/working-with-objects/finalizers/) on each node and node claim it provisions.
+The finalizer blocks deletion of the node object while the Termination Controller taints and drains the node, before removing the underlying NodeClaim. Disruption is triggered by the Disruption Controller, by the user through manual disruption, or through an external system that sends a delete request to the node object.
+
+### Disruption Controller
+
+Karpenter automatically discovers disruptable nodes and spins up replacements when needed. Karpenter disrupts nodes by executing one [automated method](#automated-methods) at a time, in order of Expiration, Drift, and then Consolidation. Each method varies slightly, but they all follow the standard disruption process. Karpenter uses [disruption budgets]({{}}) to control the speed of disruption.
+1. Identify a list of prioritized candidates for the disruption method.
+   * If there are [pods that cannot be evicted](#pod-eviction) on the node, Karpenter will ignore the node and try disrupting it later.
+   * If there are no disruptable nodes, continue to the next disruption method.
+2. For each disruptable node:
+   1. Check if disrupting it would violate its NodePool's disruption budget.
+   2. Execute a scheduling simulation with the pods on the node to find if any replacement nodes are needed.
+3. Add the `karpenter.sh/disruption:NoSchedule` taint to the node(s) to prevent pods from scheduling to them.
+4. Pre-spin any replacement nodes needed as calculated in Step (2), and wait for them to become ready.
+   * If a replacement node fails to initialize, un-taint the node(s) and restart from Step (1), starting at the first disruption method again.
+5. Delete the node(s) and wait for the Termination Controller to gracefully shut down the node(s).
+6. Once the Termination Controller terminates the node, go back to Step (1), starting at the first disruption method again.
+
+### Termination Controller
+
+When a Karpenter node is deleted, the Karpenter finalizer will block deletion and the APIServer will set the `DeletionTimestamp` on the node, allowing Karpenter to gracefully shut down the node, modeled after [Kubernetes Graceful Node Shutdown](https://kubernetes.io/docs/concepts/architecture/nodes/#graceful-node-shutdown). Karpenter's graceful shutdown process will:
+1. Add the `karpenter.sh/disruption=disrupting:NoSchedule` taint to the node to prevent pods from scheduling to it.
+2. Begin evicting the pods on the node with the [Kubernetes Eviction API](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/) to respect PDBs, while ignoring all [static pods](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/), pods tolerating the `karpenter.sh/disruption=disrupting:NoSchedule` taint, and succeeded/failed pods. Wait for the node to be fully drained before proceeding to Step (3).
+   * While waiting, if the underlying NodeClaim for the node no longer exists, remove the finalizer to allow the APIServer to delete the node, completing termination.
+3. Terminate the NodeClaim in the Cloud Provider.
+4. Remove the finalizer from the node to allow the APIServer to delete the node, completing termination.
+
+## Manual Methods
+* **Node Deletion**: You can use `kubectl` to manually remove a single Karpenter node or nodeclaim. Since each Karpenter node is owned by a NodeClaim, deleting either the node or the nodeclaim will cause cascade deletion of the other:
+
+  ```bash
+  # Delete a specific nodeclaim
+  kubectl delete nodeclaim $NODECLAIM_NAME
+
+  # Delete a specific node
+  kubectl delete node $NODE_NAME
+
+  # Delete all nodeclaims
+  kubectl delete nodeclaims --all
+
+  # Delete all nodes owned by any nodepool
+  kubectl delete nodes -l karpenter.sh/nodepool
+
+  # Delete all nodeclaims owned by a specific nodepool
+  kubectl delete nodeclaims -l karpenter.sh/nodepool=$NODEPOOL_NAME
+  ```
+* **NodePool Deletion**: NodeClaims are owned, through an [owner reference](https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/#owner-references-in-object-specifications), by the NodePool that launched them. Karpenter will gracefully terminate nodes through cascading deletion when the owning NodePool is deleted.
+
+{{% alert title="Note" color="primary" %}}
+By adding the finalizer, Karpenter improves the default Kubernetes process of node deletion.
+When you run `kubectl delete node` on a node without a finalizer, the node is deleted without triggering the finalization logic. The instance will continue running in EC2, even though there is no longer a node object for it. The kubelet isn’t watching for its own existence, so if a node is deleted, the kubelet doesn’t terminate itself. All the pod objects get deleted by a garbage collection process later, because the pods’ node is gone.
+{{% /alert %}}
+
+## Automated Methods
+
+Automated methods can be rate limited through [NodePool Disruption Budgets]({{}}).
+
+* **Expiration**: Karpenter will mark nodes as expired and disrupt them after they have lived a set number of seconds, based on the NodePool's `spec.disruption.expireAfter` value. You can use node expiry to periodically recycle nodes due to security concerns.
+* [**Consolidation**]({{}}): Karpenter works to actively reduce cluster cost by identifying when:
+  * Nodes can be removed because the node is empty.
+  * Nodes can be removed as their workloads will run on other nodes in the cluster.
+  * Nodes can be replaced with lower priced variants due to a change in the workloads.
+* [**Drift**]({{}}): Karpenter will mark nodes as drifted and disrupt nodes that have drifted from their desired specification. See [Drift]({{}}) to see which fields are considered.
+* [**Interruption**]({{}}): Karpenter will watch for upcoming interruption events that could affect your nodes (health events, spot interruption, etc.)
and will taint, drain, and terminate the node(s) ahead of the event to reduce workload disruption.
+
+{{% alert title="Defaults" color="secondary" %}}
+Disruption is configured through the NodePool's disruption block by the `consolidationPolicy`, `expireAfter`, and `consolidateAfter` fields. Karpenter will configure these fields with the following values by default if they are not set:
+
+```yaml
+spec:
+  disruption:
+    consolidationPolicy: WhenUnderutilized
+    expireAfter: 720h
+```
+{{% /alert %}}
+
+### Consolidation
+
+Karpenter has two mechanisms for cluster consolidation:
+1. **Deletion** - A node is eligible for deletion if all of its pods can run on the free capacity of other nodes in the cluster.
+2. **Replace** - A node can be replaced if all of its pods can run on a combination of the free capacity of other nodes in the cluster and a single lower priced replacement node.
+
+Consolidation has three mechanisms that are performed in order to attempt to identify a consolidation action:
+1. **Empty Node Consolidation** - Delete any entirely empty nodes in parallel.
+2. **Multi Node Consolidation** - Try to delete two or more nodes in parallel, possibly launching a single replacement whose price is lower than that of all nodes being removed.
+3. **Single Node Consolidation** - Try to delete any single node, possibly launching a single replacement whose price is lower than that of the node being removed.
+
+It's impractical to examine all possible consolidation options for multi-node consolidation, so Karpenter uses a heuristic to identify a likely set of nodes that can be consolidated. For single-node consolidation, we consider each node in the cluster individually.
+
+When there are multiple nodes that could potentially be deleted or replaced, Karpenter chooses to consolidate the node that overall disrupts your workloads the least by preferring to terminate:
+
+* Nodes running fewer pods
+* Nodes that will expire soon
+* Nodes with lower priority pods
+
+If consolidation is enabled, Karpenter periodically reports events against nodes that indicate why the node can't be consolidated. These events can be used to investigate nodes that you expected to have been consolidated, but that still remain in your cluster.
+
+```bash
+Events:
+  Type    Reason            Age                From       Message
+  ----    ------            ----               ----       -------
+  Normal  Unconsolidatable  66s                karpenter  pdb default/inflate-pdb prevents pod evictions
+  Normal  Unconsolidatable  33s (x3 over 30m)  karpenter  can't replace with a lower-priced node
+```
+
+{{% alert title="Warning" color="warning" %}}
+Using preferred anti-affinity and topology spreads can reduce the effectiveness of consolidation. At node launch, Karpenter attempts to satisfy affinity and topology spread preferences. In order to reduce node churn, consolidation must also attempt to satisfy these constraints to avoid immediately consolidating nodes after they launch. This means that consolidation may not disrupt nodes in order to avoid violating preferences, even if kube-scheduler can fit the host pods elsewhere. Karpenter reports these pods via logging to bring awareness to the possible issues they can cause (e.g. `pod default/inflate-anti-self-55894c5d8b-522jd has a preferred Anti-Affinity which can prevent consolidation`).
+{{% /alert %}}
+
+#### Spot consolidation
+For spot nodes, Karpenter has deletion consolidation enabled by default. If you would like to enable replacement with spot consolidation, you need to enable the feature through the [`SpotToSpotConsolidation` feature flag]({{}}).
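+For illustration, a hedged sketch of enabling the gate at upgrade time — this assumes the Helm chart exposes the gate as `settings.featureGates.spotToSpotConsolidation`; verify the key against your chart version:
+
+```bash
+# Flip only the feature-gate setting, keeping all other release values
+helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter \
+  --namespace karpenter \
+  --reuse-values \
+  --set settings.featureGates.spotToSpotConsolidation=true
+```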
+
+Lower priced spot instance types are selected with the [`price-capacity-optimized` strategy](https://aws.amazon.com/blogs/compute/introducing-price-capacity-optimized-allocation-strategy-for-ec2-spot-instances/). Sometimes, the lowest priced spot instance type is not launched due to the likelihood of interruption. As a result, Karpenter uses the number of available instance type options with a price lower than the currently launched spot instance as a heuristic for evaluating whether it should launch a replacement for the current spot node.
+
+We refer to the number of instances that Karpenter has within its launch decision as a launch's "instance type flexibility." When Karpenter is considering performing a spot-to-spot consolidation replacement, it will check whether replacing the instance type will lead to enough instance type flexibility in the subsequent launch request. As a result, we get the following properties when evaluating for consolidation:
+1) We shouldn't continually consolidate down to the lowest priced spot instance, which might have very high rates of interruption.
+2) We launch with enough instance types that there's a high likelihood that our replacement instance has comparable availability to our current one.
+
+Karpenter requires a minimum instance type flexibility of 15 instance types when performing single-node spot-to-spot consolidations (1 node to 1 node). It does not have the same instance type flexibility requirement for multi-node spot-to-spot consolidations (many nodes to 1 node), since doing so without requiring flexibility won't lead to "race to the bottom" scenarios.
+
+
+### Drift
+Drift handles changes to the NodePool/EC2NodeClass. For Drift, values in the NodePool/EC2NodeClass are reflected in the NodeClaimTemplateSpec/EC2NodeClassSpec in the same way that they're set. A NodeClaim will be detected as drifted if the values in its owning NodePool/EC2NodeClass do not match the values in the NodeClaim. Similar to the upstream `deployment.spec.template` relationship to pods, Karpenter will annotate the owning NodePool and EC2NodeClass with a hash of the NodeClaimTemplateSpec to check for drift. Some special cases will be discovered either by Karpenter or through the CloudProvider interface, triggered by NodeClaim/Instance/NodePool/EC2NodeClass changes.
+
+#### Special Cases on Drift
+In special cases, drift can correspond to multiple values and must be handled differently. Drift on resolved fields can create cases where drift occurs without changes to CRDs, or where CRD changes do not result in drift. For example, if a NodeClaim has `node.kubernetes.io/instance-type: m5.large`, and requirements change from `node.kubernetes.io/instance-type In [m5.large]` to `node.kubernetes.io/instance-type In [m5.large, m5.2xlarge]`, the NodeClaim will not be drifted because its value is still compatible with the new requirements. Conversely, if a NodeClaim is using the image `ami: ami-abc`, but a new image is published, Karpenter's `EC2NodeClass.spec.amiSelectorTerms` will discover that the new correct value is `ami: ami-xyz`, and detect the NodeClaim as drifted.
+
+##### NodePool
+| Fields                          |
+|---------------------------------|
+| spec.template.spec.requirements |
+
+##### EC2NodeClass
+| Fields                          |
+|---------------------------------|
+| spec.subnetSelectorTerms        |
+| spec.securityGroupSelectorTerms |
+| spec.amiSelectorTerms           |
+
+#### Behavioral Fields
+Behavioral Fields are treated as over-arching settings on the NodePool to dictate how Karpenter behaves.
+These fields don’t correspond to settings on the NodeClaim or instance. They’re set by the user to control Karpenter’s provisioning and disruption logic. Since these don’t map to a desired state of NodeClaims, __behavioral fields are not considered for Drift__.
+
+##### NodePool
+| Fields            |
+|-------------------|
+| spec.weight       |
+| spec.limits       |
+| spec.disruption.* |
+
+Read the [Drift Design](https://github.com/aws/karpenter-core/blob/main/designs/drift.md) for more.
+
+To enable the drift feature flag, refer to the [Feature Gates]({{}}).
+
+Karpenter will add the `Drifted` status condition on NodeClaims if the NodeClaim is drifted from its owning NodePool. Karpenter will also remove the `Drifted` status condition if either:
+1. The `Drift` feature gate is not enabled but the NodeClaim is drifted.
+2. The NodeClaim isn't drifted but still has the status condition.
+
+### Interruption
+
+If interruption-handling is enabled, Karpenter will watch for upcoming involuntary interruption events that would cause disruption to your workloads. These interruption events include:
+
+* Spot Interruption Warnings
+* Scheduled Change Health Events (Maintenance Events)
+* Instance Terminating Events
+* Instance Stopping Events
+
+When Karpenter detects that one of these events will occur to your nodes, it automatically taints, drains, and terminates the node(s) ahead of the interruption event to give the maximum amount of time for workload cleanup prior to compute disruption. This enables scenarios where the `terminationGracePeriod` for your workloads may be long or cleanup for your workloads is critical, and you want enough time to be able to gracefully clean up your pods.
+
+For Spot interruptions, the NodePool will start a new node as soon as it sees the Spot interruption warning. Spot interruptions have a __2 minute notice__ before Amazon EC2 reclaims the instance. Karpenter's average node startup time means that, generally, there is sufficient time for the new node to become ready and to move the pods to the new node before the NodeClaim is reclaimed.
+
+{{% alert title="Note" color="primary" %}}
+Karpenter publishes Kubernetes events to the node for all events listed above, in addition to [__Spot Rebalance Recommendations__](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/rebalance-recommendations.html). Karpenter does not currently support taint, drain, and terminate logic for Spot Rebalance Recommendations.
+
+If you require handling for Spot Rebalance Recommendations, you can use the [AWS Node Termination Handler (NTH)](https://github.com/aws/aws-node-termination-handler) alongside Karpenter; however, note that the AWS Node Termination Handler cordons and drains nodes on rebalance recommendations, potentially causing more node churn in the cluster than with interruptions alone. Further information can be found in the [Troubleshooting Guide]({{< ref "../troubleshooting#aws-node-termination-handler-nth-interactions" >}}).
+{{% /alert %}}
+
+Karpenter enables this feature by watching an SQS queue which receives critical events from AWS services which may affect your nodes. Karpenter requires that an SQS queue be provisioned and EventBridge rules and targets be added that forward interruption events from AWS services to the SQS queue. Karpenter provides details for provisioning this infrastructure in the [CloudFormation template in the Getting Started Guide](../../getting-started/getting-started-with-karpenter/#create-the-karpenter-infrastructure-and-iam-roles).
+
+To enable interruption handling, configure the `--interruption-queue-name` CLI argument with the name of the interruption queue provisioned to handle interruption events.
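+For example, a hedged sketch of wiring this up through the Helm chart — this assumes the chart maps `settings.interruptionQueue` to the CLI argument above; check the mapping for your chart version:
+
+```bash
+# Point Karpenter at the pre-provisioned SQS interruption queue
+helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter \
+  --namespace karpenter \
+  --reuse-values \
+  --set settings.interruptionQueue=${CLUSTER_NAME}
+```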
+
+## Controls
+
+### Disruption Budgets
+
+You can rate limit Karpenter's disruption through the NodePool's `spec.disruption.budgets`. If undefined, Karpenter will default to one budget with `nodes: 10%`. Budgets will consider nodes that are actively being deleted for any reason, and will only block Karpenter from disrupting nodes voluntarily through expiration, drift, emptiness, and consolidation.
+
+#### Nodes
+When calculating if a budget will block nodes from disruption, Karpenter lists the total number of nodes owned by a NodePool, subtracting out the nodes owned by that NodePool that are currently being deleted and nodes that are NotReady. If the number of nodes being deleted by Karpenter or any other processes is greater than the number of allowed disruptions, disruption for this node will not proceed.
+
+If the budget is configured with a percentage value, such as `20%`, Karpenter will calculate the number of allowed disruptions as `allowed_disruptions = roundup(total * percentage) - total_deleting - total_notready`. If otherwise defined as a non-percentage value, Karpenter will simply subtract the number of nodes from the total: `(total - non_percentage_value) - total_deleting - total_notready`. For multiple budgets in a NodePool, Karpenter will take the minimum value (most restrictive) of each of the budgets.
+
+For example, the following NodePool with three budgets defines the following requirements:
+- The first budget will only allow 20% of nodes owned by that NodePool to be disrupted. For instance, if there were 19 nodes owned by the NodePool, 4 disruptions would be allowed, rounding up from `19 * .2 = 3.8`.
+- The second budget acts as a ceiling to the previous budget, only allowing 5 disruptions when there are more than 25 nodes.
+- The last budget only blocks disruptions during the first 10 minutes of the day, where 0 disruptions are allowed.
+
+```yaml
+apiVersion: karpenter.sh/v1beta1
+kind: NodePool
+metadata:
+  name: default
+spec:
+  disruption:
+    consolidationPolicy: WhenUnderutilized
+    expireAfter: 720h # 30 * 24h = 720h
+    budgets:
+    - nodes: "20%"
+    - nodes: "5"
+    - nodes: "0"
+      schedule: "@daily"
+      duration: 10m
+```
+
+#### Schedule
+Schedule is a cron job schedule. Generally, the cron syntax is five space-delimited values with the options below, plus additional special macros like `@yearly`, `@monthly`, `@weekly`, `@daily`, and `@hourly`.
+Follow the [Kubernetes documentation](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#writing-a-cronjob-spec) for more information on how to follow the cron syntax.
+
+```bash
+# ┌───────────── minute (0 - 59)
+# │ ┌───────────── hour (0 - 23)
+# │ │ ┌───────────── day of the month (1 - 31)
+# │ │ │ ┌───────────── month (1 - 12)
+# │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday;
+# │ │ │ │ │                                       7 is also Sunday on some systems)
+# │ │ │ │ │                                       OR sun, mon, tue, wed, thu, fri, sat
+# │ │ │ │ │
+# * * * * *
+```
+
+{{% alert title="Note" color="primary" %}}
+Timezones are not supported. Most images default to UTC, but it is best practice to ensure this is the case when considering how to define your budgets.
+{{% /alert %}}
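+For instance, a hedged sketch of a budget that blocks all voluntary disruption during an assumed business-hours window (the cron values are illustrative, and all times are UTC):
+
+```yaml
+spec:
+  disruption:
+    budgets:
+    # Allow zero voluntary disruptions starting 09:00 UTC, for 8 hours, on weekdays
+    - nodes: "0"
+      schedule: "0 9 * * mon-fri"
+      duration: 8h
+```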
+
+#### Duration
+Duration allows compound durations with minutes and hours values, such as `10h5m`, `30m`, or `160h`. Since cron syntax does not accept denominations smaller than minutes, users can only define minutes or hours.
+
+{{% alert title="Note" color="primary" %}}
+Duration and Schedule must be defined together. When omitted, the budget is always active. When defined, the schedule determines a starting point where the budget will begin being enforced, and the duration determines how long from that starting point the budget will be enforced.
+{{% /alert %}}
+
+### Pod-Level Controls
+
+You can block Karpenter from voluntarily choosing to disrupt certain pods by setting the `karpenter.sh/do-not-disrupt: "true"` annotation on the pod. This is useful for pods that you want to run from start to finish without disruption. By opting pods out of this disruption, you are telling Karpenter that it should not voluntarily remove a node containing this pod.
+
+Examples of pods that you might want to opt out of disruption include an interactive game that you don't want to interrupt or a long batch job (such as you might have with machine learning) that would need to start over if it were interrupted.
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+spec:
+  template:
+    metadata:
+      annotations:
+        karpenter.sh/do-not-disrupt: "true"
+```
+
+{{% alert title="Note" color="primary" %}}
+This annotation will be ignored for [terminating pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) and [terminal pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) (Failed/Succeeded).
+{{% /alert %}}
+
+Examples of voluntary node removal that will be prevented by this annotation include:
+- [Consolidation]({{}})
+- [Drift]({{}})
+- Expiration
+
+{{% alert title="Note" color="primary" %}}
+Voluntary node removal does not include [Interruption]({{}}) or manual deletion initiated through `kubectl delete node`. Both of these are considered involuntary events, since node removal cannot be delayed.
+{{% /alert %}}
+
+### Node-Level Controls
+
+You can block Karpenter from voluntarily choosing to disrupt certain nodes by setting the `karpenter.sh/do-not-disrupt: "true"` annotation on the node. This will prevent disruption actions on the node.
+
+```yaml
+apiVersion: v1
+kind: Node
+metadata:
+  annotations:
+    karpenter.sh/do-not-disrupt: "true"
+```
+
+#### Example: Disable Disruption on a NodePool
+
+NodePool `.spec.template.metadata.annotations` allows you to set annotations that will be applied to all nodes launched by this NodePool. By setting the annotation `karpenter.sh/do-not-disrupt: "true"` on the NodePool, you will selectively prevent all nodes launched by this NodePool from being considered in disruption actions.
+
+```yaml
+apiVersion: karpenter.sh/v1beta1
+kind: NodePool
+metadata:
+  name: default
+spec:
+  template:
+    metadata:
+      annotations: # will be applied to all nodes
+        karpenter.sh/do-not-disrupt: "true"
+```
diff --git a/website/content/en/v0.35/concepts/nodeclasses.md b/website/content/en/v0.35/concepts/nodeclasses.md
new file mode 100644
index 000000000000..e95f576cef11
--- /dev/null
+++ b/website/content/en/v0.35/concepts/nodeclasses.md
@@ -0,0 +1,1243 @@
+---
+title: "NodeClasses"
+linkTitle: "NodeClasses"
+weight: 2
+description: >
+  Configure AWS-specific settings with EC2NodeClasses
+---
+
+Node Classes enable configuration of AWS-specific settings.
+Each NodePool must reference an EC2NodeClass using `spec.template.spec.nodeClassRef`.
+Multiple NodePools may point to the same EC2NodeClass.
+
+```yaml
+apiVersion: karpenter.sh/v1beta1
+kind: NodePool
+metadata:
+  name: default
+spec:
+  template:
+    spec:
+      nodeClassRef:
+        apiVersion: karpenter.k8s.aws/v1beta1
+        kind: EC2NodeClass
+        name: default
+---
+apiVersion: karpenter.k8s.aws/v1beta1
+kind: EC2NodeClass
+metadata:
+  name: default
+spec:
+  # Required, resolves a default ami and userdata
+  amiFamily: AL2
+
+  # Required, discovers subnets to attach to instances
+  # Each term in the array of subnetSelectorTerms is ORed together
+  # Within a single term, all conditions are ANDed
+  subnetSelectorTerms:
+    # Select on any subnet that has the "karpenter.sh/discovery: ${CLUSTER_NAME}"
+    # AND the "environment: test" tag OR any subnet with ID "subnet-09fa4a0a8f233a921"
+    - tags:
+        karpenter.sh/discovery: "${CLUSTER_NAME}"
+        environment: test
+    - id: subnet-09fa4a0a8f233a921
+
+  # Required, discovers security groups to attach to instances
+  # Each term in the array of securityGroupSelectorTerms is ORed together
+  # Within a single term, all conditions are ANDed
+  securityGroupSelectorTerms:
+    # Select on any security group that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag
+    # AND the "environment: test" tag OR any security group with the "my-security-group" name
+    # OR any security group with ID "sg-063d7acfb4b06c82c"
+    - tags:
+        karpenter.sh/discovery: "${CLUSTER_NAME}"
+        environment: test
+    - name: my-security-group
+    - id: sg-063d7acfb4b06c82c
+
+  # Optional, IAM role to use for the node identity.
+  # The "role" field is immutable after EC2NodeClass creation. This may change in the
+  # future, but this restriction is currently in place today to ensure that Karpenter
+  # avoids leaking managed instance profiles in your account.
+  # Must specify one of "role" or "instanceProfile" for Karpenter to launch nodes
+  role: "KarpenterNodeRole-${CLUSTER_NAME}"
+
+  # Optional, IAM instance profile to use for the node identity.
+  # Must specify one of "role" or "instanceProfile" for Karpenter to launch nodes
+  instanceProfile: "KarpenterNodeInstanceProfile-${CLUSTER_NAME}"
+
+  # Optional, discovers amis to override the amiFamily's default amis
+  # Each term in the array of amiSelectorTerms is ORed together
+  # Within a single term, all conditions are ANDed
+  amiSelectorTerms:
+    # Select on any AMI that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag
+    # AND the "environment: test" tag OR any AMI with the "my-ami" name
+    # OR any AMI with ID "ami-123"
+    - tags:
+        karpenter.sh/discovery: "${CLUSTER_NAME}"
+        environment: test
+    - name: my-ami
+    - id: ami-123
+
+  # Optional, use instance-store volumes for node ephemeral-storage
+  instanceStorePolicy: RAID0
+
+  # Optional, overrides autogenerated userdata with a merge semantic
+  userData: |
+    echo "Hello world"
+
+  # Optional, propagates tags to underlying EC2 resources
+  tags:
+    team: team-a
+    app: team-a-app
+
+  # Optional, configures IMDS for the instance
+  metadataOptions:
+    httpEndpoint: enabled
+    httpProtocolIPv6: disabled
+    httpPutResponseHopLimit: 2
+    httpTokens: required
+
+  # Optional, configures storage devices for the instance
+  blockDeviceMappings:
+    - deviceName: /dev/xvda
+      ebs:
+        volumeSize: 100Gi
+        volumeType: gp3
+        iops: 10000
+        encrypted: true
+        kmsKeyID: "1234abcd-12ab-34cd-56ef-1234567890ab"
+        deleteOnTermination: true
+        throughput: 125
+        snapshotID: snap-0123456789
+
+  # Optional, configures detailed monitoring for the instance
+  detailedMonitoring: true
+
+  # Optional, configures if the instance should be launched with an associated public IP address.
+  # If not specified, the default value depends on the subnet's public IP auto-assign setting.
+  associatePublicIPAddress: true
+status:
+  # Resolved subnets
+  subnets:
+    - id: subnet-0a462d98193ff9fac
+      zone: us-east-2b
+    - id: subnet-0322dfafd76a609b6
+      zone: us-east-2c
+    - id: subnet-0727ef01daf4ac9fe
+      zone: us-east-2b
+    - id: subnet-00c99aeafe2a70304
+      zone: us-east-2a
+    - id: subnet-023b232fd5eb0028e
+      zone: us-east-2c
+    - id: subnet-03941e7ad6afeaa72
+      zone: us-east-2a
+
+  # Resolved security groups
+  securityGroups:
+    - id: sg-041513b454818610b
+      name: ClusterSharedNodeSecurityGroup
+    - id: sg-0286715698b894bca
+      name: ControlPlaneSecurityGroup-1AQ073TSAAPW
+
+  # Resolved AMIs
+  amis:
+    - id: ami-01234567890123456
+      name: custom-ami-amd64
+      requirements:
+        - key: kubernetes.io/arch
+          operator: In
+          values:
+            - amd64
+    - id: ami-01234567890123456
+      name: custom-ami-arm64
+      requirements:
+        - key: kubernetes.io/arch
+          operator: In
+          values:
+            - arm64
+
+  # Generated instance profile name from "role"
+  instanceProfile: "${CLUSTER_NAME}-0123456778901234567789"
+```
+Refer to the [NodePool docs]({{}}) for settings applicable to all providers. To explore various `EC2NodeClass` configurations, refer to the examples provided [in the Karpenter GitHub repository](https://github.com/aws/karpenter/blob/main/examples/v1beta1/).
+
+## spec.amiFamily
+
+AMIFamily is a required field that dictates the default bootstrapping logic for nodes provisioned through this `EC2NodeClass` and also selects a group of recommended, latest AMIs by default. Currently, Karpenter supports the `amiFamily` values `AL2`, `AL2023`, `Bottlerocket`, `Ubuntu`, `Windows2019`, `Windows2022`, and `Custom`. GPUs are only supported by default with `AL2` and `Bottlerocket`. The `AL2` amiFamily does not support ARM64 GPU instance types unless you specify custom [`amiSelectorTerms`]({{}}).
+Default bootstrapping logic is shown below for each of the supported families.
+
+### AL2
+
+```bash
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="//"
+
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+
+#!/bin/bash -xe
+exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
+/etc/eks/bootstrap.sh 'test-cluster' --apiserver-endpoint 'https://test-cluster' --b64-cluster-ca 'ca-bundle' \
+--dns-cluster-ip '10.100.0.10' \
+--use-max-pods false \
+--kubelet-extra-args '--node-labels=karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=test --max-pods=110'
+--//--
+```
+
+### AL2023
+
+```text
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="//"
+
+--//
+Content-Type: application/node.eks.aws
+
+# Karpenter Generated NodeConfig
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  cluster:
+    name: test-cluster
+    apiServerEndpoint: https://example.com
+    certificateAuthority: ca-bundle
+    cidr: 10.100.0.0/16
+  kubelet:
+    config:
+      maxPods: 110
+    flags:
+      - --node-labels=karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=test
+
+--//--
+```
+
+### Bottlerocket
+
+```toml
+[settings]
+[settings.kubernetes]
+api-server = 'https://test-cluster'
+cluster-certificate = 'ca-bundle'
+cluster-name = 'test-cluster'
+cluster-dns-ip = '10.100.0.10'
+max-pods = 110
+
+[settings.kubernetes.node-labels]
+'karpenter.sh/capacity-type' = 'on-demand'
+'karpenter.sh/nodepool' = 'test'
+```
+
+### Ubuntu
+
+```bash
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="//"
+
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+
+#!/bin/bash -xe
+exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
+/etc/eks/bootstrap.sh 'test-cluster' --apiserver-endpoint 'https://test-cluster' --b64-cluster-ca 'ca-bundle' \
+--dns-cluster-ip '10.100.0.10' \
+--use-max-pods false \
+--kubelet-extra-args '--node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=test" --max-pods=110'
+--//--
+```
+
+### Windows2019
+
+```powershell
+[string]$EKSBootstrapScriptFile = "$env:ProgramFiles\Amazon\EKS\Start-EKSBootstrap.ps1"
+& $EKSBootstrapScriptFile -EKSClusterName 'test-cluster' -APIServerEndpoint 'https://test-cluster' -Base64ClusterCA 'ca-bundle' -KubeletExtraArgs '--node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=test" --max-pods=110' -DNSClusterIP '10.100.0.10'
+```
+
+### Windows2022
+
+```powershell
+[string]$EKSBootstrapScriptFile = "$env:ProgramFiles\Amazon\EKS\Start-EKSBootstrap.ps1"
+& $EKSBootstrapScriptFile -EKSClusterName 'test-cluster' -APIServerEndpoint 'https://test-cluster' -Base64ClusterCA 'ca-bundle' -KubeletExtraArgs '--node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=test" --max-pods=110' -DNSClusterIP '10.100.0.10'
+```
+
+{{% alert title="Note" color="primary" %}}
+Karpenter will automatically query for the appropriate [EKS optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-amis.html) via AWS Systems Manager (SSM). In the case of the `Custom` AMIFamily, no default AMIs are defined. As a result, `amiSelectorTerms` must be specified to inform Karpenter on which custom AMIs are to be used.
+{{% /alert %}}
+
+### Custom
+
+The `Custom` AMIFamily ships without any default userData to allow you to configure custom bootstrapping for control planes or images that don't support the default methods from the other families.
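+As a hedged illustration (the AMI ID and userData content are placeholders, not working bootstrap logic), a `Custom` EC2NodeClass supplies both its own `amiSelectorTerms` and `userData`:
+
+```yaml
+apiVersion: karpenter.k8s.aws/v1beta1
+kind: EC2NodeClass
+metadata:
+  name: custom
+spec:
+  amiFamily: Custom
+  amiSelectorTerms:
+    - id: ami-0123456789abcdef0   # placeholder AMI ID
+  userData: |
+    #!/bin/bash
+    # Site-specific bootstrap goes here; Karpenter generates no default
+    # userData for the Custom family.
+    echo "bootstrapping node"
+  role: "KarpenterNodeRole-${CLUSTER_NAME}"
+  subnetSelectorTerms:
+    - tags:
+        karpenter.sh/discovery: "${CLUSTER_NAME}"
+  securityGroupSelectorTerms:
+    - tags:
+        karpenter.sh/discovery: "${CLUSTER_NAME}"
+```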
+ +## spec.subnetSelectorTerms + +Subnet Selector Terms allow you to specify selection logic for a set of subnet options that Karpenter can choose from when launching an instance from the `EC2NodeClass`. Karpenter discovers subnets through the `EC2NodeClass` using ids or [tags](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). When launching nodes, a subnet is automatically chosen that matches the desired zone. If multiple subnets exist for a zone, the one with the most available IP addresses will be used. + +This selection logic is modeled as terms, where each term contains multiple conditions that must all be satisfied for the selector to match. Effectively, all requirements within a single term are ANDed together. It's possible that you may want to select on two different subnets that have unrelated requirements. In this case, you can specify multiple terms which will be ORed together to form your selection logic. The example below shows how this selection logic is fulfilled. + +```yaml +subnetSelectorTerms: + # Select on any subnet that has the "karpenter.sh/discovery: ${CLUSTER_NAME}" + # AND the "environment: test" tag OR any subnet with ID "subnet-09fa4a0a8f233a921" + - tags: + karpenter.sh/discovery: "${CLUSTER_NAME}" + environment: test + - id: subnet-09fa4a0a8f233a921 +``` + +{{% alert title="Tip" color="secondary" %}} +Subnets may be specified by any tag, including `Name`. Selecting tag values using wildcards (`*`) is supported. +{{% /alert %}} + +#### Examples + +Select all with a specified tag key: +```yaml +spec: + subnetSelectorTerms: + - tags: + karpenter.sh/discovery/MyClusterName: '*' +``` + +Select by name and tag (all criteria must match): +```yaml +spec: + subnetSelectorTerms: + - tags: + Name: my-subnet + MyTag: '' # matches all resources with the tag +``` + +Select using multiple tag terms: +```yaml +spec: + subnetSelectorTerms: + - tags: + Name: "my-subnet-1" + - tags: + Name: "my-subnet-2" +``` + +Select using wildcards: +```yaml +spec: + subnetSelectorTerms: + - tags: + Name: "*Public*" + +``` + +Select using ids: +```yaml +spec: + subnetSelectorTerms: + - id: "subnet-09fa4a0a8f233a921" + - id: "subnet-0471ca205b8a129ae" +``` + + +## spec.securityGroupSelectorTerms + +Security Group Selector Terms allow you to specify selection logic for all security groups that will be attached to an instance launched from the `EC2NodeClass`. The security group of an instance is comparable to a set of firewall rules. +[EKS creates at least two security groups by default](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html). + +This selection logic is modeled as terms, where each term contains multiple conditions that must all be satisfied for the selector to match. Effectively, all requirements within a single term are ANDed together. It's possible that you may want to select on two different security groups that have unrelated requirements. In this case, you can specify multiple terms which will be ORed together to form your selection logic. The example below shows how this selection logic is fulfilled. 
+
+```yaml
+securityGroupSelectorTerms:
+  # Select on any security group that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag
+  # AND the "environment: test" tag OR any security group with the "my-security-group" name
+  # OR any security group with ID "sg-063d7acfb4b06c82c"
+  - tags:
+      karpenter.sh/discovery: "${CLUSTER_NAME}"
+      environment: test
+  - name: my-security-group
+  - id: sg-063d7acfb4b06c82c
+```
+
+{{% alert title="Tip" color="secondary" %}}
+Security groups may be specified by any tag, including `Name`. Selecting tag values using wildcards (`*`) is supported.
+{{% /alert %}}
+
+{{% alert title="Note" color="primary" %}}
+When launching nodes, Karpenter uses all the security groups that match the selector. If you choose to use the `kubernetes.io/cluster/$CLUSTER_NAME` tag for discovery, note that this may result in failures using the AWS Load Balancer controller. The Load Balancer controller only supports a single security group having that tag key. See [this issue](https://github.com/kubernetes-sigs/aws-load-balancer-controller/issues/2367) for more details.
+
+To verify whether this restriction affects you, run the following commands.
+```bash
+CLUSTER_VPC_ID="$(aws eks describe-cluster --name $CLUSTER_NAME --query cluster.resourcesVpcConfig.vpcId --output text)"
+
+aws ec2 describe-security-groups --filters Name=vpc-id,Values=$CLUSTER_VPC_ID Name=tag-key,Values=kubernetes.io/cluster/$CLUSTER_NAME --query 'SecurityGroups[].[GroupName]' --output text
+```
+
+If multiple security groups are printed, you will need more specific `securityGroupSelectorTerms`. We generally recommend that you use the `karpenter.sh/discovery: $CLUSTER_NAME` tag selector instead.
+{{% /alert %}}
+
+#### Examples
+
+Select all assigned to a cluster:
+```yaml
+spec:
+  securityGroupSelectorTerms:
+    - tags:
+        kubernetes.io/cluster/$CLUSTER_NAME: "owned"
+```
+
+Select all with a specified tag key:
+```yaml
+spec:
+  securityGroupSelectorTerms:
+    - tags:
+        MyTag: '*'
+```
+
+Select by name and tag (all criteria must match):
+```yaml
+spec:
+  securityGroupSelectorTerms:
+    - name: my-security-group
+      tags:
+        MyTag: '*' # matches all resources with the tag
+```
+
+Select using multiple tag terms:
+```yaml
+spec:
+  securityGroupSelectorTerms:
+    - tags:
+        Name: "my-security-group-1"
+    - tags:
+        Name: "my-security-group-2"
+```
+
+Select by name using a wildcard:
+```yaml
+spec:
+  securityGroupSelectorTerms:
+    - name: "*Public*"
+```
+
+Select using ids:
+```yaml
+spec:
+  securityGroupSelectorTerms:
+    - id: "sg-063d7acfb4b06c82c"
+    - id: "sg-06e0cf9c198874591"
+```
+
+## spec.amiSelectorTerms
+
+AMI Selector Terms are used to configure custom AMIs for Karpenter to use, where the AMIs are discovered through ids, owners, names, and [tags](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). **When you specify `amiSelectorTerms`, you fully override the default AMIs that would otherwise be selected by your EC2NodeClass [`amiFamily`]({{< ref "#specamifamily" >}}).**
+
+This selection logic is modeled as terms, where each term contains multiple conditions that must all be satisfied for the selector to match. Effectively, all requirements within a single term are ANDed together. It's possible that you may want to select on two different AMIs that have unrelated requirements. In this case, you can specify multiple terms which will be ORed together to form your selection logic. The example below shows how this selection logic is fulfilled.
+
+```yaml
+amiSelectorTerms:
+  # Select on any AMI that has both the "karpenter.sh/discovery: ${CLUSTER_NAME}" tag
+  # AND the "environment: test" tag OR any AMI with the "my-ami" name
+  # OR any AMI with ID "ami-123"
+  - tags:
+      karpenter.sh/discovery: "${CLUSTER_NAME}"
+      environment: test
+  - name: my-ami
+  - id: ami-123
+```
+
+This field is optional, and Karpenter will use the latest EKS-optimized AMIs for the AMIFamily if no amiSelectorTerms are specified. To select an AMI by name, use the `name` field in the selector term. To select an AMI by id, use the `id` field in the selector term. To ensure that AMIs are owned by the expected owner, use the `owner` field - you can use a combination of account aliases (e.g. `self`, `amazon`, `your-aws-account-name`) and account IDs.
+
+If owner is not set for `name`, it defaults to `self,amazon`, preventing Karpenter from inadvertently selecting an AMI that is owned by a different account. Tags don't require an owner as tags can only be discovered by the user who created them.
+
+{{% alert title="Tip" color="secondary" %}}
+AMIs may be specified by any AWS tag, including `Name`. Selecting by tag or by name using wildcards (`*`) is supported.
+{{% /alert %}}
+
+{{% alert title="Note" color="primary" %}}
+If `amiSelectorTerms` match more than one AMI, Karpenter will automatically determine which AMI best fits the workloads on the launched worker node under the following constraints:
+
+* When launching nodes, Karpenter automatically determines which architecture a custom AMI is compatible with and will use images that match an instanceType's requirements.
+* If multiple AMIs are found that can be used, Karpenter will choose the latest one.
+* If no AMIs are found that can be used, then no nodes will be provisioned.
+{{% /alert %}}
+
+#### Examples
+
+Select all with a specified tag:
+```yaml
+  amiSelectorTerms:
+    - tags:
+        karpenter.sh/discovery/MyClusterName: '*'
+```
+
+Select by name:
+```yaml
+  amiSelectorTerms:
+    - name: my-ami
+```
+
+Select by `Name` tag:
+```yaml
+  amiSelectorTerms:
+    - tags:
+        Name: my-ami
+```
+
+Select by name and owner:
+```yaml
+  amiSelectorTerms:
+    - name: my-ami
+      owner: self
+    - name: my-ami
+      owner: 0123456789
+```
+
+Select by name using a wildcard:
+```yaml
+spec:
+  amiSelectorTerms:
+    - name: "*EKS*"
+```
+
+Select all under an owner:
+```yaml
+spec:
+  amiSelectorTerms:
+    - name: "*"
+      owner: self
+```
+
+Specify using ids:
+```yaml
+  amiSelectorTerms:
+    - id: "ami-123"
+    - id: "ami-456"
+```
+
+## spec.role
+
+`Role` is an optional field and tells Karpenter which IAM identity nodes should assume. You must specify one of `role` or `instanceProfile` when creating a Karpenter `EC2NodeClass`. If using the [Karpenter Getting Started Guide]({{}}) to deploy Karpenter, you can use the `KarpenterNodeRole-$CLUSTER_NAME` role provisioned by that process.
+
+```yaml
+spec:
+  role: "KarpenterNodeRole-$CLUSTER_NAME"
+```
+
+## spec.instanceProfile
+
+`InstanceProfile` is an optional field and tells Karpenter which IAM identity nodes should assume. You must specify one of `role` or `instanceProfile` when creating a Karpenter `EC2NodeClass`. If you use the `instanceProfile` field instead of `role`, Karpenter will not manage the InstanceProfile on your behalf; instead, it expects that you have pre-provisioned an IAM instance profile and assigned it a role.
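+
+For example, mirroring the `spec.role` snippet above (the profile name is a placeholder; use whichever pre-provisioned instance profile you created):
+
+```yaml
+spec:
+  instanceProfile: "KarpenterNodeInstanceProfile-${CLUSTER_NAME}"
+```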
+
+You can provision and assign a role to an IAM instance profile using [CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) or by using the [`aws iam create-instance-profile`](https://docs.aws.amazon.com/cli/latest/reference/iam/create-instance-profile.html) and [`aws iam add-role-to-instance-profile`](https://docs.aws.amazon.com/cli/latest/reference/iam/add-role-to-instance-profile.html) commands in the CLI.
+
+{{% alert title="Note" color="primary" %}}
+
+For [private clusters](https://docs.aws.amazon.com/eks/latest/userguide/private-clusters.html) that do not have access to the public internet, using `spec.instanceProfile` is required. `spec.role` cannot be used since Karpenter needs to access IAM endpoints to manage a generated instance profile. IAM [doesn't support private endpoints](https://docs.aws.amazon.com/vpc/latest/privatelink/aws-services-privatelink-support.html) to enable accessing the service without going to the public internet.
+
+{{% /alert %}}
+
+## spec.tags
+
+Karpenter adds tags to all resources it creates, including EC2 Instances, EBS volumes, and Launch Templates. The default set of tags is listed below.
+
+```yaml
+Name: <node-name>
+karpenter.sh/nodeclaim: <nodeclaim-name>
+karpenter.sh/nodepool: <nodepool-name>
+karpenter.k8s.aws/ec2nodeclass: <ec2nodeclass-name>
+kubernetes.io/cluster/<cluster-name>: owned
+```
+
+Additional tags can be added in the tags section, which will be merged with the default tags specified above.
+```yaml
+spec:
+  tags:
+    InternalAccountingTag: 1234
+    dev.corp.net/app: Calculator
+    dev.corp.net/team: MyTeam
+```
+
+{{% alert title="Note" color="primary" %}}
+Karpenter allows overrides of the default "Name" tag but does not allow overrides to restricted domains (such as "karpenter.sh", "karpenter.k8s.aws", and "kubernetes.io/cluster"). This ensures that Karpenter is able to correctly auto-discover nodes that it owns.
+{{% /alert %}}
+
+## spec.metadataOptions
+
+Control the exposure of [Instance Metadata Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) on EC2 Instances launched by this EC2NodeClass using a generated launch template.
+
+Refer to the [recommended security best practices](https://aws.github.io/aws-eks-best-practices/security/docs/iam/#restrict-access-to-the-instance-profile-assigned-to-the-worker-node) for limiting exposure of Instance Metadata and User Data to pods.
+
+If metadataOptions are omitted from this EC2NodeClass, the following default settings are applied:
+
+```yaml
+spec:
+  metadataOptions:
+    httpEndpoint: enabled
+    httpProtocolIPv6: disabled
+    httpPutResponseHopLimit: 2
+    httpTokens: required
+```
+
+## spec.blockDeviceMappings
+
+The `blockDeviceMappings` field in an `EC2NodeClass` can be used to control the [Elastic Block Storage (EBS) volumes](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html#instance-block-device-mapping) that Karpenter attaches to provisioned nodes. Karpenter uses default block device mappings for the AMIFamily specified. For example, the `Bottlerocket` AMI Family defaults to two block device mappings, one for Bottlerocket's control volume and the other for container resources such as images and logs.
+
+```yaml
+spec:
+  blockDeviceMappings:
+    - deviceName: /dev/xvda
+      ebs:
+        volumeSize: 100Gi
+        volumeType: gp3
+        iops: 10000
+        encrypted: true
+        kmsKeyID: "1234abcd-12ab-34cd-56ef-1234567890ab"
+        deleteOnTermination: true
+        throughput: 125
+        snapshotID: snap-0123456789
+```
+
+The following blockDeviceMapping defaults are used for each `AMIFamily` if no `blockDeviceMapping` overrides are specified in the `EC2NodeClass`.
+
+### AL2
+```yaml
+spec:
+  blockDeviceMappings:
+    - deviceName: /dev/xvda
+      ebs:
+        volumeSize: 20Gi
+        volumeType: gp3
+        encrypted: true
+```
+
+### AL2023
+```yaml
+spec:
+  blockDeviceMappings:
+    - deviceName: /dev/xvda
+      ebs:
+        volumeSize: 20Gi
+        volumeType: gp3
+        encrypted: true
+```
+
+### Bottlerocket
+```yaml
+spec:
+  blockDeviceMappings:
+    # Root device
+    - deviceName: /dev/xvda
+      ebs:
+        volumeSize: 4Gi
+        volumeType: gp3
+        encrypted: true
+    # Data device: Container resources such as images and logs
+    - deviceName: /dev/xvdb
+      ebs:
+        volumeSize: 20Gi
+        volumeType: gp3
+        encrypted: true
+```
+
+### Ubuntu
+```yaml
+spec:
+  blockDeviceMappings:
+    - deviceName: /dev/sda1
+      ebs:
+        volumeSize: 20Gi
+        volumeType: gp3
+        encrypted: true
+```
+
+### Windows2019/Windows2022
+```yaml
+spec:
+  blockDeviceMappings:
+    - deviceName: /dev/sda1
+      ebs:
+        volumeSize: 50Gi
+        volumeType: gp3
+        encrypted: true
+```
+
+### Custom
+
+The `Custom` AMIFamily ships without any default `blockDeviceMappings`.
+
+## spec.instanceStorePolicy
+
+The `instanceStorePolicy` field controls how [instance-store](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) volumes are handled. By default, Karpenter and Kubernetes will simply ignore them.
+
+### RAID0
+
+If you intend to use these volumes for faster node ephemeral-storage, set `instanceStorePolicy` to `RAID0`:
+
+```yaml
+spec:
+  instanceStorePolicy: RAID0
+```
+
+This will set the allocatable ephemeral-storage of each node to the total size of the instance-store volume(s).
+
+The disks must be formatted & mounted in a RAID0 and be the underlying filesystem for the Kubelet & Containerd. Instructions for each AMI family are listed below:
+
+#### AL2
+
+On AL2, Karpenter automatically configures the disks through an additional bootstrap argument (`--local-disks raid0`). The device name is `/dev/md/0` and its mount point is `/mnt/k8s-disks/0`. You should ensure any additional disk setup does not interfere with these.
+
+#### AL2023
+
+On AL2023, Karpenter automatically configures the disks via the generated `NodeConfig` object. Like AL2, the device name is `/dev/md/0` and its mount point is `/mnt/k8s-disks/0`. You should ensure any additional disk setup does not interfere with these.
+
+#### Others
+
+For all other AMI families, you must configure the disks yourself. Check out the [`setup-local-disks`](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bin/setup-local-disks) script in [amazon-eks-ami](https://github.com/awslabs/amazon-eks-ami) to see how this is done for AL2.
+
+{{% alert title="Tip" color="secondary" %}}
+Since the Kubelet & Containerd will be using the instance-store filesystem, you may consider using a more minimal root volume size.
+{{% /alert %}}
+
+## spec.userData
+
+You can control the UserData that is applied to your worker nodes via this field. This allows you to run custom scripts or pass through custom configuration to Karpenter instances on start-up.
+
+```yaml
+apiVersion: karpenter.k8s.aws/v1beta1
+kind: EC2NodeClass
+metadata:
+  name: bottlerocket-example
+spec:
+  ...
+  amiFamily: Bottlerocket
+  userData: |
+    [settings.kubernetes]
+    "kube-api-qps" = 30
+    "shutdown-grace-period" = "30s"
+    "shutdown-grace-period-for-critical-pods" = "30s"
+    [settings.kubernetes.eviction-hard]
+    "memory.available" = "20%"
+```
+
+This example adds SSH keys to allow remote login to the node (replace *my-authorized_keys* with your key file):
+
+{{% alert title="Note" color="primary" %}}
+Instead of using SSH as set up in this example, you can use Session Manager (SSM) or EC2 Instance Connect to gain shell access to Karpenter nodes.
+See [Node NotReady]({{< ref "../troubleshooting/#node-notready" >}}) troubleshooting for an example of starting an SSM session from the command line or [EC2 Instance Connect](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-connect-set-up.html) documentation to connect to nodes using SSH.
+{{% /alert %}}
+
+```yaml
+apiVersion: karpenter.k8s.aws/v1beta1
+kind: EC2NodeClass
+metadata:
+  name: al2-example
+spec:
+  ...
+  amiFamily: AL2
+  userData: |
+    #!/bin/bash
+    mkdir -p ~ec2-user/.ssh/
+    touch ~ec2-user/.ssh/authorized_keys
+    cat >> ~ec2-user/.ssh/authorized_keys <<EOF
+    {{ insert contents of my-authorized_keys here }}
+    EOF
+```
+
+Karpenter will merge the UserData you specify with the default UserData for that AMIFamily. See the [AMIFamily]({{< ref "#specamifamily" >}}) section for more details on these defaults. View the sections below to understand the different merge strategies for each AMIFamily.
+
+### AL2/Ubuntu
+
+* Your UserData can be in the [MIME multi part archive](https://cloudinit.readthedocs.io/en/latest/topics/format.html#mime-multi-part-archive) format.
+* Karpenter will transform your custom user-data into a MIME part, if necessary, and then merge a final MIME part to the end of your UserData parts which will bootstrap the worker node. Karpenter will have full control over all the parameters being passed to the bootstrap script.
+  * Karpenter will continue to set MaxPods, ClusterDNS and all other parameters defined in `spec.kubeletConfiguration` as before.
+
+Consider the following example to understand how your custom UserData will be merged in.
+
+#### Passed-in UserData (bash)
+
+```bash
+#!/bin/bash
+echo "Running custom user data script (bash)"
+```
+
+#### Merged UserData (bash)
+
+```bash
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="//"
+
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+
+#!/bin/bash
+echo "Running custom user data script (bash)"
+
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+
+#!/bin/bash -xe
+exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
+/etc/eks/bootstrap.sh 'test-cluster' --apiserver-endpoint 'https://test-cluster' --b64-cluster-ca 'ca-bundle' \
+--use-max-pods false \
+--kubelet-extra-args '--node-labels=karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=test --max-pods=110'
+--//--
+```
+
+#### Passed-in UserData (MIME)
+
+```bash
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/x-shellscript; charset="us-ascii"
+
+#!/bin/bash
+echo "Running custom user data script (mime)"
+
+--BOUNDARY--
+```
+
+#### Merged UserData (MIME)
+
+```bash
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="//"
+
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+
+#!/bin/bash
+echo "Running custom user data script (mime)"
+
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+
+#!/bin/bash -xe
+exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
+/etc/eks/bootstrap.sh 'test-cluster' --apiserver-endpoint 'https://test-cluster' --b64-cluster-ca 'ca-bundle' \
+--use-max-pods false \
+--kubelet-extra-args '--node-labels=karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=test --max-pods=110'
+--//--
+```
+
+{{% alert title="Note" color="primary" %}}
+You can also set kubelet-config properties by modifying the kubelet-config.json file before the EKS bootstrap script starts the kubelet:
+
+```yaml
+apiVersion: karpenter.k8s.aws/v1beta1
+kind: EC2NodeClass
+metadata:
+  name: kubelet-config-example
+spec:
+  ...
+  amiFamily: AL2
+  userData: |
+    #!/bin/bash
+    echo "$(jq '.kubeAPIQPS=50' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json
+```
+{{% /alert %}}
+
+### AL2023
+
+* Your UserData may be in one of three formats: a [MIME multi part archive](https://cloudinit.readthedocs.io/en/latest/topics/format.html#mime-multi-part-archive), a NodeConfig YAML / JSON string, or a shell script.
+* Karpenter will transform your custom UserData into a MIME part, if necessary, and then create a MIME multi-part archive. This archive will consist of a generated NodeConfig, containing Karpenter's default values, followed by the transformed custom UserData. For more information on the NodeConfig spec, refer to the [AL2023 EKS Optimized AMI docs](https://awslabs.github.io/amazon-eks-ami/nodeadm/doc/examples/).
+* If the same value is specified both in the Karpenter-generated NodeConfig and in your custom UserData, the value in your custom UserData will take precedence.
+
+#### Passed-in UserData (NodeConfig)
+
+```yaml
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  kubelet:
+    config:
+      maxPods: 42
+```
+
+#### Merged UserData (NodeConfig)
+
+```text
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="//"
+
+--//
+Content-Type: application/node.eks.aws
+
+# Karpenter Generated NodeConfig
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  cluster:
+    apiServerEndpoint: https://test-cluster
+    certificateAuthority: cluster-ca
+    cidr: 10.100.0.0/16
+    name: test-cluster
+  kubelet:
+    config:
+      clusterDNS:
+      - 10.100.0.10
+      maxPods: 118
+    flags:
+    - --node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=default"
+
+--//
+Content-Type: application/node.eks.aws
+
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  kubelet:
+    config:
+      maxPods: 42
+--//--
+```
+
+#### Passed-in UserData (bash)
+
+```shell
+#!/bin/bash
+echo "Hello, AL2023!"
+```
+
+#### Merged UserData (bash)
+
+```text
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="//"
+
+--//
+Content-Type: application/node.eks.aws
+
+# Karpenter Generated NodeConfig
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  cluster:
+    apiServerEndpoint: https://test-cluster
+    certificateAuthority: cluster-ca
+    cidr: 10.100.0.0/16
+    name: test-cluster
+  kubelet:
+    config:
+      clusterDNS:
+      - 10.100.0.10
+      maxPods: 118
+    flags:
+    - --node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=default"
+
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+
+#!/bin/bash
+echo "Hello, AL2023!"
+--//--
+```
+
+#### Passed-in UserData (MIME)
+
+```text
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="//"
+
+--//
+Content-Type: application/node.eks.aws
+
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  kubelet:
+    config:
+      maxPods: 42
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+
+#!/bin/bash
+echo "Hello, AL2023!"
+--//
+```
+
+#### Merged UserData (MIME)
+
+```text
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="//"
+
+--//
+Content-Type: application/node.eks.aws
+
+# Karpenter Generated NodeConfig
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  cluster:
+    apiServerEndpoint: https://test-cluster
+    certificateAuthority: cluster-ca
+    cidr: 10.100.0.0/16
+    name: test-cluster
+  kubelet:
+    config:
+      clusterDNS:
+      - 10.100.0.10
+      maxPods: 118
+    flags:
+    - --node-labels="karpenter.sh/capacity-type=on-demand,karpenter.sh/nodepool=default"
+
+--//
+Content-Type: application/node.eks.aws
+
+apiVersion: node.eks.aws/v1alpha1
+kind: NodeConfig
+spec:
+  kubelet:
+    config:
+      maxPods: 42
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+
+#!/bin/bash
+echo "Hello, AL2023!"
+--//--
+```
+
+### Bottlerocket
+
+* Your UserData must be valid TOML.
+* Karpenter will automatically merge settings to ensure successful bootstrap including `cluster-name`, `api-server` and `cluster-certificate`. Any labels and taints that need to be set based on pod requirements will also be specified in the final merged UserData.
+  * All Kubelet settings that Karpenter applies will override the corresponding settings in the provided UserData. For example, if you've specified `settings.kubernetes.cluster-name`, it will be overridden.
+  * If MaxPods is specified via the binary arg to Karpenter, the value will override anything specified in the UserData.
+ * If ClusterDNS is specified via `spec.kubeletConfiguration`, then that value will override anything specified in the UserData. +* Unknown TOML fields will be ignored when the final merged UserData is generated by Karpenter. + +Consider the following example to understand how your custom UserData settings will be merged in. + +#### Passed-in UserData + +```toml +[settings.kubernetes.eviction-hard] +"memory.available" = "12%" +[settings.kubernetes] +"unknown-setting" = "unknown" +[settings.kubernetes.node-labels] +'field.controlled.by/karpenter' = 'will-be-overridden' +``` + +#### Merged UserData + +```toml +[settings] +[settings.kubernetes] +api-server = 'https://cluster' +cluster-certificate = 'ca-bundle' +cluster-name = 'cluster' + +[settings.kubernetes.node-labels] +'karpenter.sh/capacity-type' = 'on-demand' +'karpenter.sh/nodepool' = 'default' + +[settings.kubernetes.node-taints] + +[settings.kubernetes.eviction-hard] +'memory.available' = '12%%' +``` + +### Windows2019/Windows2022 + +* Your UserData must be specified as PowerShell commands. +* The UserData specified will be prepended to a Karpenter managed section that will bootstrap the kubelet. +* Karpenter will continue to set ClusterDNS and all other parameters defined in spec.kubeletConfiguration as before. + +Consider the following example to understand how your custom UserData settings will be merged in. + +#### Passed-in UserData + +```powershell +Write-Host "Running custom user data script" +``` + +#### Merged UserData + +```powershell + +Write-Host "Running custom user data script" +[string]$EKSBootstrapScriptFile = "$env:ProgramFiles\Amazon\EKS\Start-EKSBootstrap.ps1" +& $EKSBootstrapScriptFile -EKSClusterName 'test-cluster' -APIServerEndpoint 'https://test-cluster' -Base64ClusterCA 'ca-bundle' -KubeletExtraArgs '--node-labels="karpenter.sh/capacity-type=spot,karpenter.sh/nodepool=windows2022" --max-pods=110' -DNSClusterIP '10.0.100.10' + +``` + +{{% alert title="Windows Support Notice" color="warning" %}} +Currently, Karpenter does not specify `-ServiceCIDR` to [EKS Windows AMI Bootstrap script](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-windows-ami.html#bootstrap-script-configuration-parameters). +Windows worker nodes will use `172.20.0.0/16` or `10.100.0.0/16` for Kubernetes service IP address ranges based on the IP address of the primary interface. +The effective ServiceCIDR can be verified at `$env:ProgramData\Amazon\EKS\cni\config\vpc-bridge.conf` on the worker node. + +Support for the Windows ServiceCIDR argument can be tracked in a [Karpenter Github Issue](https://github.com/aws/karpenter/issues/4088). Currently, if the effective ServiceCIDR is incorrect for your windows worker nodes, you can add the following userData as a workaround. + +```yaml +spec: + userData: | + $global:EKSCluster = Get-EKSCluster -Name my-cluster +``` +{{% /alert %}} + +### Custom + +* No merging is performed, your UserData must perform all setup required of the node to allow it to join the cluster. + +## spec.detailedMonitoring + +Enabling detailed monitoring controls the [EC2 detailed monitoring](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html) feature. If you enable this option, the Amazon EC2 console displays monitoring graphs with a 1-minute period for the instances that Karpenter launches. 
+
+```yaml
+spec:
+  detailedMonitoring: true
+```
+
+## spec.associatePublicIPAddress
+
+A boolean field that controls whether instances created by Karpenter for this EC2NodeClass will have an associated public IP address. This overrides the `MapPublicIpOnLaunch` setting applied to the subnet the node is launched in. If this field is not set, the `MapPublicIpOnLaunch` field will be respected.
+
+{{% alert title="Note" color="warning" %}}
+If a `NodeClaim` requests `vpc.amazonaws.com/efa` resources, `spec.associatePublicIPAddress` is respected. However, if this `NodeClaim` requests **multiple** EFA resources and the value for `spec.associatePublicIPAddress` is true, the instance will fail to launch. This is due to an EC2 restriction which requires that the field be set to true only when configuring an instance with a single ENI at launch. When using this field, it is advised that users segregate their EFA workload to use a separate `NodePool` / `EC2NodeClass` pair.
+{{% /alert %}}
+
+## status.subnets
+
+[`status.subnets`]({{< ref "#statussubnets" >}}) contains the resolved `id` and `zone` of the subnets that were selected by the [`spec.subnetSelectorTerms`]({{< ref "#specsubnetselectorterms" >}}) for the node class. The subnets will be sorted by the available IP address count in decreasing order.
+
+#### Examples
+
+```yaml
+spec:
+  subnetSelectorTerms:
+    - tags:
+        karpenter.sh/discovery: "${CLUSTER_NAME}"
+status:
+  subnets:
+    - id: subnet-0a462d98193ff9fac
+      zone: us-east-2b
+    - id: subnet-0322dfafd76a609b6
+      zone: us-east-2c
+    - id: subnet-0727ef01daf4ac9fe
+      zone: us-east-2b
+    - id: subnet-00c99aeafe2a70304
+      zone: us-east-2a
+    - id: subnet-023b232fd5eb0028e
+      zone: us-east-2c
+    - id: subnet-03941e7ad6afeaa72
+      zone: us-east-2a
+```
+
+## status.securityGroups
+
+[`status.securityGroups`]({{< ref "#statussecuritygroups" >}}) contains the resolved `id` and `name` of the security groups that were selected by the [`spec.securityGroupSelectorTerms`]({{< ref "#specsecuritygroupselectorterms" >}}) for the node class.
+
+#### Examples
+
+```yaml
+spec:
+  securityGroupSelectorTerms:
+    - tags:
+        karpenter.sh/discovery: "${CLUSTER_NAME}"
+status:
+  securityGroups:
+    - id: sg-041513b454818610b
+      name: ClusterSharedNodeSecurityGroup
+    - id: sg-0286715698b894bca
+      name: ControlPlaneSecurityGroup-1AQ073TSAAPW
+```
+
+## status.amis
+
+[`status.amis`]({{< ref "#statusamis" >}}) contains the resolved `id`, `name`, and `requirements` of either the default AMIs for the [`spec.amiFamily`]({{< ref "#specamifamily" >}}) or the AMIs selected by the [`spec.amiSelectorTerms`]({{< ref "#specamiselectorterms" >}}) if this field is specified.
+ +#### Examples + +Default AMIs resolved from the AL2 AMIFamily: + +```yaml +spec: + amiFamily: AL2 +status: + amis: + - id: ami-03c3a3dcda64f5b75 + name: amazon-linux-2-gpu + requirements: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - key: karpenter.k8s.aws/instance-gpu-count + operator: Exists + - id: ami-03c3a3dcda64f5b75 + name: amazon-linux-2-gpu + requirements: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - key: karpenter.k8s.aws/instance-accelerator-count + operator: Exists + - id: ami-06afb2d101cc4b8bd + name: amazon-linux-2-arm64 + requirements: + - key: kubernetes.io/arch + operator: In + values: + - arm64 + - key: karpenter.k8s.aws/instance-gpu-count + operator: DoesNotExist + - key: karpenter.k8s.aws/instance-accelerator-count + operator: DoesNotExist + - id: ami-0e28b76d768af234e + name: amazon-linux-2 + requirements: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - key: karpenter.k8s.aws/instance-gpu-count + operator: DoesNotExist + - key: karpenter.k8s.aws/instance-accelerator-count + operator: DoesNotExist +``` + +AMIs resolved from [`spec.amiSelectorTerms`]({{< ref "#specamiselectorterms" >}}): + +```yaml +spec: + amiFamily: AL2 + amiSelectorTerms: + - tags: + karpenter.sh/discovery: "${CLUSTER_NAME}" +status: + amis: + - id: ami-01234567890123456 + name: custom-ami-amd64 + requirements: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - id: ami-01234567890123456 + name: custom-ami-arm64 + requirements: + - key: kubernetes.io/arch + operator: In + values: + - arm64 +``` + +## status.instanceProfile + +[`status.instanceProfile`]({{< ref "#statusinstanceprofile" >}}) contains the resolved instance profile generated by Karpenter from the [`spec.role`]({{< ref "#specrole" >}}) + +```yaml +spec: + role: "KarpenterNodeRole-${CLUSTER_NAME}" +status: + instanceProfile: "${CLUSTER_NAME}-0123456778901234567789" +``` diff --git a/website/content/en/v0.35/concepts/nodepools.md b/website/content/en/v0.35/concepts/nodepools.md new file mode 100644 index 000000000000..f9ee114f47fe --- /dev/null +++ b/website/content/en/v0.35/concepts/nodepools.md @@ -0,0 +1,492 @@ +--- +title: "NodePools" +linkTitle: "NodePools" +weight: 1 +description: > + Configure Karpenter with NodePools +--- + +When you first installed Karpenter, you set up a default NodePool. The NodePool sets constraints on the nodes that can be created by Karpenter and the pods that can run on those nodes. The NodePool can be set to do things like: + +* Define taints to limit the pods that can run on nodes Karpenter creates +* Define any startup taints to inform Karpenter that it should taint the node initially, but that the taint is temporary. +* Limit node creation to certain zones, instance types, and computer architectures +* Set defaults for node expiration + +You can change your NodePool or add other NodePools to Karpenter. +Here are things you should know about NodePools: + +* Karpenter won't do anything if there is not at least one NodePool configured. +* Each NodePool that is configured is looped through by Karpenter. +* If Karpenter encounters a taint in the NodePool that is not tolerated by a Pod, Karpenter won't use that NodePool to provision the pod. +* If Karpenter encounters a startup taint in the NodePool it will be applied to nodes that are provisioned, but pods do not need to tolerate the taint. Karpenter assumes that the taint is temporary and some other system will remove the taint. 
+* It is recommended to create NodePools that are mutually exclusive. So no Pod should match multiple NodePools. If multiple NodePools are matched, Karpenter will use the NodePool with the highest [weight](#specweight). + +For some example `NodePool` configurations, see the [examples in the Karpenter GitHub repository](https://github.com/aws/karpenter/blob/main/examples/v1beta1/). + +```yaml +apiVersion: karpenter.sh/v1beta1 +kind: NodePool +metadata: + name: default +spec: + # Template section that describes how to template out NodeClaim resources that Karpenter will provision + # Karpenter will consider this template to be the minimum requirements needed to provision a Node using this NodePool + # It will overlay this NodePool with Pods that need to schedule to further constrain the NodeClaims + # Karpenter will provision to launch new Nodes for the cluster + template: + metadata: + # Labels are arbitrary key-values that are applied to all nodes + labels: + billing-team: my-team + + # Annotations are arbitrary key-values that are applied to all nodes + annotations: + example.com/owner: "my-team" + spec: + # References the Cloud Provider's NodeClass resource, see your cloud provider specific documentation + nodeClassRef: + name: default + + # Provisioned nodes will have these taints + # Taints may prevent pods from scheduling if they are not tolerated by the pod. + taints: + - key: example.com/special-taint + effect: NoSchedule + + # Provisioned nodes will have these taints, but pods do not need to tolerate these taints to be provisioned by this + # NodePool. These taints are expected to be temporary and some other entity (e.g. a DaemonSet) is responsible for + # removing the taint after it has finished initializing the node. + startupTaints: + - key: example.com/another-taint + effect: NoSchedule + + # Requirements that constrain the parameters of provisioned nodes. + # These requirements are combined with pod.spec.topologySpreadConstraints, pod.spec.affinity.nodeAffinity, pod.spec.affinity.podAffinity, and pod.spec.nodeSelector rules. + # Operators { In, NotIn, Exists, DoesNotExist, Gt, and Lt } are supported. + # https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#operators + requirements: + - key: "karpenter.k8s.aws/instance-category" + operator: In + values: ["c", "m", "r"] + - key: "karpenter.k8s.aws/instance-cpu" + operator: In + values: ["4", "8", "16", "32"] + - key: "karpenter.k8s.aws/instance-hypervisor" + operator: In + values: ["nitro"] + - key: "karpenter.k8s.aws/instance-generation" + operator: Gt + values: ["2"] + - key: "topology.kubernetes.io/zone" + operator: In + values: ["us-west-2a", "us-west-2b"] + - key: "kubernetes.io/arch" + operator: In + values: ["arm64", "amd64"] + - key: "karpenter.sh/capacity-type" + operator: In + values: ["spot", "on-demand"] + + # Karpenter provides the ability to specify a few additional Kubelet args. + # These are all optional and provide support for additional customization and use cases. 
+ kubelet: + clusterDNS: ["10.0.1.100"] + systemReserved: + cpu: 100m + memory: 100Mi + ephemeral-storage: 1Gi + kubeReserved: + cpu: 200m + memory: 100Mi + ephemeral-storage: 3Gi + evictionHard: + memory.available: 5% + nodefs.available: 10% + nodefs.inodesFree: 10% + evictionSoft: + memory.available: 500Mi + nodefs.available: 15% + nodefs.inodesFree: 15% + evictionSoftGracePeriod: + memory.available: 1m + nodefs.available: 1m30s + nodefs.inodesFree: 2m + evictionMaxPodGracePeriod: 60 + imageGCHighThresholdPercent: 85 + imageGCLowThresholdPercent: 80 + cpuCFSQuota: true + podsPerCore: 2 + maxPods: 20 + + # Disruption section which describes the ways in which Karpenter can disrupt and replace Nodes + # Configuration in this section constrains how aggressive Karpenter can be with performing operations + # like rolling Nodes due to them hitting their maximum lifetime (expiry) or scaling down nodes to reduce cluster cost + disruption: + # Describes which types of Nodes Karpenter should consider for consolidation + # If using 'WhenUnderutilized', Karpenter will consider all nodes for consolidation and attempt to remove or replace Nodes when it discovers that the Node is underutilized and could be changed to reduce cost + # If using `WhenEmpty`, Karpenter will only consider nodes for consolidation that contain no workload pods + consolidationPolicy: WhenUnderutilized | WhenEmpty + + # The amount of time Karpenter should wait after discovering a consolidation decision + # This value can currently only be set when the consolidationPolicy is 'WhenEmpty' + # You can choose to disable consolidation entirely by setting the string value 'Never' here + consolidateAfter: 30s + + # The amount of time a Node can live on the cluster before being removed + # Avoiding long-running Nodes helps to reduce security vulnerabilities as well as to reduce the chance of issues that can plague Nodes with long uptimes such as file fragmentation or memory leaks from system processes + # You can choose to disable expiration entirely by setting the string value 'Never' here + expireAfter: 720h + + # Budgets control the speed Karpenter can scale down nodes. + # Karpenter will respect the minimum of the currently active budgets, and will round up + # when considering percentages. Duration and Schedule must be set together. + budgets: + - nodes: 10% + # On Weekdays during business hours, don't do any deprovisioning. + - schedule: "0 9 * * mon-fri" + duration: 8h + nodes: "0" + + # Resource limits constrain the total size of the cluster. + # Limits prevent Karpenter from creating new instances once the limit is exceeded. + limits: + cpu: "1000" + memory: 1000Gi + + # Priority given to the NodePool when the scheduler considers which NodePool + # to select. Higher weights indicate higher priority when comparing NodePools. + # Specifying no weight is equivalent to specifying a weight of 0. + weight: 10 +``` + +## spec.template.spec.requirements + +Kubernetes defines the following [Well-Known Labels](https://kubernetes.io/docs/reference/labels-annotations-taints/), and cloud providers (e.g., AWS) implement them. They are defined at the "spec.requirements" section of the NodePool API. + +In addition to the well-known labels from Kubernetes, Karpenter supports AWS-specific labels for more advanced scheduling. See the full list [here](../scheduling/#well-known-labels). + +These well-known labels may be specified at the NodePool level, or in a workload definition (e.g., nodeSelector on a pod.spec). 
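+
+For example, the same well-known label can be constrained in either place. A minimal sketch (the spot-only requirement and the pod below are illustrative, not defaults):
+
+```yaml
+# In the NodePool: constrain what Karpenter is allowed to provision
+spec:
+  template:
+    spec:
+      requirements:
+        - key: karpenter.sh/capacity-type
+          operator: In
+          values: ["spot"]
+```
+
+```yaml
+# In a workload: constrain where this pod may schedule
+apiVersion: v1
+kind: Pod
+metadata:
+  name: my-app # hypothetical pod
+spec:
+  nodeSelector:
+    karpenter.sh/capacity-type: spot
+  containers:
+    - name: app
+      image: my-app:latest # hypothetical image
+```
+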
+Nodes are chosen using both the NodePool's and pod's requirements. If there is no overlap, nodes will not be launched. In other words, a pod's requirements must be within the NodePool's requirements. If a requirement is not defined for a well-known label, any value available to the cloud provider may be chosen.
+
+For example, an instance type may be specified using a nodeSelector in a pod spec. If the instance type requested is not included in the NodePool list and the NodePool has instance type requirements, Karpenter will not create a node or schedule the pod.
+
+### Instance Types
+
+- key: `node.kubernetes.io/instance-type`
+- key: `karpenter.k8s.aws/instance-family`
+- key: `karpenter.k8s.aws/instance-category`
+- key: `karpenter.k8s.aws/instance-generation`
+
+Generally, instance types should be a list and not a single value. Leaving these requirements undefined is recommended, as it maximizes choices for efficiently placing pods.
+
+Review [AWS instance types](../instance-types). Most instance types are supported with the exclusion of [non-HVM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/virtualization_types.html).
+
+### Availability Zones
+
+- key: `topology.kubernetes.io/zone`
+- value example: `us-east-1c`
+- value list: `aws ec2 describe-availability-zones --region <region-name>`
+
+Karpenter can be configured to create nodes in a particular zone. Note that the Availability Zone `us-east-1a` for your AWS account might not have the same location as `us-east-1a` for another AWS account.
+
+[Learn more about Availability Zone IDs.](https://docs.aws.amazon.com/ram/latest/userguide/working-with-az-ids.html)
+
+### Architecture
+
+- key: `kubernetes.io/arch`
+- values
+  - `amd64`
+  - `arm64`
+
+Karpenter supports `amd64` and `arm64` nodes.
+
+### Operating System
+
+- key: `kubernetes.io/os`
+- values
+  - `linux`
+  - `windows`
+
+Karpenter supports `linux` and `windows` operating systems.
+
+### Capacity Type
+
+- key: `karpenter.sh/capacity-type`
+- values
+  - `spot`
+  - `on-demand`
+
+Karpenter supports specifying capacity type, which is analogous to [EC2 purchase options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-purchasing-options.html).
+
+Karpenter prioritizes Spot offerings if the NodePool allows Spot and on-demand instances. If the provider API (e.g. EC2 Fleet's API) indicates Spot capacity is unavailable, Karpenter caches that result across all attempts to provision EC2 capacity for that instance type and zone for the next 45 seconds. If there are no other possible offerings available for Spot, Karpenter will attempt to provision on-demand instances, generally within milliseconds.
+
+Karpenter also allows `karpenter.sh/capacity-type` to be used as a topology key for enforcing topology-spread.
+
+{{% alert title="Recommended" color="primary" %}}
+Karpenter allows you to be extremely flexible with your NodePools by only constraining your instance types in ways that are absolutely necessary for your cluster. By default, Karpenter will enforce that you specify the `spec.template.spec.requirements` field, but will not enforce that you specify any requirements within the field. If you choose to specify `requirements: []`, this means that you will be completely flexible to _all_ instance types that your cloud provider supports.
+
+Though Karpenter doesn't enforce these defaults, for most use-cases, we recommend that you specify _some_ requirements to avoid odd behavior or exotic instance types.
+Below is a high-level recommendation for requirements that should fit the majority of use-cases for generic workloads:
+
+```yaml
+spec:
+  template:
+    spec:
+      requirements:
+        - key: kubernetes.io/arch
+          operator: In
+          values: ["amd64"]
+        - key: kubernetes.io/os
+          operator: In
+          values: ["linux"]
+        - key: karpenter.sh/capacity-type
+          operator: In
+          values: ["on-demand"]
+        - key: karpenter.k8s.aws/instance-category
+          operator: In
+          values: ["c", "m", "r"]
+        - key: karpenter.k8s.aws/instance-generation
+          operator: Gt
+          values: ["2"]
+```
+
+{{% /alert %}}
+
+## spec.template.spec.nodeClassRef
+
+This field points to the Cloud Provider NodeClass resource. Learn more about [EC2NodeClasses]({{}}).
+
+## spec.template.spec.kubelet
+
+Karpenter provides the ability to specify a few additional Kubelet args. These are all optional and provide support for additional customization and use cases. Adjust these only if you know you need to do so. For more details on kubelet configuration arguments, [see the KubeletConfiguration API specification docs](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/). The implemented fields are a subset of the full list of upstream kubelet configuration arguments. Please cut an issue if you'd like to see another field implemented.
+
+```yaml
+kubelet:
+  clusterDNS: ["10.0.1.100"]
+  systemReserved:
+    cpu: 100m
+    memory: 100Mi
+    ephemeral-storage: 1Gi
+  kubeReserved:
+    cpu: 200m
+    memory: 100Mi
+    ephemeral-storage: 3Gi
+  evictionHard:
+    memory.available: 5%
+    nodefs.available: 10%
+    nodefs.inodesFree: 10%
+  evictionSoft:
+    memory.available: 500Mi
+    nodefs.available: 15%
+    nodefs.inodesFree: 15%
+  evictionSoftGracePeriod:
+    memory.available: 1m
+    nodefs.available: 1m30s
+    nodefs.inodesFree: 2m
+  evictionMaxPodGracePeriod: 60
+  imageGCHighThresholdPercent: 85
+  imageGCLowThresholdPercent: 80
+  cpuCFSQuota: true
+  podsPerCore: 2
+  maxPods: 20
+```
+
+### Reserved Resources
+
+Karpenter will automatically configure the system and kube reserved resource requests on the fly on your behalf. These requests are used to configure your node and to make scheduling decisions for your pods. If you have specific requirements or know that you will have additional capacity requirements, you can optionally override the `--system-reserved` configuration defaults with the `.spec.template.spec.kubelet.systemReserved` values and the `--kube-reserved` configuration defaults with the `.spec.template.spec.kubelet.kubeReserved` values.
+
+For more information on the default `--system-reserved` and `--kube-reserved` configuration, refer to the [Kubelet Docs](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#kube-reserved).
+
+### Eviction Thresholds
+
+The kubelet supports eviction thresholds by default. When enough memory or file system pressure is exerted on the node, the kubelet will begin to evict pods to ensure that system daemons and other system processes can continue to run in a healthy manner.
+
+Kubelet has the notion of [hard evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#hard-eviction-thresholds) and [soft evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#soft-eviction-thresholds). In hard evictions, pods are evicted as soon as a threshold is met, with no grace period to terminate. Soft evictions, on the other hand, provide an opportunity for pods to be terminated gracefully.
+They do so by sending a termination signal to pods that are planned to be evicted, allowing those pods to terminate up to their grace period.
+
+Karpenter supports [hard evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#hard-eviction-thresholds) through the `.spec.template.spec.kubelet.evictionHard` field and [soft evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#soft-eviction-thresholds) through the `.spec.template.spec.kubelet.evictionSoft` field. `evictionHard` and `evictionSoft` are configured by listing [signal names](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#eviction-signals) with either percentage values or resource values.
+
+```yaml
+kubelet:
+  evictionHard:
+    memory.available: 500Mi
+    nodefs.available: 10%
+    nodefs.inodesFree: 10%
+    imagefs.available: 5%
+    imagefs.inodesFree: 5%
+    pid.available: 7%
+  evictionSoft:
+    memory.available: 1Gi
+    nodefs.available: 15%
+    nodefs.inodesFree: 15%
+    imagefs.available: 10%
+    imagefs.inodesFree: 10%
+    pid.available: 10%
+```
+
+#### Supported Eviction Signals
+
+| Eviction Signal    | Description                                                                      |
+|--------------------|----------------------------------------------------------------------------------|
+| memory.available   | memory.available := node.status.capacity[memory] - node.stats.memory.workingSet |
+| nodefs.available   | nodefs.available := node.stats.fs.available                                      |
+| nodefs.inodesFree  | nodefs.inodesFree := node.stats.fs.inodesFree                                    |
+| imagefs.available  | imagefs.available := node.stats.runtime.imagefs.available                        |
+| imagefs.inodesFree | imagefs.inodesFree := node.stats.runtime.imagefs.inodesFree                      |
+| pid.available      | pid.available := node.stats.rlimit.maxpid - node.stats.rlimit.curproc            |
+
+For more information on eviction thresholds, view the [Node-pressure Eviction](https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction) section of the official Kubernetes docs.
+
+#### Soft Eviction Grace Periods
+
+Soft eviction pairs an eviction threshold with a specified grace period. With soft eviction thresholds, the kubelet will only begin evicting pods when the node exceeds its soft eviction threshold over the entire duration of its grace period. For example, if you specify `evictionSoft[memory.available]` of `500Mi` and an `evictionSoftGracePeriod[memory.available]` of `1m30s`, the node must have less than `500Mi` of available memory over a minute and a half in order for the kubelet to begin evicting pods.
+
+Optionally, you can specify an `evictionMaxPodGracePeriod` which defines the administrator-specified maximum pod termination grace period to use during soft eviction. If a namespace owner had specified a pod `terminationGracePeriodSeconds` on pods in their namespace, the minimum of `evictionMaxPodGracePeriod` and `terminationGracePeriodSeconds` would be used.
+
+```yaml
+kubelet:
+  evictionSoftGracePeriod:
+    memory.available: 1m
+    nodefs.available: 1m30s
+    nodefs.inodesFree: 2m
+    imagefs.available: 1m30s
+    imagefs.inodesFree: 2m
+    pid.available: 2m
+  evictionMaxPodGracePeriod: 60
+```
+
+### Pod Density
+
+By default, the number of pods on a node is limited by both the number of networking interfaces (ENIs) that may be attached to an instance type and the number of IP addresses that can be assigned to each ENI.
+See [IP addresses per network interface per instance type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for more detailed information on these instance types' limits.
+
+{{% alert title="Note" color="primary" %}}
+By default, the VPC CNI allocates IPs for a node and pods from the same subnet. With [VPC CNI Custom Networking](https://aws.github.io/aws-eks-best-practices/networking/custom-networking), the pods will receive IP addresses from another subnet dedicated to pod IPs. This approach makes it easier to manage IP addresses and allows for separate Network Access Control Lists (NACLs) applied to your pods. VPC CNI Custom Networking reduces the pod density of a node since one of the ENI attachments is reserved for the node itself and its allocated IPs cannot be shared with pods. Karpenter supports VPC CNI Custom Networking and similar CNI setups where the primary node interface is separated from the pod interfaces through a global [setting](./settings.md#configmap) within the karpenter-global-settings configmap: `aws.reservedENIs`. In the common case, `aws.reservedENIs` should be set to `"1"` if using Custom Networking.
+{{% /alert %}}
+
+{{% alert title="Windows Support Notice" color="warning" %}}
+It's currently not possible to specify custom networking with Windows nodes.
+{{% /alert %}}
+
+#### Max Pods
+
+For small instances that require an increased pod density or large instances that require a reduced pod density, you can override this default value with `.spec.template.spec.kubelet.maxPods`. This value will be used during Karpenter pod scheduling and passed through to `--max-pods` on kubelet startup.
+
+{{% alert title="Note" color="primary" %}}
+When using small instance types, it may be necessary to enable [prefix assignment mode](https://aws.amazon.com/blogs/containers/amazon-vpc-cni-increases-pods-per-node-limits/) in the AWS VPC CNI plugin to support a higher pod density per node. Prefix assignment mode was introduced in AWS VPC CNI v1.9 and allows ENIs to manage a broader set of IP addresses. Much higher pod densities are supported as a result.
+{{% /alert %}}
+
+{{% alert title="Windows Support Notice" color="warning" %}}
+Presently, Windows worker nodes do not support using more than one ENI.
+As a consequence, the number of IP addresses, and subsequently the number of pods, that a Windows worker node can support is limited by the number of IPv4 addresses available on the primary ENI.
+Currently, Karpenter will only consider individual secondary IP addresses when calculating the pod density limit.
+{{% /alert %}}
+
+#### Pods Per Core
+
+An alternative way to dynamically set the maximum density of pods on a node is to use the `.spec.template.spec.kubelet.podsPerCore` value. Karpenter will calculate the pod density during scheduling by multiplying this value by the number of logical cores (vCPUs) on an instance type. This value will also be passed through to the `--pods-per-core` value on kubelet startup to configure the number of allocatable pods the kubelet can assign to the node instance.
+
+The value generated from `podsPerCore` cannot exceed `maxPods`, meaning that, if both are set, the minimum of the `podsPerCore` dynamic pod density and the static `maxPods` value will be used for scheduling.
+
+{{% alert title="Note" color="primary" %}}
+`maxPods` may not be set in the `kubelet` of a NodePool, but may still be restricted by the `ENI_LIMITED_POD_DENSITY` value.
+You may want to ensure that the `podsPerCore` value that will be used for instance families associated with the NodePool will not cause unexpected behavior by exceeding the `maxPods` value.
+{{% /alert %}}
+
+{{% alert title="Pods Per Core on Bottlerocket" color="warning" %}}
+Bottlerocket AMIFamily currently does not support `podsPerCore` configuration. If a NodePool contains a `nodeClassRef` to a node class that will launch a Bottlerocket instance, the `podsPerCore` value will be ignored for scheduling and for configuring the kubelet.
+{{% /alert %}}
+
+## spec.disruption
+
+You can configure Karpenter to disrupt Nodes through your NodePool in multiple ways. You can use `spec.disruption.consolidationPolicy`, `spec.disruption.consolidateAfter` or `spec.disruption.expireAfter`. Read [Disruption]({{}}) for more.
+
+## spec.limits
+
+The NodePool spec includes a limits section (`spec.limits`), which constrains the maximum amount of resources that the NodePool will manage.
+
+Karpenter supports limits of any resource type reported by your cloud provider. When scheduling, it limits instance types to those that will not exceed the specified limits. If a limit has been exceeded, node provisioning is prevented until some nodes have been terminated.
+
+```yaml
+apiVersion: karpenter.sh/v1beta1
+kind: NodePool
+metadata:
+  name: default
+spec:
+  template:
+    spec:
+      requirements:
+        - key: karpenter.sh/capacity-type
+          operator: In
+          values: ["spot"]
+  limits:
+    cpu: 1000
+    memory: 1000Gi
+    nvidia.com/gpu: 2
+```
+
+{{% alert title="Note" color="primary" %}}
+Karpenter provisioning is highly parallel. Because of this, limit checking is eventually consistent, which can result in overrun during rapid scale outs.
+{{% /alert %}}
+
+CPU limits are described with a `DecimalSI` value. Note that the Kubernetes API will coerce this into a string, so we recommend against using integers to avoid GitOps skew.
+
+Memory limits are described with a [`BinarySI` value, such as 1000Gi.](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory)
+
+You can view the current consumption of CPU and memory on your cluster by running:
+```
+kubectl get nodepool -o=jsonpath='{.items[0].status}'
+```
+
+Review the [Kubernetes core API](https://github.com/kubernetes/api/blob/37748cca582229600a3599b40e9a82a951d8bbbf/core/v1/resource.go#L23) (`k8s.io/api/core/v1`) for more information on `resources`.
+
+## spec.weight
+
+Karpenter allows you to describe NodePool preferences through a `weight` mechanism similar to how weight is described with [pod and node affinities](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity).
+
+For more information on weighting NodePools, see the [Weighting NodePools section]({{}}) in the scheduling details.
+
+## Examples
+
+### Isolating Expensive Hardware
+
+A NodePool can be set up to only provision nodes on particular processor types.
+The following example sets a taint that only allows pods with tolerations for Nvidia GPUs to be scheduled: + +```yaml +apiVersion: karpenter.sh/v1beta1 +kind: NodePool +metadata: + name: gpu +spec: + disruption: + consolidationPolicy: WhenUnderutilized + template: + spec: + requirements: + - key: node.kubernetes.io/instance-type + operator: In + values: ["p3.8xlarge", "p3.16xlarge"] + taints: + - key: nvidia.com/gpu + value: "true" + effect: NoSchedule +``` +In order for a pod to run on a node defined in this NodePool, it must tolerate `nvidia.com/gpu` in its pod spec. + +### Cilium Startup Taint + +Per the Cilium [docs](https://docs.cilium.io/en/stable/installation/taints/#taint-effects), it's recommended to place a taint of `node.cilium.io/agent-not-ready=true:NoExecute` on nodes to allow Cilium to configure networking prior to other pods starting. This can be accomplished via the use of Karpenter `startupTaints`. These taints are placed on the node, but pods aren't required to tolerate these taints to be considered for provisioning. + +```yaml +apiVersion: karpenter.sh/v1beta1 +kind: NodePool +metadata: + name: cilium-startup +spec: + disruption: + consolidationPolicy: WhenUnderutilized + template: + spec: + startupTaints: + - key: node.cilium.io/agent-not-ready + value: "true" + effect: NoExecute +``` diff --git a/website/content/en/v0.31/concepts/scheduling.md b/website/content/en/v0.35/concepts/scheduling.md similarity index 81% rename from website/content/en/v0.31/concepts/scheduling.md rename to website/content/en/v0.35/concepts/scheduling.md index b81d8fcade3f..08947b395b93 100755 --- a/website/content/en/v0.31/concepts/scheduling.md +++ b/website/content/en/v0.35/concepts/scheduling.md @@ -15,10 +15,10 @@ Reasons for constraining where your pods run could include: * Wanting to use techniques like topology spread to help ensure high availability Your Cloud Provider defines the first layer of constraints, including all instance types, architectures, zones, and purchase types available to its cloud. -The cluster administrator adds the next layer of constraints by creating one or more provisioners. +The cluster administrator adds the next layer of constraints by creating one or more NodePools. The final layer comes from you adding specifications to your Kubernetes pod deployments. -Pod scheduling constraints must fall within a provisioner's constraints or the pods will not deploy. -For example, if the provisioner sets limits that allow only a particular zone to be used, and a pod asks for a different zone, it will not be scheduled. +Pod scheduling constraints must fall within a NodePool's constraints or the pods will not deploy. +For example, if the NodePool sets limits that allow only a particular zone to be used, and a pod asks for a different zone, it will not be scheduled. Constraints you can request include: @@ -62,7 +62,7 @@ Its limits are set to 256MiB of memory and 1 CPU. Instance type selection math only uses `requests`, but `limits` may be configured to enable resource oversubscription. -See [Managing Resources for Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for details on resource types supported by Kubernetes, [Specify a memory request and a memory limit](https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/#specify-a-memory-request-and-a-memory-limit) for examples of memory requests, and [Provisioner Configuration]({{}}) for a list of supported resources. 
+See [Managing Resources for Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for details on resource types supported by Kubernetes, [Specify a memory request and a memory limit](https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/#specify-a-memory-request-and-a-memory-limit) for examples of memory requests, and [NodePools]({{}}) for a list of supported resources. ### Accelerators/GPU Resources @@ -129,7 +129,7 @@ This can include well-known labels or custom labels you create yourself. You can use `affinity` to define more complicated constraints, see [Node Affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity) for the complete specification. ### Labels -Well-known labels may be specified as provisioner requirements or pod scheduling constraints. You can also define your own custom labels by specifying `requirements` or `labels` on your Provisioner and select them using `nodeAffinity` or `nodeSelectors` on your Pods. +Well-known labels may be specified as NodePool requirements or pod scheduling constraints. You can also define your own custom labels by specifying `requirements` or `labels` on your NodePool and select them using `nodeAffinity` or `nodeSelectors` on your Pods. {{% alert title="Warning" color="warning" %}} Take care to ensure the label domains are correct. A well known label like `karpenter.k8s.aws/instance-family` will enforce node properties, but may be confused with `node.kubernetes.io/instance-family`, which is unknown to Karpenter, and treated as a custom label which will not enforce node properties. @@ -167,7 +167,7 @@ Karpenter translates the following deprecated labels to their stable equivalents #### User-Defined Labels -Karpenter is aware of several well-known labels, deriving them from instance type details. If you specify a `nodeSelector` or a required `nodeAffinity` using a label that is not well-known to Karpenter, it will not launch nodes with these labels and pods will remain pending. For Karpenter to become aware that it can schedule for these labels, you must specify the label in the Provisioner requirements with the `Exists` operator: +Karpenter is aware of several well-known labels, deriving them from instance type details. If you specify a `nodeSelector` or a required `nodeAffinity` using a label that is not well-known to Karpenter, it will not launch nodes with these labels and pods will remain pending. For Karpenter to become aware that it can schedule for these labels, you must specify the label in the NodePool requirements with the `Exists` operator: ```yaml requirements: @@ -186,7 +186,7 @@ nodeSelector: ``` This example features a well-known label (`topology.kubernetes.io/zone`) and a label that is well known to Karpenter (`karpenter.sh/capacity-type`). -If you want to create a custom label, you should do that at the provisioner level. +If you want to create a custom label, you should do that at the NodePool level. Then the pod can declare that custom label. @@ -204,8 +204,7 @@ When setting rules, the following Node affinity types define how hard or soft ea The `IgnoredDuringExecution` part of each tells the pod to keep running, even if conditions change on the node so the rules no longer matched. You can think of these concepts as `required` and `preferred`, since Kubernetes never implemented other variants of these rules. -All examples below assume that the provisioner doesn't have constraints to prevent those zones from being used. 
-The first constraint says you could use `us-west-2a` or `us-west-2b`, the second constraint makes it so only `us-west-2b` can be used.
+All examples below assume that the NodePool doesn't have constraints to prevent those zones from being used. The first constraint says you could use `us-west-2a` or `us-west-2b`; the second constraint makes it so that only `us-west-2b` can be used.

```yaml
 affinity:
@@ -265,28 +264,28 @@ So if capacity becomes available, it will schedule the pod without user intervention.

## Taints and tolerations

Taints are the opposite of affinity.
-Setting a taint on a node tells the scheduler to not run a pod on it unless the pod has explicitly said it can tolerate that taint.
-This example shows a Provisioner that was set up with a taint for only running pods that require a GPU, such as the following:
-
+Setting a taint on a node tells the scheduler to not run a pod on it unless the pod has explicitly said it can tolerate that taint. This example shows a NodePool that was set up with a taint for only running pods that require a GPU, such as the following:
```yaml
-apiVersion: karpenter.sh/v1alpha5
-kind: Provisioner
+apiVersion: karpenter.sh/v1beta1
+kind: NodePool
 metadata:
   name: gpu
 spec:
-  requirements:
-  - key: karpenter.k8s.aws/instance-family
-    operator: In
-    values:
-    - p3
-  taints:
-  - key: nvidia.com/gpu
-    value: true
-    effect: "NoSchedule"
+  template:
+    spec:
+      requirements:
+        - key: karpenter.k8s.aws/instance-family
+          operator: In
+          values:
+            - p3
+      taints:
+        - key: nvidia.com/gpu
+          value: "true"
+          effect: "NoSchedule"
```
-For a pod to request to run on a node that has provisioner, it could set a toleration as follows:
+For a pod to run on a node from this NodePool, it must set a matching toleration, as follows:

```yaml
apiVersion: v1
@@ -311,8 +310,7 @@ See [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-evic

## Topology Spread

-By using the Kubernetes `topologySpreadConstraints` you can ask the provisioner to have pods push away from each other to limit the blast radius of an outage.
-Think of it as the Kubernetes evolution for pod affinity: it lets you relate pods with respect to nodes while still allowing spread.
+By using the Kubernetes `topologySpreadConstraints` you can ask the NodePool to have pods push away from each other to limit the blast radius of an outage. Think of it as the Kubernetes evolution of pod affinity: it lets you relate pods with respect to nodes while still allowing spread.
For example:
```yaml
spec:
@@ -351,7 +349,6 @@ The three supported `topologyKey` values that Karpenter supports are:
- `kubernetes.io/hostname`
- `karpenter.sh/capacity-type`
-
See [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) for details.

{{% alert title="Note" color="primary" %}}
@@ -360,7 +357,7 @@ NodePools do not attempt to balance or rebalance the availability zones for thei

## Pod affinity/anti-affinity

-By using the `podAffinity` and `podAntiAffinity` configuration on a pod spec, you can inform the provisioner of your desire for pods to schedule together or apart with respect to different topology domains. For example:
+By using the `podAffinity` and `podAntiAffinity` configuration on a pod spec, you can inform the Karpenter scheduler of your desire for pods to schedule together or apart with respect to different topology domains.
For example:

```yaml
spec:
@@ -445,167 +442,174 @@ The EBS CSI driver uses `topology.ebs.csi.aws.com/zone` instead of the standard
The topology key `topology.kubernetes.io/region` is not supported. Legacy in-tree CSI providers specify this label. Instead, install an out-of-tree CSI provider. [Learn more about moving to CSI providers.](https://kubernetes.io/blog/2021/12/10/storage-in-tree-to-csi-migration-status-update/#quick-recap-what-is-csi-migration-and-why-migrate)
{{% /alert %}}

-## Weighting Provisioners
+## Weighted NodePools

-Karpenter allows you to order your provisioners using the `.spec.weight` field so that the node scheduler will deterministically attempt to schedule with one provisioner before another. Below are a few example use-cases that are now supported with the provisioner weighting semantic.
+Karpenter allows you to order your NodePools using the `.spec.weight` field so that the Karpenter scheduler will attempt to schedule with one NodePool before another.

### Savings Plans and Reserved Instances

If you have purchased a [Savings Plan](https://aws.amazon.com/savingsplans/) or [Reserved Instances](https://aws.amazon.com/ec2/pricing/reserved-instances/), you may want to tell Karpenter to prioritize this reserved capacity ahead of other instance types.

-To enable this, you will need to tell the Karpenter controllers which instance types to prioritize and what is the maximum amount of capacity that should be provisioned using those instance types. We can set the `.spec.limits` on the provisioner to limit the capacity that can be launched by this provisioner. Combined with the `.spec.weight` value, we can tell Karpenter to pull from instance types in the reserved provisioner before defaulting to generic instance types.
-
-#### Reserved Instance Provisioner
+To enable this, you will need to tell the Karpenter controllers which instance types to prioritize and the maximum amount of capacity that should be provisioned using those instance types. We can set the `.spec.limits` field on the NodePool to limit the capacity that can be launched by this NodePool. Combined with the `.spec.weight` value, we can tell Karpenter to pull from instance types in the reserved NodePool before defaulting to generic instance types.

```yaml
-apiVersion: karpenter.sh/v1alpha5
-kind: Provisioner
+apiVersion: karpenter.sh/v1beta1
+kind: NodePool
 metadata:
   name: reserved-instance
 spec:
   weight: 50
-  requirements:
-  - key: "node.kubernetes.io/instance-type"
-    operator: In
-    values: ["c4.large"]
   limits:
-    resources:
-      cpu: 100
-```
-
-#### Default Provisioner
-
-```yaml
-apiVersion: karpenter.sh/v1alpha5
-kind: Provisioner
+    cpu: 100
+  template:
+    spec:
+      requirements:
+      - key: "node.kubernetes.io/instance-type"
+        operator: In
+        values: ["c4.large"]
+---
+apiVersion: karpenter.sh/v1beta1
+kind: NodePool
 metadata:
   name: default
 spec:
-  requirements:
-  - key: karpenter.sh/capacity-type
-    operator: In
-    values: ["spot", "on-demand"]
-  - key: kubernetes.io/arch
-    operator: In
-    values: ["amd64"]
+  template:
+    spec:
+      requirements:
+        - key: karpenter.sh/capacity-type
+          operator: In
+          values: ["spot", "on-demand"]
+        - key: kubernetes.io/arch
+          operator: In
+          values: ["amd64"]
```

-### Default Node Configuration
+### Fallback

Pods that do not specify node selectors or affinities can potentially be assigned to any node with any configuration.
There may be cases where you require these pods to schedule to a specific capacity type or architecture but assigning the relevant node selectors or affinities to all these workload pods may be too tedious or infeasible. Instead, we want to define a cluster-wide default configuration for nodes launched using Karpenter. -By assigning a higher `.spec.weight` value and restricting a provisioner to a specific capacity type or architecture, we can set default configuration for the nodes launched by pods that don't have node configuration restrictions. - -#### Default Provisioner +By assigning a higher `.spec.weight` value and restricting a NodePool to a specific capacity type or architecture, we can set default configuration for the nodes launched by pods that don't have node configuration restrictions. ```yaml -apiVersion: karpenter.sh/v1alpha5 -kind: Provisioner +apiVersion: karpenter.sh/v1beta1 +kind: NodePool metadata: name: default spec: weight: 50 - requirements: - - key: karpenter.sh/capacity-type - operator: In - values: ["spot", "on-demand"] - - key: kubernetes.io/arch - operator: In - values: ["amd64"] -``` - -#### ARM-64 Specific Provisioner - -```yaml -apiVersion: karpenter.sh/v1alpha5 -kind: Provisioner + template: + spec: + requirements: + - key: karpenter.sh/capacity-type + operator: In + values: ["spot", "on-demand"] + - key: kubernetes.io/arch + operator: In + values: ["amd64"] +--- +apiVersion: karpenter.sh/v1beta1 +kind: NodePool metadata: name: arm64-specific spec: - requirements: - - key: karpenter.sh/capacity-type - operator: In - values: ["spot", "on-demand"] - - key: kubernetes.io/arch - operator: In - values: ["arm64"] - - key: node.kubernetes.io/instance-type - operator: In - values: ["a1.large", "a1.xlarge"] + template: + spec: + requirements: + - key: karpenter.sh/capacity-type + operator: In + values: ["spot", "on-demand"] + - key: kubernetes.io/arch + operator: In + values: ["arm64"] + - key: node.kubernetes.io/instance-type + operator: In + values: ["a1.large", "a1.xlarge"] ``` {{% alert title="Note" color="primary" %}} -Based on the way that Karpenter performs pod batching and bin packing, it is not guaranteed that Karpenter will always choose the highest priority provisioner given specific requirements. For example, if a pod can't be scheduled with the highest priority provisioner it will force creation of a node using a lower priority provisioner which may allow other pods from that batch to also schedule on that node. The behavior may also occur if existing capacity is available, as the kube-scheduler will schedule the pods instead of allowing Karpenter to provision a new node. +Based on the way that Karpenter performs pod batching and bin packing, it is not guaranteed that Karpenter will always choose the highest priority NodePool given specific requirements. For example, if a pod can't be scheduled with the highest priority NodePool, it will force creation of a node using a lower priority NodePool, allowing other pods from that batch to also schedule on that node. The behavior may also occur if existing capacity is available, as the kube-scheduler will schedule the pods instead of allowing Karpenter to provision a new node. {{% /alert %}} ## Advanced Scheduling Techniques ### `Exists` Operator -The `Exists` operator can be used on a provisioner to provide workload segregation across nodes. +The `Exists` operator can be used on a NodePool to provide workload segregation across nodes. ```yaml ... 
- requirements:
- - key: company.com/team
-   operator: Exists
+requirements:
+- key: company.com/team
+  operator: Exists
...
```

-With the requirement on the provisioner in place, workloads can optionally specify a custom value as a required node affinity or node selector. Karpenter will then label the nodes it launches for these pods which prevents `kube-scheduler` from scheduling conflicting pods to those nodes. This provides a way to more dynamically isolate workloads without requiring a unique provisioner for each workload subset.
+With this requirement in place on the NodePool, workloads can optionally specify a custom value as a required node affinity or node selector. Karpenter will then label the nodes it launches for these pods, which prevents `kube-scheduler` from scheduling conflicting pods to those nodes. This provides a way to more dynamically isolate workloads without requiring a unique NodePool for each workload subset.

```yaml
-  nodeSelector:
-    company.com/team: team-a
+nodeSelector:
+  company.com/team: team-a
```

{{% alert title="Note" color="primary" %}}
-If a workload matches the provisioner but doesn't specify a label, Karpenter will generate a random label for the node.
+If a workload matches the NodePool but doesn't specify a label, Karpenter will generate a random label for the node.
{{% /alert %}}

### On-Demand/Spot Ratio Split

Taking advantage of Karpenter's ability to assign labels to nodes and using a topology spread across those labels enables a crude method for splitting a workload across on-demand and spot instances in a desired ratio.

-To do this, we create a provisioner each for spot and on-demand with disjoint values for a unique new label called `capacity-spread`. In the example below, we provide four unique values for the spot provisioner and one value for the on-demand provisioner. When we spread across our new label evenly, we'll end up with a ratio of 4:1 spot to on-demand nodes.
+To do this, we create one NodePool each for spot and on-demand with disjoint values for a unique new label called `capacity-spread`. In the example below, we provide four unique values for the spot NodePool and one value for the on-demand NodePool. When we spread across our new label evenly, we'll end up with a ratio of 4:1 spot to on-demand nodes.

{{% alert title="Warning" color="warning" %}}
This is not identical to a topology spread with a specified ratio. We are constructing 'virtual domains' to spread evenly across, and the ratio of those 'virtual domains' to spot and on-demand happens to coincide with the desired spot to on-demand ratio. As an example, if you launch pods using the provided example, Karpenter will launch nodes with `capacity-spread` labels of 1, 2, 3, 4, and 5. `kube-scheduler` will then schedule evenly across those nodes to give the desired ratio.
{{% /alert %}} -#### Spot Provisioner -```yaml - requirements: - - key: "karpenter.sh/capacity-type" - operator: In - values: [ "spot"] - - key: capacity-spread - operator: In - values: - - "2" - - "3" - - "4" - - "5" -``` +#### NodePools -#### On-Demand Provisioner ```yaml - requirements: - - key: "karpenter.sh/capacity-type" - operator: In - values: [ "on-demand"] - - key: capacity-spread - operator: In - values: - - "1" +apiVersion: karpenter.sh/v1beta1 +kind: NodePool +metadata: + name: spot +spec: + template: + spec: + requirements: + - key: "karpenter.sh/capacity-type" + operator: In + values: ["spot"] + - key: capacity-spread + operator: In + values: + - "2" + - "3" + - "4" + - "5" +--- +apiVersion: karpenter.sh/v1beta1 +kind: NodePool +metadata: + name: on-demand +spec: + template: + spec: + requirements: + - key: "karpenter.sh/capacity-type" + operator: In + values: ["on-demand"] + - key: capacity-spread + operator: In + values: + - "1" ``` #### Workload Topology Spread Constraint ```yaml topologySpreadConstraints: - - maxSkew: 1 - topologyKey: capacity-spread - whenUnsatisfiable: DoNotSchedule - labelSelector: - ... +- maxSkew: 1 + topologyKey: capacity-spread + whenUnsatisfiable: DoNotSchedule + labelSelector: + ... ``` diff --git a/website/content/en/v0.31/contributing/_index.md b/website/content/en/v0.35/contributing/_index.md similarity index 90% rename from website/content/en/v0.31/contributing/_index.md rename to website/content/en/v0.35/contributing/_index.md index 10bb749d39dc..6ec2c3df504e 100644 --- a/website/content/en/v0.31/contributing/_index.md +++ b/website/content/en/v0.35/contributing/_index.md @@ -1,7 +1,7 @@ --- title: "Contributing" linkTitle: "Contributing" -weight: 100 +weight: 40 description: > Learn about how to contribute to Karpenter --- diff --git a/website/content/en/v0.31/contributing/design-guide.md b/website/content/en/v0.35/contributing/design-guide.md similarity index 100% rename from website/content/en/v0.31/contributing/design-guide.md rename to website/content/en/v0.35/contributing/design-guide.md diff --git a/website/content/en/v0.31/contributing/development-guide.md b/website/content/en/v0.35/contributing/development-guide.md similarity index 94% rename from website/content/en/v0.31/contributing/development-guide.md rename to website/content/en/v0.35/contributing/development-guide.md index 8eab45dd2475..2afbb4d2ead7 100644 --- a/website/content/en/v0.31/contributing/development-guide.md +++ b/website/content/en/v0.35/contributing/development-guide.md @@ -73,9 +73,10 @@ make test # E2E correctness tests ### Change Log Level +By default, `make apply` will set the log level to debug. You can change the log level by setting the log level in your Helm values. + ```bash -kubectl patch configmap config-logging -n karpenter --patch '{"data":{"loglevel.controller":"debug"}}' # Debug Level -kubectl patch configmap config-logging -n karpenter --patch '{"data":{"loglevel.controller":"info"}}' # Info Level +--set logLevel=debug ``` ### Debugging Metrics @@ -105,8 +106,8 @@ stern -n karpenter -l app.kubernetes.io/name=karpenter ### AWS For local development on Karpenter you will need a Docker repo which can manage your images for Karpenter components. -You can use the following command to provision an ECR repository. We recommend using a single "dev" repository for -development across multiple projects, and to use specific image hashes instead of image tags. +You can use the following command to provision an ECR repository. 
We recommend using a single "dev" repository for +development across multiple projects, and to use specific image hashes instead of image tags. ```bash aws ecr create-repository \ diff --git a/website/content/en/v0.31/contributing/working-group.md b/website/content/en/v0.35/contributing/working-group.md similarity index 100% rename from website/content/en/v0.31/contributing/working-group.md rename to website/content/en/v0.35/contributing/working-group.md diff --git a/website/content/en/v0.31/faq.md b/website/content/en/v0.35/faq.md similarity index 50% rename from website/content/en/v0.31/faq.md rename to website/content/en/v0.35/faq.md index b5e0a5af2586..f068290c7626 100644 --- a/website/content/en/v0.31/faq.md +++ b/website/content/en/v0.35/faq.md @@ -1,101 +1,85 @@ --- title: "FAQs" linkTitle: "FAQs" -weight: 90 +weight: 60 description: > Review Karpenter Frequently Asked Questions --- ## General -### How does a provisioner decide to manage a particular node? -See [Configuring provisioners]({{< ref "./concepts/#configuring-provisioners" >}}) for information on how Karpenter provisions and manages nodes. +### How does a NodePool decide to manage a particular node? +See [Configuring NodePools]({{< ref "./concepts/#configuring-nodepools" >}}) for information on how Karpenter configures and manages nodes. ### What cloud providers are supported? AWS is the first cloud provider supported by Karpenter, although it is designed to be used with other cloud providers as well. ### Can I write my own cloud provider for Karpenter? -Yes, but there is no documentation yet for it. -Start with Karpenter's GitHub [cloudprovider](https://github.com/aws/karpenter-core/tree/v0.31.4/pkg/cloudprovider) documentation to see how the AWS provider is built, but there are other sections of the code that will require changes too. +Yes, but there is no documentation yet for it. Start with Karpenter's GitHub [cloudprovider](https://github.com/aws/karpenter-core/tree/v0.35.0/pkg/cloudprovider) documentation to see how the AWS provider is built, but there are other sections of the code that will require changes too. ### What operating system nodes does Karpenter deploy? -By default, Karpenter uses Amazon Linux 2 images. +Karpenter uses the OS defined by the [AMI Family in your EC2NodeClass]({{< ref "./concepts/nodeclasses#specamifamily" >}}). ### Can I provide my own custom operating system images? -Karpenter has multiple mechanisms for configuring the [operating system]({{< ref "./concepts/node-templates/#spec-amiselector" >}}) for your nodes. +Karpenter has multiple mechanisms for configuring the [operating system]({{< ref "./concepts/nodeclasses/#specamiselectorterms" >}}) for your nodes. ### Can Karpenter deal with workloads for mixed architecture cluster (arm vs. amd)? -Karpenter is flexible to multi architecture configurations using [well known labels]({{< ref "./concepts/scheduling/#supported-labels">}}). +Karpenter is flexible to multi-architecture configurations using [well known labels]({{< ref "./concepts/scheduling/#supported-labels">}}). ### What RBAC access is required? -All of the required RBAC rules can be found in the helm chart template. 
-See [clusterrolebinding.yaml](https://github.com/aws/karpenter/blob/v0.31.4/charts/karpenter/templates/clusterrolebinding.yaml), [clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.31.4/charts/karpenter/templates/clusterrole.yaml), [rolebinding.yaml](https://github.com/aws/karpenter/blob/v0.31.4/charts/karpenter/templates/rolebinding.yaml), and [role.yaml](https://github.com/aws/karpenter/blob/v0.31.4/charts/karpenter/templates/role.yaml) files for details. +All the required RBAC rules can be found in the Helm chart template. See [clusterrole-core.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/clusterrole-core.yaml), [clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/clusterrole.yaml), [rolebinding.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/rolebinding.yaml), and [role.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/role.yaml) files for details. ### Can I run Karpenter outside of a Kubernetes cluster? Yes, as long as the controller has network and IAM/RBAC access to the Kubernetes API and your provider API. +### What do I do if I encounter a security issue with Karpenter? +Refer to [Reporting Security Issues](https://github.com/aws/karpenter/security/policy) for information on how to report Karpenter security issues. Do not create a public GitHub issue. + ## Compatibility ### Which versions of Kubernetes does Karpenter support? -See the [Compatibility Matrix in the Upgrade Guide]({{< ref "./upgrade-guide#compatibility-matrix" >}}) to view the supported Kubernetes versions per Karpenter released version. +See the [Compatibility Matrix in the Upgrade Section]({{< ref "./upgrading/compatibility#compatibility-matrix" >}}) to view the supported Kubernetes versions per Karpenter released version. ### What Kubernetes distributions are supported? -Karpenter documents integration with a fresh or existing install of the latest AWS Elastic Kubernetes Service (EKS). -Other Kubernetes distributions (KOPs, etc.) can be used, but setting up cloud provider permissions for those distributions has not been documented. +Karpenter documents integration with a fresh or existing installation of the latest AWS Elastic Kubernetes Service (EKS). Other Kubernetes distributions (KOPs, etc.) can be used, but setting up cloud provider permissions for those distributions has not been documented. ### How does Karpenter interact with AWS node group features? -Provisioners are designed to work alongside static capacity management solutions like EKS Managed Node Groups and EC2 Auto Scaling Groups. -You can manage all capacity using provisioners, use a mixed model with dynamic and statically managed capacity, or use a fully static approach. -We expect most users will use a mixed approach in the near term and provisioner-managed in the long term. +NodePools are designed to work alongside static capacity management solutions like EKS Managed Node Groups and EC2 Auto Scaling Groups. You can manage all capacity using NodePools, use a mixed model with dynamic and statically managed capacity, or use a fully static approach. We expect most users will use a mixed approach in the near term and NodePool-managed in the long term. ### How does Karpenter interact with Kubernetes features? -* Kubernetes Cluster Autoscaler: Karpenter can work alongside cluster autoscaler. -See [Kubernetes cluster autoscaler]({{< ref "./concepts/#kubernetes-cluster-autoscaler" >}}) for details. 
-* Kubernetes Scheduler: Karpenter focuses on scheduling pods that the Kubernetes scheduler has marked as unschedulable.
-See [Scheduling]({{< ref "./concepts/scheduling" >}}) for details on how Karpenter interacts with the Kubernetes scheduler.
+* Kubernetes Cluster Autoscaler: Karpenter can work alongside Cluster Autoscaler. See [Kubernetes Cluster Autoscaler]({{< ref "./concepts/#kubernetes-cluster-autoscaler" >}}) for details.
+* Kubernetes Scheduler: Karpenter focuses on scheduling pods that the Kubernetes scheduler has marked as unschedulable. See [Scheduling]({{< ref "./concepts/scheduling" >}}) for details on how Karpenter interacts with the Kubernetes scheduler.

## Provisioning

-### What features does the Karpenter provisioner support?
-See [Provisioner API]({{< ref "./concepts/provisioners" >}}) for provisioner examples and descriptions of features.
+### What features does the Karpenter NodePool support?
+See the [NodePool API docs]({{< ref "./concepts/nodepools" >}}) for NodePool examples and descriptions of features.

-### Can I create multiple (team-based) provisioners on a cluster?
-Yes, provisioners can identify multiple teams based on labels.
-See [Provisioner API]({{< ref "./concepts/provisioners" >}}) for details.
+### Can I create multiple (team-based) NodePools on a cluster?
+Yes, NodePools can identify multiple teams based on labels. See the [NodePool API docs]({{< ref "./concepts/nodepools" >}}) for details.

-### If multiple provisioners are defined, which will my pod use?
+### If multiple NodePools are defined, which will my pod use?

-Pending pods will be handled by any Provisioner that matches the requirements of the pod.
-There is no ordering guarantee if multiple provisioners match pod requirements.
-We recommend that Provisioners are setup to be mutually exclusive.
-Read more about this recommendation in the [EKS Best Practices Guide for Karpenter](https://aws.github.io/aws-eks-best-practices/karpenter/#create-provisioners-that-are-mutually-exclusive).
-To select a specific provisioner, use the node selector `karpenter.sh/provisioner-name: my-provisioner`.
+Pending pods will be handled by any NodePool that matches the requirements of the pod. There is no ordering guarantee if multiple NodePools match pod requirements. We recommend that NodePools are set up to be mutually exclusive. To select a specific NodePool, use the node selector `karpenter.sh/nodepool: my-nodepool`.

### How can I configure Karpenter to only provision pods for a particular namespace?

-There is no native support for namespaced based provisioning.
-Karpenter can be configured to provision a subset of pods based on a combination of taints/tolerations and node selectors.
-This allows Karpenter to work in concert with the `kube-scheduler` in that the same mechanisms that `kube-scheduler` uses to determine if a pod can schedule to an existing node are also used for provisioning new nodes.
-This avoids scenarios where pods are bound to nodes that were provisioned by Karpenter which Karpenter would not have bound itself.
-If this were to occur, a node could remain non-empty and have its lifetime extended due to a pod that wouldn't have caused the node to be provisioned had the pod been unschedulable.
+There is no native support for namespace-based provisioning. Karpenter can be configured to provision a subset of pods based on a combination of taints/tolerations and node selectors.
This allows Karpenter to work in concert with the `kube-scheduler`, in that the same mechanisms `kube-scheduler` uses to determine whether a pod can schedule to an existing node are also used for provisioning new nodes. This avoids scenarios where pods are bound to Karpenter-provisioned nodes that Karpenter itself would not have chosen for them. If this were to occur, a node could remain non-empty and have its lifetime extended due to a pod that wouldn't have caused the node to be provisioned had the pod been unschedulable.

-We recommend using Kubernetes native scheduling constraints to achieve namespace based scheduling segregation. Using native scheduling constraints ensures that Karpenter, `kube-scheduler` and any other scheduling or auto-provisioning mechanism all have an identical understanding of which pods can be scheduled on which nodes. This can be enforced via policy agents, an example of which can be seen [here](https://blog.mikesir87.io/2022/01/creating-tenant-node-pools-with-karpenter/).
+We recommend using Kubernetes native scheduling constraints to achieve namespace-based scheduling segregation. Using native scheduling constraints ensures that Karpenter, `kube-scheduler`, and any other scheduling or auto-provisioning mechanism all have an identical understanding of which pods can be scheduled on which nodes. This can be enforced via policy agents, an example of which can be seen [here](https://blog.mikesir87.io/2022/01/creating-tenant-node-pools-with-karpenter/).

-### Can I add SSH keys to a provisioner?
+### Can I add SSH keys to a NodePool?

-Karpenter does not offer a way to add SSH keys via provisioners or secrets to the nodes it manages.
-However, you can use Session Manager (SSM) or EC2 Instance Connect to gain shell access to Karpenter nodes.
-See [Node NotReady]({{< ref "./troubleshooting/#node-notready" >}}) troubleshooting for an example of starting an SSM session from the command line or [EC2 Instance Connect](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-connect-set-up.html) documentation to connect to nodes using SSH.
+Karpenter does not offer a way to add SSH keys via NodePools or secrets to the nodes it manages.
+However, you can use Session Manager (SSM) or EC2 Instance Connect to gain shell access to Karpenter nodes. See [Node NotReady]({{< ref "./troubleshooting/#node-notready" >}}) troubleshooting for an example of starting an SSM session from the command line or [EC2 Instance Connect](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-connect-set-up.html) documentation to connect to nodes using SSH.

-Though not recommended, if you need to access Karpenter-managed nodes without AWS credentials, you can add SSH keys using AWSNodeTemplate.
-See [Custom User Data]({{< ref "./concepts/node-templates/#spec-userdata" >}}) for details.
+Though not recommended, if you need to access Karpenter-managed nodes without AWS credentials, you can add SSH keys using EC2NodeClass User Data. See the [User Data section in the EC2NodeClass documentation]({{< ref "./concepts/nodeclasses/#specuserdata" >}}) for details.

-### Can I set total limits of CPU and memory for a provisioner?
-Yes, the setting is provider-specific.
-See examples in [Accelerators, GPU]({{< ref "./concepts/scheduling/#accelerators-gpu-resources" >}}) Karpenter documentation.
+### Can I set limits of CPU and memory for a NodePool?
+Yes. View the [NodePool API docs]({{< ref "./concepts/nodepools#speclimits" >}}) for NodePool examples and descriptions of how to configure limits.
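+
+As a quick illustration of that answer, here is a minimal sketch of a NodePool with limits (the NodePool name and the specific quantities are illustrative placeholders, not values taken from these docs):
+
+```yaml
+apiVersion: karpenter.sh/v1beta1
+kind: NodePool
+metadata:
+  name: capped # illustrative name
+spec:
+  # Once the aggregate resources of nodes launched by this NodePool reach
+  # these values, Karpenter stops provisioning new capacity for it.
+  limits:
+    cpu: "100"
+    memory: 400Gi
+  template:
+    spec:
+      requirements:
+        - key: karpenter.sh/capacity-type
+          operator: In
+          values: ["on-demand"]
+```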
### Can I mix spot and on-demand EC2 run types? -Yes, see [Provisioning]({{< ref "./concepts/provisioners#examples" >}}) for an example. +Yes, see the [NodePool API docs]({{< ref "./concepts/nodepools#examples" >}}) for an example. ### Can I restrict EC2 instance types? @@ -104,53 +88,48 @@ Yes, see [Provisioning]({{< ref "./concepts/provisioners#examples" >}}) for an e ### Can I use Bare Metal instance types? -Yes, Karpenter supports provisioning metal instance types when a Provisioner's `node.kubernetes.io/instance-type` Requirements only include `metal` instance types. If other instance types fulfill pod requirements, then Karpenter will prioritize all non-metal instance types before metal ones are provisioned. +Yes, Karpenter supports provisioning metal instance types when a NodePool's `node.kubernetes.io/instance-type` Requirements only include `metal` instance types. If other instance types fulfill pod requirements, then Karpenter will prioritize all non-metal instance types before metal ones are provisioned. ### How does Karpenter dynamically select instance types? -Karpenter batches pending pods and then binpacks them based on CPU, memory, and GPUs required, taking into account node overhead, VPC CNI resources required, and daemonsets that will be packed when bringing up a new node. -By default Karpenter uses C, M, and R >= Gen 3 instance types, but it can be constrained in the provisioner spec with the [instance-type](https://kubernetes.io/docs/reference/labels-annotations-taints/#nodekubernetesioinstance-type) well-known label in the requirements section. +Karpenter batches pending pods and then binpacks them based on CPU, memory, and GPUs required, taking into account node overhead, VPC CNI resources required, and daemonsets that will be packed when bringing up a new node. Karpenter [recommends the use of C, M, and R >= Gen 3 instance types]({{< ref "./concepts/nodepools#spectemplatespecrequirements" >}}) for most generic workloads, but it can be constrained in the NodePool spec with the [instance-type](https://kubernetes.io/docs/reference/labels-annotations-taints/#nodekubernetesioinstance-type) well-known label in the requirements section. + After the pods are binpacked on the most efficient instance type (i.e. the smallest instance type that can fit the pod batch), Karpenter takes 59 other instance types that are larger than the most efficient packing, and passes all 60 instance type options to an API called Amazon EC2 Fleet. -The EC2 fleet API attempts to provision the instance type based on an allocation strategy. -If you are using the on-demand capacity type, then Karpenter uses the `lowest-price` allocation strategy. -So fleet will provision the lowest priced instance type it can get from the 60 instance types Karpenter passed to the EC2 fleet API. -If the instance type is unavailable for some reason, then fleet will move on to the next cheapest instance type. -If you are using the spot capacity type, Karpenter uses the price-capacity-optimized allocation strategy. This tells fleet to find the instance type that EC2 has the most capacity for while also considering price. This allocation strategy will balance cost and decrease the probability of a spot interruption happening in the near term. -See [Choose the appropriate allocation strategy](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-allocation-strategy.html#ec2-fleet-allocation-use-cases) for information on fleet optimization. 
+
+
+The EC2 fleet API attempts to provision the instance type based on the [Price Capacity Optimized allocation strategy](https://aws.amazon.com/blogs/compute/introducing-price-capacity-optimized-allocation-strategy-for-ec2-spot-instances/). For the on-demand capacity type, this is effectively equivalent to the `lowest-price` allocation strategy. For the spot capacity type, Fleet will determine an instance type that combines the lowest price with the lowest chance of being interrupted. Note that this may not give you the instance type with the strictly lowest price for spot.

### How does Karpenter calculate the resource usage of Daemonsets when simulating scheduling?

-Karpenter currently calculates the applicable daemonsets at the provisioner level with label selectors/taints, etc. It does not look to see if there are requirements on the daemonsets that would exclude it from running on particular instances that the provisioner could or couldn't launch.
-The recommendation for now is to use multiple provisioners with taints/tolerations or label selectors to limit daemonsets to only nodes launched from specific provisioners.
+Karpenter currently calculates the applicable daemonsets at the NodePool level with label selectors/taints, etc. It does not look to see if there are requirements on the daemonsets that would exclude them from running on particular instances that the NodePool could or couldn't launch.
+The recommendation for now is to use multiple NodePools with taints/tolerations or label selectors to limit daemonsets to only nodes launched from specific NodePools.

### What if there is no Spot capacity? Will Karpenter use On-Demand?

-The best defense against running out of Spot capacity is to allow Karpenter to provision as many different instance types as possible.
-Even instance types that have higher specs, e.g. vCPU, memory, etc., than what you need can still be cheaper in the Spot market than using On-Demand instances.
-When Spot capacity is constrained, On-Demand capacity can also be constrained since Spot is fundamentally spare On-Demand capacity.
-Allowing Karpenter to provision nodes from a large, diverse set of instance types will help you to stay on Spot longer and lower your costs due to Spot’s discounted pricing.
-Moreover, if Spot capacity becomes constrained, this diversity will also increase the chances that you’ll be able to continue to launch On-Demand capacity for your workloads.
+The best defense against running out of Spot capacity is to allow Karpenter to provision as many distinct instance types as possible. Even instance types that have higher specs (e.g. vCPU, memory, etc.) than what you need can still be cheaper in the Spot market than using On-Demand instances. When Spot capacity is constrained, On-Demand capacity can also be constrained since Spot is fundamentally spare On-Demand capacity.

-If your Karpenter Provisioner specifies flexibility to both Spot and On-Demand capacity, Karpenter will attempt to provision On-Demand capacity if there is no Spot capacity available.
-However, it’s strongly recommended that you specify at least 20 instance types in your Provisioner (or none and allow Karpenter to pick the best instance types) as our research indicates that this additional diversity increases the chances that your workloads will not need to launch On-Demand capacity at all.
-Today, Karpenter will warn you if the number of instances in your Provisioner isn’t sufficiently diverse.
+Allowing Karpenter to provision nodes from a large, diverse set of instance types will help you to stay on Spot longer and lower your costs due to Spot’s discounted pricing. Moreover, if Spot capacity becomes constrained, this diversity will also increase the chances that you’ll be able to continue to launch On-Demand capacity for your workloads.

-Technically, Karpenter has a concept of an “offering” for each instance type, which is a combination of zone and capacity type (equivalent in the AWS cloud provider to an EC2 purchase option – Spot or On-Demand).
-Whenever the Fleet API returns an insufficient capacity error for Spot instances, those particular offerings are temporarily removed from consideration (across the entire provisioner) so that Karpenter can make forward progress with different options.
+If your Karpenter NodePool allows both Spot and On-Demand capacity, Karpenter will fall back to provisioning On-Demand capacity if there is no Spot capacity available. However, it’s strongly recommended that you allow at least 20 instance types in your NodePool since this additional diversity increases the chances that your workloads will not need to launch On-Demand capacity at all.
+
+Karpenter has a concept of an “offering” for each instance type, which is a combination of zone and capacity type. Whenever the Fleet API returns an insufficient capacity error for Spot instances, those particular offerings are temporarily removed from consideration (across the entire NodePool) so that Karpenter can make forward progress with different options.

### Does Karpenter support IPv6?

-Yes! Karpenter dynamically discovers if you are running in an IPv6 cluster by checking the kube-dns service's cluster-ip. When using an AMI Family such as `AL2`, Karpenter will automatically configure the EKS Bootstrap script for IPv6. Some EC2 instance types do not support IPv6 and the Amazon VPC CNI only supports instance types that run on the Nitro hypervisor. It's best to add a requirement to your Provisioner to only allow Nitro instance types:
+Yes! Karpenter dynamically discovers if you are running in an IPv6 cluster by checking the kube-dns service's cluster-ip. When using an AMI Family such as `AL2`, Karpenter will automatically configure the EKS Bootstrap script for IPv6. Some EC2 instance types do not support IPv6 and the Amazon VPC CNI only supports instance types that run on the Nitro hypervisor. It's best to add a requirement to your NodePool to only allow Nitro instance types:
```
-kind: Provisioner
+apiVersion: karpenter.sh/v1beta1
+kind: NodePool
...
spec:
-  requirements:
-  - key: karpenter.k8s.aws/instance-hypervisor
-    operator: In
-    values:
-    - nitro
+  template:
+    spec:
+      requirements:
+        - key: karpenter.k8s.aws/instance-hypervisor
+          operator: In
+          values:
+            - nitro
```

For more documentation on enabling IPv6 with the Amazon VPC CNI, see the [docs](https://docs.aws.amazon.com/eks/latest/userguide/cni-ipv6.html).

@@ -165,7 +144,7 @@ Windows nodes do not support IPv6.

`kube-scheduler` is responsible for the scheduling of pods, while Karpenter launches the capacity. When using any sort of preferred scheduling constraint, `kube-scheduler` will schedule pods to nodes anytime it is possible.

-As an example, suppose you scale up a deployment with a preferred zonal topology spread and none of the newly created pods can run on your existing cluster. Karpenter will then launch multiple nodes to satisfy that preference.
If a) one of the nodes becomes ready slightly faster than other nodes and b) has enough capacity for multiple pods, `kube-scheduler` will schedule as many pods as possible to the single ready node so they won't remain unschedulable. It doesn't consider the in-flight capacity that will be ready in a few seconds. If all of the pods fit on the single node, the remaining nodes that Karpenter has launched aren't needed when they become ready and consolidation will delete them. +As an example, suppose you scale up a deployment with a preferred zonal topology spread and none of the newly created pods can run on your existing cluster. Karpenter will then launch multiple nodes to satisfy that preference. If a) one of the nodes becomes ready slightly faster than other nodes and b) has enough capacity for multiple pods, `kube-scheduler` will schedule as many pods as possible to the single ready node, so they won't remain unschedulable. It doesn't consider the in-flight capacity that will be ready in a few seconds. If all the pods fit on the single node, the remaining nodes that Karpenter has launched aren't needed when they become ready and consolidation will delete them. ### When deploying an additional DaemonSet to my cluster, why does Karpenter not scale-up my nodes to support the extra DaemonSet? @@ -194,21 +173,26 @@ See [Application developer]({{< ref "./concepts/#application-developer" >}}) for Yes. See [Persistent Volume Topology]({{< ref "./concepts/scheduling#persistent-volume-topology" >}}) for details. ### Can I set `--max-pods` on my nodes? -Yes, see the [KubeletConfiguration Section in the Provisioners Documentation]({{}}) to learn more. +Yes, see the [KubeletConfiguration Section in the NodePool docs]({{}}) to learn more. ### Why do the Windows2019 and Windows2022 AMI families only support Windows Server Core? The difference between the Core and Full variants is that Core is a minimal OS with less components and no graphic user interface (GUI) or desktop experience. `Windows2019` and `Windows2022` AMI families use the Windows Server Core option for simplicity, but if required, you can specify a custom AMI to run Windows Server Full. -You can specify the [Amazon EKS optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-windows-ami.html) with Windows Server 2022 Full for Kubernetes 1.27 by configuring an `amiSelector` that references the AMI name. -``` -amiSelector: - aws::name: Windows_Server-2022-English-Full-EKS_Optimized-1.27* +You can specify the [Amazon EKS optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-windows-ami.html) with Windows Server 2022 Full for Kubernetes 1.29 by configuring an `amiSelector` that references the AMI name. +```yaml +amiSelectorTerms: + - name: Windows_Server-2022-English-Full-EKS_Optimized-1.29* ``` +### Can I use Karpenter to scale my workload's pods? +Karpenter is a node autoscaler which will create new nodes in response to unschedulable pods. Scaling the pods themselves is outside of its scope. +This is the realm of pod autoscalers such as the [Vertical Pod Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler) (for scaling an individual pod's resources) or the [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (for scaling replicas). +We also recommend taking a look at [Keda](https://keda.sh/) if you're looking for more advanced autoscaling capabilities for pods. 
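+
+For context on the `amiSelector` answer above, `amiSelectorTerms` lives on the EC2NodeClass. The following is a hedged sketch only; the resource name is illustrative and `${CLUSTER_NAME}` plus the discovery tags assume the tagging scheme used elsewhere in these docs:
+
+```yaml
+apiVersion: karpenter.k8s.aws/v1beta1
+kind: EC2NodeClass
+metadata:
+  name: windows2022-full # illustrative name
+spec:
+  amiFamily: Windows2022
+  amiSelectorTerms:
+    # Matches the Full (not Core) EKS-optimized Windows AMI by name
+    - name: Windows_Server-2022-English-Full-EKS_Optimized-1.29*
+  role: KarpenterNodeRole-${CLUSTER_NAME} # placeholder role name
+  subnetSelectorTerms:
+    - tags:
+        karpenter.sh/discovery: ${CLUSTER_NAME}
+  securityGroupSelectorTerms:
+    - tags:
+        karpenter.sh/discovery: ${CLUSTER_NAME}
+```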
+

## Deprovisioning

### How does Karpenter deprovision nodes?
-See [Deprovisioning nodes]({{< ref "./concepts/deprovisioning" >}}) for information on how Karpenter deprovisions nodes.
+See [Deprovisioning nodes]({{< ref "./concepts/disruption" >}}) for information on how Karpenter deprovisions nodes.

## Upgrading Karpenter

@@ -218,26 +202,24 @@ Use your existing upgrade mechanisms to upgrade your core add-ons in Kubernetes
Karpenter requires proper permissions in the `KarpenterNode IAM Role` and the `KarpenterController IAM Role`.
To upgrade Karpenter to version `$VERSION`, make sure that the `KarpenterNode IAM Role` and the `KarpenterController IAM Role` have the right permissions described in `https://karpenter.sh/$VERSION/getting-started/getting-started-with-karpenter/cloudformation.yaml`.
-Next, locate `KarpenterController IAM Role` ARN (i.e., ARN of the resource created in [Create the KarpenterController IAM Role](../getting-started/getting-started-with-karpenter/#create-the-karpentercontroller-iam-role)) and pass them to the helm upgrade command.
+Next, locate the `KarpenterController IAM Role` ARN (i.e., the ARN of the resource created in [Create the KarpenterController IAM Role](../getting-started/getting-started-with-karpenter/#create-the-karpentercontroller-iam-role)) and pass it to the Helm upgrade command.

{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh" language="bash"%}}

-For information on upgrading Karpenter, see the [Upgrade Guide]({{< ref "./upgrade-guide/" >}}).
-
-### Why do I get an `unknown field "startupTaints"` error when creating a provisioner with startupTaints?
-
-```bash
-error: error validating "provisioner.yaml": error validating data: ValidationError(Provisioner.spec): unknown field "startupTaints" in sh.karpenter.v1alpha5.Provisioner.spec; if you choose to ignore these errors, turn validation off with --validate=false
-```
-
-The `startupTaints` parameter was added in v0.10.0. Helm upgrades do not upgrade the CRD describing the provisioner, so it must be done manually. For specific details, see the [Upgrade Guide]({{< ref "./upgrade-guide/#upgrading-to-v0100" >}})
+For information on upgrading Karpenter, see the [Upgrade Guide]({{< ref "./upgrading/upgrade-guide/" >}}).

## Upgrading Kubernetes Cluster

### How do I upgrade an EKS Cluster with Karpenter?

-When upgrading an Amazon EKS cluster, [Karpenter's Drift feature]({{}}) can automatically upgrade the Karpenter-provisioned nodes to stay in-sync with the EKS control plane. Karpenter Drift currently needs to be enabled using a [feature gate]({{}}). Karpenter's default [AWSNodeTemplate `amiFamily` configuration]({{}}) uses the latest EKS Optimized AL2 AMI for the same major and minor version as the EKS cluster's control plane. Karpenter's AWSNodeTemplate can be configured to not use the EKS optimized AL2 AMI in favor of a custom AMI by configuring the [`amiSelector`]({{}}). If using a custom AMI, you will need to trigger the rollout of this new worker node image through the publication of a new AMI with tags matching the [`amiSelector`]({{}}), or a change to the [`amiSelector`]({{}}) field.
+When upgrading an Amazon EKS cluster, [Karpenter's Drift feature]({{}}) can automatically upgrade the Karpenter-provisioned nodes to stay in sync with the EKS control plane. Karpenter Drift is enabled by default starting with version `0.33.0`.
+ +{{% alert title="Note" color="primary" %}} +Karpenter's default [EC2NodeClass `amiFamily` configuration]({{}}) uses the latest EKS Optimized AL2 AMI for the same major and minor version as the EKS cluster's control plane, meaning that an upgrade of the control plane will cause Karpenter to auto-discover the new AMIs for that version. + +If using a custom AMI, you will need to trigger the rollout of this new worker node image through the publication of a new AMI with tags matching the [`amiSelector`]({{}}), or a change to the [`amiSelector`]({{}}) field. +{{% /alert %}} -Start by [upgrading the EKS Cluster control plane](https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html). After the EKS Cluster upgrade completes, Karpenter's Drift feature will detect that the Karpenter-provisioned nodes are using EKS Optimized AMIs for the previous cluster version, and [automatically cordon, drain, and replace those nodes]({{}}). To support pods moving to new nodes, follow Kubernetes best practices by setting appropriate pod [Resource Quotas](https://kubernetes.io/docs/concepts/policy/resource-quotas/), and using [Pod Disruption Budgets](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) (PDB). Karpenter's Drift feature will spin up replacement nodes based on the pod resource requests, and will respect the PDBs when deprovisioning nodes. +Start by [upgrading the EKS Cluster control plane](https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html). After the EKS Cluster upgrade completes, Karpenter's Drift feature will detect that the Karpenter-provisioned nodes are using EKS Optimized AMIs for the previous cluster version, and [automatically cordon, drain, and replace those nodes]({{}}). To support pods moving to new nodes, follow Kubernetes best practices by setting appropriate pod [Resource Quotas](https://kubernetes.io/docs/concepts/policy/resource-quotas/), and using [Pod Disruption Budgets](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) (PDB). Karpenter's Drift feature will spin up replacement nodes based on the pod resource requests, and will respect the PDBs when deprovisioning nodes. ## Interruption Handling @@ -249,10 +231,10 @@ Karpenter's native interruption handling offers two main benefits over the stand 1. You don't have to manage and maintain a separate component to exclusively handle interruption events. 2. Karpenter's native interruption handling coordinates with other deprovisioning so that consolidation, expiration, etc. can be aware of interruption events and vice-versa. -### Why am I receiving QueueNotFound errors when I set `aws.interruptionQueueName`? +### Why am I receiving QueueNotFound errors when I set `--interruption-queue-name`? Karpenter requires a queue to exist that receives event messages from EC2 and health services in order to handle interruption messages properly for nodes. -Details on the types of events that Karpenter handles can be found in the [Interruption Handling Docs]({{< ref "./concepts/deprovisioning/#interruption" >}}). +Details on the types of events that Karpenter handles can be found in the [Interruption Handling Docs]({{< ref "./concepts/disruption/#interruption" >}}). Details on provisioning the SQS queue and EventBridge rules can be found in the [Getting Started Guide]({{< ref "./getting-started/getting-started-with-karpenter/#create-the-karpenter-infrastructure-and-iam-roles" >}}). 
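+
+As a minimal sketch of wiring the queue into the controller (assuming the chart exposes it through a `settings.interruptionQueue` Helm value; the queue name is a placeholder), the Helm values would look like:
+
+```yaml
+# Helm values sketch -- the SQS queue itself, plus the EventBridge rules
+# that target it, must already exist (see the Getting Started guide).
+settings:
+  interruptionQueue: my-cluster
+```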
diff --git a/website/content/en/v0.31/getting-started/_index.md b/website/content/en/v0.35/getting-started/_index.md similarity index 82% rename from website/content/en/v0.31/getting-started/_index.md rename to website/content/en/v0.35/getting-started/_index.md index c159f68f5e48..a70f6bbc422b 100644 --- a/website/content/en/v0.31/getting-started/_index.md +++ b/website/content/en/v0.35/getting-started/_index.md @@ -1,18 +1,16 @@ --- title: "Getting Started" linkTitle: "Getting Started" -weight: 1 +weight: 10 description: > Choose from different methods to get started with Karpenter -cascade: - type: docs --- To get started with Karpenter, the [Getting Started with Karpenter]({{< relref "getting-started-with-karpenter" >}}) guide provides an end-to-end procedure for creating a cluster (with `eksctl`) and adding Karpenter. If you prefer, the following instructions use Terraform to create a cluster and add Karpenter: -* [Amazon EKS Blueprints for Terraform](https://aws-ia.github.io/terraform-aws-eks-blueprints): Follow a basic [Getting Started](https://aws-ia.github.io/terraform-aws-eks-blueprints/v4.18.0/getting-started/) guide and also add modules and add-ons. This includes a [Karpenter](https://aws-ia.github.io/terraform-aws-eks-blueprints/v4.18.0/add-ons/karpenter/) add-on that lets you bypass the instructions in this guide for setting up Karpenter. +* [Amazon EKS Blueprints for Terraform](https://aws-ia.github.io/terraform-aws-eks-blueprints): Follow a basic [Getting Started](https://aws-ia.github.io/terraform-aws-eks-blueprints/getting-started/) guide and also add modules and add-ons. This includes a [Karpenter](https://aws-ia.github.io/terraform-aws-eks-blueprints/patterns/karpenter/) add-on that lets you bypass the instructions in this guide for setting up Karpenter. Although not supported, you could also try Karpenter on other Kubernetes distributions running on AWS. For example: diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/_index.md b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/_index.md new file mode 100644 index 000000000000..49439c562074 --- /dev/null +++ b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/_index.md @@ -0,0 +1,247 @@ +--- +title: "Getting Started with Karpenter" +linkTitle: "Getting Started with Karpenter" +weight: 10 +description: > + Set up a cluster and add Karpenter +--- + +Karpenter automatically provisions new nodes in response to unschedulable pods. Karpenter does this by observing events within the Kubernetes cluster, and then sending commands to the underlying cloud provider. + +This guide shows how to get started with Karpenter by creating a Kubernetes cluster and installing Karpenter. +To use Karpenter, you must be running a supported Kubernetes cluster on a supported cloud provider. +Currently, only EKS on AWS is supported. + +## Create a cluster and add Karpenter + +This guide uses `eksctl` to create the cluster. +It should take less than 1 hour to complete, and cost less than $0.25. +Follow the clean-up instructions to reduce any charges. + +### 1. Install utilities + +Karpenter is installed in clusters with a Helm chart. + +Karpenter requires cloud provider permissions to provision nodes; on AWS, IAM +Roles for Service Accounts (IRSA) should be used. IRSA permits Karpenter +(within the cluster) to make privileged requests to AWS (as the cloud provider) +via a ServiceAccount. + +Install these tools before proceeding: + +1. 
[AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html) +2. `kubectl` - [the Kubernetes CLI](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/) +3. `eksctl` (>= v0.169.0) - [the CLI for AWS EKS](https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html) +4. `helm` - [the package manager for Kubernetes](https://helm.sh/docs/intro/install/) + +[Configure the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) +with a user that has sufficient privileges to create an EKS cluster. Verify that the CLI can +authenticate properly by running `aws sts get-caller-identity`. + +### 2. Set environment variables + +After setting up the tools, set the Karpenter and Kubernetes versions: + +```bash +export KARPENTER_NAMESPACE="kube-system" +export KARPENTER_VERSION="0.35.0" +export K8S_VERSION="1.29" +``` + +Then set the following environment variables: + +{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step01-config.sh" language="bash"%}} + +{{% alert title="Warning" color="warning" %}} +If you open a new shell to run steps in this procedure, you need to set some or all of the environment variables again. +To remind yourself of these values, type: + +```bash +echo "${KARPENTER_NAMESPACE}" "${KARPENTER_VERSION}" "${K8S_VERSION}" "${CLUSTER_NAME}" "${AWS_DEFAULT_REGION}" "${AWS_ACCOUNT_ID}" "${TEMPOUT}" +``` + +{{% /alert %}} + + +### 3. Create a Cluster + +Create a basic cluster with `eksctl`. +The following cluster configuration will: + +* Use CloudFormation to set up the infrastructure needed by the EKS cluster. See [CloudFormation]({{< relref "../../reference/cloudformation/" >}}) for a complete description of what `cloudformation.yaml` does for Karpenter. +* Create a Kubernetes service account and AWS IAM Role, and associate them using IRSA to let Karpenter launch instances. +* Add the Karpenter node role to the aws-auth configmap to allow nodes to connect. +* Use [AWS EKS managed node groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for the kube-system and karpenter namespaces. Uncomment fargateProfiles settings (and comment out managedNodeGroups settings) to use Fargate for both namespaces instead. +* Set the KARPENTER_IAM_ROLE_ARN variable. +* Create a role to allow spot instances. +* Run Helm to install Karpenter. + +{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh" language="bash"%}} + +{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step06-add-spot-role.sh" language="bash"%}} + +{{% alert title="Windows Support Notice" color="warning" %}} +In order to run Windows workloads, Windows support should be enabled in your EKS Cluster. +See [Enabling Windows support](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support) to learn more. +{{% /alert %}} + +### 4. Install Karpenter + +{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh" language="bash"%}} + +{{% alert title="DNS Policy Notice" color="warning" %}} +Karpenter uses the `ClusterFirst` pod DNS policy by default. This is the Kubernetes cluster default, and it ensures that Karpenter can reach out to internal Kubernetes services during its lifetime.
There may be cases where the DNS service you are using on your cluster is not up and running before Karpenter starts. The most common case is when you want Karpenter to manage the node capacity where your DNS service pods are running. + +If you need Karpenter to manage the DNS service pods' capacity, this means that DNS won't be running when Karpenter starts up. In this case, you will need to set the pod DNS policy to `Default` with `--set dnsPolicy=Default`. This will tell Karpenter to use the host's DNS resolution instead of the internal DNS resolution, ensuring that you don't have a dependency on the DNS service pods to run. More details on this issue can be found in the following GitHub issues: [#2186](https://github.com/aws/karpenter-provider-aws/issues/2186) and [#4947](https://github.com/aws/karpenter-provider-aws/issues/4947). +{{% /alert %}} + +{{% alert title="Common Expression Language/Webhooks Notice" color="warning" %}} +Karpenter supports using [Kubernetes Common Expression Language](https://kubernetes.io/docs/reference/using-api/cel/) for validating its Custom Resource Definitions out-of-the-box; however, this feature is not supported on versions of Kubernetes < 1.25. If you are running an earlier version of Kubernetes, you will need to use the Karpenter admission webhooks for validation instead. You can enable these webhooks with `--set webhook.enabled=true` when applying the Karpenter Helm chart. +{{% /alert %}} + +{{% alert title="Pod Identity Support Notice" color="warning" %}} +Karpenter now supports using [Pod Identity](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html) to authenticate the AWS SDK when making API requests to AWS services using AWS Identity and Access Management (IAM) permissions. This feature is not supported on versions of Kubernetes < 1.24. If you are running an earlier version of Kubernetes, you will need to use [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/emr/latest/EMR-on-EKS-DevelopmentGuide/setting-up-enable-IAM.html) for pod authentication instead. You can enable IRSA with `--set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=${KARPENTER_IAM_ROLE_ARN}"` when applying the Karpenter Helm chart. +{{% /alert %}} + +{{% alert title="Warning" color="warning" %}} +Karpenter creates a mapping between CloudProvider machines and CustomResources in the cluster for capacity tracking. To ensure this mapping is consistent, Karpenter utilizes the following tag keys: + +* `karpenter.sh/managed-by` +* `karpenter.sh/nodepool` +* `kubernetes.io/cluster/${CLUSTER_NAME}` + +Because Karpenter takes this dependency, any user that has the ability to Create/Delete these tags on CloudProvider machines will have the ability to orchestrate Karpenter to Create/Delete CloudProvider machines as a side effect. We recommend that you [enforce tag-based IAM policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html) on these tags against any EC2 instance resource (`i-*`) for any users that might have [CreateTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html)/[DeleteTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteTags.html) permissions but should not have [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html)/[TerminateInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TerminateInstances.html) permissions. +{{% /alert %}} + +### 5. 
Create NodePool + +A single Karpenter NodePool is capable of handling many different pod shapes. Karpenter makes scheduling and provisioning decisions based on pod attributes such as labels and affinity. In other words, Karpenter eliminates the need to manage many different node groups. + +Create a default NodePool using the command below. This NodePool uses `securityGroupSelectorTerms` and `subnetSelectorTerms` to discover resources used to launch nodes. We applied the tag `karpenter.sh/discovery` in the `eksctl` command above. Depending on how these resources are shared between clusters, you may need to use different tagging schemes. + +Setting `consolidationPolicy` to `WhenUnderutilized` in the `disruption` block configures Karpenter to reduce cluster cost by removing and replacing nodes. As a result, consolidation will terminate any empty nodes on the cluster. This behavior can be disabled by setting `consolidateAfter` to `Never`, telling Karpenter that it should never consolidate nodes. Review the [NodePool API docs]({{}}) for more information. + +Note: This NodePool will create capacity as long as the sum of all created capacity is less than the specified limit. + +{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh" language="bash"%}} + +Karpenter is now active and ready to begin provisioning nodes. + +### 6. Scale up deployment + +This deployment uses the [pause image](https://www.ianlewis.org/en/almighty-pause-container) and starts with zero replicas. + +{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step13-automatic-node-provisioning.sh" language="bash"%}} + +### 7. Scale down deployment + +Now, delete the deployment. After a short amount of time, Karpenter should terminate the empty nodes due to consolidation. + +{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step14-deprovisioning.sh" language="bash"%}} + +### 8. Delete Karpenter nodes manually + +If you delete a node with kubectl, Karpenter will gracefully cordon, drain, +and shut down the corresponding instance. Under the hood, Karpenter adds a +finalizer to the node object, which blocks deletion until all pods are +drained and the instance is terminated. Keep in mind, this only works for +nodes provisioned by Karpenter. + +{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step16-delete-node.sh" language="bash"%}} + +### 9. Delete the cluster +To avoid additional charges, remove the demo infrastructure from your AWS account. + +{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step17-cleanup.sh" language="bash"%}} + +## Monitoring with Grafana (optional) + +This section describes optional ways to configure Karpenter to enhance its capabilities. +In particular, the following commands deploy a Prometheus and Grafana stack that is suitable for this guide but does not include persistent storage or other configurations that would be necessary for monitoring a production deployment of Karpenter. +This deployment includes two Karpenter dashboards that are automatically onboarded to Grafana. They provide a variety of visualization examples of Karpenter metrics. + +{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh" language="bash"%}} + +The Grafana instance may be accessed using port forwarding. 
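For reference, the port-forwarding script included below amounts to a single command along these lines (service name and ports are assumed from this guide's monitoring install):

```bash
# Forward local port 3000 to the Grafana service created above.
kubectl port-forward --namespace monitoring svc/grafana 3000:80
```

Grafana should then be reachable at `http://localhost:3000`.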
+ +{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step10-add-grafana-port-forward.sh" language="bash"%}} + +The new stack has only one user, `admin`, and the password is stored in a secret. The following command will retrieve the password. + +{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step11-grafana-get-password.sh" language="bash"%}} + +## Advanced Installation + +The section below covers advanced installation techniques for installing Karpenter. This includes things such as running Karpenter on a cluster without public internet access or ensuring that Karpenter avoids getting throttled by other components in your cluster. + +### Private Clusters + +You can optionally install Karpenter on a [private cluster](https://docs.aws.amazon.com/eks/latest/userguide/private-clusters.html#private-cluster-requirements) using the `eksctl` installation by setting `privateCluster.enabled` to true in your [ClusterConfig](https://eksctl.io/usage/eks-private-cluster/#eks-fully-private-cluster) and by passing `--set settings.isolatedVPC=true` when installing the `karpenter` Helm chart. + +```yaml +privateCluster: + enabled: true +``` + +Private clusters have no outbound access to the internet. This means that in order for Karpenter to reach out to the services that it needs to access, you need to enable specific VPC private endpoints. Below are the endpoints that you need to enable to successfully run Karpenter in a private cluster: + +```text +com.amazonaws.<region>.ec2 +com.amazonaws.<region>.ecr.api +com.amazonaws.<region>.ecr.dkr +com.amazonaws.<region>.s3 - For pulling container images +com.amazonaws.<region>.sts - For IAM roles for service accounts +com.amazonaws.<region>.ssm - For resolving default AMIs +com.amazonaws.<region>.sqs - For accessing SQS if using interruption handling +com.amazonaws.<region>.eks - For Karpenter to discover the cluster endpoint +``` + +If you do not currently have these endpoints surfaced in your VPC, you can add them by running: + +```bash +aws ec2 create-vpc-endpoint --vpc-id ${VPC_ID} --service-name ${SERVICE_NAME} --vpc-endpoint-type Interface --subnet-ids ${SUBNET_IDS} --security-group-ids ${SECURITY_GROUP_IDS} +``` + +{{% alert title="Note" color="primary" %}} + +Karpenter (controller and webhook deployment) container images must be in or copied to Amazon ECR private or to another private registry accessible from inside the VPC. If these are not available from within the VPC, or from networks peered with the VPC, you will get image pull errors when Kubernetes tries to pull these images from ECR public. + +{{% /alert %}} + +{{% alert title="Note" color="primary" %}} + +There is currently no VPC private endpoint for the [IAM API](https://docs.aws.amazon.com/IAM/latest/APIReference/welcome.html). As a result, you cannot use the default `spec.role` field in your `EC2NodeClass`. Instead, you need to provision and manage an instance profile manually and then configure Karpenter to use it through the `spec.instanceProfile` field. 
+ +You can provision an instance profile manually and assign a node role to it with the following commands: + +```bash +aws iam create-instance-profile --instance-profile-name "KarpenterNodeInstanceProfile-${CLUSTER_NAME}" +aws iam add-role-to-instance-profile --instance-profile-name "KarpenterNodeInstanceProfile-${CLUSTER_NAME}" --role-name "KarpenterNodeRole-${CLUSTER_NAME}" +``` + +{{% /alert %}} + +{{% alert title="Note" color="primary" %}} + +There is currently no VPC private endpoint for the [Price List Query API](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/using-price-list-query-api.html). As a result, pricing data can go stale over time. By default, Karpenter ships a static price list that is updated when each binary is released. + +Failed requests for pricing data will result in error messages like the following: + +```bash +ERROR controller.aws.pricing updating on-demand pricing, RequestError: send request failed +caused by: Post "https://api.pricing.us-east-1.amazonaws.com/": dial tcp 52.94.231.236:443: i/o timeout; RequestError: send request failed +caused by: Post "https://api.pricing.us-east-1.amazonaws.com/": dial tcp 52.94.231.236:443: i/o timeout, using existing pricing data from 2022-08-17T00:19:52Z {"commit": "4b5f953"} +``` + +{{% /alert %}} + +### Preventing APIServer Request Throttling + +Kubernetes uses [FlowSchemas](https://kubernetes.io/docs/concepts/cluster-administration/flow-control/#flowschema) and [PriorityLevelConfigurations](https://kubernetes.io/docs/concepts/cluster-administration/flow-control/#prioritylevelconfiguration) to map calls to the API server into buckets that determine each user agent's throttling limits. + +By default, Karpenter is installed into the `kube-system` namespace, which leverages the `system-leader-election` and `kube-system-service-accounts` [FlowSchemas](https://kubernetes.io/docs/concepts/cluster-administration/flow-control/#flowschema) to map calls from the `kube-system` namespace to the `leader-election` and `workload-high` PriorityLevelConfigurations, respectively. By putting Karpenter in these PriorityLevelConfigurations, we ensure that Karpenter and other critical cluster components are able to run even if other components on the cluster are throttled in other PriorityLevelConfigurations. + +If you install Karpenter in a different namespace than the default `kube-system` namespace, Karpenter will not be put into these higher-priority FlowSchemas by default. Instead, you will need to create custom FlowSchemas for the namespace and service account where Karpenter is installed to ensure that requests are put into this higher PriorityLevelConfiguration. 
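For illustration, a custom FlowSchema for a Karpenter installation in a hypothetical `karpenter` namespace might look like the sketch below. This is a minimal example under assumed names, not the exact manifest that the script which follows applies:

```bash
# Map requests from the karpenter ServiceAccount into the workload-high
# priority level. Adjust the namespace and names to your installation.
kubectl apply -f - <<EOF
apiVersion: flowcontrol.apiserver.k8s.io/v1beta3
kind: FlowSchema
metadata:
  name: karpenter-workload
spec:
  matchingPrecedence: 1000
  priorityLevelConfiguration:
    name: workload-high
  distinguisherMethod:
    type: ByUser
  rules:
    - subjects:
        - kind: ServiceAccount
          serviceAccount:
            name: karpenter
            namespace: karpenter
      resourceRules:
        - apiGroups: ["*"]
          resources: ["*"]
          verbs: ["*"]
          clusterScope: true
          namespaces: ["*"]
EOF
```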
+ +{{% script file="./content/en/{VERSION}/getting-started/getting-started-with-karpenter/scripts/step15-apply-flowschemas.sh" language="bash"%}} diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/cloudformation.yaml b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/cloudformation.yaml similarity index 71% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/cloudformation.yaml rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/cloudformation.yaml index b8c8d64614e7..207761007d2d 100644 --- a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/cloudformation.yaml +++ b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/cloudformation.yaml @@ -5,13 +5,6 @@ Parameters: Type: String Description: "EKS cluster name" Resources: - KarpenterNodeInstanceProfile: - Type: "AWS::IAM::InstanceProfile" - Properties: - InstanceProfileName: !Sub "KarpenterNodeInstanceProfile-${ClusterName}" - Path: "/" - Roles: - - !Ref "KarpenterNodeRole" KarpenterNodeRole: Type: "AWS::IAM::Role" Properties: @@ -42,21 +35,36 @@ Resources: "Version": "2012-10-17", "Statement": [ { - "Sid": "AllowScopedEC2InstanceActions", + "Sid": "AllowScopedEC2InstanceAccessActions", "Effect": "Allow", "Resource": [ "arn:${AWS::Partition}:ec2:${AWS::Region}::image/*", "arn:${AWS::Partition}:ec2:${AWS::Region}::snapshot/*", - "arn:${AWS::Partition}:ec2:${AWS::Region}:*:spot-instances-request/*", "arn:${AWS::Partition}:ec2:${AWS::Region}:*:security-group/*", - "arn:${AWS::Partition}:ec2:${AWS::Region}:*:subnet/*", - "arn:${AWS::Partition}:ec2:${AWS::Region}:*:launch-template/*" + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:subnet/*" ], "Action": [ "ec2:RunInstances", "ec2:CreateFleet" ] }, + { + "Sid": "AllowScopedEC2LaunchTemplateAccessActions", + "Effect": "Allow", + "Resource": "arn:${AWS::Partition}:ec2:${AWS::Region}:*:launch-template/*", + "Action": [ + "ec2:RunInstances", + "ec2:CreateFleet" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned" + }, + "StringLike": { + "aws:ResourceTag/karpenter.sh/nodepool": "*" + } + } + }, { "Sid": "AllowScopedEC2InstanceActionsWithTags", "Effect": "Allow", @@ -65,7 +73,8 @@ Resources: "arn:${AWS::Partition}:ec2:${AWS::Region}:*:instance/*", "arn:${AWS::Partition}:ec2:${AWS::Region}:*:volume/*", "arn:${AWS::Partition}:ec2:${AWS::Region}:*:network-interface/*", - "arn:${AWS::Partition}:ec2:${AWS::Region}:*:launch-template/*" + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:launch-template/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:spot-instances-request/*" ], "Action": [ "ec2:RunInstances", @@ -77,7 +86,7 @@ Resources: "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned" }, "StringLike": { - "aws:RequestTag/karpenter.sh/provisioner-name": "*" + "aws:RequestTag/karpenter.sh/nodepool": "*" } } }, @@ -89,7 +98,8 @@ Resources: "arn:${AWS::Partition}:ec2:${AWS::Region}:*:instance/*", "arn:${AWS::Partition}:ec2:${AWS::Region}:*:volume/*", "arn:${AWS::Partition}:ec2:${AWS::Region}:*:network-interface/*", - "arn:${AWS::Partition}:ec2:${AWS::Region}:*:launch-template/*" + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:launch-template/*", + "arn:${AWS::Partition}:ec2:${AWS::Region}:*:spot-instances-request/*" ], "Action": "ec2:CreateTags", "Condition": { @@ -102,27 +112,26 @@ Resources: ] }, "StringLike": { - "aws:RequestTag/karpenter.sh/provisioner-name": "*" + 
"aws:RequestTag/karpenter.sh/nodepool": "*" } } }, { - "Sid": "AllowMachineMigrationTagging", + "Sid": "AllowScopedResourceTagging", "Effect": "Allow", "Resource": "arn:${AWS::Partition}:ec2:${AWS::Region}:*:instance/*", "Action": "ec2:CreateTags", "Condition": { "StringEquals": { - "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned", - "aws:RequestTag/karpenter.sh/managed-by": "${ClusterName}" + "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned" }, "StringLike": { - "aws:RequestTag/karpenter.sh/provisioner-name": "*" + "aws:ResourceTag/karpenter.sh/nodepool": "*" }, "ForAllValues:StringEquals": { "aws:TagKeys": [ - "karpenter.sh/provisioner-name", - "karpenter.sh/managed-by" + "karpenter.sh/nodeclaim", + "Name" ] } } @@ -143,7 +152,7 @@ Resources: "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned" }, "StringLike": { - "aws:ResourceTag/karpenter.sh/provisioner-name": "*" + "aws:ResourceTag/karpenter.sh/nodepool": "*" } } }, @@ -186,7 +195,6 @@ Resources: "Resource": "${KarpenterInterruptionQueue.Arn}", "Action": [ "sqs:DeleteMessage", - "sqs:GetQueueAttributes", "sqs:GetQueueUrl", "sqs:ReceiveMessage" ] @@ -202,6 +210,68 @@ Resources: } } }, + { + "Sid": "AllowScopedInstanceProfileCreationActions", + "Effect": "Allow", + "Resource": "*", + "Action": [ + "iam:CreateInstanceProfile" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/topology.kubernetes.io/region": "${AWS::Region}" + }, + "StringLike": { + "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass": "*" + } + } + }, + { + "Sid": "AllowScopedInstanceProfileTagActions", + "Effect": "Allow", + "Resource": "*", + "Action": [ + "iam:TagInstanceProfile" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:ResourceTag/topology.kubernetes.io/region": "${AWS::Region}", + "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:RequestTag/topology.kubernetes.io/region": "${AWS::Region}" + }, + "StringLike": { + "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass": "*", + "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass": "*" + } + } + }, + { + "Sid": "AllowScopedInstanceProfileActions", + "Effect": "Allow", + "Resource": "*", + "Action": [ + "iam:AddRoleToInstanceProfile", + "iam:RemoveRoleFromInstanceProfile", + "iam:DeleteInstanceProfile" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned", + "aws:ResourceTag/topology.kubernetes.io/region": "${AWS::Region}" + }, + "StringLike": { + "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass": "*" + } + } + }, + { + "Sid": "AllowInstanceProfileReadActions", + "Effect": "Allow", + "Resource": "*", + "Action": "iam:GetInstanceProfile" + }, { "Sid": "AllowAPIServerEndpointDiscovery", "Effect": "Allow", diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/grafana-values.yaml b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/grafana-values.yaml similarity index 83% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/grafana-values.yaml rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/grafana-values.yaml index 9294e9af9e52..67d28f71217c 100644 --- a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/grafana-values.yaml +++ b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/grafana-values.yaml @@ -22,6 +22,6 @@ 
dashboardProviders: dashboards: default: capacity-dashboard: - url: https://karpenter.sh/v0.31/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json + url: https://karpenter.sh/v0.35/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json performance-dashboard: - url: https://karpenter.sh/v0.31/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json + url: https://karpenter.sh/v0.35/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json similarity index 94% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json index 9d9cb6ad14a8..d474d01f4e16 100644 --- a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json +++ b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-capacity-dashboard.json @@ -484,7 +484,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum by ($distribution_filter)(\n karpenter_pods_state{arch=~\"$arch\", capacity_type=~\"$capacity_type\", instance_type=~\"$instance_type\", provisioner=~\"$provisioner\"}\n)", + "expr": "sum by ($distribution_filter)(\n karpenter_pods_state{arch=~\"$arch\", capacity_type=~\"$capacity_type\", instance_type=~\"$instance_type\", nodepool=~\"$nodepool\"}\n)", "legendFormat": "{{label_name}}", "range": true, "refId": "A" @@ -588,7 +588,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "karpenter_provisioner_usage{resource_type=\"cpu\"} / karpenter_provisioner_limit{resource_type=\"cpu\"}", + "expr": "karpenter_nodepool_usage{resource_type=\"cpu\"} / karpenter_nodepool_limit{resource_type=\"cpu\"}", "format": "table", "instant": true, "legendFormat": "CPU Limit Utilization", @@ -602,7 +602,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "count by (provisioner)(karpenter_nodes_allocatable{provisioner!=\"N/A\",resource_type=\"cpu\"}) # Selects a single resource type to get node count", + "expr": "count by (nodepool)(karpenter_nodes_allocatable{nodepool!=\"N/A\",resource_type=\"cpu\"}) # Selects a single resource type to get node count", "format": "table", "hide": false, "instant": true, @@ -616,7 +616,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "karpenter_provisioner_usage{resource_type=\"memory\"} / karpenter_provisioner_limit{resource_type=\"memory\"}", + "expr": "karpenter_nodepool_usage{resource_type=\"memory\"} / karpenter_nodepool_limit{resource_type=\"memory\"}", "format": "table", "hide": false, "instant": true, @@ -631,7 +631,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum by (provisioner)(karpenter_nodes_allocatable{provisioner!=\"N/A\",resource_type=\"cpu\"})", + "expr": "sum by (nodepool)(karpenter_nodes_allocatable{nodepool!=\"N/A\",resource_type=\"cpu\"})", "format": "table", "hide": false, "instant": true, @@ -645,7 +645,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum by (provisioner)(karpenter_nodes_allocatable{provisioner!=\"N/A\",resource_type=\"memory\"})", + "expr": "sum by (nodepool)(karpenter_nodes_allocatable{nodepool!=\"N/A\",resource_type=\"memory\"})", 
"format": "table", "hide": false, "instant": true, @@ -653,12 +653,12 @@ "refId": "Memory Capacity" } ], - "title": "Provisioner Summary", + "title": "Nodepool Summary", "transformations": [ { "id": "seriesToColumns", "options": { - "byField": "provisioner" + "byField": "nodepool" } }, { @@ -697,7 +697,7 @@ "instance 2": 12, "job 1": 9, "job 2": 13, - "provisioner": 0, + "nodepool": 0, "resource_type 1": 10, "resource_type 2": 14 }, @@ -714,7 +714,7 @@ "instance": "", "instance 1": "", "job": "", - "provisioner": "Provisioner" + "nodepool": "Nodepool" } } } @@ -804,7 +804,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "(count(karpenter_nodes_allocatable{arch=~\"$arch\",capacity_type=\"spot\",instance_type=~\"$instance_type\",provisioner=~\"$provisioner\",zone=~\"$zone\"}) or vector(0)) / count(karpenter_nodes_allocatable{arch=~\"$arch\",instance_type=~\"$instance_type\",provisioner=~\"$provisioner\",zone=~\"$zone\"})", + "expr": "(count(karpenter_nodes_allocatable{arch=~\"$arch\",capacity_type=\"spot\",instance_type=~\"$instance_type\",nodepool=~\"$nodepool\",zone=~\"$zone\"}) or vector(0)) / count(karpenter_nodes_allocatable{arch=~\"$arch\",instance_type=~\"$instance_type\",nodepool=~\"$nodepool\",zone=~\"$zone\"})", "legendFormat": "Percentage", "range": true, "refId": "A" @@ -946,7 +946,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "((karpenter_nodes_total_daemon_requests{resource_type=\"cpu\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",provisioner=~\"$provisioner\",zone=~\"$zone\"} or karpenter_nodes_allocatable*0) + \n(karpenter_nodes_total_pod_requests{resource_type=\"cpu\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",provisioner=~\"$provisioner\",zone=~\"$zone\"} or karpenter_nodes_allocatable*0)) / \nkarpenter_nodes_allocatable{resource_type=\"cpu\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",provisioner=~\"$provisioner\",zone=~\"$zone\"}", + "expr": "((karpenter_nodes_total_daemon_requests{resource_type=\"cpu\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",nodepool=~\"$nodepool\",zone=~\"$zone\"} or karpenter_nodes_allocatable*0) + \n(karpenter_nodes_total_pod_requests{resource_type=\"cpu\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",nodepool=~\"$nodepool\",zone=~\"$zone\"} or karpenter_nodes_allocatable*0)) / \nkarpenter_nodes_allocatable{resource_type=\"cpu\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",nodepool=~\"$nodepool\",zone=~\"$zone\"}", "format": "table", "hide": false, "instant": true, @@ -961,7 +961,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "((karpenter_nodes_total_daemon_requests{resource_type=\"memory\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",provisioner=~\"$provisioner\",zone=~\"$zone\"} or karpenter_nodes_allocatable*0) + \n(karpenter_nodes_total_pod_requests{resource_type=\"memory\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",provisioner=~\"$provisioner\",zone=~\"$zone\"} or karpenter_nodes_allocatable*0)) / \nkarpenter_nodes_allocatable{resource_type=\"memory\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",provisioner=~\"$provisioner\",zone=~\"$zone\"}", + "expr": 
"((karpenter_nodes_total_daemon_requests{resource_type=\"memory\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",nodepool=~\"$nodepool\",zone=~\"$zone\"} or karpenter_nodes_allocatable*0) + \n(karpenter_nodes_total_pod_requests{resource_type=\"memory\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",nodepool=~\"$nodepool\",zone=~\"$zone\"} or karpenter_nodes_allocatable*0)) / \nkarpenter_nodes_allocatable{resource_type=\"memory\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",nodepool=~\"$nodepool\",zone=~\"$zone\"}", "format": "table", "hide": false, "instant": true, @@ -976,7 +976,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "karpenter_nodes_total_daemon_requests{resource_type=\"pods\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",provisioner=~\"$provisioner\",zone=~\"$zone\"} + \nkarpenter_nodes_total_pod_requests{resource_type=\"pods\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",provisioner=~\"$provisioner\",zone=~\"$zone\"}", + "expr": "karpenter_nodes_total_daemon_requests{resource_type=\"pods\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",nodepool=~\"$nodepool\",zone=~\"$zone\"} + \nkarpenter_nodes_total_pod_requests{resource_type=\"pods\",arch=~\"$arch\",capacity_type=~\"$capacity_type\",instance_type=~\"$instance_type\",nodepool=~\"$nodepool\",zone=~\"$zone\"}", "format": "table", "hide": false, "instant": true, @@ -1091,9 +1091,9 @@ "os 1": true, "os 2": true, "os 3": true, - "provisioner 1": false, - "provisioner 2": true, - "provisioner 3": true, + "nodepool 1": false, + "nodepool 2": true, + "nodepool 3": true, "resource_type": true, "resource_type 1": true, "resource_type 2": true, @@ -1161,9 +1161,9 @@ "os 1": 23, "os 2": 41, "os 3": 61, - "provisioner 1": 2, - "provisioner 2": 42, - "provisioner 3": 62, + "nodepool 1": 2, + "nodepool 2": 42, + "nodepool 3": 62, "resource_type 1": 24, "resource_type 2": 43, "resource_type 3": 63, @@ -1190,7 +1190,7 @@ "instance_type": "Instance Type", "instance_type 1": "Instance Type", "node_name": "Node Name", - "provisioner 1": "Provisioner", + "nodepool 1": "Nodepool", "zone 1": "Zone" } } @@ -1237,14 +1237,14 @@ "type": "prometheus", "uid": "${datasource}" }, - "definition": "label_values(karpenter_nodes_allocatable, provisioner)", + "definition": "label_values(karpenter_nodes_allocatable, nodepool)", "hide": 0, "includeAll": true, "multi": true, - "name": "provisioner", + "name": "nodepool", "options": [], "query": { - "query": "label_values(karpenter_nodes_allocatable, provisioner)", + "query": "label_values(karpenter_nodes_allocatable, nodepool)", "refId": "StandardVariableQuery" }, "refresh": 2, @@ -1376,8 +1376,8 @@ { "current": { "selected": true, - "text": "provisioner", - "value": "provisioner" + "text": "nodepool", + "value": "nodepool" }, "hide": 0, "includeAll": false, @@ -1411,8 +1411,8 @@ }, { "selected": true, - "text": "provisioner", - "value": "provisioner" + "text": "nodepool", + "value": "nodepool" }, { "selected": false, @@ -1420,7 +1420,7 @@ "value": "zone" } ], - "query": "arch,capacity_type,instance_type,namespace,node,provisioner,zone", + "query": "arch,capacity_type,instance_type,namespace,node,nodepool,zone", "queryValue": "", "skipUrlSync": false, "type": "custom" diff --git 
a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/karpenter-controllers-allocation.json b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-controllers-allocation.json similarity index 100% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/karpenter-controllers-allocation.json rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-controllers-allocation.json diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/karpenter-controllers.json b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-controllers.json similarity index 100% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/karpenter-controllers.json rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-controllers.json diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json similarity index 100% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/karpenter-performance-dashboard.json diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/prometheus-values.yaml b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/prometheus-values.yaml similarity index 63% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/prometheus-values.yaml rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/prometheus-values.yaml index 4cd78495351b..588762a8c9dc 100644 --- a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/prometheus-values.yaml +++ b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/prometheus-values.yaml @@ -13,8 +13,10 @@ extraScrapeConfigs: | - role: endpoints namespaces: names: - - karpenter + - $KARPENTER_NAMESPACE relabel_configs: - - source_labels: [__meta_kubernetes_endpoint_port_name] - regex: http-metrics + - source_labels: + - __meta_kubernetes_endpoints_name + - __meta_kubernetes_endpoint_port_name action: keep + regex: karpenter;http-metrics diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step01-config.sh b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step01-config.sh similarity index 91% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step01-config.sh rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step01-config.sh index a3af512d02ac..bbaa418df349 100755 --- a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step01-config.sh +++ b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step01-config.sh @@ -2,4 +2,4 @@ export AWS_PARTITION="aws" # if you are not using standard partitions, you may n export CLUSTER_NAME="${USER}-karpenter-demo" export AWS_DEFAULT_REGION="us-west-2" export AWS_ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)" -export TEMPOUT=$(mktemp) +export TEMPOUT="$(mktemp)" diff --git 
a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh similarity index 80% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh index d5ca27944cf1..e957e364acd7 100755 --- a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh +++ b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster-fargate.sh @@ -5,13 +5,13 @@ kind: ClusterConfig metadata: name: ${CLUSTER_NAME} region: ${AWS_DEFAULT_REGION} - version: "1.24" + version: "${K8S_VERSION}" tags: karpenter.sh/discovery: ${CLUSTER_NAME} fargateProfiles: - name: karpenter selectors: - - namespace: karpenter + - namespace: "${KARPENTER_NAMESPACE}" iam: withOIDC: true EOF diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh similarity index 57% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh index 7e7d70ce77b0..cdad96eeca23 100755 --- a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh +++ b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step02-create-cluster.sh @@ -1,4 +1,4 @@ -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \ +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \ && aws cloudformation deploy \ --stack-name "Karpenter-${CLUSTER_NAME}" \ --template-file "${TEMPOUT}" \ @@ -12,20 +12,32 @@ kind: ClusterConfig metadata: name: ${CLUSTER_NAME} region: ${AWS_DEFAULT_REGION} - version: "1.27" + version: "${K8S_VERSION}" tags: karpenter.sh/discovery: ${CLUSTER_NAME} iam: withOIDC: true - serviceAccounts: - - metadata: - name: karpenter - namespace: karpenter + podIdentityAssociations: + - namespace: "${KARPENTER_NAMESPACE}" + serviceAccountName: karpenter roleName: ${CLUSTER_NAME}-karpenter - attachPolicyARNs: + permissionPolicyARNs: - arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME} - roleOnly: true + +## Optionally run on fargate or on k8s 1.23 +# Pod Identity is not available on fargate +# https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html +# iam: +# withOIDC: true +# serviceAccounts: +# - metadata: +# name: karpenter +# namespace: "${KARPENTER_NAMESPACE}" +# roleName: ${CLUSTER_NAME}-karpenter +# attachPolicyARNs: +# - arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME} +# roleOnly: true iamIdentityMappings: - arn: "arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/KarpenterNodeRole-${CLUSTER_NAME}" @@ -45,14 +57,17 @@ 
managedNodeGroups: minSize: 1 maxSize: 10 +addons: +- name: eks-pod-identity-agent + ## Optionally run on fargate # fargateProfiles: # - name: karpenter # selectors: -# - namespace: karpenter +# - namespace: "${KARPENTER_NAMESPACE}" EOF -export CLUSTER_ENDPOINT="$(aws eks describe-cluster --name ${CLUSTER_NAME} --query "cluster.endpoint" --output text)" +export CLUSTER_ENDPOINT="$(aws eks describe-cluster --name "${CLUSTER_NAME}" --query "cluster.endpoint" --output text)" export KARPENTER_IAM_ROLE_ARN="arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/${CLUSTER_NAME}-karpenter" -echo $CLUSTER_ENDPOINT $KARPENTER_IAM_ROLE_ARN +echo "${CLUSTER_ENDPOINT} ${KARPENTER_IAM_ROLE_ARN}" diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh similarity index 62% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh index b8e610f7bee6..54e826db269b 100755 --- a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh +++ b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step03-iam-cloud-formation.sh @@ -1,6 +1,6 @@ -TEMPOUT=$(mktemp) +TEMPOUT="$(mktemp)" -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \ +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > "${TEMPOUT}" \ && aws cloudformation deploy \ --stack-name "Karpenter-${CLUSTER_NAME}" \ --template-file "${TEMPOUT}" \ diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step04-grant-access.sh b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step04-grant-access.sh similarity index 100% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step04-grant-access.sh rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step04-grant-access.sh diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step05-controller-iam.sh b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step05-controller-iam.sh similarity index 79% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step05-controller-iam.sh rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step05-controller-iam.sh index 923317b0b93f..6975d31ea9e4 100755 --- a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step05-controller-iam.sh +++ b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step05-controller-iam.sh @@ -1,5 +1,5 @@ eksctl create iamserviceaccount \ - --cluster "${CLUSTER_NAME}" --name karpenter --namespace karpenter \ + --cluster "${CLUSTER_NAME}" --name karpenter --namespace "${KARPENTER_NAMESPACE}" \ --role-name "${CLUSTER_NAME}-karpenter" \ --attach-policy-arn 
"arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:policy/KarpenterControllerPolicy-${CLUSTER_NAME}" \ --role-only \ diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step06-add-spot-role.sh b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step06-add-spot-role.sh similarity index 100% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step06-add-spot-role.sh rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step06-add-spot-role.sh diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh similarity index 50% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh index 99fa927b12b6..aca3c191d684 100755 --- a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh +++ b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step08-apply-helm-chart.sh @@ -1,11 +1,9 @@ # Logout of helm registry to perform an unauthenticated pull against the public ECR helm registry logout public.ecr.aws -helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${KARPENTER_VERSION} --namespace karpenter --create-namespace \ - --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"=${KARPENTER_IAM_ROLE_ARN} \ - --set settings.aws.clusterName=${CLUSTER_NAME} \ - --set settings.aws.defaultInstanceProfile=KarpenterNodeInstanceProfile-${CLUSTER_NAME} \ - --set settings.aws.interruptionQueueName=${CLUSTER_NAME} \ +helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version "${KARPENTER_VERSION}" --namespace "${KARPENTER_NAMESPACE}" --create-namespace \ + --set "settings.clusterName=${CLUSTER_NAME}" \ + --set "settings.interruptionQueue=${CLUSTER_NAME}" \ --set controller.resources.requests.cpu=1 \ --set controller.resources.requests.memory=1Gi \ --set controller.resources.limits.cpu=1 \ diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh similarity index 62% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh index a539f7491e10..1107c2ec1d24 100755 --- a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh +++ b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step09-add-prometheus-grafana.sh @@ -4,8 +4,8 @@ helm repo update kubectl create namespace monitoring -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/prometheus-values.yaml | tee prometheus-values.yaml +curl -fsSL 
https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/prometheus-values.yaml | envsubst | tee prometheus-values.yaml helm install --namespace monitoring prometheus prometheus-community/prometheus --values prometheus-values.yaml -curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/grafana-values.yaml | tee grafana-values.yaml +curl -fsSL https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/grafana-values.yaml | tee grafana-values.yaml helm install --namespace monitoring grafana grafana-charts/grafana --values grafana-values.yaml diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step10-add-grafana-port-forward.sh b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step10-add-grafana-port-forward.sh similarity index 100% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step10-add-grafana-port-forward.sh rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step10-add-grafana-port-forward.sh diff --git a/website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step11-grafana-get-password.sh b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step11-grafana-get-password.sh similarity index 100% rename from website/content/en/v0.31/getting-started/getting-started-with-karpenter/scripts/step11-grafana-get-password.sh rename to website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step11-grafana-get-password.sh diff --git a/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh new file mode 100755 index 000000000000..8c518e9e74b8 --- /dev/null +++ b/website/content/en/v0.35/getting-started/getting-started-with-karpenter/scripts/step12-add-nodepool.sh @@ -0,0 +1,46 @@ +cat < ``` @@ -43,35 +44,31 @@ Now attach the required policies to the role {{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step03-node-policies.sh" language="bash" %}} -Attach the IAM role to an EC2 instance profile. - -{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step04-instance-profile.sh" language="bash" %}} - Now we need to create an IAM role that the Karpenter controller will use to provision new instances. The controller will be using [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) which requires an OIDC endpoint. If you have another option for using IAM credentials with workloads (e.g. [kube2iam](https://github.com/jtblin/kube2iam)) your steps will be different. -{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step05-controller-iam.sh" language="bash" %}} +{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step04-controller-iam.sh" language="bash" %}} ## Add tags to subnets and security groups We need to add tags to our nodegroup subnets so Karpenter will know which subnets to use. 
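For reference, the subnet-tagging script included below boils down to a loop along these lines (a sketch; the script shortcode that follows is authoritative):

```bash
# Tag the subnets of every existing node group so Karpenter's
# subnetSelectorTerms can discover them via karpenter.sh/discovery.
for NODEGROUP in $(aws eks list-nodegroups --cluster-name "${CLUSTER_NAME}" \
    --query 'nodegroups' --output text); do
  aws ec2 create-tags \
    --tags "Key=karpenter.sh/discovery,Value=${CLUSTER_NAME}" \
    --resources $(aws eks describe-nodegroup --cluster-name "${CLUSTER_NAME}" \
      --nodegroup-name "${NODEGROUP}" \
      --query 'nodegroup.subnets' --output text)
done
```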
-{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step06-tag-subnets.sh" language="bash" %}} +{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step05-tag-subnets.sh" language="bash" %}} Add tags to our security groups. This command only tags the security groups for the first nodegroup in the cluster. If you have multiple nodegroups or multiple security groups, you will need to decide which one Karpenter should use. -{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step07-tag-security-groups.sh" language="bash" %}} +{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step06-tag-security-groups.sh" language="bash" %}} ## Update aws-auth ConfigMap We need to allow nodes that are using the node IAM role we just created to join the cluster. To do that we have to modify the `aws-auth` ConfigMap in the cluster. -{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step08-edit-aws-auth.sh" language="bash" %}} +{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step07-edit-aws-auth.sh" language="bash" %}} You will need to add a section to the mapRoles that looks something like this. Replace the `${AWS_PARTITION}` variable with the account partition, the `${AWS_ACCOUNT_ID}` variable with your account ID, and the `${CLUSTER_NAME}` variable with the cluster name, but do not replace the `{{EC2PrivateDNSName}}`. @@ -95,12 +92,12 @@ One for your Karpenter node role and one for your existing node group. First set the Karpenter release you want to deploy. ```bash -export KARPENTER_VERSION=v0.31.4 +export KARPENTER_VERSION="0.35.0" ``` -We can now generate a full Karpenter deployment yaml from the helm chart. +We can now generate a full Karpenter deployment yaml from the Helm chart. -{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step09-generate-chart.sh" language="bash" %}} +{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step08-generate-chart.sh" language="bash" %}} Modify the following lines in the karpenter.yaml file. @@ -118,7 +115,7 @@ affinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - - key: karpenter.sh/provisioner-name + - key: karpenter.sh/nodepool operator: DoesNotExist - matchExpressions: - key: eks.amazonaws.com/nodegroup @@ -130,16 +127,15 @@ affinity: - topologyKey: "kubernetes.io/hostname" ``` -Now that our deployment is ready we can create the karpenter namespace, create the provisioner CRD, and then deploy the rest of the karpenter resources. +Now that our deployment is ready, we can create the `karpenter` namespace, create the NodePool CRD, and then deploy the rest of the Karpenter resources. -{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step10-deploy.sh" language="bash" %}} +{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step09-deploy.sh" language="bash" %}} -## Create default provisioner +## Create default NodePool -We need to create a default provisioner so Karpenter knows what types of nodes we want for unscheduled workloads. -You can refer to some of the [example provisioners](https://github.com/aws/karpenter/tree/v0.31.4/examples/provisioner) for specific needs. +We need to create a default NodePool so Karpenter knows what types of nodes we want for unscheduled workloads. +You can refer to some of the [example NodePools](https://github.com/aws/karpenter/tree/v0.35.0/examples/v1beta1) for specific needs. 
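For orientation, a minimal default NodePool plus its EC2NodeClass looks roughly like the sketch below. The requirements, limits, and role name are placeholders to adapt; the script that follows applies the guide's actual manifest:

```bash
# A sketch of a default NodePool and EC2NodeClass (v1beta1 APIs).
cat <<EOF | kubectl apply -f -
apiVersion: karpenter.sh/v1beta1
kind: NodePool
metadata:
  name: default
spec:
  template:
    spec:
      requirements:
        - key: karpenter.sh/capacity-type
          operator: In
          values: ["spot"]
      nodeClassRef:
        name: default
  limits:
    cpu: 1000
  disruption:
    consolidationPolicy: WhenUnderutilized
---
apiVersion: karpenter.k8s.aws/v1beta1
kind: EC2NodeClass
metadata:
  name: default
spec:
  amiFamily: AL2
  role: "KarpenterNodeRole-${CLUSTER_NAME}"  # assumes the node role created earlier
  subnetSelectorTerms:
    - tags:
        karpenter.sh/discovery: "${CLUSTER_NAME}"
  securityGroupSelectorTerms:
    - tags:
        karpenter.sh/discovery: "${CLUSTER_NAME}"
EOF
```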
You can refer to some of the [example NodePools](https://github.com/aws/karpenter/tree/v0.35.0/examples/v1beta1) for specific needs.

-{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step11-create-provisioner.sh" language="bash" %}}
+{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step10-create-nodepool.sh" language="bash" %}}

## Set nodeAffinity for critical workloads (optional)

@@ -170,7 +166,7 @@ affinity:

Now that karpenter is running we can disable the cluster autoscaler. To do that we will scale the number of replicas to zero.

-{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step12-scale-cas.sh" language="bash" %}}
+{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step11-scale-cas.sh" language="bash" %}}

To get rid of the instances that were added from the node group we can scale our nodegroup down to a minimum size to support Karpenter and other critical services.

@@ -178,11 +174,11 @@ To get rid of the instances that were added from the node group we can scale our

If you have a single multi-AZ node group, we suggest a minimum of 2 instances.

-{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step13-scale-single-ng.sh" language="bash" %}}
+{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step12-scale-single-ng.sh" language="bash" %}}

Or, if you have multiple single-AZ node groups, we suggest a minimum of 1 instance each.

-{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step13-scale-multiple-ng.sh" language="bash" %}}
+{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step12-scale-multiple-ng.sh" language="bash" %}}

{{% alert title="Note" color="warning" %}}
If you have a lot of nodes or workloads you may want to slowly scale down your node groups by a few instances at a time. It is recommended to watch the transition carefully for workloads that may not have enough replicas running or disruption budgets configured.
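A gradual scale-down might look like the following sketch. It is not one of the guide's numbered scripts; it assumes `CLUSTER_NAME` and `NODEGROUP` are set as in the earlier steps, and it steps toward the 2-instance minimum suggested above for a single multi-AZ node group.

```bash
# Sketch: shrink a managed node group one instance at a time, waiting for each
# update to finish so workloads have time to reschedule between steps.
TARGET=2
CURRENT=$(aws eks describe-nodegroup --cluster-name "${CLUSTER_NAME}" \
  --nodegroup-name "${NODEGROUP}" \
  --query 'nodegroup.scalingConfig.desiredSize' --output text)
while [ "${CURRENT}" -gt "${TARGET}" ]; do
  CURRENT=$((CURRENT - 1))
  aws eks update-nodegroup-config --cluster-name "${CLUSTER_NAME}" \
    --nodegroup-name "${NODEGROUP}" \
    --scaling-config "minSize=${TARGET},maxSize=${CURRENT},desiredSize=${CURRENT}"
  aws eks wait nodegroup-active --cluster-name "${CLUSTER_NAME}" \
    --nodegroup-name "${NODEGROUP}"
done
```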
diff --git a/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step01-env.sh b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step01-env.sh similarity index 82% rename from website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step01-env.sh rename to website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step01-env.sh index 20645685137b..f456eddc75ee 100644 --- a/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step01-env.sh +++ b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step01-env.sh @@ -1,6 +1,6 @@ AWS_PARTITION="aws" # if you are not using standard partitions, you may need to configure to aws-cn / aws-us-gov AWS_REGION="$(aws configure list | grep region | tr -s " " | cut -d" " -f3)" -OIDC_ENDPOINT="$(aws eks describe-cluster --name ${CLUSTER_NAME} \ +OIDC_ENDPOINT="$(aws eks describe-cluster --name "${CLUSTER_NAME}" \ --query "cluster.identity.oidc.issuer" --output text)" AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' \ --output text) diff --git a/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step02-node-iam.sh b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step02-node-iam.sh similarity index 100% rename from website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step02-node-iam.sh rename to website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step02-node-iam.sh diff --git a/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step03-node-policies.sh b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step03-node-policies.sh new file mode 100644 index 000000000000..fbc5455e541b --- /dev/null +++ b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step03-node-policies.sh @@ -0,0 +1,11 @@ +aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \ + --policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonEKSWorkerNodePolicy" + +aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \ + --policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonEKS_CNI_Policy" + +aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \ + --policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + +aws iam attach-role-policy --role-name "KarpenterNodeRole-${CLUSTER_NAME}" \ + --policy-arn "arn:${AWS_PARTITION}:iam::aws:policy/AmazonSSMManagedInstanceCore" diff --git a/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step04-controller-iam.sh b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step04-controller-iam.sh new file mode 100644 index 000000000000..cc3d7f929986 --- /dev/null +++ b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step04-controller-iam.sh @@ -0,0 +1,143 @@ +cat << EOF > controller-trust-policy.json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDC_ENDPOINT#*//}" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "${OIDC_ENDPOINT#*//}:aud": "sts.amazonaws.com", + "${OIDC_ENDPOINT#*//}:sub": "system:serviceaccount:${KARPENTER_NAMESPACE}:karpenter" + } + } + } + ] +} +EOF + +aws iam create-role --role-name "KarpenterControllerRole-${CLUSTER_NAME}" \ + --assume-role-policy-document file://controller-trust-policy.json + +cat << EOF > 
controller-policy.json +{ + "Statement": [ + { + "Action": [ + "ssm:GetParameter", + "ec2:DescribeImages", + "ec2:RunInstances", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeAvailabilityZones", + "ec2:DeleteLaunchTemplate", + "ec2:CreateTags", + "ec2:CreateLaunchTemplate", + "ec2:CreateFleet", + "ec2:DescribeSpotPriceHistory", + "pricing:GetProducts" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "Karpenter" + }, + { + "Action": "ec2:TerminateInstances", + "Condition": { + "StringLike": { + "ec2:ResourceTag/karpenter.sh/nodepool": "*" + } + }, + "Effect": "Allow", + "Resource": "*", + "Sid": "ConditionalEC2Termination" + }, + { + "Effect": "Allow", + "Action": "iam:PassRole", + "Resource": "arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/KarpenterNodeRole-${CLUSTER_NAME}", + "Sid": "PassNodeIAMRole" + }, + { + "Effect": "Allow", + "Action": "eks:DescribeCluster", + "Resource": "arn:${AWS_PARTITION}:eks:${AWS_REGION}:${AWS_ACCOUNT_ID}:cluster/${CLUSTER_NAME}", + "Sid": "EKSClusterEndpointLookup" + }, + { + "Sid": "AllowScopedInstanceProfileCreationActions", + "Effect": "Allow", + "Resource": "*", + "Action": [ + "iam:CreateInstanceProfile" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/kubernetes.io/cluster/${CLUSTER_NAME}": "owned", + "aws:RequestTag/topology.kubernetes.io/region": "${AWS_REGION}" + }, + "StringLike": { + "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass": "*" + } + } + }, + { + "Sid": "AllowScopedInstanceProfileTagActions", + "Effect": "Allow", + "Resource": "*", + "Action": [ + "iam:TagInstanceProfile" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/kubernetes.io/cluster/${CLUSTER_NAME}": "owned", + "aws:ResourceTag/topology.kubernetes.io/region": "${AWS_REGION}", + "aws:RequestTag/kubernetes.io/cluster/${CLUSTER_NAME}": "owned", + "aws:RequestTag/topology.kubernetes.io/region": "${AWS_REGION}" + }, + "StringLike": { + "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass": "*", + "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass": "*" + } + } + }, + { + "Sid": "AllowScopedInstanceProfileActions", + "Effect": "Allow", + "Resource": "*", + "Action": [ + "iam:AddRoleToInstanceProfile", + "iam:RemoveRoleFromInstanceProfile", + "iam:DeleteInstanceProfile" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/kubernetes.io/cluster/${CLUSTER_NAME}": "owned", + "aws:ResourceTag/topology.kubernetes.io/region": "${AWS_REGION}" + }, + "StringLike": { + "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass": "*" + } + } + }, + { + "Sid": "AllowInstanceProfileReadActions", + "Effect": "Allow", + "Resource": "*", + "Action": "iam:GetInstanceProfile" + } + ], + "Version": "2012-10-17" +} +EOF + +aws iam put-role-policy --role-name "KarpenterControllerRole-${CLUSTER_NAME}" \ + --policy-name "KarpenterControllerPolicy-${CLUSTER_NAME}" \ + --policy-document file://controller-policy.json diff --git a/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step05-tag-subnets.sh b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step05-tag-subnets.sh new file mode 100644 index 000000000000..47df188dc87d --- /dev/null +++ b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step05-tag-subnets.sh @@ -0,0 +1,6 @@ +for NODEGROUP in $(aws eks list-nodegroups --cluster-name "${CLUSTER_NAME}" --query 'nodegroups' --output text); do + aws ec2 create-tags \ + --tags 
"Key=karpenter.sh/discovery,Value=${CLUSTER_NAME}" \ + --resources "$(aws eks describe-nodegroup --cluster-name "${CLUSTER_NAME}" \ + --nodegroup-name "${NODEGROUP}" --query 'nodegroup.subnets' --output text )" +done diff --git a/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step06-tag-security-groups.sh b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step06-tag-security-groups.sh new file mode 100644 index 000000000000..c63bde3b78dc --- /dev/null +++ b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step06-tag-security-groups.sh @@ -0,0 +1,22 @@ +NODEGROUP=$(aws eks list-nodegroups --cluster-name "${CLUSTER_NAME}" \ + --query 'nodegroups[0]' --output text) + +LAUNCH_TEMPLATE=$(aws eks describe-nodegroup --cluster-name "${CLUSTER_NAME}" \ + --nodegroup-name "${NODEGROUP}" --query 'nodegroup.launchTemplate.{id:id,version:version}' \ + --output text | tr -s "\t" ",") + +# If your EKS setup is configured to use only Cluster security group, then please execute - + +SECURITY_GROUPS=$(aws eks describe-cluster \ + --name "${CLUSTER_NAME}" --query "cluster.resourcesVpcConfig.clusterSecurityGroupId" --output text) + +# If your setup uses the security groups in the Launch template of a managed node group, then : + +SECURITY_GROUPS="$(aws ec2 describe-launch-template-versions \ + --launch-template-id "${LAUNCH_TEMPLATE%,*}" --versions "${LAUNCH_TEMPLATE#*,}" \ + --query 'LaunchTemplateVersions[0].LaunchTemplateData.[NetworkInterfaces[0].Groups||SecurityGroupIds]' \ + --output text)" + +aws ec2 create-tags \ + --tags "Key=karpenter.sh/discovery,Value=${CLUSTER_NAME}" \ + --resources "${SECURITY_GROUPS}" diff --git a/website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step08-edit-aws-auth.sh b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step07-edit-aws-auth.sh similarity index 100% rename from website/content/en/v0.31/getting-started/migrating-from-cas/scripts/step08-edit-aws-auth.sh rename to website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step07-edit-aws-auth.sh diff --git a/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step08-generate-chart.sh b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step08-generate-chart.sh new file mode 100644 index 000000000000..f81ac0b90cc8 --- /dev/null +++ b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step08-generate-chart.sh @@ -0,0 +1,7 @@ +helm template karpenter oci://public.ecr.aws/karpenter/karpenter --version "${KARPENTER_VERSION}" --namespace "${KARPENTER_NAMESPACE}" \ + --set "settings.clusterName=${CLUSTER_NAME}" \ + --set "serviceAccount.annotations.eks\.amazonaws\.com/role-arn=arn:${AWS_PARTITION}:iam::${AWS_ACCOUNT_ID}:role/KarpenterControllerRole-${CLUSTER_NAME}" \ + --set controller.resources.requests.cpu=1 \ + --set controller.resources.requests.memory=1Gi \ + --set controller.resources.limits.cpu=1 \ + --set controller.resources.limits.memory=1Gi > karpenter.yaml diff --git a/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step09-deploy.sh b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step09-deploy.sh new file mode 100644 index 000000000000..e46742fd22ea --- /dev/null +++ b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step09-deploy.sh @@ -0,0 +1,8 @@ +kubectl create namespace "${KARPENTER_NAMESPACE}" || true +kubectl create -f \ + 
"https://raw.githubusercontent.com/aws/karpenter-provider-aws/v${KARPENTER_VERSION}/pkg/apis/crds/karpenter.sh_nodepools.yaml" +kubectl create -f \ + "https://raw.githubusercontent.com/aws/karpenter-provider-aws/v${KARPENTER_VERSION}/pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml" +kubectl create -f \ + "https://raw.githubusercontent.com/aws/karpenter-provider-aws/v${KARPENTER_VERSION}/pkg/apis/crds/karpenter.sh_nodeclaims.yaml" +kubectl apply -f karpenter.yaml diff --git a/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step10-create-nodepool.sh b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step10-create-nodepool.sh new file mode 100644 index 000000000000..8c518e9e74b8 --- /dev/null +++ b/website/content/en/v0.35/getting-started/migrating-from-cas/scripts/step10-create-nodepool.sh @@ -0,0 +1,46 @@ +cat < + Reference documentation for Karpenter +--- \ No newline at end of file diff --git a/website/content/en/v0.35/reference/cloudformation.md b/website/content/en/v0.35/reference/cloudformation.md new file mode 100644 index 000000000000..791008d60c5e --- /dev/null +++ b/website/content/en/v0.35/reference/cloudformation.md @@ -0,0 +1,614 @@ +--- +title: "CloudFormation" +linkTitle: "CloudFormation" +weight: 5 +description: > + A description of the Getting Started CloudFormation file and permissions +--- +The [Getting Started with Karpenter]({{< relref "../getting-started/getting-started-with-karpenter" >}}) guide uses CloudFormation to bootstrap the cluster to enable Karpenter to create and manage nodes, as well as to allow Karpenter to respond to interruption events. +This document describes the `cloudformation.yaml` file used in that guide. +These descriptions should allow you to understand: + +* What Karpenter is authorized to do with your EKS cluster and AWS resources when using the `cloudformation.yaml` file +* What permissions you need to set up if you are adding Karpenter to an existing cluster + +## Overview + +To download a particular version of `cloudformation.yaml`, set the version and use `curl` to pull the file to your local system: + +```bash +export KARPENTER_VERSION="0.35.0" +curl https://raw.githubusercontent.com/aws/karpenter-provider-aws/v"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > cloudformation.yaml +``` + +Following some header information, the rest of the `cloudformation.yaml` file describes the resources that CloudFormation deploys. +The sections of that file can be grouped together under the following general headings: + +* [**Node Authorization**]({{< relref "#node-authorization" >}}): Creates a NodeInstanceProfile, attaches a NodeRole to it, and connects it to an IAM Identity Mapping used to authorize nodes to the cluster. This defines the permissions each node managed by Karpenter has to access EC2 and other AWS resources. This doesn't actually create the IAM Identity Mapping. That part is orchestrated by `eksctl` in the Getting Started guide. +* [**Controller Authorization**]({{< relref "#controller-authorization" >}}): Creates the `KarpenterControllerPolicy` that is attached to the service account. +Again, the actual service account creation (`karpenter`), that is combined with the `KarpenterControllerPolicy`, is orchestrated by `eksctl` in the Getting Started guide. 
+* [**Interruption Handling**]({{< relref "#interruption-handling" >}}): Allows the Karpenter controller to see and respond to interruptions that occur with the nodes that Karpenter is managing. See the [Interruption]({{< relref "../concepts/disruption#interruption" >}}) section of the Disruption page for details.
+
+A lot of the object naming that is done by `cloudformation.yaml` is based on the following:
+
+* Cluster name: With a username of `bob` the Getting Started Guide would name your cluster `bob-karpenter-demo`.
+That name would then be appended to any name below where `${ClusterName}` is included.
+
+* Partition: Any time an ARN is used, it includes the [partition name](https://docs.aws.amazon.com/whitepapers/latest/aws-fault-isolation-boundaries/partitions.html) to identify where the object is found. In most cases, that partition name is `aws`. However, it could also be `aws-cn` (for China Regions) or `aws-us-gov` (for AWS GovCloud US Regions).
+
+## Node Authorization
+
+The following sections of the `cloudformation.yaml` file set up IAM permissions for Kubernetes nodes created by Karpenter.
+In particular, this involves setting up a node role that can be attached and passed to instance profiles that Karpenter generates at runtime:
+
+* KarpenterNodeRole
+
+### KarpenterNodeRole
+
+This section of the template defines the IAM role attached to generated instance profiles.
+Given a cluster name of `bob-karpenter-demo`, this role would end up being named `KarpenterNodeRole-bob-karpenter-demo`.
+
+```yaml
+KarpenterNodeRole:
+  Type: "AWS::IAM::Role"
+  Properties:
+    RoleName: !Sub "KarpenterNodeRole-${ClusterName}"
+    Path: /
+    AssumeRolePolicyDocument:
+      Version: "2012-10-17"
+      Statement:
+        - Effect: Allow
+          Principal:
+            Service:
+              !Sub "ec2.${AWS::URLSuffix}"
+          Action:
+            - "sts:AssumeRole"
+    ManagedPolicyArns:
+      - !Sub "arn:${AWS::Partition}:iam::aws:policy/AmazonEKS_CNI_Policy"
+      - !Sub "arn:${AWS::Partition}:iam::aws:policy/AmazonEKSWorkerNodePolicy"
+      - !Sub "arn:${AWS::Partition}:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
+      - !Sub "arn:${AWS::Partition}:iam::aws:policy/AmazonSSMManagedInstanceCore"
+```
+
+The role created here includes several AWS managed policies, which are designed to provide permissions for specific uses needed by the nodes to work with EC2 and other AWS resources. These include:
+
+* [AmazonEKS_CNI_Policy](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonEKS_CNI_Policy.html): Provides the permissions that the Amazon VPC CNI Plugin needs to configure EKS worker nodes.
+* [AmazonEKSWorkerNodePolicy](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonEKSWorkerNodePolicy.html): Lets Amazon EKS worker nodes connect to EKS Clusters.
+* [AmazonEC2ContainerRegistryReadOnly](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonEC2ContainerRegistryReadOnly.html): Allows read-only access to repositories in the Amazon EC2 Container Registry.
+* [AmazonSSMManagedInstanceCore](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonSSMManagedInstanceCore.html): Adds AWS Systems Manager service core functions for Amazon EC2.
+
+If you were to use a node role from an existing cluster, you could skip this provisioning step and pass this node role to any EC2NodeClasses that you create. Additionally, you would ensure that the [Controller Policy]({{< relref "#controllerpolicy" >}}) has `iam:PassRole` permission to the role attached to the generated instance profiles.
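+If you do reuse an existing role, a quick sanity check is to confirm it carries the same four managed policies. This is a sketch rather than part of the template; it assumes `CLUSTER_NAME` is exported as in the Getting Started guide, and the role name is a placeholder for your own:
+
+```bash
+# Sketch: list the managed policies attached to an existing node role and
+# compare them against the four policies the template attaches.
+ROLE_NAME="KarpenterNodeRole-${CLUSTER_NAME}"   # substitute your existing node role name
+aws iam list-attached-role-policies --role-name "${ROLE_NAME}" \
+  --query 'AttachedPolicies[].PolicyName' --output text
+# Expect: AmazonEKS_CNI_Policy, AmazonEKSWorkerNodePolicy,
+#         AmazonEC2ContainerRegistryReadOnly, AmazonSSMManagedInstanceCore
+```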
+
+## Controller Authorization
+
+This section sets the AWS permissions for the Karpenter Controller. When used in the Getting Started guide, `eksctl` uses these permissions to create a service account (`karpenter`) that is combined with the KarpenterControllerPolicy.
+
+The resources defined in this section are associated with:
+
+* KarpenterControllerPolicy
+
+Because the scope of the KarpenterControllerPolicy is an AWS region, the cluster's AWS region is included in the `AllowScopedEC2InstanceAccessActions`.
+
+### KarpenterControllerPolicy
+
+A `KarpenterControllerPolicy` object sets the name of the policy, then defines a set of resources and actions allowed for those resources.
+For our example, the KarpenterControllerPolicy would be named: `KarpenterControllerPolicy-bob-karpenter-demo`.
+
+```yaml
+KarpenterControllerPolicy:
+  Type: AWS::IAM::ManagedPolicy
+  Properties:
+    ManagedPolicyName: !Sub "KarpenterControllerPolicy-${ClusterName}"
+    # The PolicyDocument must be in JSON string format because we use a StringEquals condition that uses an interpolated
+    # value in one of its key parameters which isn't natively supported by CloudFormation
+    PolicyDocument: !Sub |
+      {
+        "Version": "2012-10-17",
+        "Statement": [
+```
+
+Someone wanting to add Karpenter to an existing cluster, instead of using `cloudformation.yaml`, would need to create the IAM policy directly and assign that policy to the role leveraged by the service account using IRSA.
+
+#### AllowScopedEC2InstanceAccessActions
+
+The AllowScopedEC2InstanceAccessActions statement ID (Sid) identifies a set of EC2 resources that are allowed to be accessed with
+[RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) and [CreateFleet](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet.html) actions.
+For `RunInstances` and `CreateFleet` actions, the Karpenter controller can read (but not create) `image`, `snapshot`, `security-group`, `subnet` and `launch-template` EC2 resources, scoped for the particular AWS partition and region.
+
+```json
+{
+  "Sid": "AllowScopedEC2InstanceAccessActions",
+  "Effect": "Allow",
+  "Resource": [
+    "arn:${AWS::Partition}:ec2:${AWS::Region}::image/*",
+    "arn:${AWS::Partition}:ec2:${AWS::Region}::snapshot/*",
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:security-group/*",
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:subnet/*"
+  ],
+  "Action": [
+    "ec2:RunInstances",
+    "ec2:CreateFleet"
+  ]
+}
+```
+
+#### AllowScopedEC2LaunchTemplateAccessActions
+
+The AllowScopedEC2LaunchTemplateAccessActions statement ID (Sid) identifies launch templates that are allowed to be accessed with
+[RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) and [CreateFleet](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet.html) actions.
+For `RunInstances` and `CreateFleet` actions, the Karpenter controller can read (but not create) `launch-template` EC2 resources that have the `kubernetes.io/cluster/${ClusterName}` tag set to `owned` and a `karpenter.sh/nodepool` tag, scoped for the particular AWS partition and region. This ensures that an instance launch can't access launch templates that weren't provisioned by Karpenter.
+
+```json
+{
+  "Sid": "AllowScopedEC2LaunchTemplateAccessActions",
+  "Effect": "Allow",
+  "Resource": "arn:${AWS::Partition}:ec2:${AWS::Region}:*:launch-template/*",
+  "Action": [
+    "ec2:RunInstances",
+    "ec2:CreateFleet"
+  ],
+  "Condition": {
+    "StringEquals": {
+      "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned"
+    },
+    "StringLike": {
+      "aws:ResourceTag/karpenter.sh/nodepool": "*"
+    }
+  }
+}
+```
+
+#### AllowScopedEC2InstanceActionsWithTags
+
+The AllowScopedEC2InstanceActionsWithTags Sid allows the
+[RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html), [CreateFleet](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet.html), and [CreateLaunchTemplate](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateLaunchTemplate.html)
+actions requested by the Karpenter controller to create all `fleet`, `instance`, `volume`, `network-interface`, `launch-template` or `spot-instances-request` EC2 resources (for the partition and region), and requires that the `kubernetes.io/cluster/${ClusterName}` tag be set to `owned` and a `karpenter.sh/nodepool` tag be set to any value. This ensures that Karpenter is only allowed to create instances for a single EKS cluster.
+
+```json
+{
+  "Sid": "AllowScopedEC2InstanceActionsWithTags",
+  "Effect": "Allow",
+  "Resource": [
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:fleet/*",
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:instance/*",
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:volume/*",
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:network-interface/*",
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:launch-template/*",
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:spot-instances-request/*"
+  ],
+  "Action": [
+    "ec2:RunInstances",
+    "ec2:CreateFleet",
+    "ec2:CreateLaunchTemplate"
+  ],
+  "Condition": {
+    "StringEquals": {
+      "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned"
+    },
+    "StringLike": {
+      "aws:RequestTag/karpenter.sh/nodepool": "*"
+    }
+  }
+}
+```
+
+#### AllowScopedResourceCreationTagging
+
+The AllowScopedResourceCreationTagging Sid allows EC2 [CreateTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html)
+actions on `fleet`, `instance`, `volume`, `network-interface`, `launch-template` and `spot-instances-request` resources while making `RunInstances`, `CreateFleet`, or `CreateLaunchTemplate` calls. Additionally, this ensures that resources can't be tagged arbitrarily by Karpenter after they are created.
+
+```json
+{
+  "Sid": "AllowScopedResourceCreationTagging",
+  "Effect": "Allow",
+  "Resource": [
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:fleet/*",
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:instance/*",
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:volume/*",
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:network-interface/*",
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:launch-template/*",
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:spot-instances-request/*"
+  ],
+  "Action": "ec2:CreateTags",
+  "Condition": {
+    "StringEquals": {
+      "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned",
+      "ec2:CreateAction": [
+        "RunInstances",
+        "CreateFleet",
+        "CreateLaunchTemplate"
+      ]
+    },
+    "StringLike": {
+      "aws:RequestTag/karpenter.sh/nodepool": "*"
+    }
+  }
+}
+```
+
+#### AllowScopedResourceTagging
+
+The AllowScopedResourceTagging Sid allows EC2 [CreateTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html) actions on all instances created by Karpenter after their creation. It enforces that Karpenter is only able to update the tags on cluster instances it is operating on through the `kubernetes.io/cluster/${ClusterName}` and `karpenter.sh/nodepool` tags.
+```json
+{
+  "Sid": "AllowScopedResourceTagging",
+  "Effect": "Allow",
+  "Resource": "arn:${AWS::Partition}:ec2:${AWS::Region}:*:instance/*",
+  "Action": "ec2:CreateTags",
+  "Condition": {
+    "StringEquals": {
+      "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned"
+    },
+    "StringLike": {
+      "aws:ResourceTag/karpenter.sh/nodepool": "*"
+    },
+    "ForAllValues:StringEquals": {
+      "aws:TagKeys": [
+        "karpenter.sh/nodeclaim",
+        "Name"
+      ]
+    }
+  }
+}
+```
+
+#### AllowScopedDeletion
+
+The AllowScopedDeletion Sid allows [TerminateInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TerminateInstances.html) and [DeleteLaunchTemplate](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteLaunchTemplate.html) actions to delete instance and launch-template resources, provided that `karpenter.sh/nodepool` and `kubernetes.io/cluster/${ClusterName}` tags are set. These tags must be present on all resources that Karpenter is going to delete. This ensures that Karpenter can only delete instances and launch templates that are associated with it.
+
+```json
+{
+  "Sid": "AllowScopedDeletion",
+  "Effect": "Allow",
+  "Resource": [
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:instance/*",
+    "arn:${AWS::Partition}:ec2:${AWS::Region}:*:launch-template/*"
+  ],
+  "Action": [
+    "ec2:TerminateInstances",
+    "ec2:DeleteLaunchTemplate"
+  ],
+  "Condition": {
+    "StringEquals": {
+      "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned"
+    },
+    "StringLike": {
+      "aws:ResourceTag/karpenter.sh/nodepool": "*"
+    }
+  }
+}
+```
+
+#### AllowRegionalReadActions
+
+The AllowRegionalReadActions Sid allows [DescribeAvailabilityZones](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html), [DescribeImages](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html), [DescribeInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html), [DescribeInstanceTypeOfferings](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstanceTypeOfferings.html), [DescribeInstanceTypes](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstanceTypes.html), [DescribeLaunchTemplates](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeLaunchTemplates.html), [DescribeSecurityGroups](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html), [DescribeSpotPriceHistory](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSpotPriceHistory.html), and [DescribeSubnets](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) actions for the current AWS region.
+This allows the Karpenter controller to do any of those read-only actions across all related resources for that AWS region.
+
+```json
+{
+  "Sid": "AllowRegionalReadActions",
+  "Effect": "Allow",
+  "Resource": "*",
+  "Action": [
+    "ec2:DescribeAvailabilityZones",
+    "ec2:DescribeImages",
+    "ec2:DescribeInstances",
+    "ec2:DescribeInstanceTypeOfferings",
+    "ec2:DescribeInstanceTypes",
+    "ec2:DescribeLaunchTemplates",
+    "ec2:DescribeSecurityGroups",
+    "ec2:DescribeSpotPriceHistory",
+    "ec2:DescribeSubnets"
+  ],
+  "Condition": {
+    "StringEquals": {
+      "aws:RequestedRegion": "${AWS::Region}"
+    }
+  }
+}
+```
+
+#### AllowSSMReadActions
+
+The AllowSSMReadActions Sid allows the Karpenter controller to read SSM parameters (`ssm:GetParameter`) from the current region for SSM parameters generated by AWS services.
+
+**NOTE**: If potentially sensitive information is stored in SSM parameters, you could consider restricting access to these parameters further.
+```json
+{
+  "Sid": "AllowSSMReadActions",
+  "Effect": "Allow",
+  "Resource": "arn:${AWS::Partition}:ssm:${AWS::Region}::parameter/aws/service/*",
+  "Action": "ssm:GetParameter"
+}
+```
+
+#### AllowPricingReadActions
+
+Because pricing information does not exist in every region at the moment, the AllowPricingReadActions Sid allows the Karpenter controller to get product pricing information (`pricing:GetProducts`) for all related resources across all regions.
+
+```json
+{
+  "Sid": "AllowPricingReadActions",
+  "Effect": "Allow",
+  "Resource": "*",
+  "Action": "pricing:GetProducts"
+}
+```
+
+#### AllowInterruptionQueueActions
+
+Karpenter supports interruption queues, which you can create as described in the [Interruption]({{< relref "../concepts/disruption#interruption" >}}) section of the Disruption page.
+This section of the cloudformation.yaml template can give Karpenter permission to access those queues by specifying the resource ARN.
+For the interruption queue you created (`${KarpenterInterruptionQueue.Arn}`), the AllowInterruptionQueueActions Sid lets the Karpenter controller have permission to delete messages ([DeleteMessage](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_DeleteMessage.html)), get the queue URL ([GetQueueUrl](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_GetQueueUrl.html)), and receive messages ([ReceiveMessage](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html)).
+
+```json
+{
+  "Sid": "AllowInterruptionQueueActions",
+  "Effect": "Allow",
+  "Resource": "${KarpenterInterruptionQueue.Arn}",
+  "Action": [
+    "sqs:DeleteMessage",
+    "sqs:GetQueueUrl",
+    "sqs:ReceiveMessage"
+  ]
+}
+```
+
+#### AllowPassingInstanceRole
+
+The AllowPassingInstanceRole Sid gives the Karpenter controller permission to pass (`iam:PassRole`) the node role (`KarpenterNodeRole-${ClusterName}`) to generated instance profiles.
+This gives EC2 explicit permission to use the `KarpenterNodeRole-${ClusterName}` when assigning permissions to generated instance profiles while launching nodes.
+
+```json
+{
+  "Sid": "AllowPassingInstanceRole",
+  "Effect": "Allow",
+  "Resource": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/KarpenterNodeRole-${ClusterName}",
+  "Action": "iam:PassRole",
+  "Condition": {
+    "StringEquals": {
+      "iam:PassedToService": "ec2.amazonaws.com"
+    }
+  }
+}
+```
+
+#### AllowScopedInstanceProfileCreationActions
+
+The AllowScopedInstanceProfileCreationActions Sid gives the Karpenter controller permission to create a new instance profile with [`iam:CreateInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateInstanceProfile.html),
+provided that the request is made to a cluster with `kubernetes.io/cluster/${ClusterName}` set to `owned` and is made in the current region.
+Also, `karpenter.k8s.aws/ec2nodeclass` must be set to some value. This ensures that Karpenter can generate instance profiles on your behalf based on roles specified in your `EC2NodeClasses` that you use to configure Karpenter.
+
+```json
+{
+  "Sid": "AllowScopedInstanceProfileCreationActions",
+  "Effect": "Allow",
+  "Resource": "*",
+  "Action": [
+    "iam:CreateInstanceProfile"
+  ],
+  "Condition": {
+    "StringEquals": {
+      "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned",
+      "aws:RequestTag/topology.kubernetes.io/region": "${AWS::Region}"
+    },
+    "StringLike": {
+      "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass": "*"
+    }
+  }
+}
+```
+
+#### AllowScopedInstanceProfileTagActions
+
+The AllowScopedInstanceProfileTagActions Sid gives the Karpenter controller permission to tag an instance profile with [`iam:TagInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_TagInstanceProfile.html), based on the values shown below.
+Also, `karpenter.k8s.aws/ec2nodeclass` must be set to some value. This ensures that Karpenter is only able to act on instance profiles that it provisions for this cluster.
+
+```json
+{
+  "Sid": "AllowScopedInstanceProfileTagActions",
+  "Effect": "Allow",
+  "Resource": "*",
+  "Action": [
+    "iam:TagInstanceProfile"
+  ],
+  "Condition": {
+    "StringEquals": {
+      "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned",
+      "aws:ResourceTag/topology.kubernetes.io/region": "${AWS::Region}",
+      "aws:RequestTag/kubernetes.io/cluster/${ClusterName}": "owned",
+      "aws:RequestTag/topology.kubernetes.io/region": "${AWS::Region}"
+    },
+    "StringLike": {
+      "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass": "*",
+      "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass": "*"
+    }
+  }
+}
+```
+
+#### AllowScopedInstanceProfileActions
+
+The AllowScopedInstanceProfileActions Sid gives the Karpenter controller permission to perform [`iam:AddRoleToInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_AddRoleToInstanceProfile.html), [`iam:RemoveRoleFromInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_RemoveRoleFromInstanceProfile.html), and [`iam:DeleteInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_DeleteInstanceProfile.html) actions,
+provided that the request is made to a cluster with `kubernetes.io/cluster/${ClusterName}` set to `owned` and is made in the current region.
+Also, `karpenter.k8s.aws/ec2nodeclass` must be set to some value. This permission is further enforced by the `iam:PassRole` permission. If Karpenter attempts to add a role to an instance profile that it doesn't have `iam:PassRole` permission on, that call will fail.
Therefore, if you configure Karpenter to use a new role through the `EC2NodeClass`, ensure that you also specify that role within your `iam:PassRole` permission.
+
+```json
+{
+  "Sid": "AllowScopedInstanceProfileActions",
+  "Effect": "Allow",
+  "Resource": "*",
+  "Action": [
+    "iam:AddRoleToInstanceProfile",
+    "iam:RemoveRoleFromInstanceProfile",
+    "iam:DeleteInstanceProfile"
+  ],
+  "Condition": {
+    "StringEquals": {
+      "aws:ResourceTag/kubernetes.io/cluster/${ClusterName}": "owned",
+      "aws:ResourceTag/topology.kubernetes.io/region": "${AWS::Region}"
+    },
+    "StringLike": {
+      "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass": "*"
+    }
+  }
+}
+```
+
+#### AllowInstanceProfileReadActions
+
+The AllowInstanceProfileReadActions Sid gives the Karpenter controller permission to perform [`iam:GetInstanceProfile`](https://docs.aws.amazon.com/IAM/latest/APIReference/API_GetInstanceProfile.html) actions to retrieve information about a specified instance profile, including understanding if an instance profile has been provisioned for an `EC2NodeClass` or needs to be re-provisioned.
+
+```json
+{
+  "Sid": "AllowInstanceProfileReadActions",
+  "Effect": "Allow",
+  "Resource": "*",
+  "Action": "iam:GetInstanceProfile"
+}
+```
+
+#### AllowAPIServerEndpointDiscovery
+
+You can optionally allow the Karpenter controller to discover the Kubernetes cluster's external API endpoint to enable EC2 nodes to successfully join the EKS cluster.
+
+> **Note**: If you are not using an EKS control plane, you will have to specify this endpoint explicitly. See the description of the `aws.clusterEndpoint` setting in the [ConfigMap](./settings/#configmap) documentation for details.
+
+The AllowAPIServerEndpointDiscovery Sid allows the Karpenter controller to get that information (`eks:DescribeCluster`) for the cluster (`cluster/${ClusterName}`).
+```json
+{
+  "Sid": "AllowAPIServerEndpointDiscovery",
+  "Effect": "Allow",
+  "Resource": "arn:${AWS::Partition}:eks:${AWS::Region}:${AWS::AccountId}:cluster/${ClusterName}",
+  "Action": "eks:DescribeCluster"
+}
+```
+
+## Interruption Handling
+
+Settings in this section allow the Karpenter controller to stand up an interruption queue to receive notification messages from other AWS services about the health and status of instances. For example, this interruption queue allows Karpenter to be aware of spot instance interruptions that are sent 2 minutes before spot instances are reclaimed by EC2. Adding this queue allows Karpenter to be proactive in migrating workloads to new nodes.
+See the [Interruption]({{< relref "../concepts/disruption#interruption" >}}) section of the Disruption page for details.
+
+Defining the `KarpenterInterruptionQueuePolicy` allows Karpenter to see and respond to the following:
+
+* AWS health events
+* Spot interruptions
+* Spot rebalance recommendations
+* Instance state changes
+
+The resources defined in this section include:
+
+* KarpenterInterruptionQueue
+* KarpenterInterruptionQueuePolicy
+* ScheduledChangeRule
+* SpotInterruptionRule
+* RebalanceRule
+* InstanceStateChangeRule
+
+### KarpenterInterruptionQueue
+
+The [AWS::SQS::Queue](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sqs-queue.html) resource is used to create an Amazon SQS standard queue.
+Properties of that resource set the `QueueName` to the name of your cluster, set the time for which SQS retains each message (`MessageRetentionPeriod`) to 300 seconds, and enable server-side encryption using SQS-owned encryption keys by setting `SqsManagedSseEnabled` to `true`.
+See [SetQueueAttributes](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SetQueueAttributes.html) for descriptions of some of these attributes.
+
+```yaml
+KarpenterInterruptionQueue:
+  Type: AWS::SQS::Queue
+  Properties:
+    QueueName: !Sub "${ClusterName}"
+    MessageRetentionPeriod: 300
+    SqsManagedSseEnabled: true
+```
+
+### KarpenterInterruptionQueuePolicy
+
+The Karpenter interruption queue policy is created to allow the AWS services from which we want to receive instance notifications to push notification messages to the queue.
+The [AWS::SQS::QueuePolicy](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sqs-queuepolicy.html) resource here applies `EC2InterruptionPolicy` to the `KarpenterInterruptionQueue`. The policy allows [sqs:SendMessage](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html) actions from the `events.amazonaws.com` and `sqs.amazonaws.com` service principals. The queue's ARN is referenced with the `GetAtt` function (`KarpenterInterruptionQueue.Arn`).
+
+```yaml
+KarpenterInterruptionQueuePolicy:
+  Type: AWS::SQS::QueuePolicy
+  Properties:
+    Queues:
+      - !Ref KarpenterInterruptionQueue
+    PolicyDocument:
+      Id: EC2InterruptionPolicy
+      Statement:
+        - Effect: Allow
+          Principal:
+            Service:
+              - events.amazonaws.com
+              - sqs.amazonaws.com
+          Action: sqs:SendMessage
+          Resource: !GetAtt KarpenterInterruptionQueue.Arn
+```
+
+### Rules
+
+This section allows Karpenter to gather [AWS Health Events](https://docs.aws.amazon.com/health/latest/ug/cloudwatch-events-health.html#about-public-events) and direct them to a queue where they can be consumed by Karpenter.
+These rules include:
+
+* ScheduledChangeRule: The [AWS::Events::Rule](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-rule.html) creates a rule where the [EventPattern](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-patterns.html) is set to send events from the `aws.health` source to `KarpenterInterruptionQueue`.
+
+  ```yaml
+  ScheduledChangeRule:
+    Type: 'AWS::Events::Rule'
+    Properties:
+      EventPattern:
+        source:
+          - aws.health
+        detail-type:
+          - AWS Health Event
+      Targets:
+        - Id: KarpenterInterruptionQueueTarget
+          Arn: !GetAtt KarpenterInterruptionQueue.Arn
+  ```
+
+* SpotInterruptionRule: An EC2 Spot Instance Interruption warning tells you that AWS is about to reclaim a Spot instance you are using. This rule allows Karpenter to gather [EC2 Spot Instance Interruption Warning](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html) events and direct them to a queue where they can be consumed by Karpenter. In particular, the [AWS::Events::Rule](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-rule.html) here creates a rule where the [EventPattern](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-patterns.html) is set to send events from the `aws.ec2` source to `KarpenterInterruptionQueue`.
+
+  ```yaml
+  SpotInterruptionRule:
+    Type: 'AWS::Events::Rule'
+    Properties:
+      EventPattern:
+        source:
+          - aws.ec2
+        detail-type:
+          - EC2 Spot Instance Interruption Warning
+      Targets:
+        - Id: KarpenterInterruptionQueueTarget
+          Arn: !GetAtt KarpenterInterruptionQueue.Arn
+  ```
+
+* RebalanceRule: An EC2 Instance Rebalance Recommendation signal tells you that a Spot instance is at a heightened risk of being interrupted, allowing Karpenter to get new instances or simply rebalance workloads.
This rule allows Karpenter to gather [EC2 Instance Rebalance Recommendation](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/rebalance-recommendations.html) signals and direct them to a queue where they can be consumed by Karpenter. In particular, the [AWS::Events::Rule](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-rule.html) here creates a rule where the [EventPattern](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-patterns.html) is set to send events from the `aws.ec2` source to `KarpenterInterruptionQueue`. + + ```yaml + RebalanceRule: + Type: 'AWS::Events::Rule' + Properties: + EventPattern: + source: + - aws.ec2 + detail-type: + - EC2 Instance Rebalance Recommendation + Targets: + - Id: KarpenterInterruptionQueueTarget + Arn: !GetAtt KarpenterInterruptionQueue.Arn + ``` + +* InstanceStateChangeRule: An EC2 Instance State-change Notification signal tells you that the state of an instance has changed to one of the following states: pending, running, stopping, stopped, shutting-down, or terminated. This rule allows Karpenter to gather [EC2 Instance State-change](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instance-state-changes.html) signals and direct them to a queue where they can be consumed by Karpenter. In particular, the [AWS::Events::Rule](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-rule.html) here creates a rule where the [EventPattern](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-patterns.html) is set to send events from the `aws.ec2` source to `KarpenterInterruptionQueue`. + + ```yaml + InstanceStateChangeRule: + Type: 'AWS::Events::Rule' + Properties: + EventPattern: + source: + - aws.ec2 + detail-type: + - EC2 Instance State-change Notification + Targets: + - Id: KarpenterInterruptionQueueTarget + Arn: !GetAtt KarpenterInterruptionQueue.Arn + ``` diff --git a/website/content/en/v0.31/concepts/instance-types.md b/website/content/en/v0.35/reference/instance-types.md similarity index 93% rename from website/content/en/v0.31/concepts/instance-types.md rename to website/content/en/v0.35/reference/instance-types.md index 95752c33498f..6978c20ef258 100644 --- a/website/content/en/v0.31/concepts/instance-types.md +++ b/website/content/en/v0.35/reference/instance-types.md @@ -25,7 +25,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|2048| |karpenter.k8s.aws/instance-network-bandwidth|500| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -50,7 +49,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|4096| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -75,7 +73,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -100,7 +97,6 @@ below are the resources available with some assumptions and after the 
instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -125,7 +121,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -150,7 +145,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -175,7 +169,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|1| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|1740| - |karpenter.k8s.aws/instance-pods|12| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -198,7 +191,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|1| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|7168| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -222,7 +214,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|3| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|3840| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -245,7 +236,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|3| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|7680| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -268,7 +258,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|3| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|15360| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -291,7 +280,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|3| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|30720| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -315,7 +303,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|61440| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| 
|kubernetes.io/os|linux| @@ -339,7 +326,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|4| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|3840| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -362,7 +348,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|4| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|7680| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -385,7 +370,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|4| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|15360| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -408,7 +392,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|4| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|30720| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -432,7 +415,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|61440| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -457,7 +439,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|4096| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -482,7 +463,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -507,7 +487,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -532,7 +511,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -557,7 +535,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|73728| 
|karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|9xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -582,7 +559,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|98304| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -607,7 +583,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|147456| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|18xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -632,7 +607,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -657,7 +631,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -683,7 +656,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|4096| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -708,7 +680,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -733,7 +704,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -758,7 +728,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -783,7 +752,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| 
  |kubernetes.io/os|linux|
@@ -808,7 +776,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|98304|
  |karpenter.k8s.aws/instance-network-bandwidth|12000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|12xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -833,7 +800,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|20000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|16xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -858,7 +824,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|196608|
  |karpenter.k8s.aws/instance-network-bandwidth|20000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|24xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -885,7 +850,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|75|
  |karpenter.k8s.aws/instance-memory|4096|
  |karpenter.k8s.aws/instance-network-bandwidth|750|
- |karpenter.k8s.aws/instance-pods|29|
  |karpenter.k8s.aws/instance-size|large|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -911,7 +875,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|150|
  |karpenter.k8s.aws/instance-memory|8192|
  |karpenter.k8s.aws/instance-network-bandwidth|1250|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -937,7 +900,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|300|
  |karpenter.k8s.aws/instance-memory|16384|
  |karpenter.k8s.aws/instance-network-bandwidth|2500|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|2xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -963,7 +925,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|600|
  |karpenter.k8s.aws/instance-memory|32768|
  |karpenter.k8s.aws/instance-network-bandwidth|5000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|4xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -989,7 +950,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|1200|
  |karpenter.k8s.aws/instance-memory|65536|
  |karpenter.k8s.aws/instance-network-bandwidth|10000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|8xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1015,7 +975,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|1800|
  |karpenter.k8s.aws/instance-memory|98304|
  |karpenter.k8s.aws/instance-network-bandwidth|12000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|12xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1041,7 +1000,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|2400|
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|20000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|16xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1067,7 +1025,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|3800|
  |karpenter.k8s.aws/instance-memory|196608|
  |karpenter.k8s.aws/instance-network-bandwidth|20000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|24xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1094,7 +1051,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|50|
  |karpenter.k8s.aws/instance-memory|4096|
  |karpenter.k8s.aws/instance-network-bandwidth|750|
- |karpenter.k8s.aws/instance-pods|29|
  |karpenter.k8s.aws/instance-size|large|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1120,7 +1076,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|100|
  |karpenter.k8s.aws/instance-memory|8192|
  |karpenter.k8s.aws/instance-network-bandwidth|1250|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1146,7 +1101,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|200|
  |karpenter.k8s.aws/instance-memory|16384|
  |karpenter.k8s.aws/instance-network-bandwidth|2500|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|2xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1172,7 +1126,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|400|
  |karpenter.k8s.aws/instance-memory|32768|
  |karpenter.k8s.aws/instance-network-bandwidth|5000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|4xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1198,7 +1151,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|900|
  |karpenter.k8s.aws/instance-memory|73728|
  |karpenter.k8s.aws/instance-network-bandwidth|12000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|9xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1224,7 +1176,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|1800|
  |karpenter.k8s.aws/instance-memory|98304|
  |karpenter.k8s.aws/instance-network-bandwidth|12000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|12xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1250,7 +1201,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|1800|
  |karpenter.k8s.aws/instance-memory|147456|
  |karpenter.k8s.aws/instance-network-bandwidth|25000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|18xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1276,7 +1226,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|3600|
  |karpenter.k8s.aws/instance-memory|196608|
  |karpenter.k8s.aws/instance-network-bandwidth|25000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|24xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1302,7 +1251,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|3600|
  |karpenter.k8s.aws/instance-memory|196608|
  |karpenter.k8s.aws/instance-network-bandwidth|25000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|metal|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1328,7 +1276,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|5376|
  |karpenter.k8s.aws/instance-network-bandwidth|3000|
- |karpenter.k8s.aws/instance-pods|29|
  |karpenter.k8s.aws/instance-size|large|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1353,7 +1300,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|10752|
  |karpenter.k8s.aws/instance-network-bandwidth|5000|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1378,7 +1324,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|21504|
  |karpenter.k8s.aws/instance-network-bandwidth|10000|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|2xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1403,7 +1348,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|43008|
  |karpenter.k8s.aws/instance-network-bandwidth|15000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|4xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1428,7 +1372,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|98304|
  |karpenter.k8s.aws/instance-network-bandwidth|50000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|9xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1440,6 +1383,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|88002Mi|
  |pods|234|
+ |vpc.amazonaws.com/efa|1|
  |vpc.amazonaws.com/pod-eni|54|
 ### `c5n.18xlarge`
 #### Labels
@@ -1453,7 +1397,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|196608|
  |karpenter.k8s.aws/instance-network-bandwidth|100000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|18xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1465,6 +1408,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|173400Mi|
  |pods|737|
+ |vpc.amazonaws.com/efa|1|
  |vpc.amazonaws.com/pod-eni|107|
 ### `c5n.metal`
 #### Labels
@@ -1478,7 +1422,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor||
  |karpenter.k8s.aws/instance-memory|196608|
  |karpenter.k8s.aws/instance-network-bandwidth|100000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|metal|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1490,6 +1433,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|173400Mi|
  |pods|737|
+ |vpc.amazonaws.com/efa|1|
  |vpc.amazonaws.com/pod-eni|107|
 ## c6a Family
 ### `c6a.large`
@@ -1504,7 +1448,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|4096|
  |karpenter.k8s.aws/instance-network-bandwidth|781|
- |karpenter.k8s.aws/instance-pods|29|
  |karpenter.k8s.aws/instance-size|large|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1529,7 +1472,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|8192|
  |karpenter.k8s.aws/instance-network-bandwidth|1562|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1554,7 +1496,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|16384|
  |karpenter.k8s.aws/instance-network-bandwidth|3125|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|2xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1579,7 +1520,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|32768|
  |karpenter.k8s.aws/instance-network-bandwidth|6250|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|4xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1604,7 +1544,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|65536|
  |karpenter.k8s.aws/instance-network-bandwidth|12500|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|8xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1629,7 +1568,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|98304|
  |karpenter.k8s.aws/instance-network-bandwidth|18750|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|12xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1654,7 +1592,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|25000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|16xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1679,7 +1616,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|196608|
  |karpenter.k8s.aws/instance-network-bandwidth|37500|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|24xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1704,7 +1640,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|262144|
  |karpenter.k8s.aws/instance-network-bandwidth|50000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|32xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1729,7 +1664,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|393216|
  |karpenter.k8s.aws/instance-network-bandwidth|50000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|48xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1741,6 +1675,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|355262Mi|
  |pods|737|
+ |vpc.amazonaws.com/efa|1|
  |vpc.amazonaws.com/pod-eni|107|
 ### `c6a.metal`
 #### Labels
@@ -1754,7 +1689,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor||
  |karpenter.k8s.aws/instance-memory|393216|
  |karpenter.k8s.aws/instance-network-bandwidth|50000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|metal|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -1766,6 +1700,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|355262Mi|
  |pods|737|
+ |vpc.amazonaws.com/efa|1|
  |vpc.amazonaws.com/pod-eni|107|
 ## c6g Family
 ### `c6g.medium`
@@ -1780,7 +1715,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|2048|
  |karpenter.k8s.aws/instance-network-bandwidth|500|
- |karpenter.k8s.aws/instance-pods|8|
  |karpenter.k8s.aws/instance-size|medium|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -1805,7 +1739,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|4096|
  |karpenter.k8s.aws/instance-network-bandwidth|750|
- |karpenter.k8s.aws/instance-pods|29|
  |karpenter.k8s.aws/instance-size|large|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -1830,7 +1763,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|8192|
  |karpenter.k8s.aws/instance-network-bandwidth|1250|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -1855,7 +1787,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|16384|
  |karpenter.k8s.aws/instance-network-bandwidth|2500|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|2xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -1880,7 +1811,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|32768|
  |karpenter.k8s.aws/instance-network-bandwidth|5000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|4xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -1905,7 +1835,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|65536|
  |karpenter.k8s.aws/instance-network-bandwidth|12000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|8xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -1930,7 +1859,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|98304|
  |karpenter.k8s.aws/instance-network-bandwidth|20000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|12xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -1955,7 +1883,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|25000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|16xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -1980,7 +1907,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor||
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|25000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|metal|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2007,7 +1933,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|59|
  |karpenter.k8s.aws/instance-memory|2048|
  |karpenter.k8s.aws/instance-network-bandwidth|500|
- |karpenter.k8s.aws/instance-pods|8|
  |karpenter.k8s.aws/instance-size|medium|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2033,7 +1958,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|118|
  |karpenter.k8s.aws/instance-memory|4096|
  |karpenter.k8s.aws/instance-network-bandwidth|750|
- |karpenter.k8s.aws/instance-pods|29|
  |karpenter.k8s.aws/instance-size|large|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2059,7 +1983,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|237|
  |karpenter.k8s.aws/instance-memory|8192|
  |karpenter.k8s.aws/instance-network-bandwidth|1250|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2085,7 +2008,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|474|
  |karpenter.k8s.aws/instance-memory|16384|
  |karpenter.k8s.aws/instance-network-bandwidth|2500|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|2xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2111,7 +2033,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|950|
  |karpenter.k8s.aws/instance-memory|32768|
  |karpenter.k8s.aws/instance-network-bandwidth|5000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|4xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2137,7 +2058,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|1900|
  |karpenter.k8s.aws/instance-memory|65536|
  |karpenter.k8s.aws/instance-network-bandwidth|12000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|8xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2163,7 +2083,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|2850|
  |karpenter.k8s.aws/instance-memory|98304|
  |karpenter.k8s.aws/instance-network-bandwidth|20000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|12xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2189,7 +2108,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|3800|
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|25000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|16xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2215,7 +2133,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|3800|
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|25000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|metal|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2241,7 +2158,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|2048|
  |karpenter.k8s.aws/instance-network-bandwidth|1600|
- |karpenter.k8s.aws/instance-pods|8|
  |karpenter.k8s.aws/instance-size|medium|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2266,7 +2182,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|4096|
  |karpenter.k8s.aws/instance-network-bandwidth|3000|
- |karpenter.k8s.aws/instance-pods|29|
  |karpenter.k8s.aws/instance-size|large|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2291,7 +2206,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|8192|
  |karpenter.k8s.aws/instance-network-bandwidth|6300|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2316,7 +2230,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|16384|
  |karpenter.k8s.aws/instance-network-bandwidth|12500|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|2xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2340,8 +2253,7 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-generation|6|
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|32768|
- |karpenter.k8s.aws/instance-network-bandwidth|15000|
- |karpenter.k8s.aws/instance-pods|234|
+ |karpenter.k8s.aws/instance-network-bandwidth|25000|
  |karpenter.k8s.aws/instance-size|4xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2366,7 +2278,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|65536|
  |karpenter.k8s.aws/instance-network-bandwidth|50000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|8xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2391,7 +2302,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|98304|
  |karpenter.k8s.aws/instance-network-bandwidth|75000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|12xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2416,7 +2326,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|100000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|16xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -2428,6 +2337,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|112720Mi|
  |pods|737|
+ |vpc.amazonaws.com/efa|1|
  |vpc.amazonaws.com/pod-eni|107|
 ## c6i Family
 ### `c6i.large`
@@ -2442,7 +2352,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|4096|
  |karpenter.k8s.aws/instance-network-bandwidth|781|
- |karpenter.k8s.aws/instance-pods|29|
  |karpenter.k8s.aws/instance-size|large|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2467,7 +2376,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|8192|
  |karpenter.k8s.aws/instance-network-bandwidth|1562|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2492,7 +2400,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|16384|
  |karpenter.k8s.aws/instance-network-bandwidth|3125|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|2xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2517,7 +2424,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|32768|
  |karpenter.k8s.aws/instance-network-bandwidth|6250|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|4xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2542,7 +2448,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|65536|
  |karpenter.k8s.aws/instance-network-bandwidth|12500|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|8xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2567,7 +2472,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|98304|
  |karpenter.k8s.aws/instance-network-bandwidth|18750|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|12xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2592,7 +2496,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|25000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|16xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2617,7 +2520,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|196608|
  |karpenter.k8s.aws/instance-network-bandwidth|37500|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|24xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2642,7 +2544,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|262144|
  |karpenter.k8s.aws/instance-network-bandwidth|50000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|32xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2654,6 +2555,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|234021Mi|
  |pods|737|
+ |vpc.amazonaws.com/efa|1|
  |vpc.amazonaws.com/pod-eni|107|
 ### `c6i.metal`
 #### Labels
@@ -2667,7 +2569,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor||
  |karpenter.k8s.aws/instance-memory|262144|
  |karpenter.k8s.aws/instance-network-bandwidth|50000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|metal|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2679,6 +2580,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|234021Mi|
  |pods|737|
+ |vpc.amazonaws.com/efa|1|
  |vpc.amazonaws.com/pod-eni|107|
 ## c6id Family
 ### `c6id.large`
@@ -2694,7 +2596,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|118|
  |karpenter.k8s.aws/instance-memory|4096|
  |karpenter.k8s.aws/instance-network-bandwidth|781|
- |karpenter.k8s.aws/instance-pods|29|
  |karpenter.k8s.aws/instance-size|large|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2720,7 +2621,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|237|
  |karpenter.k8s.aws/instance-memory|8192|
  |karpenter.k8s.aws/instance-network-bandwidth|1562|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2746,7 +2646,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|474|
  |karpenter.k8s.aws/instance-memory|16384|
  |karpenter.k8s.aws/instance-network-bandwidth|3125|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|2xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2772,7 +2671,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|950|
  |karpenter.k8s.aws/instance-memory|32768|
  |karpenter.k8s.aws/instance-network-bandwidth|6250|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|4xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2798,7 +2696,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|1900|
  |karpenter.k8s.aws/instance-memory|65536|
  |karpenter.k8s.aws/instance-network-bandwidth|12500|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|8xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2824,7 +2721,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|2850|
  |karpenter.k8s.aws/instance-memory|98304|
  |karpenter.k8s.aws/instance-network-bandwidth|18750|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|12xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2850,7 +2746,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|3800|
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|25000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|16xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2876,7 +2771,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|5700|
  |karpenter.k8s.aws/instance-memory|196608|
  |karpenter.k8s.aws/instance-network-bandwidth|37500|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|24xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2902,7 +2796,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|7600|
  |karpenter.k8s.aws/instance-memory|262144|
  |karpenter.k8s.aws/instance-network-bandwidth|50000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|32xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2914,6 +2807,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|234021Mi|
  |pods|737|
+ |vpc.amazonaws.com/efa|1|
  |vpc.amazonaws.com/pod-eni|107|
 ### `c6id.metal`
 #### Labels
@@ -2928,7 +2822,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|7600|
  |karpenter.k8s.aws/instance-memory|262144|
  |karpenter.k8s.aws/instance-network-bandwidth|50000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|metal|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2940,6 +2833,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|234021Mi|
  |pods|737|
+ |vpc.amazonaws.com/efa|1|
  |vpc.amazonaws.com/pod-eni|107|
 ## c6in Family
 ### `c6in.large`
@@ -2954,7 +2848,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|4096|
  |karpenter.k8s.aws/instance-network-bandwidth|3125|
- |karpenter.k8s.aws/instance-pods|29|
  |karpenter.k8s.aws/instance-size|large|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -2979,7 +2872,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|8192|
  |karpenter.k8s.aws/instance-network-bandwidth|6250|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3004,7 +2896,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|16384|
  |karpenter.k8s.aws/instance-network-bandwidth|12500|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|2xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3029,7 +2920,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|32768|
  |karpenter.k8s.aws/instance-network-bandwidth|25000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|4xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3054,7 +2944,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|65536|
  |karpenter.k8s.aws/instance-network-bandwidth|50000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|8xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3079,7 +2968,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|98304|
  |karpenter.k8s.aws/instance-network-bandwidth|75000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|12xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3104,7 +2992,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|100000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|16xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3129,7 +3016,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|196608|
  |karpenter.k8s.aws/instance-network-bandwidth|150000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|24xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3154,7 +3040,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|262144|
  |karpenter.k8s.aws/instance-network-bandwidth|200000|
- |karpenter.k8s.aws/instance-pods|345|
  |karpenter.k8s.aws/instance-size|32xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3166,6 +3051,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|238333Mi|
  |pods|345|
+ |vpc.amazonaws.com/efa|2|
  |vpc.amazonaws.com/pod-eni|108|
 ### `c6in.metal`
 #### Labels
@@ -3179,7 +3065,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor||
  |karpenter.k8s.aws/instance-memory|262144|
  |karpenter.k8s.aws/instance-network-bandwidth|200000|
- |karpenter.k8s.aws/instance-pods|345|
  |karpenter.k8s.aws/instance-size|metal|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3191,7 +3076,299 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|238333Mi|
  |pods|345|
+ |vpc.amazonaws.com/efa|2|
  |vpc.amazonaws.com/pod-eni|108|
+## c7a Family
+### `c7a.medium`
+#### Labels
+ | Label | Value |
+ |--|--|
+ |karpenter.k8s.aws/instance-category|c|
+ |karpenter.k8s.aws/instance-cpu|1|
+ |karpenter.k8s.aws/instance-encryption-in-transit-supported|true|
+ |karpenter.k8s.aws/instance-family|c7a|
+ |karpenter.k8s.aws/instance-generation|7|
+ |karpenter.k8s.aws/instance-hypervisor|nitro|
+ |karpenter.k8s.aws/instance-memory|2048|
+ |karpenter.k8s.aws/instance-network-bandwidth|390|
+ |karpenter.k8s.aws/instance-size|medium|
+ |kubernetes.io/arch|amd64|
+ |kubernetes.io/os|linux|
+ |node.kubernetes.io/instance-type|c7a.medium|
+#### Resources
+ | Resource | Quantity |
+ |--|--|
+ |cpu|940m|
+ |ephemeral-storage|17Gi|
+ |memory|1451Mi|
+ |pods|8|
+ |vpc.amazonaws.com/pod-eni|4|
+### `c7a.large`
+#### Labels
+ | Label | Value |
+ |--|--|
+ |karpenter.k8s.aws/instance-category|c|
+ |karpenter.k8s.aws/instance-cpu|2|
+ |karpenter.k8s.aws/instance-encryption-in-transit-supported|true|
+ |karpenter.k8s.aws/instance-family|c7a|
+ |karpenter.k8s.aws/instance-generation|7|
+ |karpenter.k8s.aws/instance-hypervisor|nitro|
+ |karpenter.k8s.aws/instance-memory|4096|
+ |karpenter.k8s.aws/instance-network-bandwidth|781|
+ |karpenter.k8s.aws/instance-size|large|
+ |kubernetes.io/arch|amd64|
+ |kubernetes.io/os|linux|
+ |node.kubernetes.io/instance-type|c7a.large|
+#### Resources
+ | Resource | Quantity |
+ |--|--|
+ |cpu|1930m|
+ |ephemeral-storage|17Gi|
+ |memory|3114Mi|
+ |pods|29|
+ |vpc.amazonaws.com/pod-eni|9|
+### `c7a.xlarge`
+#### Labels
+ | Label | Value |
+ |--|--|
+ |karpenter.k8s.aws/instance-category|c|
+ |karpenter.k8s.aws/instance-cpu|4|
+ |karpenter.k8s.aws/instance-encryption-in-transit-supported|true|
+ |karpenter.k8s.aws/instance-family|c7a|
+ |karpenter.k8s.aws/instance-generation|7|
+ |karpenter.k8s.aws/instance-hypervisor|nitro|
+ |karpenter.k8s.aws/instance-memory|8192|
+ |karpenter.k8s.aws/instance-network-bandwidth|1562|
+ |karpenter.k8s.aws/instance-size|xlarge|
+ |kubernetes.io/arch|amd64|
+ |kubernetes.io/os|linux|
+ |node.kubernetes.io/instance-type|c7a.xlarge|
+#### Resources
+ | Resource | Quantity |
+ |--|--|
+ |cpu|3920m|
+ |ephemeral-storage|17Gi|
+ |memory|6584Mi|
+ |pods|58|
+ |vpc.amazonaws.com/pod-eni|18|
+### `c7a.2xlarge`
+#### Labels
+ | Label | Value |
+ |--|--|
+ |karpenter.k8s.aws/instance-category|c|
+ |karpenter.k8s.aws/instance-cpu|8|
+ |karpenter.k8s.aws/instance-encryption-in-transit-supported|true|
+ |karpenter.k8s.aws/instance-family|c7a|
+ |karpenter.k8s.aws/instance-generation|7|
+ |karpenter.k8s.aws/instance-hypervisor|nitro|
+ |karpenter.k8s.aws/instance-memory|16384|
+ |karpenter.k8s.aws/instance-network-bandwidth|3125|
+ |karpenter.k8s.aws/instance-size|2xlarge|
+ |kubernetes.io/arch|amd64|
+ |kubernetes.io/os|linux|
+ |node.kubernetes.io/instance-type|c7a.2xlarge|
+#### Resources
+ | Resource | Quantity |
+ |--|--|
+ |cpu|7910m|
+ |ephemeral-storage|17Gi|
+ |memory|14162Mi|
+ |pods|58|
+ |vpc.amazonaws.com/pod-eni|38|
+### `c7a.4xlarge`
+#### Labels
+ | Label | Value |
+ |--|--|
+ |karpenter.k8s.aws/instance-category|c|
+ |karpenter.k8s.aws/instance-cpu|16|
+ |karpenter.k8s.aws/instance-encryption-in-transit-supported|true|
+ |karpenter.k8s.aws/instance-family|c7a|
+ |karpenter.k8s.aws/instance-generation|7|
+ |karpenter.k8s.aws/instance-hypervisor|nitro|
+ |karpenter.k8s.aws/instance-memory|32768|
+ |karpenter.k8s.aws/instance-network-bandwidth|6250|
+ |karpenter.k8s.aws/instance-size|4xlarge|
+ |kubernetes.io/arch|amd64|
+ |kubernetes.io/os|linux|
+ |node.kubernetes.io/instance-type|c7a.4xlarge|
+#### Resources
+ | Resource | Quantity |
+ |--|--|
+ |cpu|15890m|
+ |ephemeral-storage|17Gi|
+ |memory|27381Mi|
+ |pods|234|
+ |vpc.amazonaws.com/pod-eni|54|
+### `c7a.8xlarge`
+#### Labels
+ | Label | Value |
+ |--|--|
+ |karpenter.k8s.aws/instance-category|c|
+ |karpenter.k8s.aws/instance-cpu|32|
+ |karpenter.k8s.aws/instance-encryption-in-transit-supported|true|
+ |karpenter.k8s.aws/instance-family|c7a|
+ |karpenter.k8s.aws/instance-generation|7|
+ |karpenter.k8s.aws/instance-hypervisor|nitro|
+ |karpenter.k8s.aws/instance-memory|65536|
+ |karpenter.k8s.aws/instance-network-bandwidth|12500|
+ |karpenter.k8s.aws/instance-size|8xlarge|
+ |kubernetes.io/arch|amd64|
+ |kubernetes.io/os|linux|
+ |node.kubernetes.io/instance-type|c7a.8xlarge|
+#### Resources
+ | Resource | Quantity |
+ |--|--|
+ |cpu|31850m|
+ |ephemeral-storage|17Gi|
+ |memory|57691Mi|
+ |pods|234|
+ |vpc.amazonaws.com/pod-eni|84|
+### `c7a.12xlarge`
+#### Labels
+ | Label | Value |
+ |--|--|
+ |karpenter.k8s.aws/instance-category|c|
+ |karpenter.k8s.aws/instance-cpu|48|
+ |karpenter.k8s.aws/instance-encryption-in-transit-supported|true|
+ |karpenter.k8s.aws/instance-family|c7a|
+ |karpenter.k8s.aws/instance-generation|7|
+ |karpenter.k8s.aws/instance-hypervisor|nitro|
+ |karpenter.k8s.aws/instance-memory|98304|
+ |karpenter.k8s.aws/instance-network-bandwidth|18750|
+ |karpenter.k8s.aws/instance-size|12xlarge|
+ |kubernetes.io/arch|amd64|
+ |kubernetes.io/os|linux|
+ |node.kubernetes.io/instance-type|c7a.12xlarge|
+#### Resources
+ | Resource | Quantity |
+ |--|--|
+ |cpu|47810m|
+ |ephemeral-storage|17Gi|
+ |memory|88002Mi|
+ |pods|234|
+ |vpc.amazonaws.com/pod-eni|114|
+### `c7a.16xlarge`
+#### Labels
+ | Label | Value |
+ |--|--|
+ |karpenter.k8s.aws/instance-category|c|
+ |karpenter.k8s.aws/instance-cpu|64|
+ |karpenter.k8s.aws/instance-encryption-in-transit-supported|true|
+ |karpenter.k8s.aws/instance-family|c7a|
+ |karpenter.k8s.aws/instance-generation|7|
+ |karpenter.k8s.aws/instance-hypervisor|nitro|
+ |karpenter.k8s.aws/instance-memory|131072|
+ |karpenter.k8s.aws/instance-network-bandwidth|25000|
+ |karpenter.k8s.aws/instance-size|16xlarge|
+ |kubernetes.io/arch|amd64|
+ |kubernetes.io/os|linux|
+ |node.kubernetes.io/instance-type|c7a.16xlarge|
+#### Resources
+ | Resource | Quantity |
+ |--|--|
+ |cpu|63770m|
+ |ephemeral-storage|17Gi|
+ |memory|112779Mi|
+ |pods|737|
+ |vpc.amazonaws.com/pod-eni|107|
+### `c7a.24xlarge`
+#### Labels
+ | Label | Value |
+ |--|--|
+ |karpenter.k8s.aws/instance-category|c|
+ |karpenter.k8s.aws/instance-cpu|96|
+ |karpenter.k8s.aws/instance-encryption-in-transit-supported|true|
+ |karpenter.k8s.aws/instance-family|c7a|
+ |karpenter.k8s.aws/instance-generation|7|
+ |karpenter.k8s.aws/instance-hypervisor|nitro|
+ |karpenter.k8s.aws/instance-memory|196608|
+ |karpenter.k8s.aws/instance-network-bandwidth|37500|
+ |karpenter.k8s.aws/instance-size|24xlarge|
+ |kubernetes.io/arch|amd64|
+ |kubernetes.io/os|linux|
+ |node.kubernetes.io/instance-type|c7a.24xlarge|
+#### Resources
+ | Resource | Quantity |
+ |--|--|
+ |cpu|95690m|
+ |ephemeral-storage|17Gi|
+ |memory|173400Mi|
+ |pods|737|
+ |vpc.amazonaws.com/pod-eni|107|
+### `c7a.32xlarge`
+#### Labels
+ | Label | Value |
+ |--|--|
+ |karpenter.k8s.aws/instance-category|c|
+ |karpenter.k8s.aws/instance-cpu|128|
+ |karpenter.k8s.aws/instance-encryption-in-transit-supported|true|
+ |karpenter.k8s.aws/instance-family|c7a|
+ |karpenter.k8s.aws/instance-generation|7|
+ |karpenter.k8s.aws/instance-hypervisor|nitro|
+ |karpenter.k8s.aws/instance-memory|262144|
+ |karpenter.k8s.aws/instance-network-bandwidth|50000|
+ |karpenter.k8s.aws/instance-size|32xlarge|
+ |kubernetes.io/arch|amd64|
+ |kubernetes.io/os|linux|
+ |node.kubernetes.io/instance-type|c7a.32xlarge|
+#### Resources
+ | Resource | Quantity |
+ |--|--|
+ |cpu|127610m|
+ |ephemeral-storage|17Gi|
+ |memory|234021Mi|
+ |pods|737|
+ |vpc.amazonaws.com/pod-eni|107|
+### `c7a.48xlarge`
+#### Labels
+ | Label | Value |
+ |--|--|
+ |karpenter.k8s.aws/instance-category|c|
+ |karpenter.k8s.aws/instance-cpu|192|
+ |karpenter.k8s.aws/instance-encryption-in-transit-supported|true|
+ |karpenter.k8s.aws/instance-family|c7a|
+ |karpenter.k8s.aws/instance-generation|7|
+ |karpenter.k8s.aws/instance-hypervisor|nitro|
+ |karpenter.k8s.aws/instance-memory|393216|
+ |karpenter.k8s.aws/instance-network-bandwidth|50000|
+ |karpenter.k8s.aws/instance-size|48xlarge|
+ |kubernetes.io/arch|amd64|
+ |kubernetes.io/os|linux|
+ |node.kubernetes.io/instance-type|c7a.48xlarge|
+#### Resources
+ | Resource | Quantity |
+ |--|--|
+ |cpu|191450m|
+ |ephemeral-storage|17Gi|
+ |memory|355262Mi|
+ |pods|737|
+ |vpc.amazonaws.com/efa|1|
+ |vpc.amazonaws.com/pod-eni|107|
+### `c7a.metal-48xl`
+#### Labels
+ | Label | Value |
+ |--|--|
+ |karpenter.k8s.aws/instance-category|c|
+ |karpenter.k8s.aws/instance-cpu|192|
+ |karpenter.k8s.aws/instance-encryption-in-transit-supported|true|
+ |karpenter.k8s.aws/instance-family|c7a|
+ |karpenter.k8s.aws/instance-generation|7|
+ |karpenter.k8s.aws/instance-hypervisor||
+ |karpenter.k8s.aws/instance-memory|393216|
+ |karpenter.k8s.aws/instance-network-bandwidth|50000|
+ |karpenter.k8s.aws/instance-size|metal-48xl|
+ |kubernetes.io/arch|amd64|
+ |kubernetes.io/os|linux|
+ |node.kubernetes.io/instance-type|c7a.metal-48xl|
+#### Resources
+ | Resource | Quantity |
+ |--|--|
+ |cpu|191450m|
+ |ephemeral-storage|17Gi|
+ |memory|355262Mi|
+ |pods|737|
+ |vpc.amazonaws.com/efa|1|
+ |vpc.amazonaws.com/pod-eni|107|
 ## c7g Family
 ### `c7g.medium`
 #### Labels
@@ -3205,7 +3382,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|2048|
  |karpenter.k8s.aws/instance-network-bandwidth|520|
- |karpenter.k8s.aws/instance-pods|8|
  |karpenter.k8s.aws/instance-size|medium|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3230,7 +3406,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|4096|
  |karpenter.k8s.aws/instance-network-bandwidth|937|
- |karpenter.k8s.aws/instance-pods|29|
  |karpenter.k8s.aws/instance-size|large|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3255,7 +3430,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|8192|
  |karpenter.k8s.aws/instance-network-bandwidth|1876|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3280,7 +3454,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|16384|
  |karpenter.k8s.aws/instance-network-bandwidth|3750|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|2xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3305,7 +3478,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|32768|
  |karpenter.k8s.aws/instance-network-bandwidth|7500|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|4xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3330,7 +3502,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|65536|
  |karpenter.k8s.aws/instance-network-bandwidth|15000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|8xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3355,7 +3526,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|98304|
  |karpenter.k8s.aws/instance-network-bandwidth|22500|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|12xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3380,7 +3550,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|30000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|16xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3392,6 +3561,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|112720Mi|
  |pods|737|
+ |vpc.amazonaws.com/efa|1|
  |vpc.amazonaws.com/pod-eni|107|
 ### `c7g.metal`
 #### Labels
@@ -3405,7 +3575,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor||
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|30000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|metal|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3417,6 +3586,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|112720Mi|
  |pods|737|
+ |vpc.amazonaws.com/efa|1|
  |vpc.amazonaws.com/pod-eni|107|
 ## c7gd Family
 ### `c7gd.medium`
@@ -3432,7 +3602,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|59|
  |karpenter.k8s.aws/instance-memory|2048|
  |karpenter.k8s.aws/instance-network-bandwidth|520|
- |karpenter.k8s.aws/instance-pods|8|
  |karpenter.k8s.aws/instance-size|medium|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3458,7 +3627,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|118|
  |karpenter.k8s.aws/instance-memory|4096|
  |karpenter.k8s.aws/instance-network-bandwidth|937|
- |karpenter.k8s.aws/instance-pods|29|
  |karpenter.k8s.aws/instance-size|large|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3484,7 +3652,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|237|
  |karpenter.k8s.aws/instance-memory|8192|
  |karpenter.k8s.aws/instance-network-bandwidth|1876|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3510,7 +3677,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|474|
  |karpenter.k8s.aws/instance-memory|16384|
  |karpenter.k8s.aws/instance-network-bandwidth|3750|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|2xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3536,7 +3702,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|950|
  |karpenter.k8s.aws/instance-memory|32768|
  |karpenter.k8s.aws/instance-network-bandwidth|7500|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|4xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3562,7 +3727,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|1900|
  |karpenter.k8s.aws/instance-memory|65536|
  |karpenter.k8s.aws/instance-network-bandwidth|15000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|8xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3588,7 +3752,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|2850|
  |karpenter.k8s.aws/instance-memory|98304|
  |karpenter.k8s.aws/instance-network-bandwidth|22500|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|12xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3614,7 +3777,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-local-nvme|3800|
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|30000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|16xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3626,6 +3788,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|112720Mi|
  |pods|737|
+ |vpc.amazonaws.com/efa|1|
  |vpc.amazonaws.com/pod-eni|107|
 ## c7gn Family
 ### `c7gn.medium`
@@ -3640,7 +3803,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|2048|
  |karpenter.k8s.aws/instance-network-bandwidth|3125|
- |karpenter.k8s.aws/instance-pods|8|
  |karpenter.k8s.aws/instance-size|medium|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3665,7 +3827,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|4096|
  |karpenter.k8s.aws/instance-network-bandwidth|6250|
- |karpenter.k8s.aws/instance-pods|29|
  |karpenter.k8s.aws/instance-size|large|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3690,7 +3851,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|8192|
  |karpenter.k8s.aws/instance-network-bandwidth|12500|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3715,7 +3875,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|16384|
  |karpenter.k8s.aws/instance-network-bandwidth|25000|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|2xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3740,7 +3899,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|32768|
  |karpenter.k8s.aws/instance-network-bandwidth|50000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|4xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3765,7 +3923,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|65536|
  |karpenter.k8s.aws/instance-network-bandwidth|100000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|8xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3790,7 +3947,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|98304|
  |karpenter.k8s.aws/instance-network-bandwidth|150000|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|12xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3815,7 +3971,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|200000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|16xlarge|
  |kubernetes.io/arch|arm64|
  |kubernetes.io/os|linux|
@@ -3827,6 +3982,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|112720Mi|
  |pods|737|
+ |vpc.amazonaws.com/efa|1|
  |vpc.amazonaws.com/pod-eni|107|
 ## c7i Family
 ### `c7i.large`
@@ -3841,7 +3997,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|4096|
  |karpenter.k8s.aws/instance-network-bandwidth|781|
- |karpenter.k8s.aws/instance-pods|29|
  |karpenter.k8s.aws/instance-size|large|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3853,6 +4008,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|3114Mi|
  |pods|29|
+ |vpc.amazonaws.com/pod-eni|9|
 ### `c7i.xlarge`
 #### Labels
  | Label | Value |
@@ -3865,7 +4021,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|8192|
  |karpenter.k8s.aws/instance-network-bandwidth|1562|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3877,6 +4032,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|6584Mi|
  |pods|58|
+ |vpc.amazonaws.com/pod-eni|18|
 ### `c7i.2xlarge`
 #### Labels
  | Label | Value |
@@ -3889,7 +4045,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|16384|
  |karpenter.k8s.aws/instance-network-bandwidth|3125|
- |karpenter.k8s.aws/instance-pods|58|
  |karpenter.k8s.aws/instance-size|2xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3901,6 +4056,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|14162Mi|
  |pods|58|
+ |vpc.amazonaws.com/pod-eni|38|
 ### `c7i.4xlarge`
 #### Labels
  | Label | Value |
@@ -3913,7 +4069,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|32768|
  |karpenter.k8s.aws/instance-network-bandwidth|6250|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|4xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3925,6 +4080,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|27381Mi|
  |pods|234|
+ |vpc.amazonaws.com/pod-eni|54|
 ### `c7i.8xlarge`
 #### Labels
  | Label | Value |
@@ -3937,7 +4093,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|65536|
  |karpenter.k8s.aws/instance-network-bandwidth|12500|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|8xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3949,6 +4104,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|57691Mi|
  |pods|234|
+ |vpc.amazonaws.com/pod-eni|84|
 ### `c7i.12xlarge`
 #### Labels
  | Label | Value |
@@ -3961,7 +4117,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|98304|
  |karpenter.k8s.aws/instance-network-bandwidth|18750|
- |karpenter.k8s.aws/instance-pods|234|
  |karpenter.k8s.aws/instance-size|12xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3973,6 +4128,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|88002Mi|
  |pods|234|
+ |vpc.amazonaws.com/pod-eni|114|
 ### `c7i.16xlarge`
 #### Labels
  | Label | Value |
@@ -3985,7 +4141,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|131072|
  |karpenter.k8s.aws/instance-network-bandwidth|25000|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|16xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -3997,6 +4152,7 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|112779Mi|
  |pods|737|
+ |vpc.amazonaws.com/pod-eni|107|
 ### `c7i.24xlarge`
 #### Labels
  | Label | Value |
@@ -4009,7 +4165,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
  |karpenter.k8s.aws/instance-memory|196608|
  |karpenter.k8s.aws/instance-network-bandwidth|37500|
- |karpenter.k8s.aws/instance-pods|737|
  |karpenter.k8s.aws/instance-size|24xlarge|
  |kubernetes.io/arch|amd64|
  |kubernetes.io/os|linux|
@@ -4021,6 +4176,31 @@ below are the resources available with some assumptions and after the instance o
  |ephemeral-storage|17Gi|
  |memory|173400Mi|
  |pods|737|
+ |vpc.amazonaws.com/pod-eni|107|
+### `c7i.metal-24xl`
+#### Labels
+ | Label | Value |
+ |--|--|
+ |karpenter.k8s.aws/instance-category|c|
+ |karpenter.k8s.aws/instance-cpu|96|
+ |karpenter.k8s.aws/instance-encryption-in-transit-supported|true|
+ |karpenter.k8s.aws/instance-family|c7i|
+ |karpenter.k8s.aws/instance-generation|7|
+ |karpenter.k8s.aws/instance-hypervisor||
+ |karpenter.k8s.aws/instance-memory|196608|
+ |karpenter.k8s.aws/instance-network-bandwidth|37500|
+ |karpenter.k8s.aws/instance-size|metal-24xl|
+ |kubernetes.io/arch|amd64|
+ |kubernetes.io/os|linux|
+ |node.kubernetes.io/instance-type|c7i.metal-24xl|
+#### Resources
+ | Resource | Quantity |
+ |--|--|
+ |cpu|95690m|
+ |ephemeral-storage|17Gi|
+ |memory|173400Mi|
+ |pods|737|
+ |vpc.amazonaws.com/pod-eni|107|
 ### `c7i.48xlarge`
 #### Labels
  | Label | Value |
@@ -4033,7 +4213,6 @@ below are the resources available with some assumptions and after the instance o
  |karpenter.k8s.aws/instance-hypervisor|nitro|
|karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|48xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4045,6 +4224,33 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|355262Mi| |pods|737| + |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| +### `c7i.metal-48xl` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|c| + |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|c7i| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|| + |karpenter.k8s.aws/instance-memory|393216| + |karpenter.k8s.aws/instance-network-bandwidth|50000| + |karpenter.k8s.aws/instance-size|metal-48xl| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|c7i.metal-48xl| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|191450m| + |ephemeral-storage|17Gi| + |memory|355262Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| ## d2 Family ### `d2.xlarge` #### Labels @@ -4057,7 +4263,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|31232| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4080,7 +4285,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|62464| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4103,7 +4307,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|124928| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4127,7 +4330,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|249856| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4153,7 +4355,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|5940| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|3000| - |karpenter.k8s.aws/instance-pods|10| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4179,7 +4380,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|11880| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|6000| - |karpenter.k8s.aws/instance-pods|18| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4205,7 +4405,6 @@ below are the resources 
available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|23760| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|38| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4231,7 +4430,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|47520| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|59| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4258,7 +4456,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|27960| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|6000| - |karpenter.k8s.aws/instance-pods|10| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4284,7 +4481,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|55920| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|18| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4310,7 +4506,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|111840| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|38| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4336,7 +4531,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|167760| |karpenter.k8s.aws/instance-memory|98304| |karpenter.k8s.aws/instance-network-bandwidth|40000| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|6xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4362,7 +4556,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|223680| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|78| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4388,7 +4581,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|335520| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|75000| - |karpenter.k8s.aws/instance-pods|89| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4401,6 +4593,38 @@ below are the resources available with some assumptions and after the instance o |memory|180528Mi| |pods|89| |vpc.amazonaws.com/pod-eni|119| +## dl1 Family +### `dl1.24xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|dl| + |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|dl1| + |karpenter.k8s.aws/instance-generation|1| + |karpenter.k8s.aws/instance-gpu-count|8| + |karpenter.k8s.aws/instance-gpu-manufacturer|habana| + 
|karpenter.k8s.aws/instance-gpu-memory|32768| + |karpenter.k8s.aws/instance-gpu-name|gaudi-hl-205| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-local-nvme|4000| + |karpenter.k8s.aws/instance-memory|786432| + |karpenter.k8s.aws/instance-network-bandwidth|400000| + |karpenter.k8s.aws/instance-size|24xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|dl1.24xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|95690m| + |ephemeral-storage|17Gi| + |habana.ai/gaudi|8| + |memory|718987Mi| + |pods|737| + |vpc.amazonaws.com/efa|4| + |vpc.amazonaws.com/pod-eni|62| ## f1 Family ### `f1.2xlarge` #### Labels @@ -4415,7 +4639,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|470| |karpenter.k8s.aws/instance-memory|124928| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4440,7 +4663,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|940| |karpenter.k8s.aws/instance-memory|249856| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4465,7 +4687,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3760| |karpenter.k8s.aws/instance-memory|999424| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|394| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4477,63 +4698,6 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|919778Mi| |pods|394| -## g2 Family -### `g2.2xlarge` -#### Labels - | Label | Value | - |--|--| - |karpenter.k8s.aws/instance-category|g| - |karpenter.k8s.aws/instance-cpu|8| - |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| - |karpenter.k8s.aws/instance-family|g2| - |karpenter.k8s.aws/instance-generation|2| - |karpenter.k8s.aws/instance-gpu-count|1| - |karpenter.k8s.aws/instance-gpu-manufacturer|nvidia| - |karpenter.k8s.aws/instance-gpu-memory|4096| - |karpenter.k8s.aws/instance-gpu-name|k520| - |karpenter.k8s.aws/instance-hypervisor|xen| - |karpenter.k8s.aws/instance-memory|15360| - |karpenter.k8s.aws/instance-pods|58| - |karpenter.k8s.aws/instance-size|2xlarge| - |kubernetes.io/arch|amd64| - |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|g2.2xlarge| -#### Resources - | Resource | Quantity | - |--|--| - |cpu|7910m| - |ephemeral-storage|17Gi| - |memory|13215Mi| - |nvidia.com/gpu|1| - |pods|58| -### `g2.8xlarge` -#### Labels - | Label | Value | - |--|--| - |karpenter.k8s.aws/instance-category|g| - |karpenter.k8s.aws/instance-cpu|32| - |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| - |karpenter.k8s.aws/instance-family|g2| - |karpenter.k8s.aws/instance-generation|2| - |karpenter.k8s.aws/instance-gpu-count|4| - |karpenter.k8s.aws/instance-gpu-manufacturer|nvidia| - |karpenter.k8s.aws/instance-gpu-memory|4096| - |karpenter.k8s.aws/instance-gpu-name|k520| - |karpenter.k8s.aws/instance-hypervisor|xen| - |karpenter.k8s.aws/instance-memory|61440| - |karpenter.k8s.aws/instance-pods|234| - 
|karpenter.k8s.aws/instance-size|8xlarge| - |kubernetes.io/arch|amd64| - |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|g2.8xlarge| -#### Resources - | Resource | Quantity | - |--|--| - |cpu|31850m| - |ephemeral-storage|17Gi| - |memory|53903Mi| - |nvidia.com/gpu|4| - |pods|234| ## g3 Family ### `g3.4xlarge` #### Labels @@ -4551,7 +4715,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|124928| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4580,7 +4743,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|249856| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4609,7 +4771,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|499712| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4638,7 +4799,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-gpu-name|m60| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|31232| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4669,7 +4829,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|150| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|2000| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4700,7 +4859,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|300| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|4167| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4731,7 +4889,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|600| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|8333| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4762,7 +4919,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1200| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|15000| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4793,7 +4949,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2400| |karpenter.k8s.aws/instance-memory|262144| 
|karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4825,7 +4980,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|125| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4856,7 +5010,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|225| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4887,7 +5040,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|225| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|20000| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4918,7 +5070,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|900| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4931,6 +5082,7 @@ below are the resources available with some assumptions and after the instance o |memory|120248Mi| |nvidia.com/gpu|1| |pods|58| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|58| ### `g4dn.12xlarge` #### Labels @@ -4949,7 +5101,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|900| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4962,6 +5113,7 @@ below are the resources available with some assumptions and after the instance o |memory|178933Mi| |nvidia.com/gpu|4| |pods|234| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|54| ### `g4dn.16xlarge` #### Labels @@ -4980,7 +5132,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|900| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -4993,6 +5144,7 @@ below are the resources available with some assumptions and after the instance o |memory|241490Mi| |nvidia.com/gpu|1| |pods|58| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|118| ### `g4dn.metal` #### Labels @@ -5011,7 +5163,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1800| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5024,6 
+5175,7 @@ below are the resources available with some assumptions and after the instance o |memory|355262Mi| |nvidia.com/gpu|8| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## g5 Family ### `g5.xlarge` @@ -5043,7 +5195,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|250| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5074,7 +5225,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|450| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5105,7 +5255,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|600| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5136,7 +5285,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|900| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5149,6 +5297,7 @@ below are the resources available with some assumptions and after the instance o |memory|118312Mi| |nvidia.com/gpu|1| |pods|234| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|84| ### `g5.12xlarge` #### Labels @@ -5167,7 +5316,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|40000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5180,6 +5328,7 @@ below are the resources available with some assumptions and after the instance o |memory|173400Mi| |nvidia.com/gpu|4| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `g5.16xlarge` #### Labels @@ -5198,7 +5347,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1900| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5211,6 +5359,7 @@ below are the resources available with some assumptions and after the instance o |memory|239554Mi| |nvidia.com/gpu|1| |pods|234| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|114| ### `g5.24xlarge` #### Labels @@ -5229,7 +5378,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| 
|kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5242,6 +5390,7 @@ below are the resources available with some assumptions and after the instance o |memory|355262Mi| |nvidia.com/gpu|4| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `g5.48xlarge` #### Labels @@ -5260,7 +5409,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|7600| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|345| |karpenter.k8s.aws/instance-size|48xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5273,6 +5421,7 @@ below are the resources available with some assumptions and after the instance o |memory|723299Mi| |nvidia.com/gpu|8| |pods|345| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|115| ## g5g Family ### `g5g.xlarge` @@ -5291,7 +5440,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -5321,7 +5469,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -5351,7 +5498,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -5381,7 +5527,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -5411,7 +5556,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -5441,7 +5585,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -5468,7 +5611,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ 
-5492,7 +5634,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5516,7 +5657,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5540,7 +5680,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5550,8 +5689,81 @@ below are the resources available with some assumptions and after the instance o |--|--| |cpu|63770m| |ephemeral-storage|17Gi| - |memory|234021Mi| - |pods|737| + |memory|237794Mi| + |pods|394| +## hpc7g Family +### `hpc7g.4xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|hpc| + |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|hpc7g| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|131072| + |karpenter.k8s.aws/instance-network-bandwidth|200000| + |karpenter.k8s.aws/instance-size|4xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|hpc7g.4xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|15890m| + |ephemeral-storage|17Gi| + |memory|118649Mi| + |pods|198| + |vpc.amazonaws.com/efa|1| +### `hpc7g.8xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|hpc| + |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|hpc7g| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|131072| + |karpenter.k8s.aws/instance-network-bandwidth|200000| + |karpenter.k8s.aws/instance-size|8xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|hpc7g.8xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|31850m| + |ephemeral-storage|17Gi| + |memory|118649Mi| + |pods|198| + |vpc.amazonaws.com/efa|1| +### `hpc7g.16xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|hpc| + |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|hpc7g| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|131072| + |karpenter.k8s.aws/instance-network-bandwidth|200000| + |karpenter.k8s.aws/instance-size|16xlarge| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|hpc7g.16xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|63770m| + 
|ephemeral-storage|17Gi| + |memory|118649Mi| + |pods|198| + |vpc.amazonaws.com/efa|1| ## i2 Family ### `i2.xlarge` #### Labels @@ -5564,7 +5776,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|31232| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5587,7 +5798,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|62464| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5610,7 +5820,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|124928| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5634,7 +5843,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|249856| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5660,7 +5868,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|475| |karpenter.k8s.aws/instance-memory|15616| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5685,7 +5892,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|950| |karpenter.k8s.aws/instance-memory|31232| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5710,7 +5916,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1900| |karpenter.k8s.aws/instance-memory|62464| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5735,7 +5940,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|124928| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5760,7 +5964,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|7600| |karpenter.k8s.aws/instance-memory|249856| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5785,7 +5988,6 @@ below are the resources available with some assumptions and 
after the instance o |karpenter.k8s.aws/instance-local-nvme|15200| |karpenter.k8s.aws/instance-memory|499712| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5810,7 +6012,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|15200| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5837,7 +6038,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1250| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|2100| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5863,7 +6063,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2500| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|4200| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5889,7 +6088,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|5000| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|8400| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5915,7 +6113,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|7500| |karpenter.k8s.aws/instance-memory|98304| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|3xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5941,7 +6138,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|15000| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|6xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5967,7 +6163,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|30000| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -5979,6 +6174,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|360795Mi| |pods|234| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|54| ### `i3en.24xlarge` #### Labels @@ -5993,7 +6189,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|60000| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| 
 |kubernetes.io/os|linux|
@@ -6005,6 +6200,7 @@ below are the resources available with some assumptions and after the instance o
 |ephemeral-storage|17Gi|
 |memory|718987Mi|
 |pods|737|
+ |vpc.amazonaws.com/efa|1|
 |vpc.amazonaws.com/pod-eni|107|
 ### `i3en.metal`
 #### Labels
@@ -6019,7 +6215,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|60000|
 |karpenter.k8s.aws/instance-memory|786432|
 |karpenter.k8s.aws/instance-network-bandwidth|100000|
- |karpenter.k8s.aws/instance-pods|737|
 |karpenter.k8s.aws/instance-size|metal|
 |kubernetes.io/arch|amd64|
 |kubernetes.io/os|linux|
@@ -6031,6 +6226,7 @@ below are the resources available with some assumptions and after the instance o
 |ephemeral-storage|17Gi|
 |memory|718987Mi|
 |pods|737|
+ |vpc.amazonaws.com/efa|1|
 |vpc.amazonaws.com/pod-eni|107|
 ## i4g Family
 ### `i4g.large`
@@ -6046,7 +6242,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|468|
 |karpenter.k8s.aws/instance-memory|16384|
 |karpenter.k8s.aws/instance-network-bandwidth|781|
- |karpenter.k8s.aws/instance-pods|29|
 |karpenter.k8s.aws/instance-size|large|
 |kubernetes.io/arch|arm64|
 |kubernetes.io/os|linux|
@@ -6072,7 +6267,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|937|
 |karpenter.k8s.aws/instance-memory|32768|
 |karpenter.k8s.aws/instance-network-bandwidth|1875|
- |karpenter.k8s.aws/instance-pods|58|
 |karpenter.k8s.aws/instance-size|xlarge|
 |kubernetes.io/arch|arm64|
 |kubernetes.io/os|linux|
@@ -6098,7 +6292,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|1875|
 |karpenter.k8s.aws/instance-memory|65536|
 |karpenter.k8s.aws/instance-network-bandwidth|4687|
- |karpenter.k8s.aws/instance-pods|58|
 |karpenter.k8s.aws/instance-size|2xlarge|
 |kubernetes.io/arch|arm64|
 |kubernetes.io/os|linux|
@@ -6124,7 +6317,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|3750|
 |karpenter.k8s.aws/instance-memory|131072|
 |karpenter.k8s.aws/instance-network-bandwidth|9375|
- |karpenter.k8s.aws/instance-pods|234|
 |karpenter.k8s.aws/instance-size|4xlarge|
 |kubernetes.io/arch|arm64|
 |kubernetes.io/os|linux|
@@ -6150,7 +6342,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|7500|
 |karpenter.k8s.aws/instance-memory|262144|
 |karpenter.k8s.aws/instance-network-bandwidth|18750|
- |karpenter.k8s.aws/instance-pods|234|
 |karpenter.k8s.aws/instance-size|8xlarge|
 |kubernetes.io/arch|arm64|
 |kubernetes.io/os|linux|
@@ -6176,7 +6367,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|15000|
 |karpenter.k8s.aws/instance-memory|524288|
 |karpenter.k8s.aws/instance-network-bandwidth|37500|
- |karpenter.k8s.aws/instance-pods|737|
 |karpenter.k8s.aws/instance-size|16xlarge|
 |kubernetes.io/arch|arm64|
 |kubernetes.io/os|linux|
@@ -6188,6 +6378,7 @@ below are the resources available with some assumptions and after the instance o
 |ephemeral-storage|17Gi|
 |memory|476445Mi|
 |pods|737|
+ |vpc.amazonaws.com/efa|1|
 |vpc.amazonaws.com/pod-eni|107|
 ## i4i Family
 ### `i4i.large`
@@ -6203,7 +6394,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|468|
 |karpenter.k8s.aws/instance-memory|16384|
 |karpenter.k8s.aws/instance-network-bandwidth|781|
- |karpenter.k8s.aws/instance-pods|29|
 |karpenter.k8s.aws/instance-size|large|
 |kubernetes.io/arch|amd64|
 |kubernetes.io/os|linux|
@@ -6228,7 +6418,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|937|
 |karpenter.k8s.aws/instance-memory|32768|
 |karpenter.k8s.aws/instance-network-bandwidth|1875|
- |karpenter.k8s.aws/instance-pods|58|
 |karpenter.k8s.aws/instance-size|xlarge|
 |kubernetes.io/arch|amd64|
 |kubernetes.io/os|linux|
@@ -6254,7 +6443,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|1875|
 |karpenter.k8s.aws/instance-memory|65536|
 |karpenter.k8s.aws/instance-network-bandwidth|4687|
- |karpenter.k8s.aws/instance-pods|58|
 |karpenter.k8s.aws/instance-size|2xlarge|
 |kubernetes.io/arch|amd64|
 |kubernetes.io/os|linux|
@@ -6280,7 +6468,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|3750|
 |karpenter.k8s.aws/instance-memory|131072|
 |karpenter.k8s.aws/instance-network-bandwidth|9375|
- |karpenter.k8s.aws/instance-pods|234|
 |karpenter.k8s.aws/instance-size|4xlarge|
 |kubernetes.io/arch|amd64|
 |kubernetes.io/os|linux|
@@ -6306,7 +6493,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|7500|
 |karpenter.k8s.aws/instance-memory|262144|
 |karpenter.k8s.aws/instance-network-bandwidth|18750|
- |karpenter.k8s.aws/instance-pods|234|
 |karpenter.k8s.aws/instance-size|8xlarge|
 |kubernetes.io/arch|amd64|
 |kubernetes.io/os|linux|
@@ -6319,6 +6505,31 @@ below are the resources available with some assumptions and after the instance o
 |memory|239554Mi|
 |pods|234|
 |vpc.amazonaws.com/pod-eni|112|
+### `i4i.12xlarge`
+#### Labels
+ | Label | Value |
+ |--|--|
+ |karpenter.k8s.aws/instance-category|i|
+ |karpenter.k8s.aws/instance-cpu|48|
+ |karpenter.k8s.aws/instance-encryption-in-transit-supported|true|
+ |karpenter.k8s.aws/instance-family|i4i|
+ |karpenter.k8s.aws/instance-generation|4|
+ |karpenter.k8s.aws/instance-hypervisor|nitro|
+ |karpenter.k8s.aws/instance-local-nvme|11250|
+ |karpenter.k8s.aws/instance-memory|393216|
+ |karpenter.k8s.aws/instance-network-bandwidth|28120|
+ |karpenter.k8s.aws/instance-size|12xlarge|
+ |kubernetes.io/arch|amd64|
+ |kubernetes.io/os|linux|
+ |node.kubernetes.io/instance-type|i4i.12xlarge|
+#### Resources
+ | Resource | Quantity |
+ |--|--|
+ |cpu|47810m|
+ |ephemeral-storage|17Gi|
+ |memory|360795Mi|
+ |pods|234|
+ |vpc.amazonaws.com/pod-eni|112|
 ### `i4i.16xlarge`
 #### Labels
 | Label | Value |
@@ -6332,7 +6543,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|15000|
 |karpenter.k8s.aws/instance-memory|524288|
 |karpenter.k8s.aws/instance-network-bandwidth|37500|
- |karpenter.k8s.aws/instance-pods|737|
 |karpenter.k8s.aws/instance-size|16xlarge|
 |kubernetes.io/arch|amd64|
 |kubernetes.io/os|linux|
@@ -6345,6 +6555,31 @@ below are the resources available with some assumptions and after the instance o
 |memory|476504Mi|
 |pods|737|
 |vpc.amazonaws.com/pod-eni|120|
+### `i4i.24xlarge`
+#### Labels
+ | Label | Value |
+ |--|--|
+ |karpenter.k8s.aws/instance-category|i|
+ |karpenter.k8s.aws/instance-cpu|96|
+ |karpenter.k8s.aws/instance-encryption-in-transit-supported|true|
+ |karpenter.k8s.aws/instance-family|i4i|
+ |karpenter.k8s.aws/instance-generation|4|
+ |karpenter.k8s.aws/instance-hypervisor|nitro|
+ |karpenter.k8s.aws/instance-local-nvme|22500|
+ |karpenter.k8s.aws/instance-memory|786432|
+ |karpenter.k8s.aws/instance-network-bandwidth|56250|
+ |karpenter.k8s.aws/instance-size|24xlarge|
+ |kubernetes.io/arch|amd64|
+ |kubernetes.io/os|linux|
+ |node.kubernetes.io/instance-type|i4i.24xlarge|
+#### Resources
+ | Resource | Quantity |
+ |--|--|
+ |cpu|95690m|
+ |ephemeral-storage|17Gi|
+ |memory|722287Mi|
+ |pods|437|
+ |vpc.amazonaws.com/pod-eni|105|
 ### `i4i.32xlarge`
 #### Labels
 | Label | Value |
@@ -6358,7 +6593,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|30000|
 |karpenter.k8s.aws/instance-memory|1048576|
 |karpenter.k8s.aws/instance-network-bandwidth|75000|
- |karpenter.k8s.aws/instance-pods|737|
 |karpenter.k8s.aws/instance-size|32xlarge|
 |kubernetes.io/arch|amd64|
 |kubernetes.io/os|linux|
@@ -6370,6 +6604,7 @@ below are the resources available with some assumptions and after the instance o
 |ephemeral-storage|17Gi|
 |memory|961470Mi|
 |pods|737|
+ |vpc.amazonaws.com/efa|1|
 |vpc.amazonaws.com/pod-eni|120|
 ### `i4i.metal`
 #### Labels
@@ -6384,7 +6619,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|30000|
 |karpenter.k8s.aws/instance-memory|1048576|
 |karpenter.k8s.aws/instance-network-bandwidth|75000|
- |karpenter.k8s.aws/instance-pods|737|
 |karpenter.k8s.aws/instance-size|metal|
 |kubernetes.io/arch|amd64|
 |kubernetes.io/os|linux|
@@ -6396,6 +6630,7 @@ below are the resources available with some assumptions and after the instance o
 |ephemeral-storage|17Gi|
 |memory|961470Mi|
 |pods|737|
+ |vpc.amazonaws.com/efa|1|
 |vpc.amazonaws.com/pod-eni|120|
 ## im4gn Family
 ### `im4gn.large`
@@ -6411,7 +6646,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|937|
 |karpenter.k8s.aws/instance-memory|8192|
 |karpenter.k8s.aws/instance-network-bandwidth|3125|
- |karpenter.k8s.aws/instance-pods|29|
 |karpenter.k8s.aws/instance-size|large|
 |kubernetes.io/arch|arm64|
 |kubernetes.io/os|linux|
@@ -6437,7 +6671,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|1875|
 |karpenter.k8s.aws/instance-memory|16384|
 |karpenter.k8s.aws/instance-network-bandwidth|6250|
- |karpenter.k8s.aws/instance-pods|58|
 |karpenter.k8s.aws/instance-size|xlarge|
 |kubernetes.io/arch|arm64|
 |kubernetes.io/os|linux|
@@ -6463,7 +6696,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|3750|
 |karpenter.k8s.aws/instance-memory|32768|
 |karpenter.k8s.aws/instance-network-bandwidth|12500|
- |karpenter.k8s.aws/instance-pods|58|
 |karpenter.k8s.aws/instance-size|2xlarge|
 |kubernetes.io/arch|arm64|
 |kubernetes.io/os|linux|
@@ -6489,7 +6721,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|7500|
 |karpenter.k8s.aws/instance-memory|65536|
 |karpenter.k8s.aws/instance-network-bandwidth|25000|
- |karpenter.k8s.aws/instance-pods|234|
 |karpenter.k8s.aws/instance-size|4xlarge|
 |kubernetes.io/arch|arm64|
 |kubernetes.io/os|linux|
@@ -6515,7 +6746,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|15000|
 |karpenter.k8s.aws/instance-memory|131072|
 |karpenter.k8s.aws/instance-network-bandwidth|50000|
- |karpenter.k8s.aws/instance-pods|234|
 |karpenter.k8s.aws/instance-size|8xlarge|
 |kubernetes.io/arch|arm64|
 |kubernetes.io/os|linux|
@@ -6541,7 +6771,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-local-nvme|30000|
 |karpenter.k8s.aws/instance-memory|262144|
 |karpenter.k8s.aws/instance-network-bandwidth|100000|
- |karpenter.k8s.aws/instance-pods|737|
 |karpenter.k8s.aws/instance-size|16xlarge|
 |kubernetes.io/arch|arm64|
 |kubernetes.io/os|linux|
@@ -6553,6 +6782,7 @@ below are the resources available with some assumptions and after the instance o
 |ephemeral-storage|17Gi|
 |memory|233962Mi|
 |pods|737|
+ |vpc.amazonaws.com/efa|1|
 |vpc.amazonaws.com/pod-eni|107|
 ## inf1 Family
 ### `inf1.xlarge`
@@ -6570,7 +6800,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-hypervisor|nitro|
 |karpenter.k8s.aws/instance-memory|8192|
 |karpenter.k8s.aws/instance-network-bandwidth|5000|
- |karpenter.k8s.aws/instance-pods|38|
 |karpenter.k8s.aws/instance-size|xlarge|
 |kubernetes.io/arch|amd64|
 |kubernetes.io/os|linux|
@@ -6599,7 +6828,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-hypervisor|nitro|
 |karpenter.k8s.aws/instance-memory|16384|
 |karpenter.k8s.aws/instance-network-bandwidth|5000|
- |karpenter.k8s.aws/instance-pods|38|
 |karpenter.k8s.aws/instance-size|2xlarge|
 |kubernetes.io/arch|amd64|
 |kubernetes.io/os|linux|
@@ -6628,7 +6856,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-hypervisor|nitro|
 |karpenter.k8s.aws/instance-memory|49152|
 |karpenter.k8s.aws/instance-network-bandwidth|25000|
- |karpenter.k8s.aws/instance-pods|234|
 |karpenter.k8s.aws/instance-size|6xlarge|
 |kubernetes.io/arch|amd64|
 |kubernetes.io/os|linux|
@@ -6657,7 +6884,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-hypervisor|nitro|
 |karpenter.k8s.aws/instance-memory|196608|
 |karpenter.k8s.aws/instance-network-bandwidth|100000|
- |karpenter.k8s.aws/instance-pods|321|
 |karpenter.k8s.aws/instance-size|24xlarge|
 |kubernetes.io/arch|amd64|
 |kubernetes.io/os|linux|
@@ -6670,6 +6896,7 @@ below are the resources available with some assumptions and after the instance o
 |ephemeral-storage|17Gi|
 |memory|177976Mi|
 |pods|321|
+ |vpc.amazonaws.com/efa|1|
 |vpc.amazonaws.com/pod-eni|111|
 ## inf2 Family
 ### `inf2.xlarge`
@@ -6687,7 +6914,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-hypervisor|nitro|
 |karpenter.k8s.aws/instance-memory|16384|
 |karpenter.k8s.aws/instance-network-bandwidth|2083|
- |karpenter.k8s.aws/instance-pods|58|
 |karpenter.k8s.aws/instance-size|xlarge|
 |kubernetes.io/arch|amd64|
 |kubernetes.io/os|linux|
@@ -6716,7 +6942,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-hypervisor|nitro|
 |karpenter.k8s.aws/instance-memory|131072|
 |karpenter.k8s.aws/instance-network-bandwidth|16667|
- |karpenter.k8s.aws/instance-pods|234|
 |karpenter.k8s.aws/instance-size|8xlarge|
 |kubernetes.io/arch|amd64|
 |kubernetes.io/os|linux|
@@ -6745,7 +6970,6 @@ below are the resources available with some assumptions and after the instance o
 |karpenter.k8s.aws/instance-hypervisor|nitro|
 |karpenter.k8s.aws/instance-memory|393216|
|karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -6774,7 +6998,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|48xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -6802,7 +7025,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|937| |karpenter.k8s.aws/instance-memory|6144| |karpenter.k8s.aws/instance-network-bandwidth|1562| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -6828,7 +7050,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1875| |karpenter.k8s.aws/instance-memory|12288| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -6854,7 +7075,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3750| |karpenter.k8s.aws/instance-memory|24576| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -6880,7 +7100,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|7500| |karpenter.k8s.aws/instance-memory|49152| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -6906,7 +7125,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|15000| |karpenter.k8s.aws/instance-memory|98304| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -6932,7 +7150,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|30000| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -6957,7 +7174,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|1| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|1740| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|small| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -6980,7 +7196,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|1| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|3788| - |karpenter.k8s.aws/instance-pods|12| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7003,7 
+7218,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|1| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|7680| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7026,7 +7240,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|1| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|15360| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7050,7 +7263,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|17510| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7073,7 +7285,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|35020| - |karpenter.k8s.aws/instance-pods|118| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7096,7 +7307,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|70041| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7120,7 +7330,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|3| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|3840| - |karpenter.k8s.aws/instance-pods|12| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7143,7 +7352,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|3| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|7680| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7166,7 +7374,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|3| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|15360| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7189,7 +7396,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|3| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|30720| - |karpenter.k8s.aws/instance-pods|118| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7213,7 +7419,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|4| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|8192| - |karpenter.k8s.aws/instance-pods|20| 
|karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7236,7 +7441,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|4| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|16384| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7259,7 +7463,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|4| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|32768| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7282,7 +7485,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|4| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|65536| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7306,7 +7508,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|163840| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|10xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7330,7 +7531,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7355,7 +7555,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7380,7 +7579,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7405,7 +7603,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7430,7 +7627,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7455,7 +7651,6 @@ below are the resources available with some assumptions and after the instance o 
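The "assumptions" referenced in the recurring hunk context above can be made concrete. The allocatable quantities in each Resources table appear to follow Karpenter's default overhead model: reported memory is first discounted by the VM memory overhead factor (`VM_MEMORY_OVERHEAD_PERCENT`, 0.075 by default), then reduced by kube-reserved memory (255 Mi + 11 Mi per pod) and the 100 Mi hard-eviction threshold. For example, an instance reporting 393216 MiB with a 737-pod ceiling yields 393216 × 0.925 − (11 × 737 + 255) − 100 ≈ 355262Mi, matching the tables. The cpu quantities follow the usual tiered kubelet reservation (6% of the first core, 1% of the second, 0.5% of the next two, 0.25% of the rest), so 64 vCPUs reserve 230m and leave 63770m.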
|karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7480,7 +7675,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7505,7 +7699,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|20000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7530,7 +7723,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7555,7 +7747,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7581,7 +7772,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7606,7 +7796,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7631,7 +7820,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7656,7 +7844,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7681,7 +7868,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|7500| - 
|karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7706,7 +7892,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7731,7 +7916,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7756,7 +7940,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|20000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7783,7 +7966,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|75| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7809,7 +7991,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|150| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7835,7 +8016,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|300| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7861,7 +8041,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|600| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7887,7 +8066,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1200| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|7500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7913,7 +8091,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1800| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7939,7 +8116,6 @@ below are the 
resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2400| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7965,7 +8141,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3600| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|20000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -7992,7 +8167,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|75| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8018,7 +8192,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|150| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8044,7 +8217,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|300| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8070,7 +8242,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|600| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8096,7 +8267,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1200| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8122,7 +8292,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1800| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8148,7 +8317,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2400| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|20000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8174,7 +8342,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3600| |karpenter.k8s.aws/instance-memory|393216| 
|karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8200,7 +8367,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3600| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8227,7 +8393,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|75| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|2100| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8253,7 +8418,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|150| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|4100| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8279,7 +8443,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|300| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|8125| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8305,7 +8468,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|600| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|16250| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8331,7 +8493,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1200| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8357,7 +8518,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1800| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8383,7 +8543,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2400| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|75000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8409,7 +8568,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3600| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| 
|kubernetes.io/os|linux| @@ -8421,6 +8579,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|355262Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `m5dn.metal` #### Labels @@ -8435,7 +8594,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3600| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8447,6 +8605,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|355262Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## m5n Family ### `m5n.large` @@ -8461,7 +8620,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|2100| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8486,7 +8644,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|4100| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8511,7 +8668,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|8125| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8536,7 +8692,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|16250| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8561,7 +8716,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8586,7 +8740,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8611,7 +8764,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|75000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8636,7 +8788,6 
@@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8648,6 +8799,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|355262Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `m5n.metal` #### Labels @@ -8661,7 +8813,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8673,6 +8824,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|355262Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## m5zn Family ### `m5zn.large` @@ -8687,7 +8839,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|3000| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8712,7 +8863,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8737,7 +8887,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8762,7 +8911,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|49152| |karpenter.k8s.aws/instance-network-bandwidth|15000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|3xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8787,7 +8935,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|98304| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|6xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8812,7 +8959,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8824,6 +8970,7 @@ below are the resources available with 
some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|173400Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `m5zn.metal` #### Labels @@ -8837,7 +8984,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8849,6 +8995,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|173400Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## m6a Family ### `m6a.large` @@ -8863,7 +9010,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|781| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8888,7 +9034,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1562| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8913,7 +9058,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8938,7 +9082,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8963,7 +9106,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -8988,7 +9130,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|18750| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9013,7 +9154,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9038,7 +9178,6 @@ below are the resources available with some assumptions and after the instance o 
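Similarly, the `pods` quantities in these Resources tables (and the `- |karpenter.k8s.aws/instance-pods|...|` rows being dropped from the Labels tables) are consistent with the VPC CNI's ENI-based ceiling, maxPods = ENIs × (IPv4 addresses per ENI − 1) + 2, assuming default networking with no prefix delegation or custom networking: 3 ENIs × 10 IPs gives 29, 4 × 15 gives 58, 8 × 30 gives 234, 15 × 50 gives 737, and 7 × 50 gives the 345 seen on the largest n-variant sizes.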
|karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|37500| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9063,7 +9202,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9088,7 +9226,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|48xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9100,6 +9237,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|718987Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `m6a.metal` #### Labels @@ -9113,7 +9251,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9125,6 +9262,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|718987Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## m6g Family ### `m6g.medium` @@ -9139,7 +9277,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|4096| |karpenter.k8s.aws/instance-network-bandwidth|500| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9164,7 +9301,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9189,7 +9325,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9214,7 +9349,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9239,7 +9373,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| 
|karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9264,7 +9397,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9289,7 +9421,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|20000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9314,7 +9445,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9339,7 +9469,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9366,7 +9495,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|59| |karpenter.k8s.aws/instance-memory|4096| |karpenter.k8s.aws/instance-network-bandwidth|500| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9392,7 +9520,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|118| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9418,7 +9545,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|237| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9444,7 +9570,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|474| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9470,7 +9595,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|950| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| 
|kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9496,7 +9620,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1900| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9522,7 +9645,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2850| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|20000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9548,7 +9670,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9574,7 +9695,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -9600,7 +9720,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|781| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9625,7 +9744,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1562| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9650,7 +9768,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9675,7 +9792,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9700,7 +9816,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9725,7 +9840,6 @@ below are the resources available with some assumptions and after the instance o 
|karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|18750| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9750,7 +9864,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9775,7 +9888,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|37500| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9800,7 +9912,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9812,6 +9923,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|476504Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `m6i.metal` #### Labels @@ -9825,7 +9937,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9837,6 +9948,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|476504Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## m6id Family ### `m6id.large` @@ -9852,7 +9964,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|118| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|781| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9878,7 +9989,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|237| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1562| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9904,7 +10014,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|474| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9930,7 +10039,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|950| 
|karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9956,7 +10064,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1900| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -9982,7 +10089,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2850| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|18750| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10008,7 +10114,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10034,7 +10139,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|5700| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|37500| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10060,7 +10164,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|7600| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10072,6 +10175,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|476504Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `m6id.metal` #### Labels @@ -10086,7 +10190,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|7600| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10098,6 +10201,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|476504Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## m6idn Family ### `m6idn.large` @@ -10113,7 +10217,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|118| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10139,7 +10242,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|237| 
|karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10165,7 +10267,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|474| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10191,7 +10292,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|950| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10217,7 +10317,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1900| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10243,7 +10342,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2850| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|75000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10269,7 +10367,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10295,7 +10392,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|5700| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|150000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10321,7 +10417,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|7600| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|200000| - |karpenter.k8s.aws/instance-pods|345| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10333,6 +10428,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|480816Mi| |pods|345| + |vpc.amazonaws.com/efa|2| |vpc.amazonaws.com/pod-eni|108| ### `m6idn.metal` #### Labels @@ -10347,7 +10443,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|7600| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|200000| - |karpenter.k8s.aws/instance-pods|345| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10359,6 +10454,7 @@ below are the 
resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|480816Mi| |pods|345| + |vpc.amazonaws.com/efa|2| |vpc.amazonaws.com/pod-eni|108| ## m6in Family ### `m6in.large` @@ -10373,7 +10469,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10398,7 +10493,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10423,7 +10517,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10448,7 +10541,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10473,7 +10565,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10498,7 +10589,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|75000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10523,7 +10613,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10548,7 +10637,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|150000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10573,7 +10661,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|200000| - |karpenter.k8s.aws/instance-pods|345| 
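The `+ |vpc.amazonaws.com/efa|...|` rows added above advertise EFA devices as a Kubernetes extended resource, so workloads consume them like any other device resource. A minimal sketch, assuming a cluster where such nodes can be provisioned; the pod name and image are illustrative:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: efa-example   # illustrative name
spec:
  containers:
    - name: main
      image: public.ecr.aws/amazonlinux/amazonlinux:2023   # placeholder image
      command: ["sleep", "infinity"]
      resources:
        limits:
          vpc.amazonaws.com/efa: 1   # extended resources must be requested via limits
```

Because the resource is advertised per node, requesting it also steers scheduling, and with Karpenter provisioning, toward the EFA-capable instance types shown in these tables (the n/dn 32xlarge and metal sizes).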
|karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10585,6 +10672,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|480816Mi| |pods|345| + |vpc.amazonaws.com/efa|2| |vpc.amazonaws.com/pod-eni|108| ### `m6in.metal` #### Labels @@ -10598,7 +10686,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|200000| - |karpenter.k8s.aws/instance-pods|345| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10610,6 +10697,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|480816Mi| |pods|345| + |vpc.amazonaws.com/efa|2| |vpc.amazonaws.com/pod-eni|108| ## m7a Family ### `m7a.medium` @@ -10624,7 +10712,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|4096| |karpenter.k8s.aws/instance-network-bandwidth|390| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10649,7 +10736,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|781| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10674,7 +10760,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1562| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10699,7 +10784,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10724,7 +10808,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10749,7 +10832,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10774,7 +10856,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|18750| - |karpenter.k8s.aws/instance-pods|234| 
|karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10799,7 +10880,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10824,7 +10904,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|37500| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10849,7 +10928,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10874,7 +10952,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|48xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10886,6 +10963,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|718987Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `m7a.metal-48xl` #### Labels @@ -10899,7 +10977,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal-48xl| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -10911,6 +10988,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|718987Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## m7g Family ### `m7g.medium` @@ -10925,7 +11003,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|4096| |karpenter.k8s.aws/instance-network-bandwidth|520| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -10950,7 +11027,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|937| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -10975,7 +11051,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1876| - |karpenter.k8s.aws/instance-pods|58| 
|karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11000,7 +11075,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|3750| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11025,7 +11099,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|7500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11050,7 +11123,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|15000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11075,7 +11147,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|22500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11100,7 +11171,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|30000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11112,6 +11182,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|233962Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `m7g.metal` #### Labels @@ -11125,7 +11196,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|30000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11137,6 +11207,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|233962Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## m7gd Family ### `m7gd.medium` @@ -11152,7 +11223,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|59| |karpenter.k8s.aws/instance-memory|4096| |karpenter.k8s.aws/instance-network-bandwidth|520| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11178,7 +11248,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|118| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|937| - |karpenter.k8s.aws/instance-pods|29| 
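The m7g rows above are Graviton types, which is why their `kubernetes.io/arch` value is `arm64` rather than `amd64`. A short sketch of a pod pinned to that architecture (the name and image are placeholders; any real image must be multi-arch or arm64-built):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: arm64-workload                # hypothetical name
spec:
  nodeSelector:
    kubernetes.io/arch: arm64         # matches the m7g and m7gd rows above
  containers:
    - name: app
      image: public.ecr.aws/docker/library/busybox:stable   # placeholder multi-arch image
      command: ["sleep", "3600"]
```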
|karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11204,7 +11273,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|237| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1876| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11230,7 +11298,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|474| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|3750| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11256,7 +11323,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|950| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|7500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11282,7 +11348,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1900| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|15000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11308,7 +11373,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2850| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|22500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11334,7 +11398,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|30000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -11346,7 +11409,32 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|233962Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| +### `m7gd.metal` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|m| + |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|m7gd| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|| + |karpenter.k8s.aws/instance-local-nvme|3800| + |karpenter.k8s.aws/instance-memory|262144| + |karpenter.k8s.aws/instance-size|metal| + |kubernetes.io/arch|arm64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|m7gd.metal| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|63770m| + |ephemeral-storage|17Gi| + |memory|233962Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| ## m7i Family ### `m7i.large` #### Labels @@ -11360,7 +11448,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| 
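The newly added `m7gd.metal` entry advertises its 3800 GB of instance storage through `karpenter.k8s.aws/instance-local-nvme`. Assuming the numeric `Gt` operator applies to this label the way it does to other integer-valued well-known labels (an assumption, not something this diff states), a fragment that could be dropped into a NodePool's `spec.template.spec.requirements` to demand local NVMe:

```yaml
requirements:
  - key: karpenter.k8s.aws/instance-local-nvme
    operator: Gt                      # assumes Gt is honored for this numeric label
    values: ["1000"]                  # only types with more than 1000 GB of local NVMe
```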
|karpenter.k8s.aws/instance-network-bandwidth|781| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11385,7 +11472,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1562| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11410,7 +11496,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11435,7 +11520,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11460,7 +11544,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11485,7 +11568,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|18750| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11510,7 +11592,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11535,7 +11616,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|37500| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11548,6 +11628,30 @@ below are the resources available with some assumptions and after the instance o |memory|355262Mi| |pods|737| |vpc.amazonaws.com/pod-eni|107| +### `m7i.metal-24xl` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|m| + |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|m7i| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|| + |karpenter.k8s.aws/instance-memory|393216| + |karpenter.k8s.aws/instance-network-bandwidth|37500| + |karpenter.k8s.aws/instance-size|metal-24xl| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| 
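Like the other bare-metal rows, the new `m7i.metal-24xl` entry above carries an empty `karpenter.k8s.aws/instance-hypervisor` value, while virtualized types report `nitro` or `xen`. One hedged way to keep a NodePool off bare metal is therefore to require a non-empty hypervisor, sketched here as a requirements fragment:

```yaml
requirements:
  - key: karpenter.k8s.aws/instance-hypervisor
    operator: In
    values: ["nitro"]                 # excludes the metal rows, whose hypervisor value is empty
```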
+ |node.kubernetes.io/instance-type|m7i.metal-24xl| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|95690m| + |ephemeral-storage|17Gi| + |memory|355262Mi| + |pods|737| + |vpc.amazonaws.com/pod-eni|107| ### `m7i.48xlarge` #### Labels | Label | Value | @@ -11560,7 +11664,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|48xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11572,21 +11675,46 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|718987Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| -## m7i-flex Family -### `m7i-flex.large` +### `m7i.metal-48xl` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|m| - |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-cpu|192| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|m7i-flex| + |karpenter.k8s.aws/instance-family|m7i| |karpenter.k8s.aws/instance-generation|7| - |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-hypervisor|| + |karpenter.k8s.aws/instance-memory|786432| + |karpenter.k8s.aws/instance-network-bandwidth|50000| + |karpenter.k8s.aws/instance-size|metal-48xl| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|m7i.metal-48xl| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|191450m| + |ephemeral-storage|17Gi| + |memory|718987Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| +## m7i-flex Family +### `m7i-flex.large` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|m| + |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|m7i-flex| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|390| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11611,7 +11739,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|781| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11636,7 +11763,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1562| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11661,7 +11787,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11686,7 
+11811,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11715,7 +11839,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-gpu-name|k80| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|62464| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11744,7 +11867,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|499712| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11773,7 +11895,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|749568| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11786,6 +11907,186 @@ below are the resources available with some assumptions and after the instance o |memory|690421Mi| |nvidia.com/gpu|16| |pods|234| +## p3 Family +### `p3.2xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|p| + |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| + |karpenter.k8s.aws/instance-family|p3| + |karpenter.k8s.aws/instance-generation|3| + |karpenter.k8s.aws/instance-gpu-count|1| + |karpenter.k8s.aws/instance-gpu-manufacturer|nvidia| + |karpenter.k8s.aws/instance-gpu-memory|16384| + |karpenter.k8s.aws/instance-gpu-name|v100| + |karpenter.k8s.aws/instance-hypervisor|xen| + |karpenter.k8s.aws/instance-memory|62464| + |karpenter.k8s.aws/instance-size|2xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|p3.2xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|7910m| + |ephemeral-storage|17Gi| + |memory|56786Mi| + |nvidia.com/gpu|1| + |pods|58| +### `p3.8xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|p| + |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| + |karpenter.k8s.aws/instance-family|p3| + |karpenter.k8s.aws/instance-generation|3| + |karpenter.k8s.aws/instance-gpu-count|4| + |karpenter.k8s.aws/instance-gpu-manufacturer|nvidia| + |karpenter.k8s.aws/instance-gpu-memory|16384| + |karpenter.k8s.aws/instance-gpu-name|v100| + |karpenter.k8s.aws/instance-hypervisor|xen| + |karpenter.k8s.aws/instance-memory|249856| + |karpenter.k8s.aws/instance-network-bandwidth|10000| + |karpenter.k8s.aws/instance-size|8xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|p3.8xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|31850m| + |ephemeral-storage|17Gi| + |memory|228187Mi| + |nvidia.com/gpu|4| + |pods|234| +### `p3.16xlarge` +#### Labels + | Label | Value | + |--|--| + 
|karpenter.k8s.aws/instance-category|p| + |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| + |karpenter.k8s.aws/instance-family|p3| + |karpenter.k8s.aws/instance-generation|3| + |karpenter.k8s.aws/instance-gpu-count|8| + |karpenter.k8s.aws/instance-gpu-manufacturer|nvidia| + |karpenter.k8s.aws/instance-gpu-memory|16384| + |karpenter.k8s.aws/instance-gpu-name|v100| + |karpenter.k8s.aws/instance-hypervisor|xen| + |karpenter.k8s.aws/instance-memory|499712| + |karpenter.k8s.aws/instance-network-bandwidth|25000| + |karpenter.k8s.aws/instance-size|16xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|p3.16xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|63770m| + |ephemeral-storage|17Gi| + |memory|459304Mi| + |nvidia.com/gpu|8| + |pods|234| +## p3dn Family +### `p3dn.24xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|p| + |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|p3dn| + |karpenter.k8s.aws/instance-generation|3| + |karpenter.k8s.aws/instance-gpu-count|8| + |karpenter.k8s.aws/instance-gpu-manufacturer|nvidia| + |karpenter.k8s.aws/instance-gpu-memory|32768| + |karpenter.k8s.aws/instance-gpu-name|v100| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-local-nvme|1800| + |karpenter.k8s.aws/instance-memory|786432| + |karpenter.k8s.aws/instance-network-bandwidth|100000| + |karpenter.k8s.aws/instance-size|24xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|p3dn.24xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|95690m| + |ephemeral-storage|17Gi| + |memory|718987Mi| + |nvidia.com/gpu|8| + |pods|737| + |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| +## p4d Family +### `p4d.24xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|p| + |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|p4d| + |karpenter.k8s.aws/instance-generation|4| + |karpenter.k8s.aws/instance-gpu-count|8| + |karpenter.k8s.aws/instance-gpu-manufacturer|nvidia| + |karpenter.k8s.aws/instance-gpu-memory|40960| + |karpenter.k8s.aws/instance-gpu-name|a100| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-local-nvme|8000| + |karpenter.k8s.aws/instance-memory|1179648| + |karpenter.k8s.aws/instance-network-bandwidth|400000| + |karpenter.k8s.aws/instance-size|24xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|p4d.24xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|95690m| + |ephemeral-storage|17Gi| + |memory|1082712Mi| + |nvidia.com/gpu|8| + |pods|737| + |vpc.amazonaws.com/efa|4| + |vpc.amazonaws.com/pod-eni|62| +## p5 Family +### `p5.48xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|p| + |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|p5| + |karpenter.k8s.aws/instance-generation|5| + |karpenter.k8s.aws/instance-gpu-count|8| + |karpenter.k8s.aws/instance-gpu-manufacturer|nvidia| + |karpenter.k8s.aws/instance-gpu-memory|81920| + |karpenter.k8s.aws/instance-gpu-name|h100| + |karpenter.k8s.aws/instance-hypervisor|nitro| + 
|karpenter.k8s.aws/instance-local-nvme|30400| + |karpenter.k8s.aws/instance-memory|2097152| + |karpenter.k8s.aws/instance-network-bandwidth|3200000| + |karpenter.k8s.aws/instance-size|48xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|p5.48xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|191450m| + |ephemeral-storage|17Gi| + |memory|1938410Mi| + |nvidia.com/gpu|8| + |pods|100| + |vpc.amazonaws.com/efa|32| + |vpc.amazonaws.com/pod-eni|120| ## r3 Family ### `r3.large` #### Labels @@ -11798,7 +12099,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|3| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|15360| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11821,7 +12121,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|3| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|31232| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11844,7 +12143,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|3| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|62464| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11867,7 +12165,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|3| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|124928| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11891,7 +12188,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|249856| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11916,7 +12212,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|15616| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11940,7 +12235,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|31232| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11964,7 +12258,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|62464| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -11988,7 +12281,6 @@ below are 
the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|124928| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12012,7 +12304,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|249856| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12036,7 +12327,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|499712| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12061,7 +12351,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12086,7 +12375,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12111,7 +12399,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12136,7 +12423,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12161,7 +12447,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12186,7 +12471,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12211,7 +12495,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| 
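The p3, p3dn, p4d, and p5 families added a little earlier in this table expose `nvidia.com/gpu`, and on the newer generations `vpc.amazonaws.com/efa`, as extended resources. A sketch of a training pod aimed at the `p4d.24xlarge` row (the name, image, and command are placeholders, and it assumes the NVIDIA and EFA device plugins are running on the node):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: gpu-training                  # hypothetical name
spec:
  nodeSelector:
    karpenter.k8s.aws/instance-gpu-name: a100    # matches the p4d rows above
  containers:
    - name: trainer
      image: public.ecr.aws/docker/library/python:3.11     # placeholder image
      command: ["python", "-c", "print('train here')"]
      resources:
        limits:
          nvidia.com/gpu: 8           # all eight A100s on a p4d.24xlarge
          vpc.amazonaws.com/efa: 4    # the four EFA devices that row advertises
```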
|karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|20000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12236,7 +12519,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12261,7 +12543,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12287,7 +12568,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12312,7 +12592,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12337,7 +12616,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12362,7 +12640,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12387,7 +12664,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|7500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12412,7 +12688,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12437,7 +12712,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|737| 
|karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12462,7 +12736,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|20000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12489,7 +12762,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|75| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12515,7 +12787,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|150| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12541,7 +12812,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|300| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12567,7 +12837,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|600| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12593,7 +12862,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1200| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|7500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12619,7 +12887,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1800| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12645,7 +12912,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2400| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12671,7 +12937,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3600| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|20000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12697,7 +12962,6 @@ below are the resources 
available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12722,7 +12986,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12747,7 +13010,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12772,7 +13034,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12797,7 +13058,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12822,7 +13082,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12847,7 +13106,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|20000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12872,7 +13130,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12897,7 +13154,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12924,7 +13180,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|75| |karpenter.k8s.aws/instance-memory|16384| 
|karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12950,7 +13205,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|150| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -12976,7 +13230,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|300| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13002,7 +13255,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|600| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13028,7 +13280,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1200| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13054,7 +13305,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1800| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13080,7 +13330,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2400| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|20000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13106,7 +13355,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3600| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13132,7 +13380,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3600| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13159,7 +13406,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|75| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|2100| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| 
|kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13185,7 +13431,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|150| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|4100| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13211,7 +13456,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|300| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|8125| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13237,7 +13481,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|600| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|16250| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13263,7 +13506,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1200| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13289,7 +13531,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1800| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13315,7 +13556,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2400| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|75000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13341,7 +13581,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3600| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13353,6 +13592,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|718987Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `r5dn.metal` #### Labels @@ -13367,7 +13607,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3600| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13379,6 +13618,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|718987Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## 
r5n Family ### `r5n.large` @@ -13393,7 +13633,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|2100| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13418,7 +13657,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|4100| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13443,7 +13681,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|8125| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13468,7 +13705,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|16250| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13493,7 +13729,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13518,7 +13753,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13543,7 +13777,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|75000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13568,7 +13801,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13580,6 +13812,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|718987Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `r5n.metal` #### Labels @@ -13593,7 +13826,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|786432| 
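The r5n rows above are the network-optimized variants; their `karpenter.k8s.aws/instance-network-bandwidth` values are in Mbps and reach 100000 on the largest sizes. Assuming `Gt` comparisons work on this numeric label as they do for other integer labels, a requirements fragment for bandwidth-sensitive workloads might look like:

```yaml
requirements:
  - key: karpenter.k8s.aws/instance-network-bandwidth
    operator: Gt                      # assumes Gt is honored for this numeric label
    values: ["24999"]                 # only types advertising 25 Gbps or more
```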
|karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13605,6 +13837,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|718987Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## r6a Family ### `r6a.large` @@ -13619,7 +13852,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|781| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13644,7 +13876,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1562| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13669,7 +13900,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13694,7 +13924,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13719,7 +13948,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13744,7 +13972,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|18750| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13769,7 +13996,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13794,7 +14020,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|37500| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13819,7 +14044,6 @@ below are the resources available with some assumptions 
and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|1048576| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13844,7 +14068,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|1572864| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|48xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13856,6 +14079,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|1446437Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `r6a.metal` #### Labels @@ -13869,7 +14093,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|1572864| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -13881,6 +14104,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|1446437Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## r6g Family ### `r6g.medium` @@ -13895,7 +14119,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|500| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -13920,7 +14143,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -13945,7 +14167,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -13970,7 +14191,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -13995,7 +14215,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -14020,7 +14239,6 @@ below are the resources available with some assumptions and after the instance o 
|karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -14045,7 +14263,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|20000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -14070,7 +14287,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -14095,7 +14311,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -14122,7 +14337,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|59| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|500| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -14148,7 +14362,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|118| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -14174,7 +14387,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|237| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -14200,7 +14412,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|474| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -14226,7 +14437,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|950| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -14252,7 +14462,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1900| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|12000| - 
|karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -14278,7 +14487,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2850| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|20000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -14304,7 +14512,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -14330,7 +14537,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -14356,7 +14562,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|781| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14381,7 +14586,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1562| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14406,7 +14610,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14431,7 +14634,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14456,7 +14658,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14481,7 +14682,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|18750| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ 
-14506,7 +14706,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14531,7 +14730,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|37500| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14556,7 +14754,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|1048576| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14568,6 +14765,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|961470Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `r6i.metal` #### Labels @@ -14581,7 +14779,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|1048576| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14593,6 +14790,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|961470Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## r6id Family ### `r6id.large` @@ -14608,7 +14806,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|118| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|781| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14634,7 +14831,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|237| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1562| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14660,7 +14856,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|474| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14686,7 +14881,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|950| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14712,7 +14906,6 @@ below are 
the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1900| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14738,7 +14931,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2850| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|18750| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14764,7 +14956,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14790,7 +14981,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|5700| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|37500| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14816,7 +15006,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|7600| |karpenter.k8s.aws/instance-memory|1048576| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14828,6 +15017,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|961470Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `r6id.metal` #### Labels @@ -14842,7 +15032,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|7600| |karpenter.k8s.aws/instance-memory|1048576| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14854,6 +15043,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|961470Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## r6idn Family ### `r6idn.large` @@ -14869,7 +15059,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|118| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14895,7 +15084,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|237| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14921,7 +15109,6 @@ below are the resources 
available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|474| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14947,7 +15134,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|950| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14973,7 +15159,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1900| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -14999,7 +15184,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2850| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|75000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15025,7 +15209,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15051,7 +15234,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|5700| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|150000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15077,7 +15259,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|7600| |karpenter.k8s.aws/instance-memory|1048576| |karpenter.k8s.aws/instance-network-bandwidth|200000| - |karpenter.k8s.aws/instance-pods|345| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15089,6 +15270,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|965782Mi| |pods|345| + |vpc.amazonaws.com/efa|2| |vpc.amazonaws.com/pod-eni|108| ### `r6idn.metal` #### Labels @@ -15103,7 +15285,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|7600| |karpenter.k8s.aws/instance-memory|1048576| |karpenter.k8s.aws/instance-network-bandwidth|200000| - |karpenter.k8s.aws/instance-pods|345| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15115,6 +15296,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|965782Mi| |pods|345| + |vpc.amazonaws.com/efa|2| |vpc.amazonaws.com/pod-eni|108| ## r6in Family ### `r6in.large` @@ -15129,7 +15311,6 @@ below are the resources available 
with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15154,7 +15335,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15179,7 +15359,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15204,7 +15383,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15229,7 +15407,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15254,7 +15431,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|75000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15279,7 +15455,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15304,7 +15479,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|150000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15329,7 +15503,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|1048576| |karpenter.k8s.aws/instance-network-bandwidth|200000| - |karpenter.k8s.aws/instance-pods|345| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15341,6 +15514,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|965782Mi| |pods|345| + |vpc.amazonaws.com/efa|2| 
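The recurring `+ |vpc.amazonaws.com/efa|…|` rows being added here advertise Elastic Fabric Adapter interfaces as an extended resource on these instance types. A minimal sketch of a pod claiming one, assuming standard Kubernetes extended-resource semantics (pod name and image are placeholders):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: efa-workload                 # hypothetical name
spec:
  containers:
    - name: app
      image: public.ecr.aws/docker/library/busybox:latest   # placeholder image
      command: ["sleep", "infinity"]
      resources:
        limits:
          # Extended resources are requested via limits; the pod schedules only
          # onto nodes that advertise vpc.amazonaws.com/efa capacity.
          vpc.amazonaws.com/efa: "1"
```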
|vpc.amazonaws.com/pod-eni|108| ### `r6in.metal` #### Labels @@ -15354,7 +15528,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|1048576| |karpenter.k8s.aws/instance-network-bandwidth|200000| - |karpenter.k8s.aws/instance-pods|345| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15366,6 +15539,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|965782Mi| |pods|345| + |vpc.amazonaws.com/efa|2| |vpc.amazonaws.com/pod-eni|108| ## r7a Family ### `r7a.medium` @@ -15380,7 +15554,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|390| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15392,6 +15565,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|7134Mi| |pods|8| + |vpc.amazonaws.com/pod-eni|4| ### `r7a.large` #### Labels | Label | Value | @@ -15404,7 +15578,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|781| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15416,6 +15589,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|14481Mi| |pods|29| + |vpc.amazonaws.com/pod-eni|9| ### `r7a.xlarge` #### Labels | Label | Value | @@ -15428,7 +15602,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1562| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15440,6 +15613,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|29317Mi| |pods|58| + |vpc.amazonaws.com/pod-eni|18| ### `r7a.2xlarge` #### Labels | Label | Value | @@ -15452,7 +15626,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15464,6 +15637,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|59627Mi| |pods|58| + |vpc.amazonaws.com/pod-eni|38| ### `r7a.4xlarge` #### Labels | Label | Value | @@ -15476,7 +15650,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15488,6 +15661,7 @@ below are the 
resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|118312Mi| |pods|234| + |vpc.amazonaws.com/pod-eni|54| ### `r7a.8xlarge` #### Labels | Label | Value | @@ -15500,7 +15674,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15512,6 +15685,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|239554Mi| |pods|234| + |vpc.amazonaws.com/pod-eni|84| ### `r7a.12xlarge` #### Labels | Label | Value | @@ -15524,7 +15698,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|18750| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15536,6 +15709,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|360795Mi| |pods|234| + |vpc.amazonaws.com/pod-eni|114| ### `r7a.16xlarge` #### Labels | Label | Value | @@ -15548,7 +15722,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15560,6 +15733,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|476504Mi| |pods|737| + |vpc.amazonaws.com/pod-eni|107| ### `r7a.24xlarge` #### Labels | Label | Value | @@ -15572,7 +15746,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|37500| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15584,6 +15757,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|718987Mi| |pods|737| + |vpc.amazonaws.com/pod-eni|107| ### `r7a.32xlarge` #### Labels | Label | Value | @@ -15596,7 +15770,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|1048576| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15608,6 +15781,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|961470Mi| |pods|737| + |vpc.amazonaws.com/pod-eni|107| ### `r7a.48xlarge` #### Labels | Label | Value | @@ -15620,7 +15794,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|1572864| 
|karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|48xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -15632,6 +15805,33 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|1446437Mi| |pods|737| + |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| +### `r7a.metal-48xl` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r7a| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|| + |karpenter.k8s.aws/instance-memory|1572864| + |karpenter.k8s.aws/instance-network-bandwidth|50000| + |karpenter.k8s.aws/instance-size|metal-48xl| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r7a.metal-48xl| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|191450m| + |ephemeral-storage|17Gi| + |memory|1446437Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| ## r7g Family ### `r7g.medium` #### Labels @@ -15645,7 +15845,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|520| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -15670,7 +15869,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|937| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -15695,7 +15893,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1876| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -15720,7 +15917,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|3750| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -15745,7 +15941,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|7500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -15770,7 +15965,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|15000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -15795,7 +15989,6 @@ below are the 
resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|22500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -15820,7 +16013,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|30000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -15832,6 +16024,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|476445Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `r7g.metal` #### Labels @@ -15845,7 +16038,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|30000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -15857,6 +16049,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|476445Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## r7gd Family ### `r7gd.medium` @@ -15872,7 +16065,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|59| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|520| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -15898,7 +16090,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|118| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|937| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -15924,7 +16115,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|237| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1876| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -15950,7 +16140,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|474| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|3750| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -15976,7 +16165,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|950| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|7500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -16000,9 +16188,8 @@ below are the resources available with some assumptions 
and after the instance o |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-local-nvme|1900| - |karpenter.k8s.aws/instance-memory|196608| + |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|15000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -16012,7 +16199,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |cpu|31850m| |ephemeral-storage|17Gi| - |memory|178874Mi| + |memory|239495Mi| |pods|234| |vpc.amazonaws.com/pod-eni|54| ### `r7gd.12xlarge` @@ -16026,9 +16213,8 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-local-nvme|2850| - |karpenter.k8s.aws/instance-memory|262144| + |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|22500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -16038,7 +16224,7 @@ below are the resources available with some assumptions and after the instance o |--|--| |cpu|47810m| |ephemeral-storage|17Gi| - |memory|239495Mi| + |memory|360736Mi| |pods|234| |vpc.amazonaws.com/pod-eni|54| ### `r7gd.16xlarge` @@ -16054,7 +16240,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|30000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -16066,25 +16251,25 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|476445Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| -## r7iz Family -### `r7iz.large` +## r7i Family +### `r7i.large` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|2| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|781| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|r7iz.large| + |node.kubernetes.io/instance-type|r7i.large| #### Resources | Resource | Quantity | |--|--| @@ -16092,23 +16277,23 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|14481Mi| |pods|29| -### `r7iz.xlarge` + |vpc.amazonaws.com/pod-eni|9| +### `r7i.xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|4| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1562| - 
|karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|r7iz.xlarge| + |node.kubernetes.io/instance-type|r7i.xlarge| #### Resources | Resource | Quantity | |--|--| @@ -16116,23 +16301,23 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|29317Mi| |pods|58| -### `r7iz.2xlarge` + |vpc.amazonaws.com/pod-eni|18| +### `r7i.2xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|8| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|3125| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|r7iz.2xlarge| + |node.kubernetes.io/instance-type|r7i.2xlarge| #### Resources | Resource | Quantity | |--|--| @@ -16140,23 +16325,23 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|59627Mi| |pods|58| -### `r7iz.4xlarge` + |vpc.amazonaws.com/pod-eni|38| +### `r7i.4xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|16| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|6250| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|r7iz.4xlarge| + |node.kubernetes.io/instance-type|r7i.4xlarge| #### Resources | Resource | Quantity | |--|--| @@ -16164,23 +16349,23 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|118312Mi| |pods|234| -### `r7iz.8xlarge` + |vpc.amazonaws.com/pod-eni|54| +### `r7i.8xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|32| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|r7iz.8xlarge| + |node.kubernetes.io/instance-type|r7i.8xlarge| #### Resources | Resource | Quantity | |--|--| @@ -16188,23 +16373,23 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|239554Mi| |pods|234| -### `r7iz.12xlarge` + |vpc.amazonaws.com/pod-eni|84| +### `r7i.12xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|48| 
|karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|393216| - |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| + |karpenter.k8s.aws/instance-network-bandwidth|18750| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|r7iz.12xlarge| + |node.kubernetes.io/instance-type|r7i.12xlarge| #### Resources | Resource | Quantity | |--|--| @@ -16212,23 +16397,23 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|360795Mi| |pods|234| -### `r7iz.16xlarge` + |vpc.amazonaws.com/pod-eni|114| +### `r7i.16xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|r| |karpenter.k8s.aws/instance-cpu|64| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|r7iz.16xlarge| + |node.kubernetes.io/instance-type|r7i.16xlarge| #### Resources | Resource | Quantity | |--|--| @@ -16236,43 +16421,360 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|476504Mi| |pods|737| -### `r7iz.32xlarge` + |vpc.amazonaws.com/pod-eni|107| +### `r7i.24xlarge` #### Labels | Label | Value | |--|--| |karpenter.k8s.aws/instance-category|r| - |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-cpu|96| |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| - |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-family|r7i| |karpenter.k8s.aws/instance-generation|7| |karpenter.k8s.aws/instance-hypervisor|nitro| - |karpenter.k8s.aws/instance-memory|1048576| - |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| - |karpenter.k8s.aws/instance-size|32xlarge| + |karpenter.k8s.aws/instance-memory|786432| + |karpenter.k8s.aws/instance-network-bandwidth|37500| + |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| - |node.kubernetes.io/instance-type|r7iz.32xlarge| + |node.kubernetes.io/instance-type|r7i.24xlarge| #### Resources | Resource | Quantity | |--|--| - |cpu|127610m| + |cpu|95690m| |ephemeral-storage|17Gi| - |memory|961470Mi| + |memory|718987Mi| |pods|737| -## t1 Family -### `t1.micro` + |vpc.amazonaws.com/pod-eni|107| +### `r7i.metal-24xl` #### Labels | Label | Value | |--|--| - |karpenter.k8s.aws/instance-category|t| - |karpenter.k8s.aws/instance-cpu|1| - |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r7i| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|| + |karpenter.k8s.aws/instance-memory|786432| + 
|karpenter.k8s.aws/instance-network-bandwidth|37500| + |karpenter.k8s.aws/instance-size|metal-24xl| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r7i.metal-24xl| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|95690m| + |ephemeral-storage|17Gi| + |memory|718987Mi| + |pods|737| + |vpc.amazonaws.com/pod-eni|107| +### `r7i.48xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r7i| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|1572864| + |karpenter.k8s.aws/instance-network-bandwidth|50000| + |karpenter.k8s.aws/instance-size|48xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r7i.48xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|191450m| + |ephemeral-storage|17Gi| + |memory|1446437Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| +### `r7i.metal-48xl` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|192| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r7i| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|| + |karpenter.k8s.aws/instance-memory|1572864| + |karpenter.k8s.aws/instance-network-bandwidth|50000| + |karpenter.k8s.aws/instance-size|metal-48xl| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r7i.metal-48xl| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|191450m| + |ephemeral-storage|17Gi| + |memory|1446437Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| +## r7iz Family +### `r7iz.large` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|2| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|16384| + |karpenter.k8s.aws/instance-network-bandwidth|781| + |karpenter.k8s.aws/instance-size|large| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r7iz.large| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|1930m| + |ephemeral-storage|17Gi| + |memory|14481Mi| + |pods|29| + |vpc.amazonaws.com/pod-eni|9| +### `r7iz.xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|4| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|32768| + |karpenter.k8s.aws/instance-network-bandwidth|1562| + |karpenter.k8s.aws/instance-size|xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r7iz.xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|3920m| + |ephemeral-storage|17Gi| + |memory|29317Mi| + |pods|58| + |vpc.amazonaws.com/pod-eni|18| +### `r7iz.2xlarge` +#### Labels + | Label | Value | + |--|--| + 
|karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|65536| + |karpenter.k8s.aws/instance-network-bandwidth|3125| + |karpenter.k8s.aws/instance-size|2xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r7iz.2xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|7910m| + |ephemeral-storage|17Gi| + |memory|59627Mi| + |pods|58| + |vpc.amazonaws.com/pod-eni|38| +### `r7iz.4xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|16| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|131072| + |karpenter.k8s.aws/instance-network-bandwidth|6250| + |karpenter.k8s.aws/instance-size|4xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r7iz.4xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|15890m| + |ephemeral-storage|17Gi| + |memory|118312Mi| + |pods|234| + |vpc.amazonaws.com/pod-eni|54| +### `r7iz.8xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|32| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|262144| + |karpenter.k8s.aws/instance-network-bandwidth|12500| + |karpenter.k8s.aws/instance-size|8xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r7iz.8xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|31850m| + |ephemeral-storage|17Gi| + |memory|239554Mi| + |pods|234| + |vpc.amazonaws.com/pod-eni|84| +### `r7iz.12xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|48| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|393216| + |karpenter.k8s.aws/instance-network-bandwidth|25000| + |karpenter.k8s.aws/instance-size|12xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r7iz.12xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|47810m| + |ephemeral-storage|17Gi| + |memory|360795Mi| + |pods|234| + |vpc.amazonaws.com/pod-eni|114| +### `r7iz.16xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|524288| + |karpenter.k8s.aws/instance-network-bandwidth|25000| + |karpenter.k8s.aws/instance-size|16xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r7iz.16xlarge| 
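With the per-type pods label removed throughout these tables, instance selection leans on the labels that remain, such as the family and size values shown for r7iz above. A minimal sketch of a v1beta1 NodePool requirement pinning provisioning to these memory-optimized types (the pool name and value lists are illustrative):

```yaml
apiVersion: karpenter.sh/v1beta1
kind: NodePool
metadata:
  name: memory-optimized             # hypothetical pool name
spec:
  template:
    spec:
      requirements:
        # Keys taken from the label tables in this document.
        - key: karpenter.k8s.aws/instance-family
          operator: In
          values: ["r7i", "r7iz"]
        - key: karpenter.k8s.aws/instance-size
          operator: In
          values: ["2xlarge", "4xlarge", "8xlarge"]
```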
+#### Resources + | Resource | Quantity | + |--|--| + |cpu|63770m| + |ephemeral-storage|17Gi| + |memory|476504Mi| + |pods|737| + |vpc.amazonaws.com/pod-eni|107| +### `r7iz.metal-16xl` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|64| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|| + |karpenter.k8s.aws/instance-memory|524288| + |karpenter.k8s.aws/instance-network-bandwidth|25000| + |karpenter.k8s.aws/instance-size|metal-16xl| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r7iz.metal-16xl| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|63770m| + |ephemeral-storage|17Gi| + |memory|476504Mi| + |pods|737| + |vpc.amazonaws.com/pod-eni|107| +### `r7iz.32xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|1048576| + |karpenter.k8s.aws/instance-network-bandwidth|50000| + |karpenter.k8s.aws/instance-size|32xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r7iz.32xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|127610m| + |ephemeral-storage|17Gi| + |memory|961470Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| +### `r7iz.metal-32xl` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|r| + |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|r7iz| + |karpenter.k8s.aws/instance-generation|7| + |karpenter.k8s.aws/instance-hypervisor|| + |karpenter.k8s.aws/instance-memory|1048576| + |karpenter.k8s.aws/instance-network-bandwidth|50000| + |karpenter.k8s.aws/instance-size|metal-32xl| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|r7iz.metal-32xl| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|127610m| + |ephemeral-storage|17Gi| + |memory|961470Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| +## t1 Family +### `t1.micro` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|t| + |karpenter.k8s.aws/instance-cpu|1| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|false| |karpenter.k8s.aws/instance-family|t1| |karpenter.k8s.aws/instance-generation|1| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|627| - |karpenter.k8s.aws/instance-pods|4| |karpenter.k8s.aws/instance-size|micro| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16296,7 +16798,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|512| - |karpenter.k8s.aws/instance-pods|4| |karpenter.k8s.aws/instance-size|nano| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16319,7 +16820,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| 
|karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|1024| - |karpenter.k8s.aws/instance-pods|4| |karpenter.k8s.aws/instance-size|micro| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16342,7 +16842,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|2048| - |karpenter.k8s.aws/instance-pods|11| |karpenter.k8s.aws/instance-size|small| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16365,7 +16864,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|4096| - |karpenter.k8s.aws/instance-pods|17| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16388,7 +16886,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|8192| - |karpenter.k8s.aws/instance-pods|35| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16411,7 +16908,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|16384| - |karpenter.k8s.aws/instance-pods|44| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16434,7 +16930,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-generation|2| |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|32768| - |karpenter.k8s.aws/instance-pods|44| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16459,7 +16954,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|512| |karpenter.k8s.aws/instance-network-bandwidth|32| - |karpenter.k8s.aws/instance-pods|4| |karpenter.k8s.aws/instance-size|nano| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16483,7 +16977,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|1024| |karpenter.k8s.aws/instance-network-bandwidth|64| - |karpenter.k8s.aws/instance-pods|4| |karpenter.k8s.aws/instance-size|micro| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16507,7 +17000,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|2048| |karpenter.k8s.aws/instance-network-bandwidth|128| - |karpenter.k8s.aws/instance-pods|11| |karpenter.k8s.aws/instance-size|small| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16531,7 +17023,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|4096| |karpenter.k8s.aws/instance-network-bandwidth|256| - |karpenter.k8s.aws/instance-pods|17| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16555,7 +17046,6 @@ 
below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|512| - |karpenter.k8s.aws/instance-pods|35| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16579,7 +17069,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1024| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16603,7 +17092,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|2048| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16628,7 +17116,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|512| |karpenter.k8s.aws/instance-network-bandwidth|32| - |karpenter.k8s.aws/instance-pods|4| |karpenter.k8s.aws/instance-size|nano| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16652,7 +17139,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|1024| |karpenter.k8s.aws/instance-network-bandwidth|64| - |karpenter.k8s.aws/instance-pods|4| |karpenter.k8s.aws/instance-size|micro| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16676,7 +17162,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|2048| |karpenter.k8s.aws/instance-network-bandwidth|128| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|small| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16700,7 +17185,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|4096| |karpenter.k8s.aws/instance-network-bandwidth|256| - |karpenter.k8s.aws/instance-pods|17| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16724,7 +17208,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|512| - |karpenter.k8s.aws/instance-pods|35| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16748,7 +17231,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1024| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16772,7 +17254,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| 
|karpenter.k8s.aws/instance-network-bandwidth|2048| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16797,7 +17278,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|512| |karpenter.k8s.aws/instance-network-bandwidth|32| - |karpenter.k8s.aws/instance-pods|4| |karpenter.k8s.aws/instance-size|nano| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -16821,7 +17301,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|1024| |karpenter.k8s.aws/instance-network-bandwidth|64| - |karpenter.k8s.aws/instance-pods|4| |karpenter.k8s.aws/instance-size|micro| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -16845,7 +17324,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|2048| |karpenter.k8s.aws/instance-network-bandwidth|128| - |karpenter.k8s.aws/instance-pods|11| |karpenter.k8s.aws/instance-size|small| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -16869,7 +17347,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|4096| |karpenter.k8s.aws/instance-network-bandwidth|256| - |karpenter.k8s.aws/instance-pods|17| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -16893,7 +17370,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|8192| |karpenter.k8s.aws/instance-network-bandwidth|512| - |karpenter.k8s.aws/instance-pods|35| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -16917,7 +17393,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|1024| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -16941,7 +17416,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|2048| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -16954,6 +17428,35 @@ below are the resources available with some assumptions and after the instance o |memory|29258Mi| |pods|58| ## trn1 Family +### `trn1.2xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-accelerator-count|1| + |karpenter.k8s.aws/instance-accelerator-manufacturer|aws| + |karpenter.k8s.aws/instance-accelerator-name|inferentia| + |karpenter.k8s.aws/instance-category|trn| + |karpenter.k8s.aws/instance-cpu|8| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|trn1| + |karpenter.k8s.aws/instance-generation|1| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-local-nvme|474| + 
|karpenter.k8s.aws/instance-memory|32768| + |karpenter.k8s.aws/instance-network-bandwidth|3125| + |karpenter.k8s.aws/instance-size|2xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|trn1.2xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |aws.amazon.com/neuron|1| + |cpu|7910m| + |ephemeral-storage|17Gi| + |memory|29317Mi| + |pods|58| + |vpc.amazonaws.com/pod-eni|17| ### `trn1.32xlarge` #### Labels | Label | Value | @@ -16970,7 +17473,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|7600| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|800000| - |karpenter.k8s.aws/instance-pods|247| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -16983,7 +17485,39 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|481894Mi| |pods|247| + |vpc.amazonaws.com/efa|8| |vpc.amazonaws.com/pod-eni|82| +## trn1n Family +### `trn1n.32xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-accelerator-count|16| + |karpenter.k8s.aws/instance-accelerator-manufacturer|aws| + |karpenter.k8s.aws/instance-accelerator-name|inferentia| + |karpenter.k8s.aws/instance-category|trn| + |karpenter.k8s.aws/instance-cpu|128| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|trn1n| + |karpenter.k8s.aws/instance-generation|1| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-local-nvme|7600| + |karpenter.k8s.aws/instance-memory|524288| + |karpenter.k8s.aws/instance-network-bandwidth|1600000| + |karpenter.k8s.aws/instance-size|32xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|trn1n.32xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |aws.amazon.com/neuron|16| + |cpu|127610m| + |ephemeral-storage|17Gi| + |memory|481894Mi| + |pods|247| + |vpc.amazonaws.com/efa|16| + |vpc.amazonaws.com/pod-eni|120| ## u-12tb1 Family ### `u-12tb1.112xlarge` #### Labels @@ -16997,7 +17531,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|12582912| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|112xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17022,7 +17555,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|18874368| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|112xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17047,7 +17579,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|25165824| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|112xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17072,7 +17603,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|3145728| 
|karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|56xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17098,7 +17628,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|6291456| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|56xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17122,7 +17651,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|6291456| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|112xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17147,7 +17675,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|9437184| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|112xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17159,6 +17686,80 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|8720933Mi| |pods|737| +## vt1 Family +### `vt1.3xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|vt| + |karpenter.k8s.aws/instance-cpu|12| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|vt1| + |karpenter.k8s.aws/instance-generation|1| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|24576| + |karpenter.k8s.aws/instance-network-bandwidth|3120| + |karpenter.k8s.aws/instance-size|3xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|vt1.3xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|11900m| + |ephemeral-storage|17Gi| + |memory|21739Mi| + |pods|58| + |vpc.amazonaws.com/pod-eni|38| +### `vt1.6xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|vt| + |karpenter.k8s.aws/instance-cpu|24| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|vt1| + |karpenter.k8s.aws/instance-generation|1| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|49152| + |karpenter.k8s.aws/instance-network-bandwidth|6250| + |karpenter.k8s.aws/instance-size|6xlarge| + |kubernetes.io/arch|amd64| + |kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|vt1.6xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|23870m| + |ephemeral-storage|17Gi| + |memory|42536Mi| + |pods|234| + |vpc.amazonaws.com/pod-eni|54| +### `vt1.24xlarge` +#### Labels + | Label | Value | + |--|--| + |karpenter.k8s.aws/instance-category|vt| + |karpenter.k8s.aws/instance-cpu|96| + |karpenter.k8s.aws/instance-encryption-in-transit-supported|true| + |karpenter.k8s.aws/instance-family|vt1| + |karpenter.k8s.aws/instance-generation|1| + |karpenter.k8s.aws/instance-hypervisor|nitro| + |karpenter.k8s.aws/instance-memory|196608| + |karpenter.k8s.aws/instance-network-bandwidth|25000| + |karpenter.k8s.aws/instance-size|24xlarge| + |kubernetes.io/arch|amd64| + 
|kubernetes.io/os|linux| + |node.kubernetes.io/instance-type|vt1.24xlarge| +#### Resources + | Resource | Quantity | + |--|--| + |cpu|95690m| + |ephemeral-storage|17Gi| + |memory|173400Mi| + |pods|737| + |vpc.amazonaws.com/efa|1| + |vpc.amazonaws.com/pod-eni|107| ## x1 Family ### `x1.16xlarge` #### Labels @@ -17172,7 +17773,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|999424| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17196,7 +17796,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|1998848| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17221,7 +17820,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|124928| |karpenter.k8s.aws/instance-network-bandwidth|625| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17245,7 +17843,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|249856| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17269,7 +17866,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|499712| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17293,7 +17889,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|999424| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17317,7 +17912,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|1998848| |karpenter.k8s.aws/instance-network-bandwidth|10000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17341,7 +17935,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|xen| |karpenter.k8s.aws/instance-memory|3997696| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17367,7 +17960,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|59| |karpenter.k8s.aws/instance-memory|16384| 
|karpenter.k8s.aws/instance-network-bandwidth|500| - |karpenter.k8s.aws/instance-pods|8| |karpenter.k8s.aws/instance-size|medium| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -17393,7 +17985,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|118| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -17419,7 +18010,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|237| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -17445,7 +18035,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|475| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -17471,7 +18060,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|950| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -17497,7 +18085,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1900| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -17523,7 +18110,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2850| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|20000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -17549,7 +18135,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|1048576| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -17575,7 +18160,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|1048576| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|arm64| |kubernetes.io/os|linux| @@ -17602,7 +18186,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1900| |karpenter.k8s.aws/instance-memory|1048576| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| 
|kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17628,7 +18211,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2850| |karpenter.k8s.aws/instance-memory|1572864| |karpenter.k8s.aws/instance-network-bandwidth|75000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17654,7 +18236,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|2097152| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17666,6 +18247,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|1931403Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `x2idn.metal` #### Labels @@ -17680,7 +18262,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|2097152| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17692,6 +18273,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|1931403Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## x2iedn Family ### `x2iedn.xlarge` @@ -17707,7 +18289,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|118| |karpenter.k8s.aws/instance-memory|131072| |karpenter.k8s.aws/instance-network-bandwidth|1875| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17733,7 +18314,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|237| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17759,7 +18339,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|475| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17785,7 +18364,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|950| |karpenter.k8s.aws/instance-memory|1048576| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17811,7 +18389,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1900| |karpenter.k8s.aws/instance-memory|2097152| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|16xlarge| 
|kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17837,7 +18414,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|2850| |karpenter.k8s.aws/instance-memory|3145728| |karpenter.k8s.aws/instance-network-bandwidth|75000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|24xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17863,7 +18439,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|4194304| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|32xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17875,6 +18450,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|3871269Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `x2iedn.metal` #### Labels @@ -17889,7 +18465,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|3800| |karpenter.k8s.aws/instance-memory|4194304| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17901,6 +18476,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|3871269Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## x2iezn Family ### `x2iezn.2xlarge` @@ -17915,7 +18491,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|262144| |karpenter.k8s.aws/instance-network-bandwidth|12500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17940,7 +18515,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|524288| |karpenter.k8s.aws/instance-network-bandwidth|15000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|4xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17965,7 +18539,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|786432| |karpenter.k8s.aws/instance-network-bandwidth|50000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|6xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -17990,7 +18563,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|1048576| |karpenter.k8s.aws/instance-network-bandwidth|75000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|8xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -18015,7 +18587,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|nitro| |karpenter.k8s.aws/instance-memory|1572864| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| 
|karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -18027,6 +18598,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|1446437Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ### `x2iezn.metal` #### Labels @@ -18040,7 +18612,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-hypervisor|| |karpenter.k8s.aws/instance-memory|1572864| |karpenter.k8s.aws/instance-network-bandwidth|100000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -18052,6 +18623,7 @@ below are the resources available with some assumptions and after the instance o |ephemeral-storage|17Gi| |memory|1446437Mi| |pods|737| + |vpc.amazonaws.com/efa|1| |vpc.amazonaws.com/pod-eni|107| ## z1d Family ### `z1d.large` @@ -18067,7 +18639,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|75| |karpenter.k8s.aws/instance-memory|16384| |karpenter.k8s.aws/instance-network-bandwidth|750| - |karpenter.k8s.aws/instance-pods|29| |karpenter.k8s.aws/instance-size|large| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -18093,7 +18664,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|150| |karpenter.k8s.aws/instance-memory|32768| |karpenter.k8s.aws/instance-network-bandwidth|1250| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -18119,7 +18689,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|300| |karpenter.k8s.aws/instance-memory|65536| |karpenter.k8s.aws/instance-network-bandwidth|2500| - |karpenter.k8s.aws/instance-pods|58| |karpenter.k8s.aws/instance-size|2xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -18145,7 +18714,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|450| |karpenter.k8s.aws/instance-memory|98304| |karpenter.k8s.aws/instance-network-bandwidth|5000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|3xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -18171,7 +18739,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|900| |karpenter.k8s.aws/instance-memory|196608| |karpenter.k8s.aws/instance-network-bandwidth|12000| - |karpenter.k8s.aws/instance-pods|234| |karpenter.k8s.aws/instance-size|6xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -18197,7 +18764,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1800| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| |karpenter.k8s.aws/instance-size|12xlarge| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| @@ -18223,7 +18789,6 @@ below are the resources available with some assumptions and after the instance o |karpenter.k8s.aws/instance-local-nvme|1800| |karpenter.k8s.aws/instance-memory|393216| |karpenter.k8s.aws/instance-network-bandwidth|25000| - |karpenter.k8s.aws/instance-pods|737| 
|karpenter.k8s.aws/instance-size|metal| |kubernetes.io/arch|amd64| |kubernetes.io/os|linux| diff --git a/website/content/en/v0.31/concepts/metrics.md b/website/content/en/v0.35/reference/metrics.md similarity index 56% rename from website/content/en/v0.31/concepts/metrics.md rename to website/content/en/v0.35/reference/metrics.md index 112f63b0fe9c..b7cf1366f50f 100644 --- a/website/content/en/v0.31/concepts/metrics.md +++ b/website/content/en/v0.35/reference/metrics.md @@ -8,164 +8,186 @@ description: > --- Karpenter makes several metrics available in Prometheus format to allow monitoring cluster provisioning status. These metrics are available by default at `karpenter.karpenter.svc.cluster.local:8000/metrics` configurable via the `METRICS_PORT` environment variable documented [here](../settings) -## Controller Runtime Metrics +### `karpenter_build_info` +A metric with a constant '1' value labeled by version from which karpenter was built. -### `controller_runtime_active_workers` -Number of currently used workers per controller +## Nodepool Metrics -### `controller_runtime_max_concurrent_reconciles` -Maximum number of concurrent reconciles per controller +### `karpenter_nodepool_usage` +The nodepool usage is the amount of resources that have been provisioned by a particular nodepool. Labeled by nodepool name and resource type. -### `controller_runtime_reconcile_errors_total` -Total number of reconciliation errors per controller +### `karpenter_nodepool_limit` +The nodepool limits are the limits specified on the nodepool that restrict the quantity of resources provisioned. Labeled by nodepool name and resource type. -### `controller_runtime_reconcile_time_seconds` -Length of time per reconciliation per controller +## Nodes Metrics -### `controller_runtime_reconcile_total` -Total number of reconciliations per controller +### `karpenter_nodes_total_pod_requests` +Node total pod requests are the resources requested by non-DaemonSet pods bound to nodes. -## Consistency Metrics +### `karpenter_nodes_total_pod_limits` +Node total pod limits are the resources specified by non-DaemonSet pod limits. -### `karpenter_consistency_errors` -Number of consistency checks that have failed. +### `karpenter_nodes_total_daemon_requests` +Node total daemon requests are the resources requested by DaemonSet pods bound to nodes. + +### `karpenter_nodes_total_daemon_limits` +Node total daemon limits are the resources specified by DaemonSet pod limits. + +### `karpenter_nodes_termination_time_seconds` +The time taken between a node's deletion request and the removal of its finalizer -## Deprovisioning Metrics +### `karpenter_nodes_terminated` +Number of nodes terminated in total by Karpenter. Labeled by owning nodepool. -### `karpenter_deprovisioning_actions_performed` -Number of deprovisioning actions performed. Labeled by deprovisioner. +### `karpenter_nodes_system_overhead` +Node system daemon overhead are the resources reserved for system overhead: the difference between the node's capacity and its allocatable values, as reported by the node status. -### `karpenter_deprovisioning_consolidation_timeouts` -Number of times the Consolidation algorithm has reached a timeout. Labeled by consolidation type. +### `karpenter_nodes_leases_deleted` +Number of deleted leaked leases. -### `karpenter_deprovisioning_eligible_machines` -Number of machines eligible for deprovisioning by Karpenter. Labeled by deprovisioner +### `karpenter_nodes_created` +Number of nodes created in total by Karpenter. Labeled by owning nodepool.
-### `karpenter_deprovisioning_evaluation_duration_seconds` -Duration of the deprovisioning evaluation process in seconds. +### `karpenter_nodes_allocatable` +Node allocatable are the resources allocatable by nodes. -### `karpenter_deprovisioning_replacement_machine_initialized_seconds` -Amount of time required for a replacement machine to become initialized. ## Pods Metrics -### `karpenter_deprovisioning_replacement_machine_launch_failure_counter` -The number of times that Karpenter failed to launch a replacement node for deprovisioning. Labeled by deprovisioner. +### `karpenter_pods_state` +Pod state is the current state of pods. This metric can be used several ways as it is labeled by the pod name, namespace, owner, node, nodepool name, zone, architecture, capacity type, instance type and pod phase. -## Interruption Metrics +### `karpenter_pods_startup_time_seconds` +The time from pod creation until the pod is running. -### `karpenter_interruption_actions_performed` -Number of notification actions performed. Labeled by action ## Provisioner Metrics -### `karpenter_interruption_deleted_messages` -Count of messages deleted from the SQS queue. +### `karpenter_provisioner_scheduling_simulation_duration_seconds` +Duration of scheduling simulations used for deprovisioning and provisioning in seconds. -### `karpenter_interruption_message_latency_time_seconds` -Length of time between message creation in queue and an action taken on the message by the controller. +### `karpenter_provisioner_scheduling_duration_seconds` +Duration of scheduling process in seconds. -### `karpenter_interruption_received_messages` -Count of messages received from the SQS queue. Broken down by message type and whether the message was actionable. ## Nodeclaims Metrics -## Machines Metrics +### `karpenter_nodeclaims_terminated` +Number of nodeclaims terminated in total by Karpenter. Labeled by reason the nodeclaim was terminated and the owning nodepool. -### `karpenter_machines_created` -Number of machines created in total by Karpenter. Labeled by reason the machine was created and the owning provisioner. +### `karpenter_nodeclaims_registered` +Number of nodeclaims registered in total by Karpenter. Labeled by the owning nodepool. -### `karpenter_machines_disrupted` -Number of machines disrupted in total by Karpenter. Labeled by disruption type of the machine and the owning provisioner. +### `karpenter_nodeclaims_launched` +Number of nodeclaims launched in total by Karpenter. Labeled by the owning nodepool. -### `karpenter_machines_drifted` -Number of machine drifted reasons in total by Karpenter. Labeled by drift type of the machine and the owning provisioner.. +### `karpenter_nodeclaims_initialized` +Number of nodeclaims initialized in total by Karpenter. Labeled by the owning nodepool. -### `karpenter_machines_initialized` -Number of machines initialized in total by Karpenter. Labeled by the owning provisioner. +### `karpenter_nodeclaims_drifted` +Number of nodeclaims drifted in total by Karpenter. Labeled by drift type of the nodeclaim and the owning nodepool. -### `karpenter_machines_launched` -Number of machines launched in total by Karpenter. Labeled by the owning provisioner. +### `karpenter_nodeclaims_disrupted` +Number of nodeclaims disrupted in total by Karpenter. Labeled by disruption type of the nodeclaim and the owning nodepool. -### `karpenter_machines_registered` -Number of machines registered in total by Karpenter. Labeled by the owning provisioner.
+### `karpenter_nodeclaims_created` +Number of nodeclaims created in total by Karpenter. Labeled by reason the nodeclaim was created and the owning nodepool. -### `karpenter_machines_terminated` -Number of machines terminated in total by Karpenter. Labeled by reason the machine was terminated and the owning provisioner. +## Interruption Metrics -## Provisioner Metrics +### `karpenter_interruption_received_messages` +Count of messages received from the SQS queue. Broken down by message type and whether the message was actionable. -### `karpenter_provisioner_limit` -The Provisioner Limits are the limits specified on the provisioner that restrict the quantity of resources provisioned. Labeled by provisioner name and resource type. +### `karpenter_interruption_message_latency_time_seconds` +Length of time between message creation in queue and an action taken on the message by the controller. -### `karpenter_provisioner_scheduling_duration_seconds` -Duration of scheduling process in seconds. +### `karpenter_interruption_deleted_messages` +Count of messages deleted from the SQS queue. -### `karpenter_provisioner_scheduling_simulation_duration_seconds` -Duration of scheduling simulations used for deprovisioning and provisioning in seconds. +### `karpenter_interruption_actions_performed` +Number of notification actions performed. Labeled by action -### `karpenter_provisioner_usage` -The Provisioner Usage is the amount of resources that have been provisioned by a particular provisioner. Labeled by provisioner name and resource type. +## Disruption Metrics -### `karpenter_provisioner_usage_pct` -The Provisioner Usage Percentage is the percentage of each resource used based on the resources provisioned and the limits that have been configured in the range [0,100]. Labeled by provisioner name and resource type. +### `karpenter_disruption_replacement_nodeclaim_initialized_seconds` +Amount of time required for a replacement nodeclaim to become initialized. -## Nodes Metrics +### `karpenter_disruption_replacement_nodeclaim_failures_total` +The number of times that Karpenter failed to launch a replacement node for disruption. Labeled by disruption method. -### `karpenter_nodes_allocatable` -Node allocatable are the resources allocatable by nodes. +### `karpenter_disruption_queue_depth` +The number of commands currently being waited on in the disruption orchestration queue. -### `karpenter_nodes_created` -Number of nodes created in total by Karpenter. Labeled by owning provisioner. +### `karpenter_disruption_pods_disrupted_total` +Total number of reschedulable pods disrupted on nodes. Labeled by NodePool, disruption action, method, and consolidation type. -### `karpenter_nodes_leases_deleted` -Number of deleted leaked leases. +### `karpenter_disruption_nodes_disrupted_total` +Total number of nodes disrupted. Labeled by NodePool, disruption action, method, and consolidation type. -### `karpenter_nodes_system_overhead` -Node system daemon overhead are the resources reserved for system overhead, the difference between the node's capacity and allocatable values are reported by the status. +### `karpenter_disruption_evaluation_duration_seconds` +Duration of the disruption evaluation process in seconds. Labeled by method and consolidation type. -### `karpenter_nodes_terminated` -Number of nodes terminated in total by Karpenter. Labeled by owning provisioner. +### `karpenter_disruption_eligible_nodes` +Number of nodes eligible for disruption by Karpenter. Labeled by disruption method and consolidation type. 
-### `karpenter_nodes_termination_time_seconds` -The time taken between a node's deletion request and the removal of its finalizer +### `karpenter_disruption_consolidation_timeouts_total` +Number of times the Consolidation algorithm has reached a timeout. Labeled by consolidation type. -### `karpenter_nodes_total_daemon_limits` -Node total daemon limits are the resources specified by DaemonSet pod limits. +### `karpenter_disruption_budgets_allowed_disruptions` +The number of nodes for a given NodePool that can be disrupted at a point in time. Labeled by NodePool. Note that allowed disruptions can change very rapidly, as new nodes may be created and others may be deleted at any point. -### `karpenter_nodes_total_daemon_requests` -Node total daemon requests are the resource requested by DaemonSet pods bound to nodes. +### `karpenter_disruption_actions_performed_total` +Number of disruption actions performed. Labeled by disruption action, method, and consolidation type. -### `karpenter_nodes_total_pod_limits` -Node total pod limits are the resources specified by non-DaemonSet pod limits. +## Consistency Metrics -### `karpenter_nodes_total_pod_requests` -Node total pod requests are the resources requested by non-DaemonSet pods bound to nodes. +### `karpenter_consistency_errors` +Number of consistency checks that have failed. -## Pods Metrics +## Cluster State Metrics -### `karpenter_pods_startup_time_seconds` -The time from pod creation until the pod is running. +### `karpenter_cluster_state_synced` +Returns 1 if cluster state is synced and 0 otherwise. Synced checks that nodeclaims and nodes that are stored in the APIServer have the same representation as Karpenter's cluster state -### `karpenter_pods_state` -Pod state is the current state of pods. This metric can be used several ways as it is labeled by the pod name, namespace, owner, node, provisioner name, zone, architecture, capacity type, instance type and pod phase. +### `karpenter_cluster_state_node_count` +Current count of nodes in cluster state ## Cloudprovider Metrics -### `karpenter_cloudprovider_duration_seconds` -Duration of cloud provider method calls. Labeled by the controller, method name and provider. +### `karpenter_cloudprovider_instance_type_price_estimate` +Estimated hourly price used when making informed decisions on node cost calculation. This is updated once on startup and then every 12 hours. -### `karpenter_cloudprovider_errors_total` -Total number of errors returned from CloudProvider calls. +### `karpenter_cloudprovider_instance_type_memory_bytes` +Memory, in bytes, for a given instance type. ### `karpenter_cloudprovider_instance_type_cpu_cores` VCPUs cores for a given instance type. -### `karpenter_cloudprovider_instance_type_memory_bytes` -Memory, in bytes, for a given instance type. +### `karpenter_cloudprovider_errors_total` +Total number of errors returned from CloudProvider calls. -### `karpenter_cloudprovider_instance_type_price_estimate` -Estimated hourly price used when making informed decisions on node cost calculation. This is updated once on startup and then every 12 hours. +### `karpenter_cloudprovider_duration_seconds` +Duration of cloud provider method calls. Labeled by the controller, method name and provider. 
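+
+Before the batcher metrics below, a brief illustration: any of the metrics on this page can be collected with a minimal Prometheus `scrape_config` such as this hedged sketch (the target assumes the default service name, namespace, and `METRICS_PORT` from the introduction; adjust for your install):
+
+```yaml
+scrape_configs:
+  - job_name: karpenter            # illustrative job name
+    metrics_path: /metrics
+    static_configs:
+      - targets:
+          # default in-cluster address; change if METRICS_PORT or the namespace differs
+          - karpenter.karpenter.svc.cluster.local:8000
+```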
## Cloudprovider Batcher Metrics +### `karpenter_cloudprovider_batcher_batch_time_seconds` +Duration of the batching window per batcher + ### `karpenter_cloudprovider_batcher_batch_size` Size of the request batch per batcher -### `karpenter_cloudprovider_batcher_batch_time_seconds` -Duration of the batching window per batcher +## Controller Runtime Metrics + +### `controller_runtime_reconcile_total` +Total number of reconciliations per controller + +### `controller_runtime_reconcile_time_seconds` +Length of time per reconciliation per controller + +### `controller_runtime_reconcile_errors_total` +Total number of reconciliation errors per controller + +### `controller_runtime_max_concurrent_reconciles` +Maximum number of concurrent reconciles per controller + +### `controller_runtime_active_workers` +Number of currently used workers per controller diff --git a/website/content/en/v0.35/reference/settings.md b/website/content/en/v0.35/reference/settings.md new file mode 100644 index 000000000000..4150586483ea --- /dev/null +++ b/website/content/en/v0.35/reference/settings.md @@ -0,0 +1,68 @@ +--- +title: "Settings" +linkTitle: "Settings" +weight: 5 +description: > + Configure Karpenter +--- + +Karpenter surfaces environment variables and CLI parameters to allow you to configure certain global settings on the controllers. These settings are described below. + +[comment]: <> (the content below is generated from hack/docs/configuration_gen_docs.go) + +| Environment Variable | CLI Flag | Description | +|--|--|--| +| ASSUME_ROLE_ARN | \-\-assume-role-arn | Role to assume for calling AWS services.| +| ASSUME_ROLE_DURATION | \-\-assume-role-duration | Duration of assumed credentials in minutes. Default value is 15 minutes. Not used unless aws.assumeRole set. (default = 15m0s)| +| BATCH_IDLE_DURATION | \-\-batch-idle-duration | The maximum amount of time with no new pending pods that if exceeded ends the current batching window. If pods arrive faster than this time, the batching window will be extended up to the maxDuration. If they arrive slower, the pods will be batched separately. (default = 1s)| +| BATCH_MAX_DURATION | \-\-batch-max-duration | The maximum length of a batch window. The longer this is, the more pods we can consider for provisioning at one time which usually results in fewer but larger nodes. (default = 10s)| +| CLUSTER_CA_BUNDLE | \-\-cluster-ca-bundle | Cluster CA bundle for nodes to use for TLS connections with the API server. If not set, this is taken from the controller's TLS configuration.| +| CLUSTER_ENDPOINT | \-\-cluster-endpoint | The external kubernetes cluster endpoint for new nodes to connect with. If not specified, will discover the cluster endpoint using DescribeCluster API.| +| CLUSTER_NAME | \-\-cluster-name | [REQUIRED] The kubernetes cluster name for resource discovery.| +| DISABLE_WEBHOOK | \-\-disable-webhook | Disable the admission and validation webhooks| +| ENABLE_PROFILING | \-\-enable-profiling | Enable the profiling on the metric endpoint| +| FEATURE_GATES | \-\-feature-gates | Optional features can be enabled / disabled using feature gates. Current options are: Drift,SpotToSpotConsolidation (default = Drift=true,SpotToSpotConsolidation=false)| +| HEALTH_PROBE_PORT | \-\-health-probe-port | The port the health probe endpoint binds to for reporting controller health (default = 8081)| +| INTERRUPTION_QUEUE | \-\-interruption-queue | Interruption queue is disabled if not specified. 
Enabling interruption handling may require additional permissions on the controller service account. Additional permissions are outlined in the docs.| | ISOLATED_VPC | \-\-isolated-vpc | If true, then assume we can't reach AWS services which don't have a VPC endpoint. This also has the effect of disabling look-ups to the AWS pricing endpoint.| | KARPENTER_SERVICE | \-\-karpenter-service | The Karpenter Service name for the dynamic webhook certificate| | KUBE_CLIENT_BURST | \-\-kube-client-burst | The maximum allowed burst of queries to the kube-apiserver (default = 300)| | KUBE_CLIENT_QPS | \-\-kube-client-qps | The smoothed rate of qps to kube-apiserver (default = 200)| | LEADER_ELECT | \-\-leader-elect | Start leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.| | LOG_LEVEL | \-\-log-level | Log verbosity level. Can be one of 'debug', 'info', or 'error' (default = info)| | MEMORY_LIMIT | \-\-memory-limit | Memory limit on the container running the controller. The GC soft memory limit is set to 90% of this value. (default = -1)| | METRICS_PORT | \-\-metrics-port | The port the metric endpoint binds to for operating metrics about the controller itself (default = 8000)| | RESERVED_ENIS | \-\-reserved-enis | Reserved ENIs are not included in the calculations for max-pods or kube-reserved. This is most often used in the VPC CNI custom networking setup https://docs.aws.amazon.com/eks/latest/userguide/cni-custom-network.html. (default = 0)| | VM_MEMORY_OVERHEAD_PERCENT | \-\-vm-memory-overhead-percent | The VM memory overhead as a percent that will be subtracted from the total memory for all instance types. (default = 0.075)| | WEBHOOK_METRICS_PORT | \-\-webhook-metrics-port | The port the webhook metric endpoint binds to for operating metrics about the webhook (default = 8001)| | WEBHOOK_PORT | \-\-webhook-port | The port the webhook endpoint binds to for validation and mutation of resources (default = 8443)| + +[comment]: <> (end docs generated content from hack/docs/configuration_gen_docs.go) + +### Feature Gates + +Karpenter uses [feature gates](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features) to enable or disable optional features. You can enable the feature gates through the `--feature-gates` CLI flag or the `FEATURE_GATES` environment variable in the Karpenter deployment. For example, you can enable Drift and SpotToSpotConsolidation by setting the CLI argument: `--feature-gates Drift=true,SpotToSpotConsolidation=true`. + +| Feature | Default | Stage | Since | Until | |-------------------------|---------|-------|---------|---------| | Drift | false | Alpha | v0.21.x | v0.32.x | | Drift | true | Beta | v0.33.x | | | SpotToSpotConsolidation | false | Beta | v0.34.x | |
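+
+As a concrete sketch, these gates, like the other settings on this page (including the batching parameters described next), can also be supplied through the Helm chart's `controller.env` passthrough. The values shown here are illustrative, not defaults:
+
+```yaml
+# Helm values sketch; assumes the chart's controller.env list shown in values.yaml
+controller:
+  env:
+    - name: FEATURE_GATES
+      value: "Drift=true,SpotToSpotConsolidation=true"
+    - name: BATCH_IDLE_DURATION   # see Batching Parameters below; example value
+      value: "2s"
+    - name: BATCH_MAX_DURATION    # example value
+      value: "15s"
+```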
+ +### Batching Parameters + +The batching parameters control how Karpenter batches an incoming stream of pending pods. Reducing these values may trade off a slightly faster time from pending pod to node launch, in exchange for launching smaller nodes. Increasing the values can do the inverse. Karpenter provides reasonable defaults for these values, but if you have specific knowledge about your workloads you can tweak these parameters to match the expected rate of incoming pods. + +For a standard deployment scale-up, the pods arrive at the QPS setting of the `kube-controller-manager`, and the default values are typically fine. These settings are intended for use cases where other systems may create large numbers of pods over a period of many seconds or minutes and there is a desire to batch them together. + +#### Batch Idle Duration + +The batch idle duration is the period of time that a new pending pod extends the current batching window. This can be increased to handle scenarios where pods arrive slower than one second apart, but it would be preferable if they were batched together onto a single larger node. + +This value is expressed as a string value like `10s`, `1m` or `2h45m`. The valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. + +#### Batch Max Duration + +The batch max duration is the maximum period of time a batching window can be extended to. Increasing this value will allow the maximum batch window size to increase to collect more pending pods into a single batch at the expense of a longer delay from when the first pending pod was created. + +This value is expressed as a string value like `10s`, `1m` or `2h45m`. The valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. diff --git a/website/content/en/v0.35/reference/threat-model.md b/website/content/en/v0.35/reference/threat-model.md new file mode 100644 index 000000000000..8bac8d7bc5a4 --- /dev/null +++ b/website/content/en/v0.35/reference/threat-model.md @@ -0,0 +1,120 @@ +--- +title: "Threat Model" +linkTitle: "Threat Model" +weight: 999 +--- + +Karpenter observes Kubernetes pods and launches nodes in response to those pods’ scheduling constraints. Karpenter does not perform the actual scheduling and instead waits for [kube-scheduler](https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/) to schedule the pods. + +When running in AWS, Karpenter is typically installed onto EC2 instances that run in EKS Clusters. Karpenter relies on public-facing AWS APIs and standard IAM Permissions. Karpenter uses AWS-SDK-Go v1, and AWS advises that credentials are provided using [IAM Roles for Service Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). + + +## Architecture & Actors + +1. **Cluster Operator**: An identity that installs and configures Karpenter in a Kubernetes cluster, and configures Karpenter's cloud identity and permissions. +2. **Cluster Developer**: An identity that can create pods, typically through Deployments, DaemonSets, or other pod-controller types. +3. **Karpenter Controller**: The Karpenter application pod that operates inside a cluster. + +![threat-model](/threat-model.png) + +## Capabilities + +### Cluster Operator + +The Cluster Operator has full control to install and configure Karpenter including all [`NodePools`]({{< ref "../concepts/nodepools" >}}) and [`EC2NodeClasses`]({{< ref "../concepts/nodeclasses" >}}). The Cluster Operator has privileges to manage the cloud identities and permissions for Nodes, and the cloud identity and permissions for Karpenter. + +### Cluster Developer + +A Cluster Developer has the ability to create pods via `Deployments`, `ReplicaSets`, `StatefulSets`, `Jobs`, etc. This assumes that the Cluster Developer cannot modify the Karpenter pod or launch pods using Karpenter’s service account and gain access to Karpenter’s IAM role. + +### Karpenter Controller + +Karpenter has permissions to create and manage cloud instances. Karpenter has Kubernetes API permissions to create, update, and remove nodes, as well as evict pods. For a full list of the permissions, see the RBAC rules in the helm chart template.
Karpenter also has AWS IAM permissions to create instances with IAM roles. + +* [aggregate-clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/aggregate-clusterrole.yaml) +* [clusterrole-core.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/clusterrole-core.yaml) +* [clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/clusterrole.yaml) +* [rolebinding.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/rolebinding.yaml) +* [role.yaml](https://github.com/aws/karpenter/blob/v0.35.0/charts/karpenter/templates/role.yaml) + +## Assumptions + +| Category | Assumption | Comment | +|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Generic | The Karpenter pod is operated on a node in the cluster, and uses a Service Account for authentication to the Kubernetes API | Cluster Operators may want to isolate the node running the Karpenter pod to a system-pool of nodes to mitigate the possibility of container breakout with Karpenter’s permissions. | +| Generic | Cluster Developer does not have any Kubernetes permissions to manage Karpenter running in the cluster (The deployment, pods, clusterrole, etc) | | +| Generic | Restrictions on the fields of pods a Cluster Developer can create are out of scope. | Cluster Operators can use policy frameworks to enforce restrictions on Pod capabilities | +| Generic | No sensitive data is included in non-Secret resources in the Kubernetes API. The Karpenter controller has the ability to list all pods, nodes, deployments, and many other pod-controller and storage resource types. | Karpenter does not have permission to list/watch cluster-wide ConfigMaps or Secrets | +| Generic | Karpenter has permissions to create, modify, and delete nodes from the cluster, and evict any pod. | Cluster Operators running applications with varying security profiles in the same cluster may want to configure dedicated nodes and scheduling rules for Karpenter to mitigate potential container escapes from other containers | +| AWS-Specific | The Karpenter IAM policy is encoded in the GitHub repo. Any additional permissions possibly granted to that role by the administrator are out of scope | | +| AWS-Specific | The Karpenter pod uses IRSA for AWS credentials | Setup of IRSA is out of scope for this document | + +## Generic Threats and Mitigations + +### Threat: Cluster Developer can influence creation of an arbitrary number of nodes + +**Background**: Karpenter creates new instances based on the count of pending pods. + +**Threat**: A Cluster Developer attempts to have Karpenter create more instances than intended by creating a large number of pods or by using anti-affinity to schedule one pod per node. + +**Mitigation**: In addition to [Kubernetes resource limits](https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota), Cluster Operators can [configure limits on a NodePool]({{< ref "../concepts/nodepools#spec-limits" >}}) to limit the total amount of memory, CPU, or other resources provisioned across all nodes. 
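+
+As an illustrative sketch of this mitigation (the name, limits, and requirements below are examples, not recommendations), a NodePool can cap the total capacity it may provision via `spec.limits`:
+
+```yaml
+apiVersion: karpenter.sh/v1beta1
+kind: NodePool
+metadata:
+  name: capped-example   # hypothetical name
+spec:
+  limits:
+    cpu: "100"      # no more than 100 vCPUs across all nodes owned by this NodePool
+    memory: 400Gi   # and no more than 400GiB of memory
+  template:
+    spec:
+      nodeClassRef:
+        name: default   # assumes an EC2NodeClass named "default" exists
+      requirements:
+        - key: karpenter.sh/capacity-type
+          operator: In
+          values: ["on-demand"]
+```
+
+Once a limit is met, Karpenter will not launch additional capacity for that NodePool, which bounds the impact of a pod-creation flood.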
+ ## Threats + ### Threat: Using EC2 CreateTag/DeleteTag Permissions to Orchestrate Instance Creation/Deletion + +**Background**: As of `0.28.0`, Karpenter creates a mapping between CloudProvider instances and CustomResources in the cluster for capacity tracking. To ensure this mapping is consistent, Karpenter utilizes the following tag keys: + +* `karpenter.sh/managed-by` +* `karpenter.sh/nodepool` +* `kubernetes.io/cluster/${CLUSTER_NAME}` +* `karpenter.sh/provisioner-name` (prior to `0.32.0`) + +Any user that has the ability to Create/Delete tags on CloudProvider instances will have the ability to orchestrate Karpenter to Create/Delete CloudProvider instances as a side effect. + +In addition, as of `0.29.0`, Karpenter will Drift on Security Groups and Subnets. If a user has the Create/Delete tags permission for either of these resources, they can orchestrate Karpenter to Create/Delete CloudProvider instances as a side effect. + +**Threat:** A Cluster Operator attempts to create or delete a tag on a resource discovered by Karpenter. If they have the ability to create a tag, they can effectively create or delete CloudProvider instances associated with the tagged resources. + +**Mitigation**: Cluster Operators should [enforce tag-based IAM policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html) on these tags against any EC2 instance resource (`i-*`) for any users that might have [CreateTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html)/[DeleteTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteTags.html) permissions but should not have [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html)/[TerminateInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TerminateInstances.html) permissions.
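+
+A hedged sketch of such a tag-based guardrail, written as a CloudFormation-style YAML policy fragment (the statement ID, the set of protected tag keys, and the scoping are illustrative and must be adapted to your account):
+
+```yaml
+# Deny tag writes that touch Karpenter's ownership tags on EC2 instances,
+# for principals that should not be able to create or terminate instances.
+PolicyDocument:
+  Version: "2012-10-17"
+  Statement:
+    - Sid: DenyKarpenterOwnershipTagWrites   # hypothetical statement ID
+      Effect: Deny
+      Action:
+        - ec2:CreateTags
+        - ec2:DeleteTags
+      Resource: arn:aws:ec2:*:*:instance/*
+      Condition:
+        ForAnyValue:StringEquals:
+          aws:TagKeys:
+            - karpenter.sh/managed-by
+            - karpenter.sh/nodepool
+            - kubernetes.io/cluster/${CLUSTER_NAME}   # substitute your cluster name
+```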
These tags include: + +* `karpenter.sh/managed-by` +* `kubernetes.io/cluster/${CLUSTER_NAME}` +* `karpenter.k8s.aws/ec2nodeclass` +* `topology.kubernetes.io/region` + +These tags ensure that instance profiles created by Karpenter in the account are unique to that cluster. Karpenter's controller permissions _only_ allow it to act on instance profiles that contain these tags which match the cluster information. + +### Threat: Karpenter can be used to create or terminate EC2 instances outside the cluster + +**Background**: EC2 instances can exist in an AWS account outside of the Kubernetes cluster. + +**Threat**: An actor who obtains control of the Karpenter pod’s IAM role may be able to create or terminate EC2 instances not part of the Kubernetes cluster managed by Karpenter. + +**Mitigation**: Karpenter creates instances with tags, several of which are enforced in the IAM policy granted to the Karpenter IAM role and restrict the instances Karpenter can terminate. One tag requires that the instance was provisioned by a Karpenter controller (`karpenter.sh/nodepool`), and another tag can include a cluster name to mitigate any termination between two clusters with Karpenter in the same account (`kubernetes.io/cluster/${CLUSTER_NAME}`). Cluster Operators can also restrict the region to prevent termination between two clusters with the same name in different regions of the same account. + +Additionally, Karpenter does not allow tags on instances to be modified after creation, except for the `Name` and `karpenter.sh/nodeclaim` tags. Though these tags can be changed after instance creation, `aws:ResourceTag` conditions enforce that the Karpenter controller is only able to change these tags on instances that it already owns, enforced through the `karpenter.sh/nodepool` and `kubernetes.io/cluster/${CLUSTER_NAME}` tags. + +### Threat: Karpenter launches an EC2 instance using an unintended AMI + +**Background**: Cluster Developers can create Node Templates that refer to an AMI by metadata, such as a name rather than an AMI resource ID. + +**Threat:** A threat actor creates a public AMI with the same name as a customer’s AMI in an attempt to get Karpenter to select the threat actor’s AMI instead of the intended AMI. + +**Mitigation**: When selecting AMIs by name or tags, Karpenter defaults to adding an ownership filter of `self,amazon` so AMI images external to the account are not used. diff --git a/website/content/en/v0.35/tasks/_index.md b/website/content/en/v0.35/tasks/_index.md new file mode 100644 index 000000000000..7d4ac8605f4e --- /dev/null +++ b/website/content/en/v0.35/tasks/_index.md @@ -0,0 +1,10 @@ +--- +title: "Tasks" +linkTitle: "Tasks" +weight: 25 +description: > + Tasks to run with Karpenter +cascade: + type: docs +--- + diff --git a/website/content/en/v0.35/tasks/amitasks.md b/website/content/en/v0.35/tasks/amitasks.md new file mode 100644 index 000000000000..94235e0da878 --- /dev/null +++ b/website/content/en/v0.35/tasks/amitasks.md @@ -0,0 +1,176 @@ +--- +title: "Managing AMIs" +linkTitle: "Managing AMIs" +weight: 10 +description: > + Tasks for managing AMIs in Karpenter +--- + +Understanding how Karpenter assigns AMIs to nodes can help ensure that your workloads will run successfully on those nodes and continue to run if the nodes are upgraded to newer AMIs. +Below we describe how Karpenter assigns AMIs to nodes when they are first deployed and how newer AMIs are assigned later when nodes are spun up to replace old ones.
+Later, there are tasks that describe the ways that you can intervene to assert control over how AMIs are used by Karpenter for your clusters. + +Features for managing AMIs described here should be considered as part of the larger upgrade policies that you have for your clusters. +See [How do I upgrade an EKS Cluster with Karpenter]({{< relref "../faq/#how-do-i-upgrade-an-eks-cluster-with-karpenter" >}}) for details on this process. + +## How Karpenter assigns AMIs to nodes by default + +If you do nothing to modify how Karpenter handles AMIs, here is how Karpenter assigns AMIs to nodes: + +* When you create an `EC2NodeClass`, you are required to set the family of AMIs to use. For example, for the AL2 family, you would set `amiFamily: AL2`. +* With that `amiFamily` set, any time Karpenter needs to spin up a new node, it uses the latest AMI in the AL2 family. +* Later, if an existing node needs to be replaced, Karpenter checks to see if a newer AMI in the AL2 family is available and automatically uses the new AMI instead to spin up the new node. In other words, you may automatically get an AMI that you have not tested with your workloads. + +You can manually delete a node managed by Karpenter, which will cause the default behavior just described to take effect. +However, there are situations that will cause node replacements with newer AMIs to happen automatically. +These include: + +* **Expiration**: If node expiry is set for a node, the node is marked for deletion after a certain time. +* [**Consolidation**]({{< relref "../concepts/disruption/#consolidation" >}}): If a node is empty of workloads, or deemed to be inefficiently running workloads, nodes can be deleted and more appropriately featured nodes are brought up to consolidate workloads. +* [**Drift**]({{< relref "../concepts/disruption/#drift" >}}): Nodes are set for deletion when they drift from the desired state of the `NodeClaim`s and new nodes are brought up to replace them. +* [**Interruption**]({{< relref "../concepts/disruption/#interruption" >}}): Nodes are sometimes involuntarily disrupted by things like Spot interruption, health changes, and instance events, requiring new nodes to be deployed. + +See [**Automated Methods**]({{< relref "../concepts/disruption/#automated-methods" >}}) for details on how Karpenter uses these automated actions to replace nodes. + +With these types of automated updates in place, there is some risk of a new AMI being brought up that introduces some incompatibilities or bugs that cause your workloads to be degraded or fail altogether. +The tasks described below tell you how to take more control over the ways in which Karpenter handles AMI assignments to nodes. + +{{% alert title="Important" color="warning" %}} +If you are new to Karpenter, you should know that the behavior described here is different from what you get with Managed Node Groups (MNG). MNG will always use the assigned AMI when it creates a new node and will never automatically upgrade to a new AMI when a new node is required. See [Updating a Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/update-managed-node-group.html) to see how you would manually update MNG to use new AMIs. +{{% /alert %}} + +## Choosing AMI tasks +One of Karpenter's greatest assets is its ability to provide the right node at the right time, with little intervention from the person managing the cluster.
+Its default behavior of using a later AMI if one becomes available in the selected family means you automatically get the latest security fixes and features. +However, with this comes the risk that the new AMI could break or degrade your workloads. + +As the Karpenter team looks for new ways to manage AMIs, the tasks below offer some means of reducing these risks, based on your own security and ease-of-use requirements. +Here are the advantages and challenges of each of the tasks described below: + +* Task 1 (Test AMIs): The safest way, and the one we recommend, of ensuring that a new AMI doesn't break your workloads is to test it before putting it into production. This takes the most effort on your part, but can reduce the risk of failed workloads in production. Note that you can sometimes get different results from your test environment when you roll a new AMI into production, since issues like scale and other factors can surface problems you might not see in test. So combining this with other tasks that do things like slow rollouts can allow you to catch problems before they impact your whole cluster. +* Task 2 (Lock down AMIs): If workloads require a particular AMI, this task can make sure that it is the only AMI used by Karpenter. This can be used in combination with Task 1, where you lock down the AMI in production, but allow the newest AMIs in a test cluster while you test your workloads before upgrading production. Keep in mind that this makes upgrades a manual process for you. +* Task 3 (Disruption budgets): This task can be used as a way of preventing a major problem if a new AMI causes problems with your workloads. With Disruption budgets you can slow the pace of upgrades to nodes with new AMIs or make sure that upgrades only happen during selected dates and times (using crontab). This doesn't prevent a bad AMI from being deployed, but it does give you time to respond if a few upgraded nodes at a time show some distress. +* Task 4 (Do not interrupt): While this task doesn't represent a larger solution to the problem, it gives you the opportunity to prevent either all nodes, or a node running a particular workload, from being upgraded. Note that these settings have no impact in cases where the node's removal is not under Karpenter's control (such as when the instance it is running on crashes or a Spot instance is reclaimed). + +## Tasks + +The following tasks let you have an impact on Karpenter’s behavior as it relates to how nodes are created and AMIs are consumed. + +### Task 1: Manage how AMIs are tested and rolled out + +Instead of just avoiding AMI upgrades, you can set up test clusters where you can try out new AMI releases before they are put into production. +For example, you could have: + +* **Test clusters**: On these private clusters, you can run the latest AMIs for your workloads in a safe environment. +* **Production clusters**: When you feel that everything is working properly, you can set the tested AMIs to be deployed in your production clusters so they are not upgraded to AMIs you have not validated. + +Remember that it is still best practice to gradually roll new AMIs into your cluster, even if they have been tested. + +### Task 2: Lock down which AMIs are selected + +Instead of letting Karpenter always run the latest AMI, you can change Karpenter’s default behavior. +When you configure the [**EC2NodeClass**]({{< relref "../concepts/nodeclasses" >}}), you can set a specific AMI that you want Karpenter to always choose, using the `amiSelectorTerms` field.
+This prevents a new and potentially untested AMI from replacing existing nodes when those nodes are terminated. + +With the `amiSelectorTerms` field in an `EC2NodeClass`, you can set a specific AMI for Karpenter to use, based on AMI name or ID (only one is required). +These examples show two different ways to identify the same AMI: + +```yaml +amiSelectorTerms: + - tags: + karpenter.sh/discovery: "${CLUSTER_NAME}" + environment: prod + - name: al2023-ami-2023.3.20240219.0-kernel-6.1-x86_64 +``` + +```yaml +amiSelectorTerms: + - tags: + karpenter.sh/discovery: "${CLUSTER_NAME}" + environment: prod + - id: ami-052c9ea013e6e3567 +``` + +See the [**spec.amiSelectorTerms**]({{< relref "../concepts/nodeclasses/#specamiselectorterms" >}}) section of the NodeClasses page for details. +Keep in mind that this could prevent you from getting critical security patches when new AMIs are available, but it does give you control over exactly which AMI is running. + + +### Task 3: Control the pace of node disruptions + +To help prevent the possibility of a new AMI being deployed to all your nodes and breaking all of your workloads, you can enable Karpenter [**Disruption Budgets**]({{< relref "../concepts/disruption/#disruption-budgets" >}}). +Disruption Budgets limit when and to what extent nodes can be disrupted. +You can prevent disruption based on nodes (a percentage or number of nodes that can be disrupted at a time) and schedule (excluding certain times from disrupting nodes). +You can set Disruption Budgets in a `NodePool` spec. +Here is an example: + +```yaml +disruption: + consolidationPolicy: WhenEmpty + expireAfter: 1440h + budgets: + - nodes: 15% + - nodes: "3" + - nodes: "0" + schedule: "0 7 * * sat-sun" + duration: 12h +``` + +The `disruption` settings define a few fields that indicate the state of a node that should be disrupted. +The `consolidationPolicy` field indicates that a node should be disrupted if the node is either underutilized (`WhenUnderutilized`) or not running any pods (`WhenEmpty`). +With `expireAfter` set to `1440` hours, the node expires after 60 days. +Extending those values causes longer times without disruption. + +Settings for budgets in the above example include the following: + +* **Percentage of nodes**: From the first `nodes` setting, only `15%` of the NodePool’s nodes can be disrupted at a time. +* **Number of nodes**: The second `nodes` setting limits the number of nodes that can be disrupted at a time to `3`. +* **Schedule**: The third `nodes` setting uses schedule to say that zero disruptions (`0`) are allowed starting at 7am on Saturday and Sunday and continuing for 12 hours. +The format of the schedule follows the `crontab` format for identifying dates and times. +See the [crontab](https://man7.org/linux/man-pages/man5/crontab.5.html) page for information on the supported values for these fields. + +As with all disruption settings, keep in mind that avoiding updated AMIs for your nodes can result in not getting fixes for known security risks and bugs. +You need to balance that with your desire to not risk breaking the workloads on your cluster. + +### Task 4: Prevent Karpenter from disrupting nodes + +There are several ways you can prevent Karpenter from disrupting nodes that it manages, to mitigate the risk of an untested AMI being deployed. + +* **Set Pods to not allow disruption**: When you run pods from a Deployment spec, you can set `karpenter.sh/do-not-disrupt` to true on that Deployment.
+This will prevent the node that the pod is running on from being disrupted while the pod is running (see [**Pod-level Controls**]({{< relref "../concepts/disruption/#pod-level-controls" >}}) for details). +This can be useful for things like batch jobs, which you want to run to completion and never be moved. +For example: + + +```yaml + apiVersion: apps/v1 + kind: Deployment + spec: + template: + metadata: + annotations: + karpenter.sh/do-not-disrupt: "true" +``` + +* **Set nodes to not allow disruption**: In the NodePool spec, you can set `karpenter.sh/do-not-disrupt` to true. +This prevents any nodes created from the NodePool from being considered for disruption (see [**Example: Disable Disruption on a NodePool**]({{< relref "../concepts/disruption/#example-disable-disruption-on-a-nodepool" >}}) for details). +For example: + +```yaml + apiVersion: karpenter.sh/v1beta1 + kind: NodePool + metadata: + name: default + spec: + template: + metadata: + annotations: # will be applied to all nodes + karpenter.sh/do-not-disrupt: "true" +``` + +Keep in mind that these are not permanent solutions and cannot prevent all node disruptions, such as disruptions resulting from failed node health checks or the instance running the node going down. +If you use only the disruption-prevention methods described here, an unintended node disruption can still bring in a new AMI, unless you have also locked down specific AMIs to use. + +## Follow-up + +The Karpenter project continues to add features to give you greater control over AMI upgrades on your clusters. +If you have opinions about features you would like to see to manage AMIs with Karpenter, feel free to enter a Karpenter [New Issue](https://github.com/aws/karpenter-provider-aws/issues/new/choose). + diff --git a/website/content/en/v0.31/troubleshooting.md b/website/content/en/v0.35/troubleshooting.md similarity index 90% rename from website/content/en/v0.31/troubleshooting.md rename to website/content/en/v0.35/troubleshooting.md index d2490cdc758e..9bab69ef22ff 100644 --- a/website/content/en/v0.31/troubleshooting.md +++ b/website/content/en/v0.35/troubleshooting.md @@ -1,7 +1,7 @@ --- title: "Troubleshooting" linkTitle: "Troubleshooting" -weight: 90 +weight: 70 description: > Troubleshoot Karpenter problems --- @@ -29,7 +29,7 @@ Update the zap-logger-config "level" and restart the Karpenter pod(s) to enable #### Debug logging via Helm -You can enable debug logging during installation with helm by setting the option `logLevel`. +You can enable debug logging during installation with Helm by setting the option `logLevel`. ``` helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter \ @@ -77,7 +77,7 @@ Info on whether there has been a change to the CRD between versions of Karpenter ### Unable to schedule pod due to insufficient node group instances -v0.16.0 changed the default replicas from 1 to 2. +`0.16.0` changed the default replicas from 1 to 2. Karpenter won't launch capacity to run itself (log related to the `karpenter.sh/provisioner-name DoesNotExist requirement`) so it can't provision for the second Karpenter pod. @@ -89,18 +89,18 @@ To do so on AWS increase the `minimum` and `desired` parameters on the node grou ### Helm Error When Pulling the Chart -If Helm is showing an error when trying to install Karpenter helm charts: +If Helm is showing an error when trying to install Karpenter Helm charts: -- Ensure you are using a newer Helm version, Helm started supporting OCI images since v3.8.0.
-- Helm does not have an `helm repo add` concept in OCI, so to install Karpenter you no longer need this +- Ensure you are using a newer Helm version; Helm has supported OCI images since `3.8.0`. +- Helm does not have a `helm repo add` concept in OCI, so to install Karpenter you no longer need this step. +- If you get an error like `Error: public.ecr.aws/karpenter/karpenter:0.34.0: not found`, make sure you're adding a `v` prefix for Karpenter versions between `0.17.0` & `0.34.x`. - Verify that the image you are trying to pull actually exists in [gallery.ecr.aws/karpenter](https://gallery.ecr.aws/karpenter/karpenter) -- Sometimes Helm generates a generic error, you can add the --debug switch to any of the helm commands in this doc for more verbose error messages -- If you are getting a 403 forbidden error, you can try `docker logout public.ecr.aws` as explained [here](https://docs.aws.amazon.com/AmazonECR/latest/public/public-troubleshooting.html) -- If you are receiving this error: `Error: failed to download "oci://public.ecr.aws/karpenter/karpenter" at version "0.17.0"`, then you need to prepend a `v` to the version number: `v0.17.0`. Before Karpenter moved to OCI helm charts (pre-v0.17.0), both `v0.16.0` and `0.16.0` would work, but OCI charts require an exact version match. +- Sometimes Helm generates a generic error; you can add the `--debug` switch to any of the Helm commands in this doc for more verbose error messages. +- If you are getting a 403 forbidden error, you can try `docker logout public.ecr.aws` as explained [here](https://docs.aws.amazon.com/AmazonECR/latest/public/public-troubleshooting.html). ### Helm Error when installing the `karpenter-crd` chart -Karpenter v0.26.1+ introduced the `karpenter-crd` helm chart. When installing this chart on your cluster, if you have previously added the Karpenter CRDs to your cluster through the `karpenter` controller chart or through `kubectl replace`, Helm will reject the install of the chart due to `invalid ownership metadata`. +Karpenter `0.26.1`+ introduced the `karpenter-crd` Helm chart. When installing this chart on your cluster, if you have previously added the Karpenter CRDs to your cluster through the `karpenter` controller chart or through `kubectl replace`, Helm will reject the install of the chart due to `invalid ownership metadata`. - In the case of `invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"` run: @@ -137,7 +137,7 @@ kubectl get nodes -ojsonpath='{range .items[*].metadata}{@.name}:{@.finalizers}{ If you are not able to create a provisioner due to `Internal error occurred: failed calling webhook "validation.webhook.provisioners.karpenter.sh":` -Webhooks were renamed in `v0.19.0`. There's a bug in ArgoCD's upgrade workflow where webhooks are leaked. This results in Provisioner's failing to be validated, since the validation server no longer corresponds to the webhook definition. +Webhooks were renamed in `0.19.0`. There's a bug in ArgoCD's upgrade workflow where webhooks are leaked. This results in Provisioners failing to be validated, since the validation server no longer corresponds to the webhook definition. Delete the stale webhooks. @@ -148,7 +148,7 @@ kubectl delete validatingwebhookconfiguration validation.webhook.provisioners.ka ### Failed calling webhook "defaulting.webhook.karpenter.sh" -The `defaulting.webhook.karpenter.sh` mutating webhook was removed in `v0.27.3`.
If you are coming from an older version of Karpenter where this webhook existed and the webhook was not managed by Helm, you may need to delete the stale webhook. +The `defaulting.webhook.karpenter.sh` mutating webhook was removed in `0.27.3`. If you are coming from an older version of Karpenter where this webhook existed and the webhook was not managed by Helm, you may need to delete the stale webhook. ```text kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh @@ -192,11 +192,11 @@ Disabling swap will allow kubelet to join the cluster successfully, however user ### DaemonSets can result in deployment failures -For Karpenter versions 0.5.3 and earlier, DaemonSets were not properly considered when provisioning nodes. +For Karpenter versions `0.5.3` and earlier, DaemonSets were not properly considered when provisioning nodes. This sometimes caused nodes to be deployed that could not meet the needs of the requested DaemonSets and workloads. -This issue no longer occurs after Karpenter version 0.5.3 (see [PR #1155](https://github.com/aws/karpenter/pull/1155)). +This issue no longer occurs after Karpenter version `0.5.3` (see [PR #1155](https://github.com/aws/karpenter/pull/1155)). -If you are using a pre-0.5.3 version of Karpenter, one workaround is to set your provisioner to only use larger instance types that you know will be big enough for the DaemonSet and the workload. +If you are using a pre-`0.5.3` version of Karpenter, one workaround is to set your provisioner to only use larger instance types that you know will be big enough for the DaemonSet and the workload. For more information, see [Issue #1084](https://github.com/aws/karpenter/issues/1084). Examples of this behavior are included in [Issue #1180](https://github.com/aws/karpenter/issues/1180). @@ -213,7 +213,7 @@ See the Karpenter [Best Practices Guide](https://aws.github.io/aws-eks-best-prac ### Missing subnetSelector and securityGroupSelector tags causes provisioning failures -Starting with Karpenter v0.5.5, if you are using Karpenter-generated launch template, provisioners require that [subnetSelector]({{}}) and [securityGroupSelector]({{}}) tags be set to match your cluster. +Starting with Karpenter `0.5.5`, if you are using a Karpenter-generated launch template, provisioners require that [subnetSelector]({{}}) and [securityGroupSelector]({{}}) tags be set to match your cluster. The [Provisioner]({{}}) section in the Karpenter Getting Started Guide uses the following example: ```text @@ -266,7 +266,7 @@ spec: When attempting to schedule a large number of pods with PersistentVolumes, it's possible that these pods will co-locate on the same node.
Pods will report the following errors in their events using a `kubectl describe pod` call: -```console +```bash Warning FailedAttachVolume pod/example-pod AttachVolume.Attach failed for volume "***" : rpc error: code = Internal desc = Could not attach volume "***" to node "***": attachment of disk "***" failed, expected device to be attached but was attaching Warning FailedMount pod/example-pod Unable to attach or mount volumes: unmounted volumes=[***], unattached volumes=[***]: timed out waiting for the condition ``` @@ -277,7 +277,7 @@ In this case, Karpenter may fail to scale-up your nodes due to these pods due to Karpenter does not support [in-tree storage plugins](https://kubernetes.io/blog/2021/12/10/storage-in-tree-to-csi-migration-status-update/) to provision PersistentVolumes, since nearly all of the in-tree plugins have been deprecated in upstream Kubernetes. This means that, if you are using a statically-provisioned PersistentVolume that references a volume source like `AWSElasticBlockStore` or a dynamically-provisioned PersistentVolume that references a StorageClass with an in-tree storage plugin provisioner like `kubernetes.io/aws-ebs`, Karpenter will fail to discover the maximum volume attachments for the node. Instead, Karpenter may think the node still has more schedulable space due to memory and CPU constraints when there is really no more schedulable space on the node due to volume limits. When Karpenter sees you are using an in-tree storage plugin on your pod volumes, it will print the following error message into the logs. If you see this message, upgrade your StorageClasses and statically-provisioned PersistentVolumes to use the latest CSI drivers for your cloud provider. -```console +```bash 2023-04-05T23:56:53.363Z ERROR controller.node_state PersistentVolume source 'AWSElasticBlockStore' uses an in-tree storage plugin which is unsupported by Karpenter and is deprecated by Kubernetes. Scale-ups may fail because Karpenter will not discover driver limits. Use a PersistentVolume that references the 'CSI' volume source for Karpenter auto-scaling support. {"commit": "b2af562", "node": "ip-192-168-36-137.us-west-2.compute.internal", "pod": "inflate0-6c4bdb8b75-7qmfd", "volume": "mypd", "persistent-volume": "pvc-11db7489-3c6e-46f3-a958-91f9d5009d41"} 2023-04-05T23:56:53.464Z ERROR controller.node_state StorageClass .spec.provisioner uses an in-tree storage plugin which is unsupported by Karpenter and is deprecated by Kubernetes. Scale-ups may fail because Karpenter will not discover driver limits. Create a new StorageClass with a .spec.provisioner referencing the CSI driver plugin name 'ebs.csi.aws.com'. {"commit": "b2af562", "node": "ip-192-168-36-137.us-west-2.compute.internal", "pod": "inflate0-6c4bdb8b75-7qmfd", "volume": "mypd", "storage-class": "gp2", "provisioner": "kubernetes.io/aws-ebs"} ``` @@ -290,15 +290,17 @@ The following is a list of known CSI drivers which support a startupTaint to eli - [aws-ebs-csi-driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/install.md#configure-node-startup-taint) - [aws-efs-csi-driver](https://github.com/kubernetes-sigs/aws-efs-csi-driver/tree/master/docs#configure-node-startup-taint) -These taints should be configured via `startupTaints` on your `Provisioner`. For example, to enable this for EBS, add the following to your `Provisioner`: +These taints should be configured via `startupTaints` on your `NodePool`.
For example, to enable this for EBS, add the following to your `NodePool`: ```yaml -apiVersion: karpenter.sh/v1alpha5 -kind: Provisioner +apiVersion: karpenter.sh/v1beta1 +kind: NodePool spec: - startupTaints: - - key: ebs.csi.aws.com/agent-not-ready - effect: NoExecute - + template: + spec: + startupTaints: + - key: ebs.csi.aws.com/agent-not-ready + effect: NoExecute +``` ### CNI is unable to allocate IPs to pods @@ -314,7 +316,7 @@ time=2023-06-12T19:18:15Z type=Warning reason=FailedCreatePodSandBox from=kubele By default, the number of pods on a node is limited by both the number of networking interfaces (ENIs) that may be attached to an instance type and the number of IP addresses that can be assigned to each ENI. See [IP addresses per network interface per instance type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for more detailed information on these instance types' limits. -If the max-pods (configured through your Provisioner [`kubeletConfiguration`]({{}})) is greater than the number of supported IPs for a given instance type, the CNI will fail to assign an IP to the pod and your pod will be left in a `ContainerCreating` state. +If the max-pods (configured through your Provisioner [`kubeletConfiguration`]({{}})) is greater than the number of supported IPs for a given instance type, the CNI will fail to assign an IP to the pod and your pod will be left in a `ContainerCreating` state. ##### Solutions @@ -322,13 +324,13 @@ To avoid this discrepancy between `maxPods` and the supported pod density of the 1. Enable [Prefix Delegation](https://www.eksworkshop.com/docs/networking/prefix/) to increase the number of allocatable IPs for the ENIs on each instance type 2. Reduce your `maxPods` value to be under the maximum pod density for the instance types assigned to your Provisioner -3. Remove the `maxPods` value from your [`kubeletConfiguration`]({{}}) if you no longer need it and instead rely on the defaulted values from Karpenter and EKS AMIs. +3. Remove the `maxPods` value from your [`kubeletConfiguration`]({{}}) if you no longer need it and instead rely on the defaulted values from Karpenter and EKS AMIs. -For more information on pod density, view the [Pod Density Conceptual Documentation]({{}}). +For more information on pod density, view the [Pod Density Section in the NodePools doc]({{}}). #### IP exhaustion in a subnet -When a node is launched by Karpenter, it is assigned to a subnet within your VPC based on the [`subnetSelector`]({{}}) value in your [`AWSNodeTemplate`]({{}})). When a subnet becomes IP address constrained, EC2 may think that it can successfully launch an instance in the subnet; however, when the CNI tries to assign IPs to the pods, there are none remaining. In this case, your pod will stay in a `ContainerCreating` state until an IP address is freed in the subnet and the CNI can assign one to the pod. +When a node is launched by Karpenter, it is assigned to a subnet within your VPC based on the [`subnetSelector`]({{}}) value in your [`AWSNodeTemplate`]({{}}). When a subnet becomes IP address constrained, EC2 may think that it can successfully launch an instance in the subnet; however, when the CNI tries to assign IPs to the pods, there are none remaining. In this case, your pod will stay in a `ContainerCreating` state until an IP address is freed in the subnet and the CNI can assign one to the pod. ##### Solutions @@ -593,7 +595,7 @@
You can find instructions on how ### Node terminates before ready on failed encrypted EBS volume If you are using a custom launch template and an encrypted EBS volume, the IAM principal launching the node may not have sufficient permissions to use the KMS customer managed key (CMK) for the EC2 EBS root volume. -This issue also applies to [Block Device Mappings]({{}}) specified in the Provisioner. +This issue also applies to [Block Device Mappings]({{}}) specified in the Provisioner. In either case, this results in the node terminating almost immediately upon creation. Keep in mind that it is possible that EBS Encryption can be enabled without your knowledge. @@ -652,7 +654,7 @@ This typically occurs when the node has not been considered fully initialized fo This error indicates that the `vpc.amazonaws.com/pod-eni` resource was never reported on the node. If you've enabled Pod ENI for Karpenter nodes via the `aws.enablePodENI` setting, you will need to make the corresponding change to the VPC CNI to enable [security groups for pods](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html) which will cause the resource to be registered. ### AWS Node Termination Handler (NTH) interactions -Karpenter [doesn't currently support draining and terminating on spot rebalance recommendations]({{< ref "concepts/deprovisioning#interruption" >}}). Users who want support for both drain and terminate on spot interruption as well as drain and termination on spot rebalance recommendations may install Node Termination Handler (NTH) on their clusters to support this behavior. +Karpenter [doesn't currently support draining and terminating on spot rebalance recommendations]({{< ref "concepts/disruption#interruption" >}}). Users who want support for both drain and terminate on spot interruption as well as drain and termination on spot rebalance recommendations may install Node Termination Handler (NTH) on their clusters to support this behavior. These two components do not share information between each other, meaning if you have drain and terminate functionality enabled on NTH, NTH may remove a node for a spot rebalance recommendation. Karpenter will replace the node to fulfill the pod capacity that was being fulfilled by the old node; however, Karpenter won't be aware of the reason that that node was terminated. This means that Karpenter may launch the same instance type that was just deprovisioned, causing a spot rebalance recommendation to be sent again. This can result in very short-lived instances where NTH continually removes nodes and Karpeneter re-launches the same instance type over and over again. @@ -681,4 +683,4 @@ caused by: Post "https://api.pricing.us-east-1.amazonaws.com/": dial tcp 52.94.2 This network timeout occurs because there is no VPC endpoint available for the [Price List Query API.](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/using-pelong.html). To workaround this issue, Karpenter ships updated on-demand pricing data as part of the Karpenter binary; however, this means that pricing data will only be updated on Karpenter version upgrades. To disable pricing lookups and avoid the error messages, set the `AWS_ISOLATED_VPC` environment variable (or the `--aws-isolated-vpc` option) to true. -See [Environment Variables / CLI Flags]({{}}) for details. +See [Environment Variables / CLI Flags]({{}}) for details. 
diff --git a/website/content/en/v0.35/upgrading/_index.md b/website/content/en/v0.35/upgrading/_index.md new file mode 100644 index 000000000000..ef1368b22a75 --- /dev/null +++ b/website/content/en/v0.35/upgrading/_index.md @@ -0,0 +1,10 @@ +--- +title: "Upgrading" +linkTitle: "Upgrading" +weight: 30 +description: > + Upgrading Karpenter guide and reference +cascade: + type: docs +--- + diff --git a/website/content/en/v0.35/upgrading/compatibility.md b/website/content/en/v0.35/upgrading/compatibility.md new file mode 100644 index 000000000000..88507e3c7b8f --- /dev/null +++ b/website/content/en/v0.35/upgrading/compatibility.md @@ -0,0 +1,105 @@ +--- +title: "Compatibility" +linkTitle: "Compatibility" +weight: 20 +description: > + Compatibility issues for Karpenter +--- + +# Compatibility + +To make upgrading easier we aim to minimize the introduction of breaking changes. +Before you begin upgrading Karpenter, consider Karpenter compatibility issues related to Kubernetes and the NodePool API (previously Provisioner). + +## Compatibility Matrix + +[comment]: <> (the content below is generated from hack/docs/compataiblitymetrix_gen_docs.go) + +| KUBERNETES | 1.23 | 1.24 | 1.25 | 1.26 | 1.27 | 1.28 | 1.29 | +|------------|---------|---------|---------|---------|---------|---------|---------| +| karpenter | 0.21.x+ | 0.21.x+ | 0.25.x+ | 0.28.x+ | 0.28.x+ | 0.31.x+ | 0.34.0+ | + +[comment]: <> (end docs generated content from hack/docs/compataiblitymetrix_gen_docs.go) + +{{% alert title="Note" color="warning" %}} +The Ubuntu EKS optimized AMI has moved from 20.04 to 22.04 for Kubernetes 1.29+. This new AMI version is __not currently__ supported for users relying on AMI auto-discovery with the Ubuntu AMI family. More details can be found in this [GitHub issue](https://github.com/aws/karpenter-provider-aws/issues/5572). Please review this issue before upgrading to Kubernetes 1.29 if you are using the Ubuntu AMI family. Upgrading to 1.29 without making any changes to your EC2NodeClass will result in Karpenter being unable to create new nodes. +{{% /alert %}} + +{{% alert title="Note" color="warning" %}} +Karpenter currently does not support the following [new `topologySpreadConstraints` keys](https://kubernetes.io/blog/2023/04/17/fine-grained-pod-topology-spread-features-beta/), promoted to beta in Kubernetes 1.27: +- `matchLabelKeys` +- `nodeAffinityPolicy` +- `nodeTaintsPolicy` + +For more information on Karpenter's support for these keys, view [this tracking issue](https://github.com/aws/karpenter-core/issues/430). +{{% /alert %}} + +{{% alert title="Note" color="warning" %}} +Karpenter supports using [Kubernetes Common Expression Language](https://kubernetes.io/docs/reference/using-api/cel/) for validating its Custom Resource Definitions out-of-the-box; however, this feature is not supported on versions of Kubernetes < 1.25. If you are running an earlier version of Kubernetes, you will need to use the Karpenter admission webhooks for validation instead. You can enable these webhooks with `--set webhook.enabled=true` when applying the Karpenter Helm chart. +{{% /alert %}} + +## Compatibility issues + +When we introduce a breaking change, we do so only as described in this document. + +Karpenter follows [Semantic Versioning 2.0.0](https://semver.org/) in its stable release versions, while in +major version zero (`0.y.z`) [anything may change at any time](https://semver.org/#spec-item-4). 
+However, to further protect users during this phase we will only introduce breaking changes in minor releases (releases that increment y in x.y.z). +Note this does not mean every minor upgrade has a breaking change, as we will also increment the +minor version when we release a new feature. + +Users should therefore check to see if there is a breaking change every time they are upgrading to a new minor version. + +### How Do We Break Incompatibility? + +When there is a breaking change we will: + +* Increment the minor version when in major version 0 +* Add a permanent separate section named `upgrading to x.y.z+` under [release upgrade notes](#release-upgrade-notes) + clearly explaining the breaking change and what needs to be done on the user side to ensure a safe upgrade +* Add the sentence “This is a breaking change, please refer to the above link for upgrade instructions” to the top of the release notes and in all our announcements + +### How Do We Find Incompatibilities? + +Besides the peer review process for all changes to the code base we also do the following in order to find +incompatibilities: +* (To be implemented) To check the compatibility of the application, we will automate tests for installing, uninstalling, upgrading from an older version, and downgrading to an older version +* (To be implemented) To check the compatibility of the documentation with the application, we will turn the commands in our documentation into scripts that we can automatically run + +### Security Patches + +While we are in major version 0 we will not release security patches to older versions. +Rather, we provide the patches in the latest versions. +When at major version 1 we will have an EOL (end of life) policy where we provide security patches +for a subset of older versions and deprecate the others. + +## Release Types + +Karpenter offers three types of releases. This section explains the purpose of each release type and how the images for each release type are tagged in our [public image repository](https://gallery.ecr.aws/karpenter). + +### Stable Releases + +Stable releases are the most reliable releases that are released on a weekly cadence. Stable releases are our only recommended versions for production environments. +Sometimes we skip a stable release because we find instability or problems that need to be fixed before having a stable release. +Stable releases are tagged with a plain semantic version, for example `0.35.0`. (Releases prior to `0.35.0` were tagged with a `v` prefix, for example `v0.34.0`.) + +### Release Candidates + +We consider having release candidates for major and important minor versions. Our release candidates are tagged like `x.y.z-rc.0`, `x.y.z-rc.1`. The release candidate will then graduate to `x.y.z` as a normal stable release. +By adopting this practice we allow our users who are early adopters to test out new releases before they are available to the wider community, thereby providing us with early feedback resulting in more stable releases. + +### Snapshot Releases + +We release a snapshot release for every commit that gets merged into [`aws/karpenter-provider-aws`](https://www.github.com/aws/karpenter-provider-aws). This enables users to immediately try a new feature or fix right after it's merged rather than waiting days or weeks for a release. + +Snapshot releases are not made available in the same public ECR repository as other release types; they are instead published to a separate private ECR repository.
+Helm charts are published to `oci://{{< param "snapshot_repo.account_id" >}}.dkr.ecr.{{< param "snapshot_repo.region" >}}.amazonaws.com/karpenter/snapshot/karpenter` and are tagged with the git commit hash prefixed by the Karpenter major version (e.g. `0-fc17bfc89ebb30a3b102a86012b3e3992ec08adf`). +Anyone with an AWS account can pull from this repository, but must first authenticate: + +```bash +aws ecr get-login-password --region {{< param "snapshot_repo.region" >}} | docker login --username AWS --password-stdin {{< param "snapshot_repo.account_id" >}}.dkr.ecr.{{< param "snapshot_repo.region" >}}.amazonaws.com +``` + +{{% alert title="Note" color="warning" %}} +Snapshot releases are suitable for testing and troubleshooting, but they should not be used in production environments. Snapshot releases are ephemeral and will be removed 90 days after they were published. +{{% /alert %}} diff --git a/website/content/en/v0.35/upgrading/upgrade-guide.md b/website/content/en/v0.35/upgrading/upgrade-guide.md new file mode 100644 index 000000000000..2bf2e17e603b --- /dev/null +++ b/website/content/en/v0.35/upgrading/upgrade-guide.md @@ -0,0 +1,349 @@ +--- +title: "Upgrade Guide" +linkTitle: "Upgrade Guide" +weight: 10 +description: > + Learn about upgrading Karpenter +--- + +Karpenter is a controller that runs in your cluster, but it is not tied to a specific Kubernetes version, as the Cluster Autoscaler is. +Use your existing upgrade mechanisms to upgrade your core add-ons in Kubernetes and keep Karpenter up to date on bug fixes and new features. +This guide contains information needed to upgrade to the latest release of Karpenter, along with compatibility issues you need to be aware of when upgrading from earlier Karpenter versions. + +### CRD Upgrades + +Karpenter ships with a few Custom Resource Definitions (CRDs). These CRDs are published: +* As an independent Helm chart [karpenter-crd](https://gallery.ecr.aws/karpenter/karpenter-crd) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter-crd) that can be used by Helm to manage the lifecycle of these CRDs. To upgrade or install `karpenter-crd` run: + ```bash + KARPENTER_NAMESPACE=kube-system + helm upgrade --install karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version x.y.z --namespace "${KARPENTER_NAMESPACE}" --create-namespace + ``` + +{{% alert title="Note" color="warning" %}} +If you get the error `invalid ownership metadata; label validation error:` while installing the `karpenter-crd` chart from an older version of Karpenter, follow the [Troubleshooting Guide]({{}}) for details on how to resolve these errors. +{{% /alert %}} + +* As part of the Helm chart [karpenter](https://gallery.ecr.aws/karpenter/karpenter) - [source](https://github.com/aws/karpenter/blob/main/charts/karpenter/crds). Helm [does not manage the lifecycle of CRDs using this method](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/); the tool will only install the CRDs during the first installation of the Helm chart. Subsequent chart upgrades will not add or remove CRDs, even if the CRDs have changed. When CRDs are changed, we will make a note in the version's upgrade guide.
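+
+Before reapplying, you can list the Karpenter CRDs currently installed in your cluster as a quick sanity check (not a required step):
+
+```bash
+kubectl get crds | grep karpenter
+```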
+ +In general, you can reapply the CRDs in the `crds` directory of the Karpenter Helm chart: + +```shell +kubectl apply -f https://raw.githubusercontent.com/aws/karpenter/v0.35.0/pkg/apis/crds/karpenter.sh_nodepools.yaml +kubectl apply -f https://raw.githubusercontent.com/aws/karpenter/v0.35.0/pkg/apis/crds/karpenter.sh_nodeclaims.yaml +kubectl apply -f https://raw.githubusercontent.com/aws/karpenter/v0.35.0/pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml +``` + + + +### Upgrading to `0.35.0`+ + +{{% alert title="Warning" color="warning" %}} +`0.33.0`+ _only_ supports Karpenter v1beta1 APIs and will not work with existing Provisioner, AWSNodeTemplate or Machine alpha APIs. Do not upgrade to `0.35.0`+ without first [upgrading to `0.32.x`]({{}}). This version supports both the alpha and beta APIs, allowing you to migrate all of your existing APIs to beta APIs without experiencing downtime. +{{% /alert %}} + +* Karpenter OCI tags and Helm chart versions are now valid semantic versions, meaning that the `v` prefix from the git tag has been removed and they now follow the `x.y.z` pattern. + +### Upgrading to `0.34.0`+ + +{{% alert title="Warning" color="warning" %}} +`0.33.0`+ _only_ supports Karpenter v1beta1 APIs and will not work with existing Provisioner, AWSNodeTemplate or Machine alpha APIs. Do not upgrade to `0.34.0`+ without first [upgrading to `0.32.x`]({{}}). This version supports both the alpha and beta APIs, allowing you to migrate all of your existing APIs to beta APIs without experiencing downtime. +{{% /alert %}} + +{{% alert title="Warning" color="warning" %}} +The Ubuntu EKS optimized AMI has moved from 20.04 to 22.04 for Kubernetes 1.29+. This new AMI version is __not currently__ supported for users relying on AMI auto-discovery with the Ubuntu AMI family. More details can be found in this [GitHub issue](https://github.com/aws/karpenter-provider-aws/issues/5572). Please review this issue before upgrading to Kubernetes 1.29 if you are using the Ubuntu AMI family. Upgrading to 1.29 without making any changes to your EC2NodeClass will result in Karpenter being unable to create new nodes. +{{% /alert %}} + +* Karpenter now supports `nodepool.spec.disruption.budgets`, which allows users to control the speed of disruption in the cluster. Since this requires an update to the Custom Resource, before upgrading, you should re-apply the new updates to the CRDs. Check out [Disruption Budgets]({{}}) for more. +* With Disruption Budgets, Karpenter will disrupt multiple batches of nodes simultaneously, which can result in overall quicker scale-down of your cluster. Before `0.34.0`, Karpenter had a hard-coded parallelism limit for each type of disruption. In `0.34.0`+, Karpenter will now disrupt at most 10% of nodes for a given NodePool. There is no setting that will be perfectly equivalent to the behavior prior to `0.34.0`. When considering how to configure your budgets, please refer to the following limits for versions prior to `0.34.0`: + * `Empty Expiration / Empty Drift / Empty Consolidation`: infinite parallelism + * `Non-Empty Expiration / Non-Empty Drift / Single-Node Consolidation`: one node at a time + * `Multi-Node Consolidation`: max 100 nodes +* To support Disruption Budgets, `0.34.0`+ includes critical changes to Karpenter's core controllers, which allow Karpenter to consider multiple batches of disrupting nodes simultaneously. This increases Karpenter's performance with the potential downside of higher CPU and memory utilization from the Karpenter pod.
While the magnitude of this difference varies on a case-by-case basis, when upgrading to Karpenter `0.34.0`+, please note that you may need to increase the resources allocated to the Karpenter controller pods. +* Karpenter now adds a default `podSecurityContext` that configures `fsGroup: 65536` on volumes in the pod. If you are using sidecar containers, you should review if this configuration is compatible with them. You can disable this default `podSecurityContext` through Helm by setting `--set podSecurityContext=null` when installing/upgrading the chart. +* The `dnsPolicy` for the Karpenter controller pod has been changed back to the Kubernetes cluster default of `ClusterFirst`. Setting our `dnsPolicy` to `Default` (confusingly, this is not the Kubernetes cluster default) caused more confusion for any users running IPv6 clusters with dual-stack nodes or anyone running Karpenter with dependencies on cluster services (like clusters running service meshes). If you still want the old behavior here, you can set the `dnsPolicy` to `Default` by setting the Helm value on install/upgrade with `--set dnsPolicy=Default`. More details on this issue can be found in the following GitHub issues: [#2186](https://github.com/aws/karpenter-provider-aws/issues/2186) and [#4947](https://github.com/aws/karpenter-provider-aws/issues/4947). +* Karpenter now disallows `nodepool.spec.template.spec.resources` from being set. The webhook validation never allowed `nodepool.spec.template.spec.resources`. We are now ensuring that CEL validation also disallows `nodepool.spec.template.spec.resources` from being set. If you were previously setting the resources field on your NodePool, ensure that you remove this field before upgrading to the newest version of Karpenter or else updates to the resource may fail on the new version. + +### Upgrading to `0.33.0`+ + +{{% alert title="Warning" color="warning" %}} +`0.33.0`+ _only_ supports Karpenter v1beta1 APIs and will not work with existing Provisioner, AWSNodeTemplate or Machine alpha APIs. **Do not** upgrade to `0.33.0`+ without first [upgrading to `0.32.x`]({{}}). This version supports both the alpha and beta APIs, allowing you to migrate all of your existing APIs to beta APIs without experiencing downtime. +{{% /alert %}} + +* Karpenter no longer supports using the `karpenter.sh/provisioner-name` label in NodePool labels and requirements or in application node selectors, affinities, or topologySpreadConstraints. If you were previously using this label to target applications to specific Provisioners, you should update your applications to use the `karpenter.sh/nodepool` label instead before upgrading. If you upgrade without changing these labels, you may begin to see pod scheduling failures for these applications. +* Karpenter now tags `spot-instances-request` with the same tags that it applies to instances, volumes, and primary ENIs. This means that you will now need to add `ec2:CreateTags` permission for `spot-instances-request`. You can also further scope your controller policy for the `ec2:RunInstances` action to require that it launches the `spot-instances-request` with these specific tags. You can view an example of scoping these actions in the [Getting Started Guide's default CloudFormation controller policy](https://github.com/aws/karpenter/blob/v0.33.0/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml#L61).
+* We now recommend that you set the installation namespace for your Karpenter controllers to `kube-system` to denote Karpenter as a critical cluster component. This ensures that requests from the Karpenter controllers are treated with higher priority by assigning them to a different [PriorityLevelConfiguration](https://kubernetes.io/docs/concepts/cluster-administration/flow-control/#prioritylevelconfiguration) than generic requests from other namespaces. For more details on API Priority and Fairness, read the [Kubernetes API Priority and Fairness Conceptual Docs](https://kubernetes.io/docs/concepts/cluster-administration/flow-control/). Note: Changing the namespace for your Karpenter release will cause the service account namespace to change. If you are using IRSA for authentication with AWS, you will need to change the scoping set in the controller's trust policy from `karpenter:karpenter` to `kube-system:karpenter`. +* `0.33.0` disables mutating and validating webhooks by default in favor of using [Common Expression Language for CRD validation](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation). The Common Expression Language Validation Feature [is enabled by default on EKS 1.25](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-rules). If you are using Kubernetes version >= 1.25, no further action is required. If you are using a Kubernetes version below 1.25, you now need to set `DISABLE_WEBHOOK=false` in your container environment variables or `--set webhook.enabled=true` if using Helm. View the [Webhook Support Deprecated in Favor of CEL Section of the v1beta1 Migration Guide]({{}}). +* `0.33.0` drops support for passing settings through the `karpenter-global-settings` ConfigMap. You should pass settings through the container environment variables in the Karpenter deployment manifest. View the [Global Settings Section of the v1beta1 Migration Guide]({{}}) for more details. +* `0.33.0` enables `Drift=true` by default in the `FEATURE_GATES`. If you previously didn't enable the feature gate, Karpenter will now check if there is a difference between the desired state of your nodes declared in your NodePool and the actual state of your nodes. View the [Drift Section of Disruption Conceptual Docs]({{}}) for more details. +* `0.33.0` drops looking up the `zap-logger-config` through ConfigMap discovery. Instead, Karpenter now expects the logging config to be mounted on the filesystem if you are using this to configure Zap logging. This is not enabled by default, but can be enabled through `--set logConfig.enabled=true` in the Helm values. If you are setting any values in the `logConfig` from the `0.32.x` upgrade, such as `logConfig.logEncoding`, note that you will have to explicitly set `logConfig.enabled=true` alongside it. Also, note that setting the Zap logging config is a deprecated feature in beta and is planned to be dropped at v1. View the [Logging Configuration Section of the v1beta1 Migration Guide]({{}}) for more details. +* `0.33.0` changes the default `LOG_LEVEL` from `debug` to `info`. If you are still enabling logging configuration through the `zap-logger-config`, no action is required. +* `0.33.0` drops support for comma-delimited lists on tags for `SubnetSelectorTerm`, `SecurityGroupsSelectorTerm`, and `AMISelectorTerm`.
Karpenter now supports multiple terms for each of the selectors, which means that we can specify a more explicit OR-based constraint through separate terms rather than a comma-delimited list of values. + +### Upgrading to `0.32.0`+ + +{{% alert title="Warning" color="warning" %}} +Karpenter `0.32.0` introduces v1beta1 APIs, including _significant_ changes to the API and installation procedures for the Karpenter controllers. **Do not** upgrade to `0.32.0`+ without referencing the [v1beta1 Migration Upgrade Procedure]({{}}). + +This version includes **dual support** for both alpha and beta APIs to ensure that you can slowly migrate your existing Provisioner, AWSNodeTemplate, and Machine alpha APIs to the newer NodePool, EC2NodeClass, and NodeClaim beta APIs. + +If you are rolling back after upgrading to `0.32.0`, note that `0.31.4` is the only version that supports handling rollback after you have deployed the v1beta1 APIs to your cluster. +{{% /alert %}} + +* Karpenter now serves the webhook Prometheus metrics server on port `8001`. If this port is already in-use on the pod or you are running in `hostNetworking` mode, you may need to change this port value. You can configure this port value through the `WEBHOOK_METRICS_PORT` environment variable or the `webhook.metrics.port` value if installing via Helm. +* Karpenter now exposes the ability to disable webhooks through the `webhook.enabled=false` value. This value will disable the webhook server and will prevent any permissions, mutating or validating webhook configurations from being deployed to the cluster. +* Karpenter now moves all logging configuration for the Zap logger into the `logConfig` values block. Configuring Karpenter logging with this mechanism _is_ deprecated and will be dropped at v1. Karpenter now only surfaces logLevel through the `logLevel` Helm value. If you need more advanced configuration due to log parsing constraints, we recommend configuring your log parser to handle Karpenter's Zap JSON logging. +* The default log encoding changed from `console` to `json`. If you were previously not setting the type of log encoding, this default will change with the Helm chart. If you were setting the value through `logEncoding`, this value will continue to work until `0.33.x` but it is deprecated in favor of `logConfig.logEncoding`. +* Karpenter now uses the `karpenter.sh/disruption:NoSchedule=disrupting` taint instead of the upstream `node.kubernetes.io/unschedulable` taint for nodes spawned with a NodePool to prevent pods from scheduling to nodes being disrupted. Pods that tolerated the `node.kubernetes.io/unschedulable` taint and previously weren't evicted during termination will now be evicted. This most notably affects DaemonSets, which have the `node.kubernetes.io/unschedulable` toleration by default, where Karpenter will now remove these pods during termination. If you want your specific pods to not be evicted when nodes are scaled down, you should add a toleration to the pods with the following: `Key=karpenter.sh/disruption, Effect=NoSchedule, Operator=Equals, Values=disrupting`. + * Note: Karpenter will continue to use the old `node.kubernetes.io/unschedulable` taint for nodes spawned with a Provisioner. + +### Upgrading to `0.31.0`+ + +* Karpenter moved its `securityContext` constraints from pod-wide to applying exclusively to the Karpenter container.
+
+### Upgrading to `0.31.0`+
+
+* Karpenter moved its `securityContext` constraints from pod-wide to applying exclusively to the Karpenter container. If you were previously relying on the pod-wide `securityContext` for your sidecar containers, you will now need to set these values explicitly in your sidecar container configuration.
+
+### Upgrading to `0.30.0`+
+
+* Karpenter will now [statically drift]({{}}) on both Provisioner and AWSNodeTemplate fields. For Provisioner static drift, the `karpenter.sh/provisioner-hash` annotation must be present on both the Provisioner and Machine. For AWSNodeTemplate drift, the `karpenter.k8s.aws/nodetemplate-hash` annotation must be present on the AWSNodeTemplate and Machine. Karpenter will not add these annotations to pre-existing nodes, so each of these nodes will need to be recycled one time for the annotations to be added.
+* Karpenter will now fail validation on AWSNodeTemplates and Provisioner `spec.provider` that have `amiSelectors`, `subnetSelectors`, or `securityGroupSelectors` set with a combination of id selectors (`aws-ids`, `aws::ids`) and other selectors.
+* Karpenter now statically sets the `securityContext` at both the pod and container levels and doesn't allow override values to be passed through the Helm chart. This change was made to adhere to the [Restricted Pod Security Standard](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted), which follows pod hardening best practices.
+
+{{% alert title="Note" color="primary" %}}
+If you have sidecar containers configured to run alongside Karpenter that cannot tolerate the [pod-wide `securityContext` constraints](https://github.com/aws/karpenter/blob/v0.30.0/charts/karpenter/templates/deployment.yaml#L40), you will need to specify overrides to the sidecar `securityContext` in your deployment.
+{{% /alert %}}
+
+### Upgrading to `0.29.0`+
+
+{{% alert title="Warning" color="warning" %}}
+Karpenter `0.29.1` contains a [file descriptor and memory leak bug](https://github.com/aws/karpenter/issues/4296) that leads to Karpenter getting OOMKilled and restarting once it hits its memory or file descriptor limit. Karpenter `0.29.2`+ fixes this leak.
+{{% /alert %}}
+
+* Karpenter has changed the default metrics service port from 8080 to 8000 and the default webhook service port from 443 to 8443. In `0.28.0`, the Karpenter pod port was changed to 8000, but referenced the service by name, allowing users to scrape the service at port 8080 for metrics. `0.29.0` aligns the two ports so that the service and pod metrics ports are the same. These ports are set by the `controller.metrics.port` and `webhook.port` Helm chart values (see the sketch after this list), so if you have previously set these to non-default values, you may need to update your Prometheus scraper to match these new values.
+
+* Karpenter will now reconcile nodes that are drifted due to their Security Groups or their Subnets. If your AWSNodeTemplate's Security Groups differ from the Security Groups used for an instance, Karpenter will consider it drifted. If the Subnet used by an instance is not contained in the allowed list of Subnets for an AWSNodeTemplate, Karpenter will also consider it drifted.
+  * Since Karpenter uses tags for discovery of Subnets and SecurityGroups, check the [Threat Model]({{}}) to see how to manage this IAM Permission.
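+
+A minimal sketch of pinning these ports in your Helm values (the values shown are the new defaults):
+
+```yaml
+controller:
+  metrics:
+    port: 8000   # metrics port for both the pod and the service (was 8080)
+webhook:
+  port: 8443     # webhook service port (was 443)
+```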
+
+### Upgrading to `0.28.0`+
+
+{{% alert title="Warning" color="warning" %}}
+Karpenter `0.28.0` is incompatible with Kubernetes version 1.26+, which can result in additional node scale-outs when using `--cloudprovider=external`, which is the default for the EKS Optimized AMI. See: https://github.com/aws/karpenter-core/pull/375. Karpenter `0.28.1`+ fixes this issue and is compatible with Kubernetes version 1.26+.
+{{% /alert %}}
+
+* The `extraObjects` value is now removed from the Helm chart. Having this value in the chart proved not to work in the majority of Karpenter installs and often led to anti-patterns, where the Karpenter resources installed to manage Karpenter's capacity were directly tied to the install of the Karpenter controller deployments. If you want to install Karpenter manifests alongside the Karpenter Helm chart, the Karpenter team recommends creating a separate chart for the manifests that declares a dependency on the controller chart.
+* The `aws.nodeNameConvention` setting is now removed from the [`karpenter-global-settings`]({{}}) ConfigMap. Because Karpenter is now driving its orchestration of capacity through Machines, it no longer needs to know the node name, making this setting obsolete. Karpenter ignores configuration that it doesn't recognize in the [`karpenter-global-settings`]({{}}) ConfigMap, so leaving the `aws.nodeNameConvention` in the ConfigMap will simply cause this setting to be ignored.
+* Karpenter now defines a set of "restricted tags" which can't be overridden with custom tagging in the AWSNodeTemplate or in the [`karpenter-global-settings`]({{}}) ConfigMap. If you are currently using any of these tag overrides when tagging your instances, webhook validation will now fail. These tags include (see the sketch after this list):
+
+  * `karpenter.sh/managed-by`
+  * `karpenter.sh/provisioner-name`
+  * `kubernetes.io/cluster/${CLUSTER_NAME}`
+
+* The following metrics changed their meaning, based on the introduction of the Machine resource:
+  * `karpenter_nodes_terminated`: Use `karpenter_machines_terminated` if you are interested in the reason why a Karpenter machine was deleted. `karpenter_nodes_terminated` now only tracks the count of terminated nodes without any additional labels.
+  * `karpenter_nodes_created`: Use `karpenter_machines_created` if you are interested in the reason why a Karpenter machine was created. `karpenter_nodes_created` now only tracks the count of created nodes without any additional labels.
+  * `karpenter_deprovisioning_replacement_node_initialized_seconds`: This metric has been replaced in favor of `karpenter_deprovisioning_replacement_machine_initialized_seconds`.
+* `0.28.0` introduces the Machine CustomResource into the `karpenter.sh` API Group and requires this CustomResourceDefinition to run properly. Karpenter now orchestrates its CloudProvider capacity through these in-cluster Machine CustomResources. When performing a scheduling decision, Karpenter will create a Machine, resulting in launching CloudProvider capacity. The kubelet running on the new capacity will then register the node to the cluster shortly after launch.
+  * If you are using Helm to upgrade between versions of Karpenter, note that [Helm does not automate the process of upgrading or installing the new CRDs into your cluster](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations). To install or upgrade the existing CRDs, follow the guidance under the [Custom Resource Definition (CRD) Upgrades]({{< relref "#custom-resource-definition-crd-upgrades" >}}) section of the upgrade guide.
+  * Karpenter will hydrate Machines on startup for existing capacity managed by Karpenter in the cluster. Existing capacity launched by an older version of Karpenter is discovered by finding CloudProvider capacity with the `karpenter.sh/provisioner-name` tag or the `karpenter.sh/provisioner-name` label on nodes.
+* The metrics port for the Karpenter deployment was changed from 8080 to 8000. Users who scrape the pod directly for metrics rather than the service will need to adjust the commands they use to reference port 8000. Any users who scrape metrics from the service should be unaffected.
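+
+For example, a minimal sketch of AWSNodeTemplate tagging under this rule (the `team` tag and template name are hypothetical):
+
+```yaml
+apiVersion: karpenter.k8s.aws/v1alpha1
+kind: AWSNodeTemplate
+metadata:
+  name: default
+spec:
+  tags:
+    team: my-team                         # custom tags are still allowed
+    # karpenter.sh/provisioner-name: "x"  # restricted -- webhook validation now fails
+```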
+
+{{% alert title="Warning" color="warning" %}}
+Karpenter creates a mapping between CloudProvider machines and CustomResources in the cluster for capacity tracking. To ensure this mapping is consistent, Karpenter utilizes the following tag keys:
+
+* `karpenter.sh/managed-by`
+* `karpenter.sh/provisioner-name`
+* `kubernetes.io/cluster/${CLUSTER_NAME}`
+
+Because Karpenter takes this dependency, any user that has the ability to create or delete these tags on CloudProvider machines will have the ability to orchestrate Karpenter to create or delete CloudProvider machines as a side effect. Check the [Threat Model]({{}}) to see how this might affect you, and ways to mitigate it.
+{{% /alert %}}
+
+{{% alert title="Rolling Back" color="warning" %}}
+If, after upgrading to `0.28.0`+, a rollback to an older version of Karpenter needs to be performed, Karpenter will continue to function normally, though you will still have the Machine CustomResources on your cluster. You will need to manually delete the Machines and patch out the finalizers to fully complete the rollback.
+
+Karpenter marks CloudProvider capacity as "managed by" a Machine using the `karpenter.sh/managed-by` tag on the CloudProvider machine. It uses this tag to ensure that the Machine CustomResources in the cluster match the CloudProvider capacity managed by Karpenter. If these states don't match, Karpenter will garbage collect the capacity. Because of this, if performing an upgrade, followed by a rollback, followed by another upgrade to `0.28.0`+, ensure you remove the `karpenter.sh/managed-by` tags from existing capacity; otherwise, Karpenter will deprovision the capacity without a Machine CR counterpart.
+{{% /alert %}}
+
+### Upgrading to `0.27.3`+
+
+* The `defaulting.webhook.karpenter.sh` mutating webhook was removed in `0.27.3`. If you are coming from an older version of Karpenter where this webhook existed and the webhook was not managed by Helm, you may need to delete the stale webhook:
+
+```bash
+kubectl delete mutatingwebhookconfigurations defaulting.webhook.karpenter.sh
+```
+
+### Upgrading to `0.27.0`+
+
+* The Karpenter controller pods now deploy with `kubernetes.io/hostname` self anti-affinity by default. If you are running Karpenter in HA (high-availability) mode and you do not have enough nodes to match the number of pod replicas you are deploying with, you will need to scale out your nodes for Karpenter. A sketch of the effective constraint follows this list.
+* The following controller metrics changed and moved under the `controller_runtime` metrics namespace:
+  * `karpenter_metricscraper_...`
+  * `karpenter_deprovisioning_...`
+  * `karpenter_provisioner_...`
+  * `karpenter_interruption_...`
+* The following controller metric names changed, affecting the `controller` label value under `controller_runtime_...` metrics. These metrics include:
+  * `podmetrics` -> `pod_metrics`
+  * `provisionermetrics` -> `provisioner_metrics`
+  * `metricscraper` -> `metric_scraper`
+  * `provisioning` -> `provisioner_trigger`
+  * `node-state` -> `node_state`
+  * `pod-state` -> `pod_state`
+  * `provisioner-state` -> `provisioner_state`
+* The `karpenter_allocation_controller_scheduling_duration_seconds` metric name changed to `karpenter_provisioner_scheduling_duration_seconds`.
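+
+The hostname self anti-affinity is roughly equivalent to the following pod spec fragment (a sketch; the exact label selector used by the chart may differ):
+
+```yaml
+affinity:
+  podAntiAffinity:
+    requiredDuringSchedulingIgnoredDuringExecution:
+      - topologyKey: kubernetes.io/hostname   # at most one Karpenter pod per node
+        labelSelector:
+          matchLabels:
+            app.kubernetes.io/name: karpenter
+```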
+
+### Upgrading to `0.26.0`+
+
+* The `karpenter.sh/do-not-evict` annotation no longer blocks node termination when running `kubectl delete node`. This annotation on pods will only block automatic deprovisioning that is considered "voluntary," that is, disruptions that can be avoided. Disruptions that Karpenter deems "involuntary," such as spot interruption and manual deletion of the node, ignore the `karpenter.sh/do-not-evict` annotation. See [Disabling Deprovisioning]({{}}) for more details.
+* Default resource `requests` and `limits` are removed from Karpenter's controller deployment through the Helm chart. If you have not set custom resource `requests` or `limits` in your Helm values and are using Karpenter's defaults, you will now need to set these values in your Helm chart deployment.
+* The `controller.image` value in the Helm chart has been broken out to a map consisting of `controller.image.repository`, `controller.image.tag`, and `controller.image.digest`. If manually overriding the `controller.image`, you will need to update your values to the new layout.
+
+### Upgrading to `0.25.0`+
+
+* The cluster endpoint can now be automatically discovered. If you are using Amazon Elastic Kubernetes Service (EKS), you can now omit the `clusterEndpoint` field in your configuration. To allow this resolution, you must add the `eks:DescribeCluster` permission to the Karpenter Controller IAM role.
+
+### Upgrading to `0.24.0`+
+
+* Settings are no longer updated dynamically while Karpenter is running. If you manually make a change to the [`karpenter-global-settings`]({{}}) ConfigMap, you will need to reload the containers by restarting the deployment with `kubectl rollout restart -n karpenter deploy/karpenter`.
+* Karpenter no longer filters out instance types internally. Previously, `g2` (not supported by the NVIDIA device plugin) and FPGA instance types were filtered. The only way to filter instance types now is to set requirements on your provisioner or pods using the well-known node labels described [here]({{}}). If you are currently using overly broad requirements that allow all of the `g` instance-category, you will want to tighten the requirement or add an instance-generation requirement.
+* `aws.tags` in the [`karpenter-global-settings`]({{}}) ConfigMap is now a top-level field and expects the value associated with this key to be a JSON object of string to string. This is a change from previous versions, where keys were given implicitly by providing the key-value pair `aws.tags.<key>: value` in the ConfigMap. A sketch of the new shape follows.
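+
+A minimal sketch of the new top-level `aws.tags` field (the tag keys and values shown are hypothetical):
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: karpenter-global-settings
+  namespace: karpenter
+data:
+  # A single JSON object of string-to-string pairs, rather than one
+  # implicit `aws.tags.<key>: value` entry per tag
+  aws.tags: '{"team": "my-team", "costCenter": "1234"}'
+```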
+
+### Upgrading to `0.22.0`+
+
+* Do not upgrade to this version unless you are on Kubernetes >= v1.21. Karpenter no longer supports Kubernetes v1.20, but now supports Kubernetes v1.25. This change is due to the v1 PDB API, which was introduced in K8s v1.20, and the subsequent removal of the v1beta1 PDB API in K8s v1.25.
+
+### Upgrading to `0.20.0`+
+
+* Prior to `0.20.0`, Karpenter would prioritize certain instance type categories absent any requirements in the Provisioner. `0.20.0`+ removes this prioritization of instance type categories ("m", "c", "r", "a", "t", "i") in code. Bare metal and GPU instance types are still deprioritized and only used if no other instance types are compatible with the node requirements. Since Karpenter no longer prioritizes any instance types, if you do not want exotic instance types and are not using the runtime Provisioner defaults, you will need to specify this in the Provisioner.
+
+### Upgrading to `0.19.0`+
+
+* The Karpenter webhook and controller containers are combined into a single binary, which requires changes to the Helm chart. If your Karpenter installation (Helm or otherwise) currently customizes the Karpenter webhook, your deployment tooling may require minor changes.
+* Karpenter now supports native interruption handling. If you were previously using Node Termination Handler for spot interruption handling and health events, you will need to remove the component from your cluster before enabling `aws.interruptionQueueName`. For more details on Karpenter's interruption handling, see the [Interruption Handling Docs]({{< ref "../concepts/disruption/#interruption" >}}).
+* Instance category defaults are now explicitly persisted in the Provisioner, rather than handled implicitly in memory. By default, Provisioners will limit the instance category to c, m, and r (see the sketch after this list). If any instance type constraints are applied, they will override this default. If you have created Provisioners in the past with unconstrained instance type, family, or category, Karpenter will now use instance types more flexibly than before. If you would like to apply these constraints, they must be included in the Provisioner CRD.
+* Karpenter CRD raw YAML URLs have migrated from `https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.19.3/charts/karpenter/crds/...` to `https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.19.3/pkg/apis/crds/...`. If you reference static Karpenter CRDs or rely on `kubectl replace -f` to apply these CRDs from their remote location, you will need to migrate to the new location.
+* Pods without an ownerRef (also called "controllerless" or "naked" pods) will now be evicted by default during node termination and consolidation. Users can prevent controllerless pods from being voluntarily disrupted by applying the `karpenter.sh/do-not-evict: "true"` annotation to the pods in question.
+* The following CLI options/environment variables are now removed and replaced in favor of pulling settings dynamically from the [`karpenter-global-settings`]({{}}) ConfigMap. See the [Settings docs]({{}}) for more details on configuring the new values in the ConfigMap.
+
+  * `CLUSTER_NAME` -> `settings.aws.clusterName`
+  * `CLUSTER_ENDPOINT` -> `settings.aws.clusterEndpoint`
+  * `AWS_DEFAULT_INSTANCE_PROFILE` -> `settings.aws.defaultInstanceProfile`
+  * `AWS_ENABLE_POD_ENI` -> `settings.aws.enablePodENI`
+  * `AWS_ENI_LIMITED_POD_DENSITY` -> `settings.aws.enableENILimitedPodDensity`
+  * `AWS_ISOLATED_VPC` -> `settings.aws.isolatedVPC`
+  * `AWS_NODE_NAME_CONVENTION` -> `settings.aws.nodeNameConvention`
+  * `VM_MEMORY_OVERHEAD` -> `settings.aws.vmMemoryOverheadPercent`
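+
+A minimal sketch of the instance-category default now persisted on the Provisioner:
+
+```yaml
+spec:
+  requirements:
+    - key: karpenter.k8s.aws/instance-category
+      operator: In
+      values: ["c", "m", "r"]   # the default; widen or narrow as needed
+```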
+
+### Upgrading to `0.18.0`+
+
+* `0.18.0` removes the `karpenter_consolidation_nodes_created` and `karpenter_consolidation_nodes_terminated` prometheus metrics in favor of the more generic `karpenter_nodes_created` and `karpenter_nodes_terminated` metrics. You can still see nodes created and terminated by consolidation by checking the `reason` label on the metrics. Check out all the metrics published by Karpenter [here]({{}}).
+
+### Upgrading to `0.17.0`+
+
+Karpenter's Helm chart package is now stored in [Karpenter's OCI (Open Container Initiative) registry](https://gallery.ecr.aws/karpenter/karpenter). The Helm CLI has supported this format since [v3.8.0+](https://helm.sh/docs/topics/registries/).
+With this change, [charts.karpenter.sh](https://charts.karpenter.sh/) is no longer updated but is preserved to allow the use of older Karpenter versions. For examples of working with the Karpenter Helm charts, see [Install Karpenter Helm Chart]({{< ref "../getting-started/getting-started-with-karpenter/#install-karpenter-helm-chart" >}}).
+
+Users who have scripted the installation or upgrading of Karpenter need to adjust their scripts with the following changes:
+1. There is no longer a need to add the Karpenter Helm repo with `helm repo add`.
+2. The full URL of the Helm chart needs to be present when using the `helm` CLI.
+3. If you were not prepending a `v` to the version (e.g. `0.17.0`), you will need to do so with the OCI chart (e.g. `v0.17.0`).
+
+### Upgrading to `0.16.2`+
+
+* `0.16.2` adds new kubeletConfiguration fields to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters:
+```bash
+kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.16.2/charts/karpenter/crds/karpenter.sh_provisioners.yaml
+```
+
+### Upgrading to `0.16.0`+
+
+* `0.16.0` adds a new weight field to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters:
+```bash
+kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.16.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml
+```
+
+### Upgrading to `0.15.0`+
+
+* `0.15.0` adds a new consolidation field to the `provisioners.karpenter.sh` v1alpha5 CRD. The CRD will need to be updated to use the new parameters:
+```bash
+kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.15.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml
+```
+
+### Upgrading to `0.14.0`+
+
+* `0.14.0` adds new fields to the `provisioners.karpenter.sh` v1alpha5 and `awsnodetemplates.karpenter.k8s.aws` v1alpha1 CRDs. The CRDs will need to be updated to use the new parameters:
+
+```bash
+kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.14.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml
+
+kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.14.0/charts/karpenter/crds/karpenter.k8s.aws_awsnodetemplates.yaml
+```
+
+* `0.14.0` changes the way Karpenter discovers its dynamically generated AWS launch templates to use a tag rather than a name scheme. The previous name scheme was `Karpenter-${CLUSTER_NAME}-*`, which could collide with user-created launch templates that Karpenter should not manage. The new scheme uses a tag on the launch template, `karpenter.k8s.aws/cluster: ${CLUSTER_NAME}`. As a result, Karpenter will not clean up dynamically generated launch templates using the old name scheme. You can manually clean these up with the following commands:
+
+```bash
+## Find launch templates that match the naming pattern and that you do not want to keep
+aws ec2 describe-launch-templates --filters="Name=launch-template-name,Values=Karpenter-${CLUSTER_NAME}-*"
+
+## Delete launch template(s) that match the name but do not have the "karpenter.k8s.aws/cluster" tag
+aws ec2 delete-launch-template --launch-template-id <launch-template-id>
+```
+
+* `0.14.0` introduces additional instance type filtering if there are no `node.kubernetes.io/instance-type`, `karpenter.k8s.aws/instance-family`, or `karpenter.k8s.aws/instance-category` requirements that restrict instance types specified on the provisioner. This prevents Karpenter from launching bare metal and some older non-current generation instance types unless the provisioner has been explicitly configured to allow them. If you specify an instance type or family requirement that supplies a list of instance types or families, that list will be used regardless of filtering. The filtering can also be completely eliminated by adding an `Exists` requirement for instance type or family:
+```yaml
+  - key: node.kubernetes.io/instance-type
+    operator: Exists
+```
+
+* `0.14.0` introduces support for custom AMIs without the need for an entire launch template. You must add the `ec2:DescribeImages` permission to the Karpenter Controller Role for this feature to work, since Karpenter needs it to discover the custom images specified. Read the [Custom AMI documentation here]({{}}) to get started.
+* `0.14.0` adds an additional default toleration (`CriticalAddonsOnly=Exists`) to the Karpenter Helm chart. This may cause Karpenter to run on nodes with that taint which previously would not have been schedulable. This can be overridden by using `--set tolerations[0]=null`.
+* `0.14.0` deprecates the `AWS_ENI_LIMITED_POD_DENSITY` environment variable in favor of specifying `spec.kubeletConfiguration.maxPods` on the Provisioner (see the sketch below). `AWS_ENI_LIMITED_POD_DENSITY` will continue to work when `maxPods` is not set on the Provisioner. If `maxPods` is set, it will override `AWS_ENI_LIMITED_POD_DENSITY` on that specific Provisioner.
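+
+A minimal sketch of setting `maxPods` on a Provisioner (the value `110` is an arbitrary example):
+
+```yaml
+spec:
+  kubeletConfiguration:
+    maxPods: 110   # overrides AWS_ENI_LIMITED_POD_DENSITY for this Provisioner
+```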
+
+### Upgrading to `0.13.0`+
+
+* `0.13.0` introduces a new CRD named `AWSNodeTemplate` which can be used to specify AWS Cloud Provider parameters. Everything that was previously specified under `spec.provider` in the Provisioner resource can now be specified in the spec of the new resource (see the sketch after this list). The use of `spec.provider` is deprecated but will continue to function to maintain backwards compatibility for the current API version (v1alpha5) of the Provisioner resource. `0.13.0` also introduces support for custom user data that doesn't require the use of a custom launch template. The user data can be specified in-line in the AWSNodeTemplate resource.
+
+  If you are upgrading from `0.10.1` - `0.11.1`, a new CRD `awsnodetemplate` was added. In `0.12.0`, this CRD was renamed to `awsnodetemplates`. Since Helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade:
+  1. Make sure any `awsnodetemplate` manifests are saved somewhere so that they can be reapplied to the cluster.
+  2. `kubectl delete crd awsnodetemplate`
+  3. `kubectl apply -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.13.2/charts/karpenter/crds/karpenter.k8s.aws_awsnodetemplates.yaml`
+  4. Perform the Karpenter upgrade to `0.13.0`+, which will install the new `awsnodetemplates` CRD.
+  5. Reapply the `awsnodetemplate` manifests you saved from step 1, if applicable.
+* `0.13.0` also adds EC2/spot price fetching to Karpenter to allow making more accurate decisions regarding node deployments. Our [getting started guide]({{< ref "../getting-started/getting-started-with-karpenter" >}}) documents this, but if you are upgrading Karpenter you will need to modify your Karpenter controller policy to add the `pricing:GetProducts` and `ec2:DescribeSpotPriceHistory` permissions.
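+
+A minimal sketch of moving provider configuration out of `spec.provider` and into an `AWSNodeTemplate` referenced via `providerRef` (the resource names and selector tags here are hypothetical):
+
+```yaml
+apiVersion: karpenter.k8s.aws/v1alpha1
+kind: AWSNodeTemplate
+metadata:
+  name: default
+spec:
+  # Fields previously set under the Provisioner's spec.provider
+  subnetSelector:
+    karpenter.sh/discovery: my-cluster
+  securityGroupSelector:
+    karpenter.sh/discovery: my-cluster
+---
+apiVersion: karpenter.sh/v1alpha5
+kind: Provisioner
+metadata:
+  name: default
+spec:
+  providerRef:
+    name: default   # references the AWSNodeTemplate above
+```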
+
+### Upgrading to `0.12.0`+
+
+* `0.12.0` adds an OwnerReference to each Node created by a provisioner. Previously, deleting a provisioner would orphan nodes. Now, deleting a provisioner will cause Kubernetes [cascading delete](https://kubernetes.io/docs/concepts/architecture/garbage-collection/#cascading-deletion) logic to gracefully terminate the nodes using the Karpenter node finalizer. You may still orphan nodes by removing the owner reference.
+* If you are upgrading from `0.10.1` - `0.11.1`, a new CRD `awsnodetemplate` was added. In `0.12.0`, this CRD was renamed to `awsnodetemplates`. Since Helm does not manage the lifecycle of CRDs, you will need to perform a few manual steps for this CRD upgrade:
+  1. Make sure any `awsnodetemplate` manifests are saved somewhere so that they can be reapplied to the cluster.
+  2. `kubectl delete crd awsnodetemplate`
+  3. `kubectl apply -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.12.1/charts/karpenter/crds/karpenter.k8s.aws_awsnodetemplates.yaml`
+  4. Perform the Karpenter upgrade to `0.12.0`+, which will install the new `awsnodetemplates` CRD.
+  5. Reapply the `awsnodetemplate` manifests you saved from step 1, if applicable.
+
+### Upgrading to `0.11.0`+
+
+`0.11.0` changes the way that the `vpc.amazonaws.com/pod-eni` resource is reported. Instead of being reported for all nodes that could support the resource, regardless of whether the cluster is configured to support it, it is now controlled by a command line flag or environment variable. The parameter defaults to false and must be set if your cluster uses [security groups for pods](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html). This can be enabled by setting the environment variable `AWS_ENABLE_POD_ENI` to true via the Helm value `controller.env`.
+
+Other extended resources must be registered on nodes by their respective device plugins, which are typically installed as DaemonSets (e.g. the `nvidia.com/gpu` resource will be registered by the [NVIDIA device plugin](https://github.com/NVIDIA/k8s-device-plugin)). Previously, Karpenter would register these resources on nodes at creation, and they would be zeroed out by `kubelet` at startup. By allowing the device plugins to register the resources, pods will not bind to the nodes before any device plugin initialization has occurred.
+
+`0.11.0` adds a `providerRef` field in the Provisioner CRD. To use this new field you will need to replace the Provisioner CRD manually:
+
+```shell
+kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.11.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml
+```
+
+### Upgrading to `0.10.0`+
+
+`0.10.0` adds a new field, `startupTaints`, to the provisioner spec. Standard Helm upgrades [do not upgrade CRDs](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations), so the field will not be available unless the CRD is manually updated. This can be done prior to the standard upgrade by applying the new CRD manually:
+
+```shell
+kubectl replace -f https://raw.githubusercontent.com/aws/karpenter-provider-aws/v0.10.0/charts/karpenter/crds/karpenter.sh_provisioners.yaml
+```
+
+📝 If you don't perform this manual CRD update, Karpenter will work correctly except for rejecting the creation/update of provisioners that use `startupTaints`.
+
+### Upgrading to `0.6.2`+
+
+If using Helm, the variable names have changed for the cluster's name and endpoint. You may need to update any configuration that sets the old variable names.
+
+- `controller.clusterName` is now `clusterName`
+- `controller.clusterEndpoint` is now `clusterEndpoint`
diff --git a/website/hugo.yaml b/website/hugo.yaml
index fd4ccfa685f6..ff34053f90bd 100644
--- a/website/hugo.yaml
+++ b/website/hugo.yaml
@@ -76,13 +76,13 @@ params:
       url: "https://slack.k8s.io/"
       icon: fab fa-slack
       desc: "Chat with us on Slack in the #aws-provider channel"
-  latest_release_version: v0.34
+  latest_release_version: v0.35.0
   latest_k8s_version: 1.29
   versions:
+    - v0.35
     - v0.34
     - v0.33
     - v0.32
-    - v0.31
     - preview
 menu:
   main: