diff --git a/packages/@aws-cdk-testing/framework-integ/package.json b/packages/@aws-cdk-testing/framework-integ/package.json
index 4149ca228a596..5aa0e504c9674 100644
--- a/packages/@aws-cdk-testing/framework-integ/package.json
+++ b/packages/@aws-cdk-testing/framework-integ/package.json
@@ -35,8 +35,8 @@
     "@aws-sdk/client-acm": "3.632.0",
     "@aws-sdk/client-rds": "3.632.0",
     "@aws-sdk/client-s3": "3.632.0",
-    "delay": "5.0.0",
-    "axios": "1.7.8"
+    "axios": "1.7.8",
+    "delay": "5.0.0"
   },
   "dependencies": {
     "@aws-cdk/integ-tests-alpha": "0.0.0",
@@ -44,6 +44,7 @@
     "@aws-cdk/lambda-layer-kubectl-v29": "^2.1.0",
     "@aws-cdk/lambda-layer-kubectl-v30": "^2.0.1",
     "@aws-cdk/lambda-layer-kubectl-v31": "^2.0.0",
+    "@aws-cdk/lambda-layer-kubectl-v32": "^2.0.1",
     "aws-cdk-lib": "0.0.0",
     "cdk8s": "2.69.10",
     "cdk8s-plus-27": "2.9.5",
diff --git a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ-tests-kubernetes-version.ts b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ-tests-kubernetes-version.ts
index c9f1798841144..d75366f20bf3a 100644
--- a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ-tests-kubernetes-version.ts
+++ b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ-tests-kubernetes-version.ts
@@ -3,6 +3,7 @@ import { KubectlV24Layer } from '@aws-cdk/lambda-layer-kubectl-v24';
 import { KubectlV29Layer } from '@aws-cdk/lambda-layer-kubectl-v29';
 import { KubectlV30Layer } from '@aws-cdk/lambda-layer-kubectl-v30';
 import { KubectlV31Layer } from '@aws-cdk/lambda-layer-kubectl-v31';
+import { KubectlV32Layer } from '@aws-cdk/lambda-layer-kubectl-v32';
 import { Construct } from 'constructs';
 import * as eks from 'aws-cdk-lib/aws-eks';
 
@@ -15,6 +16,7 @@ const versionMap: { [key: string]: new (scope: Construct, id: string) => lambda.
   '1.29': KubectlV29Layer,
   '1.30': KubectlV30Layer,
   '1.31': KubectlV31Layer,
+  '1.32': KubectlV32Layer,
 };
 
 export function getClusterVersionConfig(scope: Construct, version?: eks.KubernetesVersion) {
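
Note: the hunk above adds the '1.32' entry to the version map but elides the body of `getClusterVersionConfig`. As a rough sketch of how a helper like this can resolve the mapped layer class — the map subset and helper name below are illustrative assumptions, not code from this PR:

```ts
import { KubectlV32Layer } from '@aws-cdk/lambda-layer-kubectl-v32';
import { Construct } from 'constructs';
import * as eks from 'aws-cdk-lib/aws-eks';
import * as lambda from 'aws-cdk-lib/aws-lambda';

// Illustrative subset of the version map from the hunk above.
const versionMapSketch: { [key: string]: new (scope: Construct, id: string) => lambda.ILayerVersion } = {
  '1.32': KubectlV32Layer,
};

// Hypothetical helper body: resolve the layer constructor for the version
// string (KubernetesVersion.version, e.g. '1.32') and return props that can
// be spread into ClusterProps, as integ.eks-al2023-nodegroup.ts does below.
export function clusterVersionConfigSketch(scope: Construct, version: eks.KubernetesVersion) {
  const KubectlLayer = versionMapSketch[version.version];
  return { version, kubectlLayer: new KubectlLayer(scope, 'KubectlLayer') };
}
```
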
diff --git a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/asset.2e670e0c40dc05a34d602c35c948edefcb81afaeea05b9f6240341173af6164e.zip b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/asset.2e670e0c40dc05a34d602c35c948edefcb81afaeea05b9f6240341173af6164e.zip
new file mode 100644
index 0000000000000..e60e31834daf5
Binary files /dev/null and b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/asset.2e670e0c40dc05a34d602c35c948edefcb81afaeea05b9f6240341173af6164e.zip differ
diff --git a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/asset.93d96d34e0d3cd20eb082652b91012b131bdc34fcf2bc16eb4170e04772fddb1.zip b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/asset.93d96d34e0d3cd20eb082652b91012b131bdc34fcf2bc16eb4170e04772fddb1.zip
index 270a85f97b1e6..22515aed82d4a 100644
Binary files a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/asset.93d96d34e0d3cd20eb082652b91012b131bdc34fcf2bc16eb4170e04772fddb1.zip and b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/asset.93d96d34e0d3cd20eb082652b91012b131bdc34fcf2bc16eb4170e04772fddb1.zip differ
diff --git a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/asset.e42a736be21cd3134b9bff4e71e3afa99a4cc900ae489e9a7f7025c8d258f9b8.zip b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/asset.e42a736be21cd3134b9bff4e71e3afa99a4cc900ae489e9a7f7025c8d258f9b8.zip
index 33cd69e8824d5..d28ed8e86969d 100644
Binary files a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/asset.e42a736be21cd3134b9bff4e71e3afa99a4cc900ae489e9a7f7025c8d258f9b8.zip and b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/asset.e42a736be21cd3134b9bff4e71e3afa99a4cc900ae489e9a7f7025c8d258f9b8.zip differ
diff --git a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/asset.f3c812b299b0759c937b41e39d3451f5cc61279c2ec9ee791fac08ba1e56508b.zip b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/asset.f3c812b299b0759c937b41e39d3451f5cc61279c2ec9ee791fac08ba1e56508b.zip
deleted file mode 100644
index 8ffd37a845e12..0000000000000
--- a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/asset.f3c812b299b0759c937b41e39d3451f5cc61279c2ec9ee791fac08ba1e56508b.zip
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:82d8de41d1c1ca9433b70fcf54f41e86c430290640dbec5a50aa46d39bd7b512
-size 34163012
diff --git a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/aws-cdk-eks-cluster-al2023-nodegroup-test.assets.json b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/aws-cdk-eks-cluster-al2023-nodegroup-test.assets.json
index 0ca66c6023f10..48a6142d8f1b5 100644
--- a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/aws-cdk-eks-cluster-al2023-nodegroup-test.assets.json
+++ b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/aws-cdk-eks-cluster-al2023-nodegroup-test.assets.json
@@ -1,15 +1,15 @@
 {
   "version": "39.0.0",
   "files": {
-    "f3c812b299b0759c937b41e39d3451f5cc61279c2ec9ee791fac08ba1e56508b": {
+    "2e670e0c40dc05a34d602c35c948edefcb81afaeea05b9f6240341173af6164e": {
       "source": {
-        "path": "asset.f3c812b299b0759c937b41e39d3451f5cc61279c2ec9ee791fac08ba1e56508b.zip",
+        "path": "asset.2e670e0c40dc05a34d602c35c948edefcb81afaeea05b9f6240341173af6164e.zip",
         "packaging": "file"
       },
       "destinations": {
         "current_account-current_region": {
           "bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}",
-          "objectKey": "f3c812b299b0759c937b41e39d3451f5cc61279c2ec9ee791fac08ba1e56508b.zip",
+          "objectKey": "2e670e0c40dc05a34d602c35c948edefcb81afaeea05b9f6240341173af6164e.zip",
           "assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}"
         }
       }
@@ -105,7 +105,7 @@
         }
       }
     },
-    "699eb0ac5c33dfc382be599bcfdba1fd0c5e46571eb0a58b842b2c9fd42f289e": {
+    "19eaa6604b4fff55db09199c550f47c7110aec2e5352d8ac56b8292d9695cf13": {
       "source": {
         "path": "aws-cdk-eks-cluster-al2023-nodegroup-test.template.json",
         "packaging": "file"
@@ -113,7 +113,7 @@
       },
       "destinations": {
         "current_account-current_region": {
           "bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}",
-          "objectKey": "699eb0ac5c33dfc382be599bcfdba1fd0c5e46571eb0a58b842b2c9fd42f289e.json",
+          "objectKey": "19eaa6604b4fff55db09199c550f47c7110aec2e5352d8ac56b8292d9695cf13.json",
           "assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}"
         }
       }
diff --git a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/aws-cdk-eks-cluster-al2023-nodegroup-test.template.json b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/aws-cdk-eks-cluster-al2023-nodegroup-test.template.json
index d7d2bbec5fc4e..746eeb1991a51 100644
--- a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/aws-cdk-eks-cluster-al2023-nodegroup-test.template.json
+++ b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/aws-cdk-eks-cluster-al2023-nodegroup-test.template.json
@@ -434,9 +434,9 @@
         "S3Bucket": {
           "Fn::Sub": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}"
         },
-        "S3Key": "f3c812b299b0759c937b41e39d3451f5cc61279c2ec9ee791fac08ba1e56508b.zip"
+        "S3Key": "2e670e0c40dc05a34d602c35c948edefcb81afaeea05b9f6240341173af6164e.zip"
       },
-      "Description": "/opt/kubectl/kubectl 1.31.0; /opt/helm/helm 3.16.1",
+      "Description": "/opt/kubectl/kubectl 1.32.0; /opt/helm/helm 3.17.0",
       "LicenseInfo": "Apache-2.0"
     }
   },
@@ -755,7 +755,7 @@
         ]
       },
       "Config": {
-        "version": "1.31",
+        "version": "1.32",
         "roleArn": {
           "Fn::GetAtt": [
             "ClusterRoleFA261979",
diff --git a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/manifest.json b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/manifest.json
index de9a37d7b8a75..1c59b80ce84c1 100644
--- a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/manifest.json
+++ b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/manifest.json
@@ -18,7 +18,7 @@
       "validateOnSynth": false,
       "assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-deploy-role-${AWS::AccountId}-${AWS::Region}",
       "cloudFormationExecutionRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-cfn-exec-role-${AWS::AccountId}-${AWS::Region}",
-      "stackTemplateAssetObjectUrl": "s3://cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}/699eb0ac5c33dfc382be599bcfdba1fd0c5e46571eb0a58b842b2c9fd42f289e.json",
+      "stackTemplateAssetObjectUrl": "s3://cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}/19eaa6604b4fff55db09199c550f47c7110aec2e5352d8ac56b8292d9695cf13.json",
       "requiresBootstrapStackVersion": 6,
       "bootstrapStackVersionSsmParameter": "/cdk-bootstrap/hnb659fds/version",
       "additionalDependencies": [
@@ -321,7 +321,10 @@
       "/aws-cdk-eks-cluster-al2023-nodegroup-test/KubectlLayer/Resource": [
         {
           "type": "aws:cdk:logicalId",
-          "data": "KubectlLayer600207B5"
+          "data": "KubectlLayer600207B5",
+          "trace": [
+            "!!DESTRUCTIVE_CHANGES: WILL_REPLACE"
+          ]
         }
       ],
       "/aws-cdk-eks-cluster-al2023-nodegroup-test/Cluster": [
diff --git a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/tree.json b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/tree.json
index 56f02ae639131..5591bd2806224 100644
--- a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/tree.json
+++ b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.js.snapshot/tree.json
@@ -844,9 +844,9 @@
             "s3Bucket": {
               "Fn::Sub": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}"
             },
-            "s3Key": "f3c812b299b0759c937b41e39d3451f5cc61279c2ec9ee791fac08ba1e56508b.zip"
+            "s3Key": "2e670e0c40dc05a34d602c35c948edefcb81afaeea05b9f6240341173af6164e.zip"
           },
-          "description": "/opt/kubectl/kubectl 1.31.0; /opt/helm/helm 3.16.1",
+          "description": "/opt/kubectl/kubectl 1.32.0; /opt/helm/helm 3.17.0",
           "licenseInfo": "Apache-2.0"
         }
       },
@@ -857,8 +857,8 @@
         }
       },
       "constructInfo": {
-        "fqn": "@aws-cdk/lambda-layer-kubectl-v31.KubectlV31Layer",
-        "version": "2.0.0",
+        "fqn": "@aws-cdk/lambda-layer-kubectl-v32.KubectlV32Layer",
+        "version": "2.0.1",
         "metadata": [
           "*"
         ]
diff --git a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.ts b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.ts
index b99eb0f31cdda..a6808d4715343 100644
--- a/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.ts
+++ b/packages/@aws-cdk-testing/framework-integ/test/aws-eks/test/integ.eks-al2023-nodegroup.ts
@@ -27,7 +27,7 @@ class EksClusterStack extends Stack {
       vpc: this.vpc,
       mastersRole,
       defaultCapacity: 0,
-      ...getClusterVersionConfig(this, eks.KubernetesVersion.V1_31),
+      ...getClusterVersionConfig(this, eks.KubernetesVersion.V1_32),
     });
 
     // create nodegroup with AL2023_X86_64_STANDARD
diff --git a/packages/aws-cdk-lib/aws-eks/README.md b/packages/aws-cdk-lib/aws-eks/README.md
index a3cd982781db6..3f2fde1b06260 100644
--- a/packages/aws-cdk-lib/aws-eks/README.md
+++ b/packages/aws-cdk-lib/aws-eks/README.md
@@ -68,12 +68,12 @@ This example defines an Amazon EKS cluster with the following configuration:
 * A Kubernetes pod with a container based on the [paulbouwer/hello-kubernetes](https://github.com/paulbouwer/hello-kubernetes) image.
 
 ```ts
-import { KubectlV31Layer } from '@aws-cdk/lambda-layer-kubectl-v31';
+import { KubectlV32Layer } from '@aws-cdk/lambda-layer-kubectl-v32';
 
 // provisioning a cluster
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_31,
-  kubectlLayer: new KubectlV31Layer(this, 'kubectl'),
+  version: eks.KubernetesVersion.V1_32,
+  kubectlLayer: new KubectlV32Layer(this, 'kubectl'),
 });
 
 // apply a kubernetes manifest to the cluster
@@ -139,7 +139,7 @@ Creating a new cluster is done using the `Cluster` or `FargateCluster` construct
 
 ```ts
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
 });
 ```
@@ -147,7 +147,7 @@ You can also use `FargateCluster` to provision a cluster that uses only fargate
 
 ```ts
 new eks.FargateCluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
 });
 ```
@@ -159,7 +159,7 @@ Capacity is the amount and the type of worker nodes that are available to the cl
 ### Managed node groups
 
 Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters.
-With Amazon EKS managed node groups, you don’t need to separately provision or register the Amazon EC2 instances that provide compute capacity to run your Kubernetes applications. You can create, update, or terminate nodes for your cluster with a single operation. Nodes run using the latest Amazon EKS optimized AMIs in your AWS account while node updates and terminations gracefully drain nodes to ensure that your applications stay available.
+With Amazon EKS managed node groups, you don't need to separately provision or register the Amazon EC2 instances that provide compute capacity to run your Kubernetes applications. You can create, update, or terminate nodes for your cluster with a single operation. Nodes run using the latest Amazon EKS optimized AMIs in your AWS account while node updates and terminations gracefully drain nodes to ensure that your applications stay available.
 
 > For more details visit [Amazon EKS Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html).
@@ -171,7 +171,7 @@ At cluster instantiation time, you can customize the number of instances and the
 ```ts
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
   defaultCapacity: 5,
   defaultCapacityInstance: ec2.InstanceType.of(ec2.InstanceClass.M5, ec2.InstanceSize.SMALL),
 });
@@ -183,7 +183,7 @@ Additional customizations are available post instantiation. To apply them, set t
 ```ts
 const cluster = new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
   defaultCapacity: 0,
 });
@@ -284,7 +284,7 @@ const eksClusterNodeGroupRole = new iam.Role(this, 'eksClusterNodeGroupRole', {
 });
 
 const cluster = new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
   defaultCapacity: 0,
 });
@@ -399,7 +399,7 @@ successful replacement. Consider this example if you are renaming the cluster fr
 ```ts
 const cluster = new eks.Cluster(this, 'cluster-to-rename', {
   clusterName: 'foo', // rename this to 'bar'
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
 });
 
 // allow the cluster admin role to delete the cluster 'foo'
@@ -453,7 +453,7 @@ The following code defines an Amazon EKS cluster with a default Fargate Profile
 
 ```ts
 const cluster = new eks.FargateCluster(this, 'MyCluster', {
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
 });
 ```
@@ -530,7 +530,7 @@ You can also configure the cluster to use an auto-scaling group as the default c
 
 ```ts
 const cluster = new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
   defaultCapacityType: eks.DefaultCapacityType.EC2,
 });
 ```
@@ -634,7 +634,7 @@ You can configure the [cluster endpoint access](https://docs.aws.amazon.com/eks/
 
 ```ts
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
   endpointAccess: eks.EndpointAccess.PRIVATE, // No access outside of your VPC.
 });
 ```
@@ -656,7 +656,7 @@ To deploy the controller on your EKS cluster, configure the `albController` prop
 
 ```ts
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
   albController: {
     version: eks.AlbControllerVersion.V2_8_2,
   },
@@ -699,7 +699,7 @@ You can specify the VPC of the cluster using the `vpc` and `vpcSubnets` properti
 declare const vpc: ec2.Vpc;
 
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
   vpc,
   vpcSubnets: [{ subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS }],
 });
@@ -746,7 +746,7 @@ You can configure the environment of the Cluster Handler functions by specifying
 ```ts
 declare const proxyInstanceSecurityGroup: ec2.SecurityGroup;
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
   clusterHandlerEnvironment: {
     https_proxy: 'http://proxy.myproxy.com',
   },
@@ -788,7 +788,7 @@ for (let subnet of subnets) {
 }
 
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
   vpc: vpc,
   ipFamily: eks.IpFamily.IP_V6,
   vpcSubnets: [{ subnets: vpc.publicSubnets }],
@@ -823,7 +823,7 @@ You can configure the environment of this function by specifying it at cluster i
 
 ```ts
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
   kubectlEnvironment: {
     'http_proxy': 'http://proxy.myproxy.com',
   },
@@ -843,11 +843,11 @@ Depending on which version of kubernetes you're targeting, you will need to use
 the `@aws-cdk/lambda-layer-kubectl-vXY` packages.
 
 ```ts
-import { KubectlV31Layer } from '@aws-cdk/lambda-layer-kubectl-v31';
+import { KubectlV32Layer } from '@aws-cdk/lambda-layer-kubectl-v32';
 
 const cluster = new eks.Cluster(this, 'hello-eks', {
-  version: eks.KubernetesVersion.V1_31,
-  kubectlLayer: new KubectlV31Layer(this, 'kubectl'),
+  version: eks.KubernetesVersion.V1_32,
+  kubectlLayer: new KubectlV32Layer(this, 'kubectl'),
 });
 ```
@@ -882,7 +882,7 @@ const cluster1 = new eks.Cluster(this, 'MyCluster', {
   kubectlLayer: layer,
   vpc,
   clusterName: 'cluster-name',
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
 });
 
 // or
@@ -900,7 +900,7 @@ By default, the kubectl provider is configured with 1024MiB of memory. You can u
 ```ts
 new eks.Cluster(this, 'MyCluster', {
   kubectlMemory: Size.gibibytes(4),
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
 });
 
 // or
@@ -939,7 +939,7 @@ When you create a cluster, you can specify a `mastersRole`. The `Cluster` constr
 ```ts
 declare const role: iam.Role;
 new eks.Cluster(this, 'HelloEKS', {
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
   mastersRole: role,
 });
 ```
@@ -989,7 +989,7 @@ You can use the `secretsEncryptionKey` to configure which key the cluster will u
 const secretsKey = new kms.Key(this, 'SecretsKey');
 const cluster = new eks.Cluster(this, 'MyCluster', {
   secretsEncryptionKey: secretsKey,
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
 });
 ```
@@ -999,7 +999,7 @@ You can also use a similar configuration for running a cluster built using the F
 const secretsKey = new kms.Key(this, 'SecretsKey');
 const cluster = new eks.FargateCluster(this, 'MyFargateCluster', {
   secretsEncryptionKey: secretsKey,
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
 });
 ```
@@ -1043,7 +1043,7 @@ To access the Kubernetes resources from the console, make sure your viewing prin
 in the `aws-auth` ConfigMap. Some options to consider:
 
 ```ts
-import { KubectlV31Layer } from '@aws-cdk/lambda-layer-kubectl-v31';
+import { KubectlV32Layer } from '@aws-cdk/lambda-layer-kubectl-v32';
 declare const cluster: eks.Cluster;
 declare const your_current_role: iam.Role;
 declare const vpc: ec2.Vpc;
@@ -1063,7 +1063,7 @@ your_current_role.addToPolicy(new iam.PolicyStatement({
 
 ```ts
 // Option 2: create your custom mastersRole with scoped assumeBy arn as the Cluster prop. Switch to this role from the AWS console.
-import { KubectlV31Layer } from '@aws-cdk/lambda-layer-kubectl-v31';
+import { KubectlV32Layer } from '@aws-cdk/lambda-layer-kubectl-v32';
 
 declare const vpc: ec2.Vpc;
@@ -1073,8 +1073,8 @@ const mastersRole = new iam.Role(this, 'MastersRole', {
 
 const cluster = new eks.Cluster(this, 'EksCluster', {
   vpc,
-  version: eks.KubernetesVersion.V1_31,
-  kubectlLayer: new KubectlV31Layer(this, 'KubectlLayer'),
+  version: eks.KubernetesVersion.V1_32,
+  kubectlLayer: new KubectlV32Layer(this, 'KubectlLayer'),
   mastersRole,
 });
@@ -1118,13 +1118,13 @@ AWS IAM principals from both Amazon EKS access entry APIs and the aws-auth confi
 To specify the `authenticationMode`:
 
 ```ts
-import { KubectlV31Layer } from '@aws-cdk/lambda-layer-kubectl-v31';
+import { KubectlV32Layer } from '@aws-cdk/lambda-layer-kubectl-v32';
 declare const vpc: ec2.Vpc;
 
 new eks.Cluster(this, 'Cluster', {
   vpc,
-  version: eks.KubernetesVersion.V1_31,
-  kubectlLayer: new KubectlV31Layer(this, 'KubectlLayer'),
+  version: eks.KubernetesVersion.V1_32,
+  kubectlLayer: new KubectlV32Layer(this, 'KubectlLayer'),
   authenticationMode: eks.AuthenticationMode.API_AND_CONFIG_MAP,
 });
 ```
@@ -1169,7 +1169,7 @@ eks.AccessPolicy.fromAccessPolicyName('AmazonEKSAdminPolicy', {
 Use `grantAccess()` to grant the AccessPolicy to an IAM principal:
 
 ```ts
-import { KubectlV31Layer } from '@aws-cdk/lambda-layer-kubectl-v31';
+import { KubectlV32Layer } from '@aws-cdk/lambda-layer-kubectl-v32';
 declare const vpc: ec2.Vpc;
 
 const clusterAdminRole = new iam.Role(this, 'ClusterAdminRole', {
@@ -1187,8 +1187,8 @@ const eksAdminViewRole = new iam.Role(this, 'EKSAdminViewRole', {
 
 const cluster = new eks.Cluster(this, 'Cluster', {
   vpc,
   mastersRole: clusterAdminRole,
-  version: eks.KubernetesVersion.V1_31,
-  kubectlLayer: new KubectlV31Layer(this, 'KubectlLayer'),
+  version: eks.KubernetesVersion.V1_32,
+  kubectlLayer: new KubectlV32Layer(this, 'KubectlLayer'),
   authenticationMode: eks.AuthenticationMode.API_AND_CONFIG_MAP,
 });
@@ -1521,7 +1521,7 @@ when a cluster is defined:
 
 ```ts
 new eks.Cluster(this, 'MyCluster', {
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
   prune: false,
 });
 ```
@@ -1924,7 +1924,7 @@ property. For example:
 
 ```ts
 const cluster = new eks.Cluster(this, 'Cluster', {
   // ...
-  version: eks.KubernetesVersion.V1_31,
+  version: eks.KubernetesVersion.V1_32,
   clusterLogging: [
     eks.ClusterLoggingTypes.API,
     eks.ClusterLoggingTypes.AUTHENTICATOR,
diff --git a/packages/aws-cdk-lib/aws-eks/lib/cluster.ts b/packages/aws-cdk-lib/aws-eks/lib/cluster.ts
index c79999c6360c4..b61ebc4bc21e0 100644
--- a/packages/aws-cdk-lib/aws-eks/lib/cluster.ts
+++ b/packages/aws-cdk-lib/aws-eks/lib/cluster.ts
@@ -999,6 +999,15 @@ export class KubernetesVersion {
    */
   public static readonly V1_31 = KubernetesVersion.of('1.31');
 
+  /**
+   * Kubernetes version 1.32
+   *
+   * When creating a `Cluster` with this version, you need to also specify the
+   * `kubectlLayer` property with a `KubectlV32Layer` from
+   * `@aws-cdk/lambda-layer-kubectl-v32`.
+   */
+  public static readonly V1_32 = KubernetesVersion.of('1.32');
+
   /**
    * Custom cluster version
    * @param version custom version number
diff --git a/packages/aws-cdk-lib/aws-eks/test/access-entry.test.ts b/packages/aws-cdk-lib/aws-eks/test/access-entry.test.ts
index 0ae34b01d511b..329cfa1726aa8 100644
--- a/packages/aws-cdk-lib/aws-eks/test/access-entry.test.ts
+++ b/packages/aws-cdk-lib/aws-eks/test/access-entry.test.ts
@@ -16,7 +16,7 @@ describe('AccessEntry', () => {
     app = new App();
     stack = new Stack(app, 'test-stack');
     cluster = new Cluster(stack, 'Cluster', {
-      version: KubernetesVersion.V1_29,
+      version: KubernetesVersion.V1_32,
       authenticationMode: AuthenticationMode.API,
     });
diff --git a/packages/aws-cdk-lib/aws-eks/test/addon.test.ts b/packages/aws-cdk-lib/aws-eks/test/addon.test.ts
index 249d584b5b0c4..2edb20a3c1e22 100644
--- a/packages/aws-cdk-lib/aws-eks/test/addon.test.ts
+++ b/packages/aws-cdk-lib/aws-eks/test/addon.test.ts
@@ -11,7 +11,7 @@ describe('Addon', () => {
     app = new App();
     stack = new Stack(app, 'Stack');
     cluster = new Cluster(stack, 'Cluster', {
-      version: KubernetesVersion.V1_30,
+      version: KubernetesVersion.V1_32,
     });
   });
diff --git a/packages/aws-cdk-lib/package.json b/packages/aws-cdk-lib/package.json
index 1a774175f1ed7..96c34529e8629 100644
--- a/packages/aws-cdk-lib/package.json
+++ b/packages/aws-cdk-lib/package.json
@@ -522,7 +522,7 @@
   },
   "jsiiRosetta": {
     "exampleDependencies": {
-      "@aws-cdk/lambda-layer-kubectl-v31": "^2.0.0",
+      "@aws-cdk/lambda-layer-kubectl-v32": "^2.0.0",
       "cdk8s-plus-25": "^2.7.0",
       "@aws-cdk/aws-kinesisfirehose-alpha": "*",
       "@aws-cdk/aws-kinesisfirehose-destinations-alpha": "*"
diff --git a/yarn.lock b/yarn.lock
index 462a2f11e2c8f..daaeffbc4fbaa 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -114,6 +114,11 @@
   resolved "https://registry.npmjs.org/@aws-cdk/lambda-layer-kubectl-v31/-/lambda-layer-kubectl-v31-2.0.0.tgz#d87799d7d0d5dad77af45281a36942e4b7996b6b"
   integrity sha512-8JI0sMDbqCubOyt1TbQFEwicYok9KYSrNSfzREgjGJcoPy17/Kd0gbe44ATyLMfjae7dExUhhwKMhr6GK7Hmrw==
 
+"@aws-cdk/lambda-layer-kubectl-v32@^2.0.1":
+  version "2.0.1"
+  resolved "https://registry.npmjs.org/@aws-cdk/lambda-layer-kubectl-v32/-/lambda-layer-kubectl-v32-2.0.1.tgz#8f77f30f16161fc07f1e82688ae7fa618c9a6d4e"
+  integrity sha512-kysjeU/A5axsSLYhOzrIZgzZoCY4z9R2cYo/l6s7QA8mKbes3plmOGBQCRSCVQN2MhnXNk/a0kZOEfDUpLQung==
+
 "@aws-cdk/service-spec-importers@^0.0.64":
   version "0.0.64"
   resolved "https://registry.npmjs.org/@aws-cdk/service-spec-importers/-/service-spec-importers-0.0.64.tgz#8e69644f627c838f8a6523fc6b5e63069fbd6656"
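
Note: the new `V1_32` constant added in `cluster.ts` above is built on the same `KubernetesVersion.of()` factory that remains the escape hatch for versions without a dedicated member. A minimal sketch of the relationship — the '1.33' string is a hypothetical future version, not something this PR adds:

```ts
import * as eks from 'aws-cdk-lib/aws-eks';

// The dedicated constant and the general factory agree on the version string.
const v132 = eks.KubernetesVersion.V1_32;
console.log(v132.version); // '1.32'

// Escape hatch for versions without a constant; '1.33' is hypothetical.
// Per the doc comment in the hunk above, a cluster on such a version still
// needs a matching kubectl layer passed via `kubectlLayer`.
const future = eks.KubernetesVersion.of('1.33');
console.log(future.version); // '1.33'
```
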