diff --git a/.github/workflows/e2e-matrix.yaml b/.github/workflows/e2e-matrix.yaml
index a5c633fae5f0..a82e5cfa2b2f 100644
--- a/.github/workflows/e2e-matrix.yaml
+++ b/.github/workflows/e2e-matrix.yaml
@@ -49,7 +49,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        suite: [Beta/Integration, Beta/Drift, Beta/Consolidation, Alpha/Integration, Alpha/Machine, Alpha/Consolidation, Alpha/Utilization, Alpha/Interruption, Alpha/Drift, Alpha/Expiration, Alpha/Chaos, Alpha/IPv6]
+        suite: [Beta/Integration, Beta/Drift, Beta/Consolidation, Beta/NodeClaim, Alpha/Integration, Alpha/Machine, Alpha/Consolidation, Alpha/Utilization, Alpha/Interruption, Alpha/Drift, Alpha/Expiration, Alpha/Chaos, Alpha/IPv6]
     uses: ./.github/workflows/e2e.yaml
     with:
       suite: ${{ matrix.suite }}
diff --git a/test/suites/beta/nodeclaim/garbage_collection_test.go b/test/suites/beta/nodeclaim/garbage_collection_test.go
new file mode 100644
index 000000000000..703b4edd505b
--- /dev/null
+++ b/test/suites/beta/nodeclaim/garbage_collection_test.go
@@ -0,0 +1,149 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nodeclaim_test
+
+import (
+	"encoding/base64"
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"github.com/samber/lo"
+	v1 "k8s.io/api/core/v1"
+
+	corev1beta1 "github.com/aws/karpenter-core/pkg/apis/v1beta1"
+	coretest "github.com/aws/karpenter-core/pkg/test"
+	"github.com/aws/karpenter/pkg/apis/settings"
+	awserrors "github.com/aws/karpenter/pkg/errors"
+	"github.com/aws/karpenter/pkg/utils"
+	environmentaws "github.com/aws/karpenter/test/pkg/environment/aws"
+)
+
+var _ = Describe("NodeClaimGarbageCollection", func() {
+	var customAMI string
+	var instanceInput *ec2.RunInstancesInput
+
+	BeforeEach(func() {
+		securityGroups := env.GetSecurityGroups(map[string]string{"karpenter.sh/discovery": env.ClusterName})
+		subnets := env.GetSubnetNameAndIds(map[string]string{"karpenter.sh/discovery": env.ClusterName})
+		Expect(securityGroups).ToNot(HaveLen(0))
+		Expect(subnets).ToNot(HaveLen(0))
+
+		customAMI = env.GetCustomAMI("/aws/service/eks/optimized-ami/%s/amazon-linux-2/recommended/image_id", 1)
+		instanceInput = &ec2.RunInstancesInput{
+			InstanceType: aws.String("c5.large"),
+			IamInstanceProfile: &ec2.IamInstanceProfileSpecification{
+				Name: aws.String(settings.FromContext(env.Context).DefaultInstanceProfile),
+			},
+			SecurityGroupIds: lo.Map(securityGroups, func(s environmentaws.SecurityGroup, _ int) *string {
+				return s.GroupIdentifier.GroupId
+			}),
+			SubnetId: aws.String(subnets[0].ID),
+			BlockDeviceMappings: []*ec2.BlockDeviceMapping{
+				{
+					DeviceName: aws.String("/dev/xvda"),
+					Ebs: &ec2.EbsBlockDevice{
+						Encrypted:           aws.Bool(true),
+						DeleteOnTermination: aws.Bool(true),
+						VolumeType:          aws.String(ec2.VolumeTypeGp3),
+						VolumeSize:          aws.Int64(20),
+					},
+				},
+			},
+			ImageId: aws.String(customAMI), // EKS AL2-based AMI
+			TagSpecifications: []*ec2.TagSpecification{
+				{
+					ResourceType: aws.String(ec2.ResourceTypeInstance),
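+					// Tag the instance the way Karpenter would at launch: the cluster ownership tag plus the
+					// NodePool label that ties the instance back to the test NodePool.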
+					Tags: []*ec2.Tag{
+						{
+							Key:   aws.String(fmt.Sprintf("kubernetes.io/cluster/%s", env.ClusterName)),
+							Value: aws.String("owned"),
+						},
+						{
+							Key:   aws.String(corev1beta1.NodePoolLabelKey),
+							Value: aws.String(nodePool.Name),
+						},
+					},
+				},
+			},
+			MinCount: aws.Int64(1),
+			MaxCount: aws.Int64(1),
+		}
+	})
+	It("should succeed to garbage collect an Instance that was launched by a NodeClaim but has no NodeClaim mapping", func() {
+		// Update the userData for the instance input with the correct NodePool
+		rawContent, err := os.ReadFile("testdata/al2_userdata_input.sh")
+		Expect(err).ToNot(HaveOccurred())
+		instanceInput.UserData = lo.ToPtr(base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(string(rawContent), env.ClusterName,
+			env.ClusterEndpoint, env.ExpectCABundle(), nodePool.Name))))
+
+		// Create an instance manually to mock Karpenter launching an instance
+		out := env.ExpectRunInstances(instanceInput)
+		Expect(out.Instances).To(HaveLen(1))
+
+		// Always ensure that we cleanup the instance
+		DeferCleanup(func() {
+			_, err := env.EC2API.TerminateInstances(&ec2.TerminateInstancesInput{
+				InstanceIds: []*string{out.Instances[0].InstanceId},
+			})
+			if awserrors.IsNotFound(err) {
+				return
+			}
+			Expect(err).ToNot(HaveOccurred())
+		})
+
+		// Wait for the node to register with the cluster
+		node := env.EventuallyExpectCreatedNodeCount("==", 1)[0]
+
+		// Update the tags to add the karpenter.sh/managed-by tag
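+		// The garbage collector only considers instances that carry this tag; once it is present and no
+		// NodeClaim maps to the instance, the instance is treated as orphaned and terminated.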
+		_, err = env.EC2API.CreateTagsWithContext(env.Context, &ec2.CreateTagsInput{
+			Resources: []*string{out.Instances[0].InstanceId},
+			Tags: []*ec2.Tag{
+				{
+					Key:   aws.String(corev1beta1.ManagedByAnnotationKey),
+					Value: aws.String(env.ClusterName),
+				},
+			},
+		})
+		Expect(err).ToNot(HaveOccurred())
+
+		// Eventually expect the node and the instance to be removed (shutting-down)
+		env.EventuallyExpectNotFound(node)
+		Eventually(func(g Gomega) {
+			g.Expect(lo.FromPtr(env.GetInstanceByID(aws.StringValue(out.Instances[0].InstanceId)).State.Name)).To(Equal("shutting-down"))
+		}, time.Second*10).Should(Succeed())
+	})
+	It("should succeed to garbage collect an Instance that was deleted without the cluster's knowledge", func() {
+		// Disable the interruption queue for the garbage collection test so that the terminated instance
+		// is discovered by the garbage collector rather than by the interruption controller
+		env.ExpectSettingsOverridden(v1.EnvVar{Name: "INTERRUPTION_QUEUE", Value: ""})
+
+		pod := coretest.Pod()
+		env.ExpectCreated(nodeClass, nodePool, pod)
+		env.EventuallyExpectHealthy(pod)
+		node := env.ExpectCreatedNodeCount("==", 1)[0]
+
+		_, err := env.EC2API.TerminateInstances(&ec2.TerminateInstancesInput{
+			InstanceIds: aws.StringSlice([]string{lo.Must(utils.ParseInstanceID(node.Spec.ProviderID))}),
+		})
+		Expect(err).ToNot(HaveOccurred())
+
+		// The garbage collection mechanism should eventually delete this NodeClaim and Node
+		env.EventuallyExpectNotFound(node)
+	})
+})
diff --git a/test/suites/beta/nodeclaim/nodeclaim_test.go b/test/suites/beta/nodeclaim/nodeclaim_test.go
new file mode 100644
index 000000000000..d9511f4c30a6
--- /dev/null
+++ b/test/suites/beta/nodeclaim/nodeclaim_test.go
@@ -0,0 +1,354 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nodeclaim_test
+
+import (
+	"encoding/base64"
+	"fmt"
+	"os"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"github.com/samber/lo"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	corev1beta1 "github.com/aws/karpenter-core/pkg/apis/v1beta1"
+	"github.com/aws/karpenter-core/pkg/test"
+	"github.com/aws/karpenter-core/pkg/utils/resources"
+	"github.com/aws/karpenter/pkg/apis/v1beta1"
+)
+
+var _ = Describe("StandaloneNodeClaim", func() {
+	It("should create a standard NodeClaim within the 'c' instance family", func() {
+		nodeClaim := test.NodeClaim(corev1beta1.NodeClaim{
+			Spec: corev1beta1.NodeClaimSpec{
+				Requirements: []v1.NodeSelectorRequirement{
+					{
+						Key:      v1beta1.LabelInstanceCategory,
+						Operator: v1.NodeSelectorOpIn,
+						Values:   []string{"c"},
+					},
+					{
+						Key:      corev1beta1.CapacityTypeLabelKey,
+						Operator: v1.NodeSelectorOpIn,
+						Values:   []string{corev1beta1.CapacityTypeOnDemand},
+					},
+				},
+				NodeClassRef: &corev1beta1.NodeClassReference{
+					Name: nodeClass.Name,
+				},
+			},
+		})
+		env.ExpectCreated(nodeClass, nodeClaim)
+		node := env.EventuallyExpectInitializedNodeCount("==", 1)[0]
+		nodeClaim = env.EventuallyExpectCreatedNodeClaimCount("==", 1)[0]
+		Expect(node.Labels).To(HaveKeyWithValue(v1beta1.LabelInstanceCategory, "c"))
+		env.EventuallyExpectNodeClaimsReady(nodeClaim)
+	})
+	It("should create a standard NodeClaim based on resource requests", func() {
+		nodeClaim := test.NodeClaim(corev1beta1.NodeClaim{
+			Spec: corev1beta1.NodeClaimSpec{
+				Resources: corev1beta1.ResourceRequirements{
+					Requests: v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse("3"),
+						v1.ResourceMemory: resource.MustParse("64Gi"),
+					},
+				},
+				NodeClassRef: &corev1beta1.NodeClassReference{
+					Name: nodeClass.Name,
+				},
+			},
+		})
+		env.ExpectCreated(nodeClass, nodeClaim)
+		node := env.EventuallyExpectInitializedNodeCount("==", 1)[0]
+		nodeClaim = env.EventuallyExpectCreatedNodeClaimCount("==", 1)[0]
+		Expect(resources.Fits(nodeClaim.Spec.Resources.Requests, node.Status.Allocatable)).To(BeTrue())
+		env.EventuallyExpectNodeClaimsReady(nodeClaim)
+	})
+	It("should create a NodeClaim propagating all the NodeClaim spec details", func() {
+		nodeClaim := test.NodeClaim(corev1beta1.NodeClaim{
+			ObjectMeta: metav1.ObjectMeta{
+				Annotations: map[string]string{
+					"custom-annotation": "custom-value",
+				},
+				Labels: map[string]string{
+					"custom-label": "custom-value",
+				},
+			},
+			Spec: corev1beta1.NodeClaimSpec{
+				Taints: []v1.Taint{
+					{
+						Key:    "custom-taint",
+						Effect: v1.TaintEffectNoSchedule,
+						Value:  "custom-value",
+					},
+					{
+						Key:    "other-custom-taint",
+						Effect: v1.TaintEffectNoExecute,
+						Value:  "other-custom-value",
+					},
+				},
+				Resources: corev1beta1.ResourceRequirements{
+					Requests: v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse("3"),
+						v1.ResourceMemory: resource.MustParse("16Gi"),
+					},
+				},
+				Kubelet: &corev1beta1.KubeletConfiguration{
+					MaxPods:     lo.ToPtr[int32](110),
+					PodsPerCore: lo.ToPtr[int32](10),
+					SystemReserved: v1.ResourceList{
+						v1.ResourceCPU:              resource.MustParse("200m"),
+						v1.ResourceMemory:           resource.MustParse("200Mi"),
+						v1.ResourceEphemeralStorage: resource.MustParse("1Gi"),
+					},
+					KubeReserved: v1.ResourceList{
+						v1.ResourceCPU:              resource.MustParse("200m"),
+						v1.ResourceMemory:           resource.MustParse("200Mi"),
+						v1.ResourceEphemeralStorage: resource.MustParse("1Gi"),
+					},
+					EvictionHard: map[string]string{
+						"memory.available":   "5%",
+						"nodefs.available":   "5%",
+						"nodefs.inodesFree":  "5%",
+						"imagefs.available":  "5%",
+						"imagefs.inodesFree": "5%",
+						"pid.available":      "3%",
+					},
+					EvictionSoft: map[string]string{
+						"memory.available":   "10%",
+						"nodefs.available":   "10%",
+						"nodefs.inodesFree":  "10%",
+						"imagefs.available":  "10%",
+						"imagefs.inodesFree": "10%",
+						"pid.available":      "6%",
+					},
+					EvictionSoftGracePeriod: map[string]metav1.Duration{
+						"memory.available":   {Duration: time.Minute * 2},
+						"nodefs.available":   {Duration: time.Minute * 2},
+						"nodefs.inodesFree":  {Duration: time.Minute * 2},
+						"imagefs.available":  {Duration: time.Minute * 2},
+						"imagefs.inodesFree": {Duration: time.Minute * 2},
+						"pid.available":      {Duration: time.Minute * 2},
+					},
+					EvictionMaxPodGracePeriod:   lo.ToPtr[int32](120),
+					ImageGCHighThresholdPercent: lo.ToPtr[int32](50),
+					ImageGCLowThresholdPercent:  lo.ToPtr[int32](10),
+				},
+				NodeClassRef: &corev1beta1.NodeClassReference{
+					Name: nodeClass.Name,
+				},
+			},
+		})
+		env.ExpectCreated(nodeClass, nodeClaim)
+		node := env.EventuallyExpectInitializedNodeCount("==", 1)[0]
+		Expect(node.Annotations).To(HaveKeyWithValue("custom-annotation", "custom-value"))
+		Expect(node.Labels).To(HaveKeyWithValue("custom-label", "custom-value"))
+		Expect(node.Spec.Taints).To(ContainElements(
+			v1.Taint{
+				Key:    "custom-taint",
+				Effect: v1.TaintEffectNoSchedule,
+				Value:  "custom-value",
+			},
+			v1.Taint{
+				Key:    "other-custom-taint",
+				Effect: v1.TaintEffectNoExecute,
+				Value:  "other-custom-value",
+			},
+		))
+		Expect(node.OwnerReferences).To(ContainElement(
+			metav1.OwnerReference{
+				APIVersion:         corev1beta1.SchemeGroupVersion.String(),
+				Kind:               "NodeClaim",
+				Name:               nodeClaim.Name,
+				UID:                nodeClaim.UID,
+				BlockOwnerDeletion: lo.ToPtr(true),
+			},
+		))
+		env.EventuallyExpectCreatedNodeClaimCount("==", 1)
+		env.EventuallyExpectNodeClaimsReady(nodeClaim)
+	})
+	It("should remove the cloudProvider NodeClaim when the cluster NodeClaim is deleted", func() {
+		nodeClaim := test.NodeClaim(corev1beta1.NodeClaim{
+			Spec: corev1beta1.NodeClaimSpec{
+				Requirements: []v1.NodeSelectorRequirement{
+					{
+						Key:      v1beta1.LabelInstanceCategory,
+						Operator: v1.NodeSelectorOpIn,
+						Values:   []string{"c"},
+					},
+					{
+						Key:      corev1beta1.CapacityTypeLabelKey,
+						Operator: v1.NodeSelectorOpIn,
+						Values:   []string{corev1beta1.CapacityTypeOnDemand},
+					},
+				},
+				NodeClassRef: &corev1beta1.NodeClassReference{
+					Name: nodeClass.Name,
+				},
+			},
+		})
+		env.ExpectCreated(nodeClass, nodeClaim)
+		node := env.EventuallyExpectInitializedNodeCount("==", 1)[0]
+		nodeClaim = env.EventuallyExpectCreatedNodeClaimCount("==", 1)[0]
+
+		instanceID := env.ExpectParsedProviderID(node.Spec.ProviderID)
+		env.GetInstance(node.Name)
+
+		// NodeClaim is deleted and now both the nodeClaim and node should be not found
+		env.ExpectDeleted(nodeClaim)
+		env.EventuallyExpectNotFound(nodeClaim, node)
+
+		Eventually(func(g Gomega) {
+			g.Expect(lo.FromPtr(env.GetInstanceByID(instanceID).State.Name)).To(Equal("shutting-down"))
+		}, time.Second*10).Should(Succeed())
+	})
+	It("should delete a NodeClaim from the node termination finalizer", func() {
+		nodeClaim := test.NodeClaim(corev1beta1.NodeClaim{
+			Spec: corev1beta1.NodeClaimSpec{
+				Requirements: []v1.NodeSelectorRequirement{
+					{
+						Key:      v1beta1.LabelInstanceCategory,
+						Operator: v1.NodeSelectorOpIn,
+						Values:   []string{"c"},
+					},
+					{
+						Key:      corev1beta1.CapacityTypeLabelKey,
+						Operator: v1.NodeSelectorOpIn,
+						Values:   []string{corev1beta1.CapacityTypeOnDemand},
+					},
+				},
+				NodeClassRef: &corev1beta1.NodeClassReference{
+					Name: nodeClass.Name,
+				},
+			},
+		})
+		env.ExpectCreated(nodeClass, nodeClaim)
+		node := env.EventuallyExpectInitializedNodeCount("==", 1)[0]
+		nodeClaim = env.EventuallyExpectCreatedNodeClaimCount("==", 1)[0]
+
+		instanceID := env.ExpectParsedProviderID(node.Spec.ProviderID)
+		env.GetInstance(node.Name)
+
+		// Delete the node and expect both the node and nodeClaim to be gone as well as the instance to be shutting-down
+		env.ExpectDeleted(node)
+		env.EventuallyExpectNotFound(nodeClaim, node)
+
+		Eventually(func(g Gomega) {
+			g.Expect(lo.FromPtr(env.GetInstanceByID(instanceID).State.Name)).To(Equal("shutting-down"))
+		}, time.Second*10).Should(Succeed())
+	})
+	It("should create a NodeClaim with custom labels passed through the userData", func() {
+		customAMI := env.GetCustomAMI("/aws/service/eks/optimized-ami/%s/amazon-linux-2/recommended/image_id", 1)
+		// Read the custom userData from the testdata directory
+		rawContent, err := os.ReadFile("testdata/al2_userdata_custom_labels_input.sh")
+		Expect(err).ToNot(HaveOccurred())
+
+		// Create userData that adds custom labels through the --kubelet-extra-args
+		nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyCustom
+		nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{ID: customAMI}}
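+		// With the Custom AMI family, Karpenter passes this userData through rather than generating
+		// bootstrap configuration itself, which is why the script calls /etc/eks/bootstrap.sh directly.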
+		nodeClass.Spec.UserData = lo.ToPtr(base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(string(rawContent), env.ClusterName,
+			env.ClusterEndpoint, env.ExpectCABundle()))))
+
+		nodeClaim := test.NodeClaim(corev1beta1.NodeClaim{
+			Spec: corev1beta1.NodeClaimSpec{
+				Requirements: []v1.NodeSelectorRequirement{
+					{
+						Key:      v1beta1.LabelInstanceCategory,
+						Operator: v1.NodeSelectorOpIn,
+						Values:   []string{"c"},
+					},
+					{
+						Key:      v1.LabelArchStable,
+						Operator: v1.NodeSelectorOpIn,
+						Values:   []string{"amd64"},
+					},
+					{
+						Key:      corev1beta1.CapacityTypeLabelKey,
+						Operator: v1.NodeSelectorOpIn,
+						Values:   []string{corev1beta1.CapacityTypeOnDemand},
+					},
+				},
+				NodeClassRef: &corev1beta1.NodeClassReference{
+					Name: nodeClass.Name,
+				},
+			},
+		})
+		env.ExpectCreated(nodeClass, nodeClaim)
+		node := env.EventuallyExpectInitializedNodeCount("==", 1)[0]
+		Expect(node.Labels).To(HaveKeyWithValue("custom-label", "custom-value"))
+		Expect(node.Labels).To(HaveKeyWithValue("custom-label2", "custom-value2"))
+
+		env.EventuallyExpectCreatedNodeClaimCount("==", 1)
+		env.EventuallyExpectNodeClaimsReady(nodeClaim)
+	})
+	It("should delete a NodeClaim after the registration timeout when the node doesn't register", func() {
+		customAMI := env.GetCustomAMI("/aws/service/eks/optimized-ami/%s/amazon-linux-2/recommended/image_id", 1)
+		// Read the base userData for the custom AMI
+		rawContent, err := os.ReadFile("testdata/al2_userdata_input.sh")
+		Expect(err).ToNot(HaveOccurred())
+
+		// Use a custom AMI so that the test fully controls the userData
+		nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyCustom
+		nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{ID: customAMI}}
+
+		// Giving bad clusterName and clusterEndpoint to the userData
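+		// With an unreachable cluster endpoint the kubelet can never join, so the NodeClaim should stay
+		// Launched but never become Registered, eventually tripping the registration timeout below.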
+		nodeClass.Spec.UserData = lo.ToPtr(base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(string(rawContent), "badName", "badEndpoint", env.ExpectCABundle()))))
+
+		nodeClaim := test.NodeClaim(corev1beta1.NodeClaim{
+			Spec: corev1beta1.NodeClaimSpec{
+				Requirements: []v1.NodeSelectorRequirement{
+					{
+						Key:      v1beta1.LabelInstanceCategory,
+						Operator: v1.NodeSelectorOpIn,
+						Values:   []string{"c"},
+					},
+					{
+						Key:      v1.LabelArchStable,
+						Operator: v1.NodeSelectorOpIn,
+						Values:   []string{"amd64"},
+					},
+					{
+						Key:      corev1beta1.CapacityTypeLabelKey,
+						Operator: v1.NodeSelectorOpIn,
+						Values:   []string{corev1beta1.CapacityTypeOnDemand},
+					},
+				},
+				NodeClassRef: &corev1beta1.NodeClassReference{
+					Name: nodeClass.Name,
+				},
+			},
+		})
+
+		env.ExpectCreated(nodeClass, nodeClaim)
+		nodeClaim = env.EventuallyExpectCreatedNodeClaimCount("==", 1)[0]
+
+		// Expect the nodeClaim to eventually launch while Registered/Initialized remain false
+		Eventually(func(g Gomega) {
+			temp := &corev1beta1.NodeClaim{}
+			g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(nodeClaim), temp)).To(Succeed())
+			g.Expect(temp.StatusConditions().GetCondition(corev1beta1.Launched).IsTrue()).To(BeTrue())
+			g.Expect(temp.StatusConditions().GetCondition(corev1beta1.Registered).IsFalse()).To(BeTrue())
+			g.Expect(temp.StatusConditions().GetCondition(corev1beta1.Initialized).IsFalse()).To(BeTrue())
+		}).Should(Succeed())
+
+		// Expect that the nodeClaim is eventually de-provisioned due to the registration timeout
+		env.EventuallyExpectNotFoundAssertion(nodeClaim).WithTimeout(time.Minute * 20).Should(Succeed())
+	})
+})
diff --git a/test/suites/beta/nodeclaim/suite_test.go b/test/suites/beta/nodeclaim/suite_test.go
new file mode 100644
index 000000000000..2d8b16c45dc5
--- /dev/null
+++ b/test/suites/beta/nodeclaim/suite_test.go
@@ -0,0 +1,77 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nodeclaim_test
+
+import (
+	"fmt"
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega" + + corev1beta1 "github.com/aws/karpenter-core/pkg/apis/v1beta1" + coretest "github.com/aws/karpenter-core/pkg/test" + "github.com/aws/karpenter/pkg/apis/v1beta1" + "github.com/aws/karpenter/pkg/test" + "github.com/aws/karpenter/test/pkg/environment/aws" +) + +var env *aws.Environment +var nodeClass *v1beta1.EC2NodeClass +var nodePool *corev1beta1.NodePool + +func TestNodeClaim(t *testing.T) { + RegisterFailHandler(Fail) + BeforeSuite(func() { + env = aws.NewEnvironment(t) + }) + AfterSuite(func() { + env.Stop() + }) + RunSpecs(t, "Beta/NodeClaim") +} + +var _ = BeforeEach(func() { + env.BeforeEach() + nodeClass = test.EC2NodeClass(v1beta1.EC2NodeClass{ + Spec: v1beta1.EC2NodeClassSpec{ + AMIFamily: &v1beta1.AMIFamilyAL2, + SecurityGroupSelectorTerms: []v1beta1.SecurityGroupSelectorTerm{ + { + Tags: map[string]string{"karpenter.sh/discovery": env.ClusterName}, + }, + }, + SubnetSelectorTerms: []v1beta1.SubnetSelectorTerm{ + { + Tags: map[string]string{"karpenter.sh/discovery": env.ClusterName}, + }, + }, + Role: fmt.Sprintf("KarpenterNodeRole-%s", env.ClusterName), + }, + }) + nodePool = coretest.NodePool(corev1beta1.NodePool{ + Spec: corev1beta1.NodePoolSpec{ + Template: corev1beta1.NodeClaimTemplate{ + Spec: corev1beta1.NodeClaimSpec{ + NodeClassRef: &corev1beta1.NodeClassReference{ + Name: nodeClass.Name, + }, + }, + }, + }, + }) +}) +var _ = AfterEach(func() { env.Cleanup() }) +var _ = AfterEach(func() { env.AfterEach() }) diff --git a/test/suites/beta/nodeclaim/testdata/al2_userdata_custom_labels_input.sh b/test/suites/beta/nodeclaim/testdata/al2_userdata_custom_labels_input.sh new file mode 100644 index 000000000000..fc1d8a3853f8 --- /dev/null +++ b/test/suites/beta/nodeclaim/testdata/al2_userdata_custom_labels_input.sh @@ -0,0 +1,14 @@ +MIME-Version: 1.0 +Content-Type: multipart/mixed; boundary="BOUNDARY" + +--BOUNDARY +Content-Type: text/x-shellscript; charset="us-ascii" + +#!/bin/bash +exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 +/etc/eks/bootstrap.sh '%s' --apiserver-endpoint '%s' --b64-cluster-ca '%s' \ +--use-max-pods false \ +--container-runtime containerd \ +--kubelet-extra-args '--node-labels=testing/cluster=unspecified,custom-label=custom-value,custom-label2=custom-value2' + +--BOUNDARY-- diff --git a/test/suites/beta/nodeclaim/testdata/al2_userdata_input.sh b/test/suites/beta/nodeclaim/testdata/al2_userdata_input.sh new file mode 100644 index 000000000000..9a40bf4562a4 --- /dev/null +++ b/test/suites/beta/nodeclaim/testdata/al2_userdata_input.sh @@ -0,0 +1,14 @@ +MIME-Version: 1.0 +Content-Type: multipart/mixed; boundary="BOUNDARY" + +--BOUNDARY +Content-Type: text/x-shellscript; charset="us-ascii" + +#!/bin/bash +exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 +/etc/eks/bootstrap.sh '%s' --apiserver-endpoint '%s' --b64-cluster-ca '%s' \ +--use-max-pods false \ +--container-runtime containerd \ +--kubelet-extra-args '--node-labels=karpenter.sh/nodepool=%s,testing/cluster=unspecified' + +--BOUNDARY--