diff --git a/go.mod b/go.mod index e4cbc0679311..062a2fe4599e 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( k8s.io/utils v0.0.0-20240102154912-e7106e64919e knative.dev/pkg v0.0.0-20231010144348-ca8c009405dd sigs.k8s.io/controller-runtime v0.18.4 - sigs.k8s.io/karpenter v0.37.1-0.20240710172318-86056e48b9ac + sigs.k8s.io/karpenter v0.37.1-0.20240711192037-195f8961cae4 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index e8b0a35ae617..3d507f86b678 100644 --- a/go.sum +++ b/go.sum @@ -761,8 +761,8 @@ sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHv sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/karpenter v0.37.1-0.20240710172318-86056e48b9ac h1:8nfNoKGJSAzTMKxweI4DcTADPyMY/oCW2x1qgx3gUVY= -sigs.k8s.io/karpenter v0.37.1-0.20240710172318-86056e48b9ac/go.mod h1:jwEZ2Efxsc0yyNkrDEFN2RduAwlm/s7reIVNblZ8vyM= +sigs.k8s.io/karpenter v0.37.1-0.20240711192037-195f8961cae4 h1:PfXPyel4nqHFf/3fMQ0TSP+gUZTyyrjSjBDjhp7IRjQ= +sigs.k8s.io/karpenter v0.37.1-0.20240711192037-195f8961cae4/go.mod h1:jwEZ2Efxsc0yyNkrDEFN2RduAwlm/s7reIVNblZ8vyM= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/hack/docs/instancetypes_gen/main.go b/hack/docs/instancetypes_gen/main.go index f7b5b139c009..87b4deb1f30d 100644 --- a/hack/docs/instancetypes_gen/main.go +++ b/hack/docs/instancetypes_gen/main.go @@ -25,7 +25,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/kubernetes" @@ -33,13 +33,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/manager" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" coreoperator "sigs.k8s.io/karpenter/pkg/operator" coreoptions "sigs.k8s.io/karpenter/pkg/operator/options" coretest "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/operator" "github.com/aws/karpenter-provider-aws/pkg/operator/options" "github.com/aws/karpenter-provider-aws/pkg/test" @@ -103,10 +103,10 @@ func main() { log.Fatalf("updating instance types offerings, %s", err) } // Fake a NodeClass so we can use it to get InstanceTypes - nodeClass := &v1beta1.EC2NodeClass{ - Spec: v1beta1.EC2NodeClassSpec{ - AMIFamily: &v1beta1.AMIFamilyAL2023, - SubnetSelectorTerms: []v1beta1.SubnetSelectorTerm{ + nodeClass := &v1.EC2NodeClass{ + Spec: v1.EC2NodeClassSpec{ + AMIFamily: &v1.AMIFamilyAL2023, + SubnetSelectorTerms: []v1.SubnetSelectorTerm{ { Tags: map[string]string{ "*": "*", @@ -119,13 +119,13 @@ func main() { if err != nil { log.Fatalf("listing subnets, %s", err) } - nodeClass.Status.Subnets = lo.Map(subnets, func(ec2subnet *ec2.Subnet, _ int) v1beta1.Subnet { - return v1beta1.Subnet{ + nodeClass.Status.Subnets = lo.Map(subnets, func(ec2subnet 
*ec2.Subnet, _ int) v1.Subnet { + return v1.Subnet{ ID: *ec2subnet.SubnetId, Zone: *ec2subnet.AvailabilityZone, } }) - instanceTypes, err := op.InstanceTypesProvider.List(ctx, &corev1beta1.KubeletConfiguration{}, nodeClass) + instanceTypes, err := op.InstanceTypesProvider.List(ctx, &v1.KubeletConfiguration{}, nodeClass) if err != nil { log.Fatalf("listing instance types, %s", err) } @@ -171,9 +171,9 @@ below are the resources available with some assumptions and after the instance o sort.Strings(familyNames) // we don't want to show a few labels that will vary amongst regions - delete(labelNameMap, v1.LabelTopologyZone) - delete(labelNameMap, v1beta1.LabelTopologyZoneID) - delete(labelNameMap, corev1beta1.CapacityTypeLabelKey) + delete(labelNameMap, corev1.LabelTopologyZone) + delete(labelNameMap, v1.LabelTopologyZoneID) + delete(labelNameMap, karpv1.CapacityTypeLabelKey) labelNames := lo.Keys(labelNameMap) @@ -210,7 +210,7 @@ below are the resources available with some assumptions and after the instance o if !ok { continue } - if req.Key == v1.LabelTopologyRegion { + if req.Key == corev1.LabelTopologyRegion { continue } if len(req.Values()) == 1 { @@ -221,11 +221,11 @@ below are the resources available with some assumptions and after the instance o fmt.Fprintln(f, " | Resource | Quantity |") fmt.Fprintln(f, " |--|--|") for _, resourceName := range resourceNames { - quantity := minusOverhead[v1.ResourceName(resourceName)] + quantity := minusOverhead[corev1.ResourceName(resourceName)] if quantity.IsZero() { continue } - if v1.ResourceName(resourceName) == v1.ResourceEphemeralStorage { + if corev1.ResourceName(resourceName) == corev1.ResourceEphemeralStorage { i64, _ := quantity.AsInt64() quantity = *resource.NewQuantity(i64, resource.BinarySI) } diff --git a/pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml b/pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml index 95959d19e704..cc061eb57cf9 100644 --- a/pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml +++ b/pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml @@ -717,7 +717,7 @@ spec: type: object type: object served: true - storage: false + storage: true subresources: status: {} - name: v1beta1 @@ -1293,7 +1293,7 @@ spec: type: object type: object served: true - storage: true + storage: false subresources: status: {} conversion: diff --git a/pkg/apis/crds/karpenter.sh_nodeclaims.yaml b/pkg/apis/crds/karpenter.sh_nodeclaims.yaml index 6834866ff541..9be03992d05a 100644 --- a/pkg/apis/crds/karpenter.sh_nodeclaims.yaml +++ b/pkg/apis/crds/karpenter.sh_nodeclaims.yaml @@ -348,7 +348,7 @@ spec: - spec type: object served: true - storage: false + storage: true subresources: status: {} - additionalPrinterColumns: @@ -790,7 +790,7 @@ spec: - spec type: object served: true - storage: true + storage: false subresources: status: {} conversion: diff --git a/pkg/apis/crds/karpenter.sh_nodepools.yaml b/pkg/apis/crds/karpenter.sh_nodepools.yaml index 9ae64056f616..f14bdd8883a3 100644 --- a/pkg/apis/crds/karpenter.sh_nodepools.yaml +++ b/pkg/apis/crds/karpenter.sh_nodepools.yaml @@ -488,7 +488,7 @@ spec: - spec type: object served: true - storage: false + storage: true subresources: status: {} - additionalPrinterColumns: @@ -1068,7 +1068,7 @@ spec: - spec type: object served: true - storage: true + storage: false subresources: status: {} conversion: diff --git a/pkg/apis/v1/doc.go b/pkg/apis/v1/doc.go index 9d40f9d57285..44692b28c362 100644 --- a/pkg/apis/v1/doc.go +++ b/pkg/apis/v1/doc.go @@ -19,7 +19,7 @@ limitations under the License. 
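The served/storage flips in the three CRDs above are generated output, not hand edits: controller-gen emits storage: true for whichever Go type carries the +kubebuilder:storageversion marker, and this PR adds that marker to the v1 types while removing it from the v1beta1 types (both hunks appear further down). A minimal sketch of the mechanism, using a hypothetical Widget API rather than this project's types:

// Hypothetical API type; in a real kubebuilder layout each version lives in
// its own package (apis/v1beta1, apis/v1) with its own copy of the type.
package v1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +kubebuilder:object:root=true
// +kubebuilder:storageversion
//
// Exactly one served version may carry the storageversion marker: it becomes
// the schema etcd persists (storage: true), and every other served version is
// converted to and from it. Moving the marker from the v1beta1 type to the v1
// type is what produces the paired true/false flips in the generated YAML.
type Widget struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              WidgetSpec `json:"spec,omitempty"`
}

type WidgetSpec struct {
	Size int32 `json:"size,omitempty"`
}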
package v1 // doc.go is discovered by codegen import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes/scheme" @@ -28,7 +28,7 @@ import ( func init() { gv := schema.GroupVersion{Group: apis.Group, Version: "v1"} - v1.AddToGroupVersion(scheme.Scheme, gv) + corev1.AddToGroupVersion(scheme.Scheme, gv) scheme.Scheme.AddKnownTypes(gv, &EC2NodeClass{}, &EC2NodeClassList{}, diff --git a/pkg/apis/v1/ec2nodeclass.go b/pkg/apis/v1/ec2nodeclass.go index fda4c4c3e71a..c0950eaca00d 100644 --- a/pkg/apis/v1/ec2nodeclass.go +++ b/pkg/apis/v1/ec2nodeclass.go @@ -21,7 +21,7 @@ import ( "github.com/samber/lo" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" ) // EC2NodeClassSpec is the top level specification for the AWS Karpenter Provider. @@ -399,6 +399,7 @@ const ( // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" // +kubebuilder:printcolumn:name="Role",type="string",JSONPath=".spec.role",priority=1,description="" // +kubebuilder:resource:path=ec2nodeclasses,scope=Cluster,categories=karpenter,shortName={ec2nc,ec2ncs} +// +kubebuilder:storageversion // +kubebuilder:subresource:status type EC2NodeClass struct { metav1.TypeMeta `json:",inline"` @@ -436,7 +437,7 @@ func (in *EC2NodeClass) InstanceProfileRole() string { func (in *EC2NodeClass) InstanceProfileTags(clusterName string) map[string]string { return lo.Assign(in.Spec.Tags, map[string]string{ fmt.Sprintf("kubernetes.io/cluster/%s", clusterName): "owned", - corev1beta1.ManagedByAnnotationKey: clusterName, + karpv1.ManagedByAnnotationKey: clusterName, LabelNodeClass: in.Name, }) } diff --git a/pkg/apis/v1/ec2nodeclass_status.go b/pkg/apis/v1/ec2nodeclass_status.go index 6114d2cce35a..89afe2370e8c 100644 --- a/pkg/apis/v1/ec2nodeclass_status.go +++ b/pkg/apis/v1/ec2nodeclass_status.go @@ -16,7 +16,7 @@ package v1 import ( "github.com/awslabs/operatorpkg/status" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) const ( @@ -59,7 +59,7 @@ type AMI struct { Name string `json:"name,omitempty"` // Requirements of the AMI to be utilized on an instance type // +required - Requirements []v1.NodeSelectorRequirement `json:"requirements"` + Requirements []corev1.NodeSelectorRequirement `json:"requirements"` } // EC2NodeClassStatus contains the resolved state of the EC2NodeClass diff --git a/pkg/apis/v1/ec2nodeclass_validation_cel_test.go b/pkg/apis/v1/ec2nodeclass_validation_cel_test.go index 0da8485fc89d..d862d62e9b8f 100644 --- a/pkg/apis/v1/ec2nodeclass_validation_cel_test.go +++ b/pkg/apis/v1/ec2nodeclass_validation_cel_test.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/ptr" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/test" v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" @@ -97,7 +97,7 @@ var _ = Describe("CEL/Validation", func() { }) It("should fail if tags contain a restricted domain key", func() { nc.Spec.Tags = map[string]string{ - corev1beta1.NodePoolLabelKey: "value", + karpv1.NodePoolLabelKey: "value", } Expect(env.Client.Create(ctx, nc)).To(Not(Succeed())) nc.Spec.Tags = map[string]string{ @@ -105,7 +105,7 @@ var _ = Describe("CEL/Validation", func() { } 
Expect(env.Client.Create(ctx, nc)).To(Not(Succeed())) nc.Spec.Tags = map[string]string{ - corev1beta1.ManagedByAnnotationKey: "test", + karpv1.ManagedByAnnotationKey: "test", } Expect(env.Client.Create(ctx, nc)).To(Not(Succeed())) nc.Spec.Tags = map[string]string{ diff --git a/pkg/apis/v1/labels.go b/pkg/apis/v1/labels.go index ea3403ed48e5..58a48828dfa4 100644 --- a/pkg/apis/v1/labels.go +++ b/pkg/apis/v1/labels.go @@ -18,17 +18,17 @@ import ( "fmt" "regexp" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" coreapis "sigs.k8s.io/karpenter/pkg/apis" - "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/apis" ) func init() { - v1beta1.RestrictedLabelDomains = v1beta1.RestrictedLabelDomains.Insert(RestrictedLabelDomains...) - v1beta1.WellKnownLabels = v1beta1.WellKnownLabels.Insert( + karpv1.RestrictedLabelDomains = karpv1.RestrictedLabelDomains.Insert(RestrictedLabelDomains...) + karpv1.WellKnownLabels = karpv1.WellKnownLabels.Insert( LabelInstanceHypervisor, LabelInstanceEncryptionInTransitSupported, LabelInstanceCategory, @@ -49,19 +49,19 @@ func init() { LabelInstanceAcceleratorManufacturer, LabelInstanceAcceleratorCount, LabelTopologyZoneID, - v1.LabelWindowsBuild, + corev1.LabelWindowsBuild, ) } var ( TerminationFinalizer = apis.Group + "/termination" AWSToKubeArchitectures = map[string]string{ - "x86_64": v1beta1.ArchitectureAmd64, - v1beta1.ArchitectureArm64: v1beta1.ArchitectureArm64, + "x86_64": karpv1.ArchitectureAmd64, + karpv1.ArchitectureArm64: karpv1.ArchitectureArm64, } WellKnownArchitectures = sets.NewString( - v1beta1.ArchitectureAmd64, - v1beta1.ArchitectureArm64, + karpv1.ArchitectureAmd64, + karpv1.ArchitectureArm64, ) RestrictedLabelDomains = []string{ apis.Group, @@ -70,30 +70,30 @@ var ( // Adheres to cluster name pattern matching as specified in the API spec // https://docs.aws.amazon.com/eks/latest/APIReference/API_CreateCluster.html regexp.MustCompile(`^kubernetes\.io/cluster/[0-9A-Za-z][A-Za-z0-9\-_]*$`), - regexp.MustCompile(fmt.Sprintf("^%s$", regexp.QuoteMeta(v1beta1.NodePoolLabelKey))), - regexp.MustCompile(fmt.Sprintf("^%s$", regexp.QuoteMeta(v1beta1.ManagedByAnnotationKey))), + regexp.MustCompile(fmt.Sprintf("^%s$", regexp.QuoteMeta(karpv1.NodePoolLabelKey))), + regexp.MustCompile(fmt.Sprintf("^%s$", regexp.QuoteMeta(karpv1.ManagedByAnnotationKey))), regexp.MustCompile(fmt.Sprintf("^%s$", regexp.QuoteMeta(LabelNodeClass))), regexp.MustCompile(fmt.Sprintf("^%s$", regexp.QuoteMeta(TagNodeClaim))), } - AMIFamilyBottlerocket = "Bottlerocket" - AMIFamilyAL2 = "AL2" - AMIFamilyAL2023 = "AL2023" - AMIFamilyUbuntu = "Ubuntu" - AMIFamilyWindows2019 = "Windows2019" - AMIFamilyWindows2022 = "Windows2022" - AMIFamilyCustom = "Custom" - Windows2019 = "2019" - Windows2022 = "2022" - WindowsCore = "Core" - Windows2019Build = "10.0.17763" - Windows2022Build = "10.0.20348" - ResourceNVIDIAGPU v1.ResourceName = "nvidia.com/gpu" - ResourceAMDGPU v1.ResourceName = "amd.com/gpu" - ResourceAWSNeuron v1.ResourceName = "aws.amazon.com/neuron" - ResourceHabanaGaudi v1.ResourceName = "habana.ai/gaudi" - ResourceAWSPodENI v1.ResourceName = "vpc.amazonaws.com/pod-eni" - ResourcePrivateIPv4Address v1.ResourceName = "vpc.amazonaws.com/PrivateIPv4Address" - ResourceEFA v1.ResourceName = "vpc.amazonaws.com/efa" + AMIFamilyBottlerocket = "Bottlerocket" + AMIFamilyAL2 = "AL2" + AMIFamilyAL2023 = "AL2023" + AMIFamilyUbuntu = "Ubuntu" + AMIFamilyWindows2019 = "Windows2019" + 
AMIFamilyWindows2022 = "Windows2022" + AMIFamilyCustom = "Custom" + Windows2019 = "2019" + Windows2022 = "2022" + WindowsCore = "Core" + Windows2019Build = "10.0.17763" + Windows2022Build = "10.0.20348" + ResourceNVIDIAGPU corev1.ResourceName = "nvidia.com/gpu" + ResourceAMDGPU corev1.ResourceName = "amd.com/gpu" + ResourceAWSNeuron corev1.ResourceName = "aws.amazon.com/neuron" + ResourceHabanaGaudi corev1.ResourceName = "habana.ai/gaudi" + ResourceAWSPodENI corev1.ResourceName = "vpc.amazonaws.com/pod-eni" + ResourcePrivateIPv4Address corev1.ResourceName = "vpc.amazonaws.com/PrivateIPv4Address" + ResourceEFA corev1.ResourceName = "vpc.amazonaws.com/efa" LabelNodeClass = apis.Group + "/ec2nodeclass" diff --git a/pkg/apis/v1/nodepool_validation_cel_test.go b/pkg/apis/v1/nodepool_validation_cel_test.go index ee4ade8f5a21..dffc518947e0 100644 --- a/pkg/apis/v1/nodepool_validation_cel_test.go +++ b/pkg/apis/v1/nodepool_validation_cel_test.go @@ -20,36 +20,35 @@ import ( "github.com/Pallinder/go-randomdata" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" - "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" ) -// TODO @engedaam: Updated NodePool and a NodeClaim to use the v1 API var _ = Describe("CEL/Validation", func() { - var nodePool *v1beta1.NodePool + var nodePool *karpv1.NodePool BeforeEach(func() { if env.Version.Minor() < 25 { Skip("CEL Validation is for 1.25>") } - nodePool = &v1beta1.NodePool{ + nodePool = &karpv1.NodePool{ ObjectMeta: metav1.ObjectMeta{Name: strings.ToLower(randomdata.SillyName())}, - Spec: v1beta1.NodePoolSpec{ - Template: v1beta1.NodeClaimTemplate{ - Spec: v1beta1.NodeClaimSpec{ - NodeClassRef: &v1beta1.NodeClassReference{ - APIVersion: "karpenter.k8s.aws/v1beta1", - Kind: "EC2NodeClass", - Name: "default", + Spec: karpv1.NodePoolSpec{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: "karpenter.k8s.aws", + Kind: "EC2NodeClass", + Name: "default", }, - Requirements: []v1beta1.NodeSelectorRequirementWithMinValues{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpExists, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpExists, }, }, }, @@ -61,9 +60,9 @@ var _ = Describe("CEL/Validation", func() { Context("Requirements", func() { It("should allow restricted domains exceptions", func() { oldNodePool := nodePool.DeepCopy() - for label := range v1beta1.LabelDomainExceptions { - nodePool.Spec.Template.Spec.Requirements = []v1beta1.NodeSelectorRequirementWithMinValues{ - {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: label + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}}, + for label := range karpv1.LabelDomainExceptions { + nodePool.Spec.Template.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: label + "/test", Operator: corev1.NodeSelectorOpIn, Values: []string{"test"}}}, } Expect(env.Client.Create(ctx, nodePool)).To(Succeed()) Expect(nodePool.RuntimeValidate()).To(Succeed()) @@ -73,9 +72,9 @@ var _ = Describe("CEL/Validation", func() { }) It("should allow well known label exceptions", func() { 
oldNodePool := nodePool.DeepCopy() - for label := range v1beta1.WellKnownLabels.Difference(sets.New(v1beta1.NodePoolLabelKey)) { - nodePool.Spec.Template.Spec.Requirements = []v1beta1.NodeSelectorRequirementWithMinValues{ - {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: label, Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}}, + for label := range karpv1.WellKnownLabels.Difference(sets.New(karpv1.NodePoolLabelKey)) { + nodePool.Spec.Template.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: label, Operator: corev1.NodeSelectorOpIn, Values: []string{"test"}}}, } Expect(env.Client.Create(ctx, nodePool)).To(Succeed()) Expect(nodePool.RuntimeValidate()).To(Succeed()) @@ -87,7 +86,7 @@ var _ = Describe("CEL/Validation", func() { Context("Labels", func() { It("should allow restricted domains exceptions", func() { oldNodePool := nodePool.DeepCopy() - for label := range v1beta1.LabelDomainExceptions { + for label := range karpv1.LabelDomainExceptions { nodePool.Spec.Template.Labels = map[string]string{ label: "test", } @@ -99,7 +98,7 @@ var _ = Describe("CEL/Validation", func() { }) It("should allow well known label exceptions", func() { oldNodePool := nodePool.DeepCopy() - for label := range v1beta1.WellKnownLabels.Difference(sets.New(v1beta1.NodePoolLabelKey)) { + for label := range karpv1.WellKnownLabels.Difference(sets.New(karpv1.NodePoolLabelKey)) { nodePool.Spec.Template.Labels = map[string]string{ label: "test", } diff --git a/pkg/apis/v1beta1/doc.go b/pkg/apis/v1beta1/doc.go index 361d5f55f217..bfbe13887f53 100644 --- a/pkg/apis/v1beta1/doc.go +++ b/pkg/apis/v1beta1/doc.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta1 // doc.go is discovered by codegen import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes/scheme" @@ -28,7 +28,7 @@ import ( func init() { gv := schema.GroupVersion{Group: apis.Group, Version: "v1beta1"} - v1.AddToGroupVersion(scheme.Scheme, gv) + metav1.AddToGroupVersion(scheme.Scheme, gv) scheme.Scheme.AddKnownTypes(gv, &EC2NodeClass{}, &EC2NodeClassList{}, diff --git a/pkg/apis/v1beta1/ec2nodeclass.go b/pkg/apis/v1beta1/ec2nodeclass.go index b3395dd3a60d..833dcdd5683e 100644 --- a/pkg/apis/v1beta1/ec2nodeclass.go +++ b/pkg/apis/v1beta1/ec2nodeclass.go @@ -21,7 +21,7 @@ import ( "github.com/samber/lo" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" ) // EC2NodeClassSpec is the top level specification for the AWS Karpenter Provider. 
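The v1beta1 doc.go above follows the same registration pattern as the v1 doc.go earlier in this diff; renaming the import from v1 to metav1 (or corev1, as the v1 doc.go does) changes no behavior, it only frees the bare v1 name for other packages. A condensed sketch of what each init() does, with a hypothetical registerVersion helper and the concrete type registration elided:

package apis

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes/scheme"
)

// registerVersion mirrors the init() bodies in both doc.go files: it adds the
// meta types (ListOptions, WatchEvent, ...) for a GroupVersion to the shared
// client-go scheme. The real init() then registers the concrete API types via
// scheme.Scheme.AddKnownTypes(gv, &EC2NodeClass{}, &EC2NodeClassList{}, ...),
// omitted here so the sketch stays self-contained.
func registerVersion(version string) schema.GroupVersion {
	gv := schema.GroupVersion{Group: "karpenter.k8s.aws", Version: version}
	metav1.AddToGroupVersion(scheme.Scheme, gv)
	return gv
}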
@@ -318,7 +318,6 @@ const ( // EC2NodeClass is the Schema for the EC2NodeClass API // +kubebuilder:object:root=true -// +kubebuilder:storageversion // +kubebuilder:resource:path=ec2nodeclasses,scope=Cluster,categories=karpenter,shortName={ec2nc,ec2ncs} // +kubebuilder:subresource:status type EC2NodeClass struct { @@ -357,7 +356,7 @@ func (in *EC2NodeClass) InstanceProfileRole() string { func (in *EC2NodeClass) InstanceProfileTags(clusterName string) map[string]string { return lo.Assign(in.Spec.Tags, map[string]string{ fmt.Sprintf("kubernetes.io/cluster/%s", clusterName): "owned", - corev1beta1.ManagedByAnnotationKey: clusterName, + karpv1beta1.ManagedByAnnotationKey: clusterName, LabelNodeClass: in.Name, }) } diff --git a/pkg/apis/v1beta1/ec2nodeclass_hash_test.go b/pkg/apis/v1beta1/ec2nodeclass_hash_test.go index ccdde5a389ec..311e34ac564d 100644 --- a/pkg/apis/v1beta1/ec2nodeclass_hash_test.go +++ b/pkg/apis/v1beta1/ec2nodeclass_hash_test.go @@ -31,7 +31,7 @@ var _ = Describe("Hash", func() { const staticHash = "10790156025840984195" var nodeClass *v1beta1.EC2NodeClass BeforeEach(func() { - nodeClass = test.EC2NodeClass(v1beta1.EC2NodeClass{ + nodeClass = test.BetaEC2NodeClass(v1beta1.EC2NodeClass{ Spec: v1beta1.EC2NodeClassSpec{ AMIFamily: lo.ToPtr(v1beta1.AMIFamilyAL2023), Role: "role-1", @@ -202,7 +202,7 @@ var _ = Describe("Hash", func() { Expect(hash).To(Equal(updatedHash)) }) It("should expect two EC2NodeClasses with the same spec to have the same hash", func() { - otherNodeClass := test.EC2NodeClass(v1beta1.EC2NodeClass{ + otherNodeClass := test.BetaEC2NodeClass(v1beta1.EC2NodeClass{ Spec: nodeClass.Spec, }) Expect(nodeClass.Hash()).To(Equal(otherNodeClass.Hash())) diff --git a/pkg/apis/v1beta1/ec2nodeclass_status.go b/pkg/apis/v1beta1/ec2nodeclass_status.go index 4731f5aeede8..fa89119b9bb2 100644 --- a/pkg/apis/v1beta1/ec2nodeclass_status.go +++ b/pkg/apis/v1beta1/ec2nodeclass_status.go @@ -16,7 +16,7 @@ package v1beta1 import ( "github.com/awslabs/operatorpkg/status" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) const ( @@ -59,7 +59,7 @@ type AMI struct { Name string `json:"name,omitempty"` // Requirements of the AMI to be utilized on an instance type // +required - Requirements []v1.NodeSelectorRequirement `json:"requirements"` + Requirements []corev1.NodeSelectorRequirement `json:"requirements"` } // EC2NodeClassStatus contains the resolved state of the EC2NodeClass diff --git a/pkg/apis/v1beta1/ec2nodeclass_validation_cel_test.go b/pkg/apis/v1beta1/ec2nodeclass_validation_cel_test.go index b0104c9c506a..0ccde246fb30 100644 --- a/pkg/apis/v1beta1/ec2nodeclass_validation_cel_test.go +++ b/pkg/apis/v1beta1/ec2nodeclass_validation_cel_test.go @@ -18,7 +18,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/samber/lo" "k8s.io/apimachinery/pkg/api/resource" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" "github.com/aws/karpenter-provider-aws/pkg/test" @@ -34,7 +34,7 @@ var _ = Describe("CEL/Validation", func() { if env.Version.Minor() < 25 { Skip("CEL Validation is for 1.25>") } - nc = test.EC2NodeClass() + nc = test.BetaEC2NodeClass() }) It("should succeed if just specifying role", func() { Expect(env.Client.Create(ctx, nc)).To(Succeed()) @@ -72,7 +72,7 @@ var _ = Describe("CEL/Validation", func() { }) It("should fail if tags contain a restricted domain key", func() { nc.Spec.Tags = map[string]string{ - corev1beta1.NodePoolLabelKey: "value", + 
karpv1beta1.NodePoolLabelKey: "value", } Expect(env.Client.Create(ctx, nc)).To(Not(Succeed())) nc.Spec.Tags = map[string]string{ @@ -80,7 +80,7 @@ var _ = Describe("CEL/Validation", func() { } Expect(env.Client.Create(ctx, nc)).To(Not(Succeed())) nc.Spec.Tags = map[string]string{ - corev1beta1.ManagedByAnnotationKey: "test", + karpv1beta1.ManagedByAnnotationKey: "test", } Expect(env.Client.Create(ctx, nc)).To(Not(Succeed())) nc.Spec.Tags = map[string]string{ @@ -494,7 +494,7 @@ var _ = Describe("CEL/Validation", func() { }) Context("BlockDeviceMappings", func() { It("should succeed if more than one root volume is specified", func() { - nodeClass := test.EC2NodeClass(v1beta1.EC2NodeClass{ + nodeClass := test.BetaEC2NodeClass(v1beta1.EC2NodeClass{ Spec: v1beta1.EC2NodeClassSpec{ BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{ { @@ -519,7 +519,7 @@ var _ = Describe("CEL/Validation", func() { Expect(env.Client.Create(ctx, nodeClass)).To(Succeed()) }) It("should succeed for valid VolumeSize in G", func() { - nodeClass := test.EC2NodeClass(v1beta1.EC2NodeClass{ + nodeClass := test.BetaEC2NodeClass(v1beta1.EC2NodeClass{ Spec: v1beta1.EC2NodeClassSpec{ BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{ { @@ -535,7 +535,7 @@ var _ = Describe("CEL/Validation", func() { Expect(env.Client.Create(ctx, nodeClass)).To(Succeed()) }) It("should succeed for valid VolumeSize in T", func() { - nodeClass := test.EC2NodeClass(v1beta1.EC2NodeClass{ + nodeClass := test.BetaEC2NodeClass(v1beta1.EC2NodeClass{ Spec: v1beta1.EC2NodeClassSpec{ BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{ { @@ -551,7 +551,7 @@ var _ = Describe("CEL/Validation", func() { Expect(env.Client.Create(ctx, nodeClass)).To(Succeed()) }) It("should fail if more than one root volume is specified", func() { - nodeClass := test.EC2NodeClass(v1beta1.EC2NodeClass{ + nodeClass := test.BetaEC2NodeClass(v1beta1.EC2NodeClass{ Spec: v1beta1.EC2NodeClassSpec{ BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{ { @@ -574,7 +574,7 @@ var _ = Describe("CEL/Validation", func() { Expect(env.Client.Create(ctx, nodeClass)).To(Not(Succeed())) }) It("should fail VolumeSize is less then 1Gi/1G", func() { - nodeClass := test.EC2NodeClass(v1beta1.EC2NodeClass{ + nodeClass := test.BetaEC2NodeClass(v1beta1.EC2NodeClass{ Spec: v1beta1.EC2NodeClassSpec{ BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{ { @@ -590,7 +590,7 @@ var _ = Describe("CEL/Validation", func() { Expect(env.Client.Create(ctx, nodeClass)).To(Not(Succeed())) }) It("should fail VolumeSize is greater then 64T", func() { - nodeClass := test.EC2NodeClass(v1beta1.EC2NodeClass{ + nodeClass := test.BetaEC2NodeClass(v1beta1.EC2NodeClass{ Spec: v1beta1.EC2NodeClassSpec{ BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{ { @@ -606,7 +606,7 @@ var _ = Describe("CEL/Validation", func() { Expect(env.Client.Create(ctx, nodeClass)).To(Not(Succeed())) }) It("should fail for VolumeSize that do not parse into quantity values", func() { - nodeClass := test.EC2NodeClass(v1beta1.EC2NodeClass{ + nodeClass := test.BetaEC2NodeClass(v1beta1.EC2NodeClass{ Spec: v1beta1.EC2NodeClassSpec{ BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{ { diff --git a/pkg/apis/v1beta1/labels.go b/pkg/apis/v1beta1/labels.go index 4b51945b2306..d7ae21c84230 100644 --- a/pkg/apis/v1beta1/labels.go +++ b/pkg/apis/v1beta1/labels.go @@ -18,17 +18,17 @@ import ( "fmt" "regexp" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" coreapis "sigs.k8s.io/karpenter/pkg/apis" - 
"sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "github.com/aws/karpenter-provider-aws/pkg/apis" ) func init() { - v1beta1.RestrictedLabelDomains = v1beta1.RestrictedLabelDomains.Insert(RestrictedLabelDomains...) - v1beta1.WellKnownLabels = v1beta1.WellKnownLabels.Insert( + karpv1beta1.RestrictedLabelDomains = karpv1beta1.RestrictedLabelDomains.Insert(RestrictedLabelDomains...) + karpv1beta1.WellKnownLabels = karpv1beta1.WellKnownLabels.Insert( LabelInstanceHypervisor, LabelInstanceEncryptionInTransitSupported, LabelInstanceCategory, @@ -49,19 +49,19 @@ func init() { LabelInstanceAcceleratorManufacturer, LabelInstanceAcceleratorCount, LabelTopologyZoneID, - v1.LabelWindowsBuild, + corev1.LabelWindowsBuild, ) } var ( TerminationFinalizer = apis.Group + "/termination" AWSToKubeArchitectures = map[string]string{ - "x86_64": v1beta1.ArchitectureAmd64, - v1beta1.ArchitectureArm64: v1beta1.ArchitectureArm64, + "x86_64": karpv1beta1.ArchitectureAmd64, + karpv1beta1.ArchitectureArm64: karpv1beta1.ArchitectureArm64, } WellKnownArchitectures = sets.NewString( - v1beta1.ArchitectureAmd64, - v1beta1.ArchitectureArm64, + karpv1beta1.ArchitectureAmd64, + karpv1beta1.ArchitectureArm64, ) RestrictedLabelDomains = []string{ apis.Group, @@ -70,30 +70,30 @@ var ( // Adheres to cluster name pattern matching as specified in the API spec // https://docs.aws.amazon.com/eks/latest/APIReference/API_CreateCluster.html regexp.MustCompile(`^kubernetes\.io/cluster/[0-9A-Za-z][A-Za-z0-9\-_]*$`), - regexp.MustCompile(fmt.Sprintf("^%s$", regexp.QuoteMeta(v1beta1.NodePoolLabelKey))), - regexp.MustCompile(fmt.Sprintf("^%s$", regexp.QuoteMeta(v1beta1.ManagedByAnnotationKey))), + regexp.MustCompile(fmt.Sprintf("^%s$", regexp.QuoteMeta(karpv1beta1.NodePoolLabelKey))), + regexp.MustCompile(fmt.Sprintf("^%s$", regexp.QuoteMeta(karpv1beta1.ManagedByAnnotationKey))), regexp.MustCompile(fmt.Sprintf("^%s$", regexp.QuoteMeta(LabelNodeClass))), regexp.MustCompile(fmt.Sprintf("^%s$", regexp.QuoteMeta(TagNodeClaim))), } - AMIFamilyBottlerocket = "Bottlerocket" - AMIFamilyAL2 = "AL2" - AMIFamilyAL2023 = "AL2023" - AMIFamilyUbuntu = "Ubuntu" - AMIFamilyWindows2019 = "Windows2019" - AMIFamilyWindows2022 = "Windows2022" - AMIFamilyCustom = "Custom" - Windows2019 = "2019" - Windows2022 = "2022" - WindowsCore = "Core" - Windows2019Build = "10.0.17763" - Windows2022Build = "10.0.20348" - ResourceNVIDIAGPU v1.ResourceName = "nvidia.com/gpu" - ResourceAMDGPU v1.ResourceName = "amd.com/gpu" - ResourceAWSNeuron v1.ResourceName = "aws.amazon.com/neuron" - ResourceHabanaGaudi v1.ResourceName = "habana.ai/gaudi" - ResourceAWSPodENI v1.ResourceName = "vpc.amazonaws.com/pod-eni" - ResourcePrivateIPv4Address v1.ResourceName = "vpc.amazonaws.com/PrivateIPv4Address" - ResourceEFA v1.ResourceName = "vpc.amazonaws.com/efa" + AMIFamilyBottlerocket = "Bottlerocket" + AMIFamilyAL2 = "AL2" + AMIFamilyAL2023 = "AL2023" + AMIFamilyUbuntu = "Ubuntu" + AMIFamilyWindows2019 = "Windows2019" + AMIFamilyWindows2022 = "Windows2022" + AMIFamilyCustom = "Custom" + Windows2019 = "2019" + Windows2022 = "2022" + WindowsCore = "Core" + Windows2019Build = "10.0.17763" + Windows2022Build = "10.0.20348" + ResourceNVIDIAGPU corev1.ResourceName = "nvidia.com/gpu" + ResourceAMDGPU corev1.ResourceName = "amd.com/gpu" + ResourceAWSNeuron corev1.ResourceName = "aws.amazon.com/neuron" + ResourceHabanaGaudi corev1.ResourceName = "habana.ai/gaudi" + ResourceAWSPodENI corev1.ResourceName = "vpc.amazonaws.com/pod-eni" + 
ResourcePrivateIPv4Address corev1.ResourceName = "vpc.amazonaws.com/PrivateIPv4Address" + ResourceEFA corev1.ResourceName = "vpc.amazonaws.com/efa" LabelNodeClass = apis.Group + "/ec2nodeclass" diff --git a/pkg/apis/v1beta1/nodepool_validation_cel_test.go b/pkg/apis/v1beta1/nodepool_validation_cel_test.go index 7e0b0aab0f1b..9ac288cb3454 100644 --- a/pkg/apis/v1beta1/nodepool_validation_cel_test.go +++ b/pkg/apis/v1beta1/nodepool_validation_cel_test.go @@ -20,35 +20,35 @@ import ( "github.com/Pallinder/go-randomdata" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" - "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" ) var _ = Describe("CEL/Validation", func() { - var nodePool *v1beta1.NodePool + var nodePool *karpv1beta1.NodePool BeforeEach(func() { if env.Version.Minor() < 25 { Skip("CEL Validation is for 1.25>") } - nodePool = &v1beta1.NodePool{ + nodePool = &karpv1beta1.NodePool{ ObjectMeta: metav1.ObjectMeta{Name: strings.ToLower(randomdata.SillyName())}, - Spec: v1beta1.NodePoolSpec{ - Template: v1beta1.NodeClaimTemplate{ - Spec: v1beta1.NodeClaimSpec{ - NodeClassRef: &v1beta1.NodeClassReference{ + Spec: karpv1beta1.NodePoolSpec{ + Template: karpv1beta1.NodeClaimTemplate{ + Spec: karpv1beta1.NodeClaimSpec{ + NodeClassRef: &karpv1beta1.NodeClassReference{ APIVersion: "karpenter.k8s.aws/v1beta1", Kind: "EC2NodeClass", Name: "default", }, - Requirements: []v1beta1.NodeSelectorRequirementWithMinValues{ + Requirements: []karpv1beta1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpExists, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1beta1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpExists, }, }, }, @@ -60,9 +60,9 @@ var _ = Describe("CEL/Validation", func() { Context("Requirements", func() { It("should allow restricted domains exceptions", func() { oldNodePool := nodePool.DeepCopy() - for label := range v1beta1.LabelDomainExceptions { - nodePool.Spec.Template.Spec.Requirements = []v1beta1.NodeSelectorRequirementWithMinValues{ - {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: label + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}}, + for label := range karpv1beta1.LabelDomainExceptions { + nodePool.Spec.Template.Spec.Requirements = []karpv1beta1.NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: label + "/test", Operator: corev1.NodeSelectorOpIn, Values: []string{"test"}}}, } Expect(env.Client.Create(ctx, nodePool)).To(Succeed()) Expect(nodePool.RuntimeValidate()).To(Succeed()) @@ -72,9 +72,9 @@ var _ = Describe("CEL/Validation", func() { }) It("should allow well known label exceptions", func() { oldNodePool := nodePool.DeepCopy() - for label := range v1beta1.WellKnownLabels.Difference(sets.New(v1beta1.NodePoolLabelKey)) { - nodePool.Spec.Template.Spec.Requirements = []v1beta1.NodeSelectorRequirementWithMinValues{ - {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: label, Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}}, + for label := range karpv1beta1.WellKnownLabels.Difference(sets.New(karpv1beta1.NodePoolLabelKey)) { + nodePool.Spec.Template.Spec.Requirements = []karpv1beta1.NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: 
corev1.NodeSelectorRequirement{Key: label, Operator: corev1.NodeSelectorOpIn, Values: []string{"test"}}}, } Expect(env.Client.Create(ctx, nodePool)).To(Succeed()) Expect(nodePool.RuntimeValidate()).To(Succeed()) @@ -86,7 +86,7 @@ var _ = Describe("CEL/Validation", func() { Context("Labels", func() { It("should allow restricted domains exceptions", func() { oldNodePool := nodePool.DeepCopy() - for label := range v1beta1.LabelDomainExceptions { + for label := range karpv1beta1.LabelDomainExceptions { nodePool.Spec.Template.Labels = map[string]string{ label: "test", } @@ -98,7 +98,7 @@ var _ = Describe("CEL/Validation", func() { }) It("should allow well known label exceptions", func() { oldNodePool := nodePool.DeepCopy() - for label := range v1beta1.WellKnownLabels.Difference(sets.New(v1beta1.NodePoolLabelKey)) { + for label := range karpv1beta1.WellKnownLabels.Difference(sets.New(karpv1beta1.NodePoolLabelKey)) { nodePool.Spec.Template.Labels = map[string]string{ label: "test", } diff --git a/pkg/cloudprovider/cloudprovider.go b/pkg/cloudprovider/cloudprovider.go index c9321a2a3625..565c5fe86411 100644 --- a/pkg/cloudprovider/cloudprovider.go +++ b/pkg/cloudprovider/cloudprovider.go @@ -28,17 +28,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" coreapis "sigs.k8s.io/karpenter/pkg/apis" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/events" "sigs.k8s.io/karpenter/pkg/scheduling" "sigs.k8s.io/karpenter/pkg/utils/resources" "github.com/aws/karpenter-provider-aws/pkg/apis" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/utils" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -77,7 +77,7 @@ func New(instanceTypeProvider instancetype.Provider, instanceProvider instance.P } // Create a NodeClaim given the constraints. 
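Every method below changes signature for the same underlying reason: the upstream cloudprovider.CloudProvider interface these methods implement now traffics in karpv1 types rather than corev1beta1 ones, and kubelet settings are no longer read off nodePool.Spec.Template.Spec.Kubelet but resolved through utils helpers, since the v1 API moves kubelet configuration onto the EC2NodeClass. A paraphrased sketch of the interface shape as assumed here (see sigs.k8s.io/karpenter/pkg/cloudprovider for the authoritative definition; DriftReason and InstanceType are stand-ins for the upstream types):

package sketch

import (
	"context"

	karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
)

// Stand-ins for the upstream types of the same names.
type DriftReason string
type InstanceType struct{}

// CloudProvider paraphrases the upstream interface the AWS provider satisfies
// after this migration; the method set and parameter types match the
// implementations in this file, but the real interface may carry more.
type CloudProvider interface {
	Create(ctx context.Context, nodeClaim *karpv1.NodeClaim) (*karpv1.NodeClaim, error)
	Delete(ctx context.Context, nodeClaim *karpv1.NodeClaim) error
	Get(ctx context.Context, providerID string) (*karpv1.NodeClaim, error)
	List(ctx context.Context) ([]*karpv1.NodeClaim, error)
	GetInstanceTypes(ctx context.Context, nodePool *karpv1.NodePool) ([]*InstanceType, error)
	IsDrifted(ctx context.Context, nodeClaim *karpv1.NodeClaim) (DriftReason, error)
	Name() string
}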
-func (c *CloudProvider) Create(ctx context.Context, nodeClaim *corev1beta1.NodeClaim) (*corev1beta1.NodeClaim, error) { +func (c *CloudProvider) Create(ctx context.Context, nodeClaim *karpv1.NodeClaim) (*karpv1.NodeClaim, error) { nodeClass, err := c.resolveNodeClassFromNodeClaim(ctx, nodeClaim) if err != nil { if errors.IsNotFound(err) { @@ -106,18 +106,18 @@ func (c *CloudProvider) Create(ctx context.Context, nodeClaim *corev1beta1.NodeC }) nc := c.instanceToNodeClaim(instance, instanceType, nodeClass) nc.Annotations = lo.Assign(nodeClass.Annotations, map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: nodeClass.Hash(), - v1beta1.AnnotationEC2NodeClassHashVersion: v1beta1.EC2NodeClassHashVersion, + v1.AnnotationEC2NodeClassHash: nodeClass.Hash(), + v1.AnnotationEC2NodeClassHashVersion: v1.EC2NodeClassHashVersion, }) return nc, nil } -func (c *CloudProvider) List(ctx context.Context) ([]*corev1beta1.NodeClaim, error) { +func (c *CloudProvider) List(ctx context.Context) ([]*karpv1.NodeClaim, error) { instances, err := c.instanceProvider.List(ctx) if err != nil { return nil, fmt.Errorf("listing instances, %w", err) } - var nodeClaims []*corev1beta1.NodeClaim + var nodeClaims []*karpv1.NodeClaim for _, instance := range instances { instanceType, err := c.resolveInstanceTypeFromInstance(ctx, instance) if err != nil { @@ -132,7 +132,7 @@ func (c *CloudProvider) List(ctx context.Context) ([]*corev1beta1.NodeClaim, err return nodeClaims, nil } -func (c *CloudProvider) Get(ctx context.Context, providerID string) (*corev1beta1.NodeClaim, error) { +func (c *CloudProvider) Get(ctx context.Context, providerID string) (*karpv1.NodeClaim, error) { id, err := utils.ParseInstanceID(providerID) if err != nil { return nil, fmt.Errorf("getting instance ID, %w", err) @@ -158,7 +158,7 @@ func (c *CloudProvider) LivenessProbe(req *http.Request) error { } // GetInstanceTypes returns all available InstanceTypes -func (c *CloudProvider) GetInstanceTypes(ctx context.Context, nodePool *corev1beta1.NodePool) ([]*cloudprovider.InstanceType, error) { +func (c *CloudProvider) GetInstanceTypes(ctx context.Context, nodePool *karpv1.NodePool) ([]*cloudprovider.InstanceType, error) { nodeClass, err := c.resolveNodeClassFromNodePool(ctx, nodePool) if err != nil { if errors.IsNotFound(err) { @@ -169,15 +169,19 @@ func (c *CloudProvider) GetInstanceTypes(ctx context.Context, nodePool *corev1be // as the cause. 
return nil, fmt.Errorf("resolving node class, %w", err) } + kubeletConfig, err := utils.GetKubletConfigurationWithNodePool(nodePool, nodeClass) + if err != nil { + return nil, fmt.Errorf("resolving kubelet configuration, %w", err) + } // TODO, break this coupling - instanceTypes, err := c.instanceTypeProvider.List(ctx, nodePool.Spec.Template.Spec.Kubelet, nodeClass) + instanceTypes, err := c.instanceTypeProvider.List(ctx, kubeletConfig, nodeClass) if err != nil { return nil, err } return instanceTypes, nil } -func (c *CloudProvider) Delete(ctx context.Context, nodeClaim *corev1beta1.NodeClaim) error { +func (c *CloudProvider) Delete(ctx context.Context, nodeClaim *karpv1.NodeClaim) error { id, err := utils.ParseInstanceID(nodeClaim.Status.ProviderID) if err != nil { return fmt.Errorf("getting instance ID, %w", err) @@ -186,13 +190,13 @@ func (c *CloudProvider) Delete(ctx context.Context, nodeClaim *corev1beta1.NodeC return c.instanceProvider.Delete(ctx, id) } -func (c *CloudProvider) IsDrifted(ctx context.Context, nodeClaim *corev1beta1.NodeClaim) (cloudprovider.DriftReason, error) { +func (c *CloudProvider) IsDrifted(ctx context.Context, nodeClaim *karpv1.NodeClaim) (cloudprovider.DriftReason, error) { // Not needed when GetInstanceTypes removes nodepool dependency - nodePoolName, ok := nodeClaim.Labels[corev1beta1.NodePoolLabelKey] + nodePoolName, ok := nodeClaim.Labels[karpv1.NodePoolLabelKey] if !ok { return "", nil } - nodePool := &corev1beta1.NodePool{} + nodePool := &karpv1.NodePool{} if err := c.kubeClient.Get(ctx, types.NamespacedName{Name: nodePoolName}, nodePool); err != nil { return "", client.IgnoreNotFound(err) } @@ -219,11 +223,11 @@ func (c *CloudProvider) Name() string { } func (c *CloudProvider) GetSupportedNodeClasses() []status.Object { - return []status.Object{&v1beta1.EC2NodeClass{}} + return []status.Object{&v1.EC2NodeClass{}} } -func (c *CloudProvider) resolveNodeClassFromNodeClaim(ctx context.Context, nodeClaim *corev1beta1.NodeClaim) (*v1beta1.EC2NodeClass, error) { - nodeClass := &v1beta1.EC2NodeClass{} +func (c *CloudProvider) resolveNodeClassFromNodeClaim(ctx context.Context, nodeClaim *karpv1.NodeClaim) (*v1.EC2NodeClass, error) { + nodeClass := &v1.EC2NodeClass{} if err := c.kubeClient.Get(ctx, types.NamespacedName{Name: nodeClaim.Spec.NodeClassRef.Name}, nodeClass); err != nil { return nil, err } @@ -236,8 +240,8 @@ func (c *CloudProvider) resolveNodeClassFromNodeClaim(ctx context.Context, nodeC return nodeClass, nil } -func (c *CloudProvider) resolveNodeClassFromNodePool(ctx context.Context, nodePool *corev1beta1.NodePool) (*v1beta1.EC2NodeClass, error) { - nodeClass := &v1beta1.EC2NodeClass{} +func (c *CloudProvider) resolveNodeClassFromNodePool(ctx context.Context, nodePool *karpv1.NodePool) (*v1.EC2NodeClass, error) { + nodeClass := &v1.EC2NodeClass{} if err := c.kubeClient.Get(ctx, types.NamespacedName{Name: nodePool.Spec.Template.Spec.NodeClassRef.Name}, nodeClass); err != nil { return nil, err } @@ -249,8 +253,12 @@ func (c *CloudProvider) resolveNodeClassFromNodePool(ctx context.Context, nodePo return nodeClass, nil } -func (c *CloudProvider) resolveInstanceTypes(ctx context.Context, nodeClaim *corev1beta1.NodeClaim, nodeClass *v1beta1.EC2NodeClass) ([]*cloudprovider.InstanceType, error) { - instanceTypes, err := c.instanceTypeProvider.List(ctx, nodeClaim.Spec.Kubelet, nodeClass) +func (c *CloudProvider) resolveInstanceTypes(ctx context.Context, nodeClaim *karpv1.NodeClaim, nodeClass *v1.EC2NodeClass) ([]*cloudprovider.InstanceType, error) { + 
kubeletConfig, err := utils.GetKubeletConfigurationWithNodeClaim(nodeClaim, nodeClass) + if err != nil { + return nil, fmt.Errorf("resolving kubelet configuration, %w", err) + } + instanceTypes, err := c.instanceTypeProvider.List(ctx, kubeletConfig, nodeClass) if err != nil { return nil, fmt.Errorf("getting instance types, %w", err) } @@ -279,7 +287,7 @@ func (c *CloudProvider) resolveInstanceTypeFromInstance(ctx context.Context, ins return instanceType, nil } -func (c *CloudProvider) resolveNodeClassFromInstance(ctx context.Context, instance *instance.Instance) (*v1beta1.EC2NodeClass, error) { +func (c *CloudProvider) resolveNodeClassFromInstance(ctx context.Context, instance *instance.Instance) (*v1.EC2NodeClass, error) { np, err := c.resolveNodePoolFromInstance(ctx, instance) if err != nil { return nil, fmt.Errorf("resolving nodepool, %w", err) } @@ -287,9 +295,9 @@ func (c *CloudProvider) resolveNodeClassFromInstan return c.resolveNodeClassFromNodePool(ctx, np) } -func (c *CloudProvider) resolveNodePoolFromInstance(ctx context.Context, instance *instance.Instance) (*corev1beta1.NodePool, error) { - if nodePoolName, ok := instance.Tags[corev1beta1.NodePoolLabelKey]; ok { - nodePool := &corev1beta1.NodePool{} +func (c *CloudProvider) resolveNodePoolFromInstance(ctx context.Context, instance *instance.Instance) (*karpv1.NodePool, error) { + if nodePoolName, ok := instance.Tags[karpv1.NodePoolLabelKey]; ok { + nodePool := &karpv1.NodePool{} if err := c.kubeClient.Get(ctx, types.NamespacedName{Name: nodePoolName}, nodePool); err != nil { return nil, err } @@ -299,8 +307,8 @@ func (c *CloudProvider) resolveNodePoolFromInstanc } //nolint:gocyclo -func (c *CloudProvider) instanceToNodeClaim(i *instance.Instance, instanceType *cloudprovider.InstanceType, nodeClass *v1beta1.EC2NodeClass) *corev1beta1.NodeClaim { - nodeClaim := &corev1beta1.NodeClaim{} +func (c *CloudProvider) instanceToNodeClaim(i *instance.Instance, instanceType *cloudprovider.InstanceType, nodeClass *v1.EC2NodeClass) *karpv1.NodeClaim { + nodeClaim := &karpv1.NodeClaim{} labels := map[string]string{} annotations := map[string]string{} @@ -310,13 +318,13 @@ func (c *CloudProvider) instanceToNodeClaim(i *instance.Instance, instanceType * labels[key] = req.Values()[0] } } - resourceFilter := func(n v1.ResourceName, v resource.Quantity) bool { + resourceFilter := func(n corev1.ResourceName, v resource.Quantity) bool { if resources.IsZero(v) { return false } // The nodeclaim should only advertise an EFA resource if it was requested. EFA network interfaces are only // added to the launch template if they're requested, otherwise the instance is launched with a normal ENI. - if n == v1beta1.ResourceEFA { + if n == v1.ResourceEFA { return i.EFAEnabled } return true @@ -324,24 +332,24 @@ func (c *CloudProvider) instanceToNodeClaim(i *instance.Instance, instanceType * nodeClaim.Status.Capacity = lo.PickBy(instanceType.Capacity, resourceFilter) nodeClaim.Status.Allocatable = lo.PickBy(instanceType.Allocatable(), resourceFilter) } - labels[v1.LabelTopologyZone] = i.Zone + labels[corev1.LabelTopologyZone] = i.Zone // Attempt to resolve the zoneID from the instance's EC2NodeClass' status condition. // If the EC2NodeClass is nil, we know we're in the List or Get paths, where we don't care about the zone-id value. // If we're in the Create path, we've already validated the EC2NodeClass exists.
In this case, we resolve the zone-id from the status condition // both when creating offerings and when adding the label. if nodeClass != nil { - if subnet, ok := lo.Find(nodeClass.Status.Subnets, func(s v1beta1.Subnet) bool { + if subnet, ok := lo.Find(nodeClass.Status.Subnets, func(s v1.Subnet) bool { return s.Zone == i.Zone }); ok && subnet.ZoneID != "" { - labels[v1beta1.LabelTopologyZoneID] = subnet.ZoneID + labels[v1.LabelTopologyZoneID] = subnet.ZoneID } } - labels[corev1beta1.CapacityTypeLabelKey] = i.CapacityType - if v, ok := i.Tags[corev1beta1.NodePoolLabelKey]; ok { - labels[corev1beta1.NodePoolLabelKey] = v + labels[karpv1.CapacityTypeLabelKey] = i.CapacityType + if v, ok := i.Tags[karpv1.NodePoolLabelKey]; ok { + labels[karpv1.NodePoolLabelKey] = v } - if v, ok := i.Tags[corev1beta1.ManagedByAnnotationKey]; ok { - annotations[corev1beta1.ManagedByAnnotationKey] = v + if v, ok := i.Tags[karpv1.ManagedByAnnotationKey]; ok { + annotations[karpv1.ManagedByAnnotationKey] = v } nodeClaim.Labels = labels nodeClaim.Annotations = annotations diff --git a/pkg/cloudprovider/drift.go b/pkg/cloudprovider/drift.go index 5c87fdb62da9..0f58e6d06b26 100644 --- a/pkg/cloudprovider/drift.go +++ b/pkg/cloudprovider/drift.go @@ -19,13 +19,13 @@ import ( "fmt" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/cloudprovider" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily" "github.com/aws/karpenter-provider-aws/pkg/providers/instance" "github.com/aws/karpenter-provider-aws/pkg/utils" @@ -38,7 +38,7 @@ const ( NodeClassDrift cloudprovider.DriftReason = "NodeClassDrift" ) -func (c *CloudProvider) isNodeClassDrifted(ctx context.Context, nodeClaim *corev1beta1.NodeClaim, nodePool *corev1beta1.NodePool, nodeClass *v1beta1.EC2NodeClass) (cloudprovider.DriftReason, error) { +func (c *CloudProvider) isNodeClassDrifted(ctx context.Context, nodeClaim *karpv1.NodeClaim, nodePool *karpv1.NodePool, nodeClass *v1.EC2NodeClass) (cloudprovider.DriftReason, error) { // First check if the node class is statically drifted to save on API calls. 
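That static check works off the hash annotations stamped onto each NodeClaim in Create (the v1.AnnotationEC2NodeClassHash and v1.AnnotationEC2NodeClassHashVersion assignments earlier in this diff). A simplified sketch of the comparison, with a hypothetical staticallyDrifted helper; the real areStaticFieldsDrifted further down additionally returns early when either object is missing one of the annotations:

package sketch

import (
	v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1"
)

// staticallyDrifted reports whether the EC2NodeClass spec hash recorded on
// the NodeClaim at launch no longer matches the NodeClass's current hash.
// Hashes are only comparable when both were computed under the same hash
// version, so a version mismatch is deliberately not reported as drift here.
func staticallyDrifted(nodeClaimAnnotations, nodeClassAnnotations map[string]string) bool {
	if nodeClaimAnnotations[v1.AnnotationEC2NodeClassHashVersion] != nodeClassAnnotations[v1.AnnotationEC2NodeClassHashVersion] {
		return false
	}
	return nodeClaimAnnotations[v1.AnnotationEC2NodeClassHash] != nodeClassAnnotations[v1.AnnotationEC2NodeClassHash]
}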
if drifted := c.areStaticFieldsDrifted(nodeClaim, nodeClass); drifted != "" { return drifted, nil @@ -65,17 +65,17 @@ func (c *CloudProvider) isNodeClassDrifted(ctx context.Context, nodeClaim *corev return drifted, nil } -func (c *CloudProvider) isAMIDrifted(ctx context.Context, nodeClaim *corev1beta1.NodeClaim, nodePool *corev1beta1.NodePool, - instance *instance.Instance, nodeClass *v1beta1.EC2NodeClass) (cloudprovider.DriftReason, error) { +func (c *CloudProvider) isAMIDrifted(ctx context.Context, nodeClaim *karpv1.NodeClaim, nodePool *karpv1.NodePool, + instance *instance.Instance, nodeClass *v1.EC2NodeClass) (cloudprovider.DriftReason, error) { instanceTypes, err := c.GetInstanceTypes(ctx, nodePool) if err != nil { return "", fmt.Errorf("getting instanceTypes, %w", err) } nodeInstanceType, found := lo.Find(instanceTypes, func(instType *cloudprovider.InstanceType) bool { - return instType.Name == nodeClaim.Labels[v1.LabelInstanceTypeStable] + return instType.Name == nodeClaim.Labels[corev1.LabelInstanceTypeStable] }) if !found { - return "", fmt.Errorf(`finding node instance type "%s"`, nodeClaim.Labels[v1.LabelInstanceTypeStable]) + return "", fmt.Errorf(`finding node instance type "%s"`, nodeClaim.Labels[corev1.LabelInstanceTypeStable]) } if len(nodeClass.Status.AMIs) == 0 { return "", fmt.Errorf("no amis exist given constraints") @@ -89,13 +89,13 @@ func (c *CloudProvider) isAMIDrifted(ctx context.Context, nodeClaim *corev1beta1 // Checks if the subnet is drifted, by comparing the subnet returned from the subnetProvider // to the ec2 instance subnet -func (c *CloudProvider) isSubnetDrifted(instance *instance.Instance, nodeClass *v1beta1.EC2NodeClass) (cloudprovider.DriftReason, error) { +func (c *CloudProvider) isSubnetDrifted(instance *instance.Instance, nodeClass *v1.EC2NodeClass) (cloudprovider.DriftReason, error) { // subnets need to be found to check for drift if len(nodeClass.Status.Subnets) == 0 { return "", fmt.Errorf("no subnets are discovered") } - _, found := lo.Find(nodeClass.Status.Subnets, func(subnet v1beta1.Subnet) bool { + _, found := lo.Find(nodeClass.Status.Subnets, func(subnet v1.Subnet) bool { return subnet.ID == instance.SubnetID }) @@ -107,8 +107,8 @@ // Checks if the security groups are drifted, by comparing the security groups returned from the SecurityGroupProvider // to the ec2 instance security groups -func (c *CloudProvider) areSecurityGroupsDrifted(ec2Instance *instance.Instance, nodeClass *v1beta1.EC2NodeClass) (cloudprovider.DriftReason, error) { - securityGroupIds := sets.New(lo.Map(nodeClass.Status.SecurityGroups, func(sg v1beta1.SecurityGroup, _ int) string { return sg.ID })...) +func (c *CloudProvider) areSecurityGroupsDrifted(ec2Instance *instance.Instance, nodeClass *v1.EC2NodeClass) (cloudprovider.DriftReason, error) { + securityGroupIds := sets.New(lo.Map(nodeClass.Status.SecurityGroups, func(sg v1.SecurityGroup, _ int) string { return sg.ID })...)
if len(securityGroupIds) == 0 { return "", fmt.Errorf("no security groups are present in the status") } @@ -119,11 +119,11 @@ func (c *CloudProvider) areSecurityGroupsDrifted(ec2Instance *instance.Instance, return "", nil } -func (c *CloudProvider) areStaticFieldsDrifted(nodeClaim *corev1beta1.NodeClaim, nodeClass *v1beta1.EC2NodeClass) cloudprovider.DriftReason { - nodeClassHash, foundNodeClassHash := nodeClass.Annotations[v1beta1.AnnotationEC2NodeClassHash] - nodeClassHashVersion, foundNodeClassHashVersion := nodeClass.Annotations[v1beta1.AnnotationEC2NodeClassHashVersion] - nodeClaimHash, foundNodeClaimHash := nodeClaim.Annotations[v1beta1.AnnotationEC2NodeClassHash] - nodeClaimHashVersion, foundNodeClaimHashVersion := nodeClaim.Annotations[v1beta1.AnnotationEC2NodeClassHashVersion] +func (c *CloudProvider) areStaticFieldsDrifted(nodeClaim *karpv1.NodeClaim, nodeClass *v1.EC2NodeClass) cloudprovider.DriftReason { + nodeClassHash, foundNodeClassHash := nodeClass.Annotations[v1.AnnotationEC2NodeClassHash] + nodeClassHashVersion, foundNodeClassHashVersion := nodeClass.Annotations[v1.AnnotationEC2NodeClassHashVersion] + nodeClaimHash, foundNodeClaimHash := nodeClaim.Annotations[v1.AnnotationEC2NodeClassHash] + nodeClaimHashVersion, foundNodeClaimHashVersion := nodeClaim.Annotations[v1.AnnotationEC2NodeClassHashVersion] if !foundNodeClassHash || !foundNodeClaimHash || !foundNodeClassHashVersion || !foundNodeClaimHashVersion { return "" diff --git a/pkg/cloudprovider/events/events.go b/pkg/cloudprovider/events/events.go index f571bc2baed2..e5c167f031c0 100644 --- a/pkg/cloudprovider/events/events.go +++ b/pkg/cloudprovider/events/events.go @@ -15,25 +15,25 @@ limitations under the License. package events import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + v1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/events" ) -func NodePoolFailedToResolveNodeClass(nodePool *v1beta1.NodePool) events.Event { +func NodePoolFailedToResolveNodeClass(nodePool *v1.NodePool) events.Event { return events.Event{ InvolvedObject: nodePool, - Type: v1.EventTypeWarning, + Type: corev1.EventTypeWarning, Message: "Failed resolving NodeClass", DedupeValues: []string{string(nodePool.UID)}, } } -func NodeClaimFailedToResolveNodeClass(nodeClaim *v1beta1.NodeClaim) events.Event { +func NodeClaimFailedToResolveNodeClass(nodeClaim *v1.NodeClaim) events.Event { return events.Event{ InvolvedObject: nodeClaim, - Type: v1.EventTypeWarning, + Type: corev1.EventTypeWarning, Message: "Failed resolving NodeClass", DedupeValues: []string{string(nodeClaim.UID)}, } diff --git a/pkg/cloudprovider/suite_test.go b/pkg/cloudprovider/suite_test.go index c50f82963fca..f28739327e15 100644 --- a/pkg/cloudprovider/suite_test.go +++ b/pkg/cloudprovider/suite_test.go @@ -25,7 +25,7 @@ import ( "sigs.k8s.io/karpenter/pkg/test/v1alpha1" "github.com/awslabs/operatorpkg/object" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -40,14 +40,14 @@ import ( "github.com/samber/lo" "github.com/aws/karpenter-provider-aws/pkg/apis" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/cloudprovider" "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass/status" "github.com/aws/karpenter-provider-aws/pkg/fake" 
"github.com/aws/karpenter-provider-aws/pkg/operator/options" "github.com/aws/karpenter-provider-aws/pkg/test" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" corecloudproivder "sigs.k8s.io/karpenter/pkg/cloudprovider" "sigs.k8s.io/karpenter/pkg/controllers/provisioning" "sigs.k8s.io/karpenter/pkg/controllers/state" @@ -113,15 +113,15 @@ var _ = AfterEach(func() { }) var _ = Describe("CloudProvider", func() { - var nodeClass *v1beta1.EC2NodeClass - var nodePool *corev1beta1.NodePool - var nodeClaim *corev1beta1.NodeClaim + var nodeClass *v1.EC2NodeClass + var nodePool *karpv1.NodePool + var nodeClaim *karpv1.NodeClaim var _ = BeforeEach(func() { nodeClass = test.EC2NodeClass( - v1beta1.EC2NodeClass{ - Status: v1beta1.EC2NodeClassStatus{ + v1.EC2NodeClass{ + Status: v1.EC2NodeClassStatus{ InstanceProfile: "test-profile", - SecurityGroups: []v1beta1.SecurityGroup{ + SecurityGroups: []v1.SecurityGroup{ { ID: "sg-test1", Name: "securityGroup-test1", @@ -135,7 +135,7 @@ var _ = Describe("CloudProvider", func() { Name: "securityGroup-test3", }, }, - Subnets: []v1beta1.Subnet{ + Subnets: []v1.Subnet{ { ID: "subnet-test1", Zone: "test-zone-1a", @@ -156,38 +156,38 @@ var _ = Describe("CloudProvider", func() { }, ) nodeClass.StatusConditions().SetTrue(opstatus.ConditionReady) - nodePool = coretest.NodePool(corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + nodePool = coretest.NodePool(karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ - {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: corev1beta1.CapacityTypeLabelKey, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.CapacityTypeOnDemand}}}, + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: karpv1.CapacityTypeLabelKey, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.CapacityTypeOnDemand}}}, }, }, }, }, }) - nodeClaim = coretest.NodeClaim(corev1beta1.NodeClaim{ + nodeClaim = coretest.NodeClaim(karpv1.NodeClaim{ ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{corev1beta1.NodePoolLabelKey: nodePool.Name}, + Labels: map[string]string{karpv1.NodePoolLabelKey: nodePool.Name}, }, - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeOnDemand}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: 
corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeOnDemand}, }, }, }, @@ -206,11 +206,11 @@ var _ = Describe("CloudProvider", func() { }) It("should return an ICE error when there are no instance types to launch", func() { // Specify no instance types and expect to receive a capacity error - nodeClaim.Spec.Requirements = []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodeClaim.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: []string{"test-instance-type"}, }, }, @@ -232,11 +232,11 @@ var _ = Describe("CloudProvider", func() { cloudProviderNodeClaim, err := cloudProvider.Create(ctx, nodeClaim) Expect(err).ToNot(HaveOccurred()) Expect(cloudProviderNodeClaim).ToNot(BeNil()) - zone, ok := cloudProviderNodeClaim.GetLabels()[v1.LabelTopologyZone] + zone, ok := cloudProviderNodeClaim.GetLabels()[corev1.LabelTopologyZone] Expect(ok).To(BeTrue()) - zoneID, ok := cloudProviderNodeClaim.GetLabels()[v1beta1.LabelTopologyZoneID] + zoneID, ok := cloudProviderNodeClaim.GetLabels()[v1.LabelTopologyZoneID] Expect(ok).To(BeTrue()) - subnet, ok := lo.Find(nodeClass.Status.Subnets, func(s v1beta1.Subnet) bool { + subnet, ok := lo.Find(nodeClass.Status.Subnets, func(s v1.Subnet) bool { return s.Zone == zone }) Expect(ok).To(BeTrue()) @@ -247,7 +247,7 @@ var _ = Describe("CloudProvider", func() { cloudProviderNodeClaim, err := cloudProvider.Create(ctx, nodeClaim) Expect(err).To(BeNil()) Expect(cloudProviderNodeClaim).ToNot(BeNil()) - _, ok := cloudProviderNodeClaim.ObjectMeta.Annotations[v1beta1.AnnotationEC2NodeClassHash] + _, ok := cloudProviderNodeClaim.ObjectMeta.Annotations[v1.AnnotationEC2NodeClassHash] Expect(ok).To(BeTrue()) }) It("should return NodeClass Hash Version on the nodeClaim", func() { @@ -255,9 +255,9 @@ var _ = Describe("CloudProvider", func() { cloudProviderNodeClaim, err := cloudProvider.Create(ctx, nodeClaim) Expect(err).To(BeNil()) Expect(cloudProviderNodeClaim).ToNot(BeNil()) - v, ok := cloudProviderNodeClaim.ObjectMeta.Annotations[v1beta1.AnnotationEC2NodeClassHashVersion] + v, ok := cloudProviderNodeClaim.ObjectMeta.Annotations[v1.AnnotationEC2NodeClassHashVersion] Expect(ok).To(BeTrue()) - Expect(v).To(Equal(v1beta1.EC2NodeClassHashVersion)) + Expect(v).To(Equal(v1.EC2NodeClassHashVersion)) }) Context("EC2 Context", func() { contextID := "context-1234" @@ -316,27 +316,27 @@ var _ = Describe("CloudProvider", func() { instanceNames := lo.Map(instances, func(info *ec2.InstanceTypeInfo, _ int) string { return *info.InstanceType }) // Define NodePool that has minValues on instance-type requirement. 
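// The minValues tests that follow are easier to read with the semantics stated
// once: minValues is a floor on how many distinct values a requirement must
// still admit at launch time. A minimal sketch of the requirement shape built
// below, relying only on types and helpers already imported in this file
// (instanceNames is the slice constructed just above):
req := karpv1.NodeSelectorRequirementWithMinValues{
	NodeSelectorRequirement: corev1.NodeSelectorRequirement{
		Key:      corev1.LabelInstanceTypeStable,
		Operator: corev1.NodeSelectorOpIn,
		Values:   instanceNames,
	},
	// Scheduling must keep at least two of the listed instance types in
	// play; if flexibility drops below the floor, the launch is treated as
	// unsatisfiable rather than narrowed to a smaller set.
	MinValues: lo.ToPtr(2),
}
_ = req // illustration only; the real NodePool is constructed below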
- nodePool = coretest.NodePool(corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + nodePool = coretest.NodePool(karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeSpot}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeSpot}, }, }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: instanceNames, }, MinValues: lo.ToPtr(2), @@ -352,14 +352,14 @@ var _ = Describe("CloudProvider", func() { // 2 pods are created with resources such that both fit together only in one of the 2 InstanceTypes created above. pod1 := coretest.UnschedulablePod( coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("0.9")}, + ResourceRequirements: corev1.ResourceRequirements{Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.9")}, }, }) pod2 := coretest.UnschedulablePod( coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("0.9")}, + ResourceRequirements: corev1.ResourceRequirements{Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.9")}, }, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod1, pod2) @@ -414,27 +414,27 @@ var _ = Describe("CloudProvider", func() { instanceNames := lo.Map(instances, func(info *ec2.InstanceTypeInfo, _ int) string { return *info.InstanceType }) // Define NodePool that has minValues on instance-type requirement. 
- nodePool = coretest.NodePool(corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + nodePool = coretest.NodePool(karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpExists, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpExists, }, MinValues: lo.ToPtr(2), }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: instanceNames, }, MinValues: lo.ToPtr(1), @@ -450,14 +450,14 @@ var _ = Describe("CloudProvider", func() { // 2 pods are created with resources such that both fit together only in one of the 2 InstanceTypes created above. pod1 := coretest.UnschedulablePod( coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("0.9")}, + ResourceRequirements: corev1.ResourceRequirements{Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.9")}, }, }) pod2 := coretest.UnschedulablePod( coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("0.9")}, + ResourceRequirements: corev1.ResourceRequirements{Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.9")}, }, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod1, pod2) @@ -519,29 +519,29 @@ var _ = Describe("CloudProvider", func() { instanceNames := lo.Map(uniqInstanceTypes, func(info *ec2.InstanceTypeInfo, _ int) string { return *info.InstanceType }) // Define NodePool that has minValues in multiple requirements. 
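// In the test below, two floors apply at once: at least 2 unique instance
// types and at least 3 unique instance families, per the inline comments on
// its requirements. Both must hold over the same final candidate set. A
// hypothetical check of that invariant (meetsFloors and familyOf are
// illustrative, not helpers from this file; sets and lo are already imported
// above):
func meetsFloors(candidates []string, familyOf func(string) string) bool {
	types := sets.New(candidates...)
	families := sets.New(lo.Map(candidates, func(c string, _ int) string { return familyOf(c) })...)
	// Each requirement's MinValues is checked independently against the
	// values that survive scheduling.
	return types.Len() >= 2 && families.Len() >= 3
}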
- nodePool = coretest.NodePool(corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + nodePool = coretest.NodePool(karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: instanceNames, }, // consider at least 2 unique instance types MinValues: lo.ToPtr(2), }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceFamily, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceFamily, + Operator: corev1.NodeSelectorOpIn, Values: instanceFamilies.UnsortedList(), }, // consider at least 3 unique instance families @@ -556,14 +556,14 @@ var _ = Describe("CloudProvider", func() { ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod1 := coretest.UnschedulablePod( coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("0.9")}, + ResourceRequirements: corev1.ResourceRequirements{Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.9")}, }, }) pod2 := coretest.UnschedulablePod( coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("0.9")}, + ResourceRequirements: corev1.ResourceRequirements{Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.9")}, }, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod1, pod2) @@ -670,9 +670,9 @@ var _ = Describe("CloudProvider", func() { }, }, }) - nodeClass.Status = v1beta1.EC2NodeClassStatus{ + nodeClass.Status = v1.EC2NodeClassStatus{ InstanceProfile: "test-profile", - Subnets: []v1beta1.Subnet{ + Subnets: []v1.Subnet{ { ID: validSubnet1, Zone: "zone-1", @@ -682,22 +682,22 @@ var _ = Describe("CloudProvider", func() { Zone: "zone-2", }, }, - SecurityGroups: []v1beta1.SecurityGroup{ + SecurityGroups: []v1.SecurityGroup{ { ID: validSecurityGroup, }, }, - AMIs: []v1beta1.AMI{ + AMIs: []v1.AMI{ { ID: armAMIID, - Requirements: []v1.NodeSelectorRequirement{ - {Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.ArchitectureArm64}}, + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.ArchitectureArm64}}, }, }, { ID: amdAMIID, - Requirements: []v1.NodeSelectorRequirement{ - {Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.ArchitectureAmd64}}, + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.ArchitectureAmd64}}, }, }, }, @@ -708,7 +708,7 @@ var _ = Describe("CloudProvider", func() 
{ var ok bool selectedInstanceType, ok = lo.Find(instanceTypes, func(i *corecloudproivder.InstanceType) bool { return i.Requirements.Compatible(scheduling.NewLabelRequirements(map[string]string{ - v1.LabelArchStable: corev1beta1.ArchitectureAmd64, + corev1.LabelArchStable: karpv1.ArchitectureAmd64, })) == nil }) Expect(ok).To(BeTrue()) @@ -732,15 +732,15 @@ var _ = Describe("CloudProvider", func() { Reservations: []*ec2.Reservation{{Instances: []*ec2.Instance{instance}}}, }) nodeClass.Annotations = lo.Assign(nodeClass.Annotations, map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: nodeClass.Hash(), - v1beta1.AnnotationEC2NodeClassHashVersion: v1beta1.EC2NodeClassHashVersion, + v1.AnnotationEC2NodeClassHash: nodeClass.Hash(), + v1.AnnotationEC2NodeClassHashVersion: v1.EC2NodeClassHashVersion, }) nodeClaim.Status.ProviderID = fake.ProviderID(lo.FromPtr(instance.InstanceId)) nodeClaim.Annotations = lo.Assign(nodeClaim.Annotations, map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: nodeClass.Hash(), - v1beta1.AnnotationEC2NodeClassHashVersion: v1beta1.EC2NodeClassHashVersion, + v1.AnnotationEC2NodeClassHash: nodeClass.Hash(), + v1.AnnotationEC2NodeClassHashVersion: v1.EC2NodeClassHashVersion, }) - nodeClaim.Labels = lo.Assign(nodeClaim.Labels, map[string]string{v1.LabelInstanceTypeStable: selectedInstanceType.Name}) + nodeClaim.Labels = lo.Assign(nodeClaim.Labels, map[string]string{corev1.LabelInstanceTypeStable: selectedInstanceType.Name}) }) It("should not fail if NodeClass does not exist", func() { ExpectDeleted(ctx, env.Client, nodeClass) @@ -768,7 +768,7 @@ var _ = Describe("CloudProvider", func() { instance.SecurityGroups = []*ec2.GroupIdentifier{{GroupId: aws.String(fake.SecurityGroupID())}} // Assign a fake hash nodeClass.Annotations = lo.Assign(nodeClass.Annotations, map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "abcdefghijkl", + v1.AnnotationEC2NodeClassHash: "abcdefghijkl", }) ExpectApplied(ctx, env.Client, nodeClass) isDrifted, err := cloudProvider.IsDrifted(ctx, nodeClaim) @@ -783,7 +783,7 @@ var _ = Describe("CloudProvider", func() { }) It("should return an error if subnets are empty", func() { awsEnv.SubnetCache.Flush() - nodeClass.Status.Subnets = []v1beta1.Subnet{} + nodeClass.Status.Subnets = []v1.Subnet{} ExpectApplied(ctx, env.Client, nodeClass) _, err := cloudProvider.IsDrifted(ctx, nodeClaim) Expect(err).To(HaveOccurred()) @@ -794,7 +794,7 @@ var _ = Describe("CloudProvider", func() { Expect(isDrifted).To(BeEmpty()) }) It("should return an error if the security groups are empty", func() { - nodeClass.Status.SecurityGroups = []v1beta1.SecurityGroup{} + nodeClass.Status.SecurityGroups = []v1.SecurityGroup{} ExpectApplied(ctx, env.Client, nodeClass) // Instance is a reference to what we return in the GetInstances call instance.SecurityGroups = []*ec2.GroupIdentifier{{GroupId: aws.String(fake.SecurityGroupID())}} @@ -816,7 +816,7 @@ var _ = Describe("CloudProvider", func() { Expect(isDrifted).To(Equal(cloudprovider.SecurityGroupDrift)) }) It("should return drifted if more security groups are present than instance security groups then discovered from nodeclass", func() { - nodeClass.Status.SecurityGroups = []v1beta1.SecurityGroup{ + nodeClass.Status.SecurityGroups = []v1.SecurityGroup{ { ID: validSecurityGroup, Name: "test-securitygroup", @@ -837,12 +837,12 @@ var _ = Describe("CloudProvider", func() { Expect(isDrifted).To(BeEmpty()) }) It("should error if the NodeClaim doesn't have the instance-type label", func() { - delete(nodeClaim.Labels, 
v1.LabelInstanceTypeStable) + delete(nodeClaim.Labels, corev1.LabelInstanceTypeStable) _, err := cloudProvider.IsDrifted(ctx, nodeClaim) Expect(err).To(HaveOccurred()) }) It("should error drift if NodeClaim doesn't have provider id", func() { - nodeClaim.Status = corev1beta1.NodeClaimStatus{} + nodeClaim.Status = karpv1.NodeClaimStatus{} isDrifted, err := cloudProvider.IsDrifted(ctx, nodeClaim) Expect(err).To(HaveOccurred()) Expect(isDrifted).To(BeEmpty()) @@ -855,12 +855,12 @@ var _ = Describe("CloudProvider", func() { Expect(err).To(HaveOccurred()) }) It("should return drifted if the AMI no longer matches the existing NodeClaims instance type", func() { - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{ID: amdAMIID}} - nodeClass.Status.AMIs = []v1beta1.AMI{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{ID: amdAMIID}} + nodeClass.Status.AMIs = []v1.AMI{ { ID: amdAMIID, - Requirements: []v1.NodeSelectorRequirement{ - {Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.ArchitectureAmd64}}, + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.ArchitectureAmd64}}, }, }, } @@ -872,15 +872,15 @@ var _ = Describe("CloudProvider", func() { }) Context("Static Drift Detection", func() { BeforeEach(func() { - armRequirements := []v1.NodeSelectorRequirement{ - {Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.ArchitectureArm64}}, + armRequirements := []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.ArchitectureArm64}}, } - amdRequirements := []v1.NodeSelectorRequirement{ - {Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.ArchitectureAmd64}}, + amdRequirements := []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.ArchitectureAmd64}}, } - nodeClass = &v1beta1.EC2NodeClass{ + nodeClass = &v1.EC2NodeClass{ ObjectMeta: nodeClass.ObjectMeta, - Spec: v1beta1.EC2NodeClassSpec{ + Spec: v1.EC2NodeClassSpec{ SubnetSelectorTerms: nodeClass.Spec.SubnetSelectorTerms, SecurityGroupSelectorTerms: nodeClass.Spec.SecurityGroupSelectorTerms, Role: nodeClass.Spec.Role, @@ -890,19 +890,19 @@ var _ = Describe("CloudProvider", func() { }, Context: lo.ToPtr("fake-context"), DetailedMonitoring: lo.ToPtr(false), - AMIFamily: lo.ToPtr(v1beta1.AMIFamilyAL2023), + AMIFamily: lo.ToPtr(v1.AMIFamilyAL2023), AssociatePublicIPAddress: lo.ToPtr(false), - MetadataOptions: &v1beta1.MetadataOptions{ + MetadataOptions: &v1.MetadataOptions{ HTTPEndpoint: lo.ToPtr("disabled"), HTTPProtocolIPv6: lo.ToPtr("disabled"), HTTPPutResponseHopLimit: lo.ToPtr(int64(1)), HTTPTokens: lo.ToPtr("optional"), }, - BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{ + BlockDeviceMappings: []*v1.BlockDeviceMapping{ { DeviceName: lo.ToPtr("fakeName"), RootVolume: false, - EBS: &v1beta1.BlockDevice{ + EBS: &v1.BlockDevice{ DeleteOnTermination: lo.ToPtr(false), Encrypted: lo.ToPtr(false), IOPS: lo.ToPtr(int64(0)), @@ -915,9 +915,9 @@ var _ = Describe("CloudProvider", func() { }, }, }, - Status: v1beta1.EC2NodeClassStatus{ + Status: v1.EC2NodeClassStatus{ InstanceProfile: "test-profile", - Subnets: []v1beta1.Subnet{ + Subnets: []v1.Subnet{ { ID: validSubnet1, Zone: "zone-1", @@ -927,12 +927,12 @@ var _ = Describe("CloudProvider", func() { Zone: "zone-2", }, }, - SecurityGroups: 
[]v1beta1.SecurityGroup{ + SecurityGroups: []v1.SecurityGroup{ { ID: validSecurityGroup, }, }, - AMIs: []v1beta1.AMI{ + AMIs: []v1.AMI{ { ID: armAMIID, Requirements: armRequirements, @@ -944,50 +944,50 @@ var _ = Describe("CloudProvider", func() { }, }, } - nodeClass.Annotations = lo.Assign(nodeClass.Annotations, map[string]string{v1beta1.AnnotationEC2NodeClassHash: nodeClass.Hash()}) - nodeClaim.Annotations = lo.Assign(nodeClaim.Annotations, map[string]string{v1beta1.AnnotationEC2NodeClassHash: nodeClass.Hash()}) + nodeClass.Annotations = lo.Assign(nodeClass.Annotations, map[string]string{v1.AnnotationEC2NodeClassHash: nodeClass.Hash()}) + nodeClaim.Annotations = lo.Assign(nodeClaim.Annotations, map[string]string{v1.AnnotationEC2NodeClassHash: nodeClass.Hash()}) }) DescribeTable("should return drifted if a statically drifted EC2NodeClass.Spec field is updated", - func(changes v1beta1.EC2NodeClass) { + func(changes v1.EC2NodeClass) { ExpectApplied(ctx, env.Client, nodePool, nodeClass) isDrifted, err := cloudProvider.IsDrifted(ctx, nodeClaim) Expect(err).NotTo(HaveOccurred()) Expect(isDrifted).To(BeEmpty()) Expect(mergo.Merge(nodeClass, changes, mergo.WithOverride, mergo.WithSliceDeepCopy)).To(Succeed()) - nodeClass.Annotations = lo.Assign(nodeClass.Annotations, map[string]string{v1beta1.AnnotationEC2NodeClassHash: nodeClass.Hash()}) + nodeClass.Annotations = lo.Assign(nodeClass.Annotations, map[string]string{v1.AnnotationEC2NodeClassHash: nodeClass.Hash()}) ExpectApplied(ctx, env.Client, nodeClass) isDrifted, err = cloudProvider.IsDrifted(ctx, nodeClaim) Expect(err).NotTo(HaveOccurred()) Expect(isDrifted).To(Equal(cloudprovider.NodeClassDrift)) }, - Entry("UserData", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{UserData: lo.ToPtr("userdata-test-2")}}), - Entry("Tags", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{Tags: map[string]string{"keyTag-test-3": "valueTag-test-3"}}}), - Entry("Context", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{Context: lo.ToPtr("context-2")}}), - Entry("DetailedMonitoring", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{DetailedMonitoring: aws.Bool(true)}}), - Entry("AMIFamily", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{AMIFamily: lo.ToPtr(v1beta1.AMIFamilyBottlerocket)}}), - Entry("InstanceStorePolicy", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{InstanceStorePolicy: lo.ToPtr(v1beta1.InstanceStorePolicyRAID0)}}), - Entry("AssociatePublicIPAddress", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{AssociatePublicIPAddress: lo.ToPtr(true)}}), - Entry("MetadataOptions HTTPEndpoint", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{MetadataOptions: &v1beta1.MetadataOptions{HTTPEndpoint: lo.ToPtr("enabled")}}}), - Entry("MetadataOptions HTTPProtocolIPv6", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{MetadataOptions: &v1beta1.MetadataOptions{HTTPProtocolIPv6: lo.ToPtr("enabled")}}}), - Entry("MetadataOptions HTTPPutResponseHopLimit", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{MetadataOptions: &v1beta1.MetadataOptions{HTTPPutResponseHopLimit: lo.ToPtr(int64(10))}}}), - Entry("MetadataOptions HTTPTokens", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{MetadataOptions: &v1beta1.MetadataOptions{HTTPTokens: lo.ToPtr("required")}}}), - Entry("BlockDeviceMapping DeviceName", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{{DeviceName: lo.ToPtr("map-device-test-3")}}}}), - Entry("BlockDeviceMapping RootVolume", v1beta1.EC2NodeClass{Spec: 
v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{{RootVolume: true}}}}), - Entry("BlockDeviceMapping DeleteOnTermination", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{{EBS: &v1beta1.BlockDevice{DeleteOnTermination: lo.ToPtr(true)}}}}}), - Entry("BlockDeviceMapping Encrypted", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{{EBS: &v1beta1.BlockDevice{Encrypted: lo.ToPtr(true)}}}}}), - Entry("BlockDeviceMapping IOPS", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{{EBS: &v1beta1.BlockDevice{IOPS: lo.ToPtr(int64(10))}}}}}), - Entry("BlockDeviceMapping KMSKeyID", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{{EBS: &v1beta1.BlockDevice{KMSKeyID: lo.ToPtr("test")}}}}}), - Entry("BlockDeviceMapping SnapshotID", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{{EBS: &v1beta1.BlockDevice{SnapshotID: lo.ToPtr("test")}}}}}), - Entry("BlockDeviceMapping Throughput", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{{EBS: &v1beta1.BlockDevice{Throughput: lo.ToPtr(int64(10))}}}}}), - Entry("BlockDeviceMapping VolumeType", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{{EBS: &v1beta1.BlockDevice{VolumeType: lo.ToPtr("io1")}}}}}), + Entry("UserData", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{UserData: lo.ToPtr("userdata-test-2")}}), + Entry("Tags", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{Tags: map[string]string{"keyTag-test-3": "valueTag-test-3"}}}), + Entry("Context", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{Context: lo.ToPtr("context-2")}}), + Entry("DetailedMonitoring", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{DetailedMonitoring: aws.Bool(true)}}), + Entry("AMIFamily", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{AMIFamily: lo.ToPtr(v1.AMIFamilyBottlerocket)}}), + Entry("InstanceStorePolicy", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{InstanceStorePolicy: lo.ToPtr(v1.InstanceStorePolicyRAID0)}}), + Entry("AssociatePublicIPAddress", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{AssociatePublicIPAddress: lo.ToPtr(true)}}), + Entry("MetadataOptions HTTPEndpoint", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{MetadataOptions: &v1.MetadataOptions{HTTPEndpoint: lo.ToPtr("enabled")}}}), + Entry("MetadataOptions HTTPProtocolIPv6", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{MetadataOptions: &v1.MetadataOptions{HTTPProtocolIPv6: lo.ToPtr("enabled")}}}), + Entry("MetadataOptions HTTPPutResponseHopLimit", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{MetadataOptions: &v1.MetadataOptions{HTTPPutResponseHopLimit: lo.ToPtr(int64(10))}}}), + Entry("MetadataOptions HTTPTokens", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{MetadataOptions: &v1.MetadataOptions{HTTPTokens: lo.ToPtr("required")}}}), + Entry("BlockDeviceMapping DeviceName", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{BlockDeviceMappings: []*v1.BlockDeviceMapping{{DeviceName: lo.ToPtr("map-device-test-3")}}}}), + Entry("BlockDeviceMapping RootVolume", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{BlockDeviceMappings: []*v1.BlockDeviceMapping{{RootVolume: true}}}}), + Entry("BlockDeviceMapping DeleteOnTermination", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{BlockDeviceMappings: []*v1.BlockDeviceMapping{{EBS: &v1.BlockDevice{DeleteOnTermination: lo.ToPtr(true)}}}}}), + 
Entry("BlockDeviceMapping Encrypted", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{BlockDeviceMappings: []*v1.BlockDeviceMapping{{EBS: &v1.BlockDevice{Encrypted: lo.ToPtr(true)}}}}}), + Entry("BlockDeviceMapping IOPS", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{BlockDeviceMappings: []*v1.BlockDeviceMapping{{EBS: &v1.BlockDevice{IOPS: lo.ToPtr(int64(10))}}}}}), + Entry("BlockDeviceMapping KMSKeyID", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{BlockDeviceMappings: []*v1.BlockDeviceMapping{{EBS: &v1.BlockDevice{KMSKeyID: lo.ToPtr("test")}}}}}), + Entry("BlockDeviceMapping SnapshotID", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{BlockDeviceMappings: []*v1.BlockDeviceMapping{{EBS: &v1.BlockDevice{SnapshotID: lo.ToPtr("test")}}}}}), + Entry("BlockDeviceMapping Throughput", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{BlockDeviceMappings: []*v1.BlockDeviceMapping{{EBS: &v1.BlockDevice{Throughput: lo.ToPtr(int64(10))}}}}}), + Entry("BlockDeviceMapping VolumeType", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{BlockDeviceMappings: []*v1.BlockDeviceMapping{{EBS: &v1.BlockDevice{VolumeType: lo.ToPtr("io1")}}}}}), ) // We create a separate test for updating blockDeviceMapping volumeSize, since resource.Quantity is a struct, and mergo.WithSliceDeepCopy // doesn't work well with unexported fields, like the ones that are present in resource.Quantity It("should return drifted when updating blockDeviceMapping volumeSize", func() { nodeClass.Spec.BlockDeviceMappings[0].EBS.VolumeSize = resource.NewScaledQuantity(10, resource.Giga) - nodeClass.Annotations = lo.Assign(nodeClass.Annotations, map[string]string{v1beta1.AnnotationEC2NodeClassHash: nodeClass.Hash()}) + nodeClass.Annotations = lo.Assign(nodeClass.Annotations, map[string]string{v1.AnnotationEC2NodeClassHash: nodeClass.Hash()}) ExpectApplied(ctx, env.Client, nodeClass) isDrifted, err := cloudProvider.IsDrifted(ctx, nodeClaim) @@ -995,27 +995,27 @@ var _ = Describe("CloudProvider", func() { Expect(isDrifted).To(Equal(cloudprovider.NodeClassDrift)) }) DescribeTable("should not return drifted if dynamic fields are updated", - func(changes v1beta1.EC2NodeClass) { + func(changes v1.EC2NodeClass) { ExpectApplied(ctx, env.Client, nodePool, nodeClass) isDrifted, err := cloudProvider.IsDrifted(ctx, nodeClaim) Expect(err).NotTo(HaveOccurred()) Expect(isDrifted).To(BeEmpty()) Expect(mergo.Merge(nodeClass, changes, mergo.WithOverride)) - nodeClass.Annotations = lo.Assign(nodeClass.Annotations, map[string]string{v1beta1.AnnotationEC2NodeClassHash: nodeClass.Hash()}) + nodeClass.Annotations = lo.Assign(nodeClass.Annotations, map[string]string{v1.AnnotationEC2NodeClassHash: nodeClass.Hash()}) ExpectApplied(ctx, env.Client, nodeClass) isDrifted, err = cloudProvider.IsDrifted(ctx, nodeClaim) Expect(err).NotTo(HaveOccurred()) Expect(isDrifted).To(BeEmpty()) }, - Entry("AMI Drift", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{AMISelectorTerms: []v1beta1.AMISelectorTerm{{Tags: map[string]string{"ami-key-1": "ami-value-1"}}}}}), - Entry("Subnet Drift", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{SubnetSelectorTerms: []v1beta1.SubnetSelectorTerm{{Tags: map[string]string{"sn-key-1": "sn-value-1"}}}}}), - Entry("SecurityGroup Drift", v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{SecurityGroupSelectorTerms: []v1beta1.SecurityGroupSelectorTerm{{Tags: map[string]string{"sg-key": "sg-value"}}}}}), + Entry("AMI Drift", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{AMISelectorTerms: []v1.AMISelectorTerm{{Tags: map[string]string{"ami-key-1": "ami-value-1"}}}}}), + Entry("Subnet 
Drift", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{SubnetSelectorTerms: []v1.SubnetSelectorTerm{{Tags: map[string]string{"sn-key-1": "sn-value-1"}}}}}), + Entry("SecurityGroup Drift", v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{SecurityGroupSelectorTerms: []v1.SecurityGroupSelectorTerm{{Tags: map[string]string{"sg-key": "sg-value"}}}}}), ) It("should not return drifted if karpenter.k8s.aws/ec2nodeclass-hash annotation is not present on the NodeClaim", func() { nodeClaim.Annotations = map[string]string{ - v1beta1.AnnotationEC2NodeClassHashVersion: v1beta1.EC2NodeClassHashVersion, + v1.AnnotationEC2NodeClassHashVersion: v1.EC2NodeClassHashVersion, } nodeClass.Spec.Tags = map[string]string{ "Test Key": "Test Value", @@ -1027,12 +1027,12 @@ var _ = Describe("CloudProvider", func() { }) It("should not return drifted if the NodeClaim's karpenter.k8s.aws/ec2nodeclass-hash-version annotation does not match the EC2NodeClass's", func() { nodeClass.ObjectMeta.Annotations = map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "test-hash-111111", - v1beta1.AnnotationEC2NodeClassHashVersion: "test-hash-version-1", + v1.AnnotationEC2NodeClassHash: "test-hash-111111", + v1.AnnotationEC2NodeClassHashVersion: "test-hash-version-1", } nodeClaim.ObjectMeta.Annotations = map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "test-hash-222222", - v1beta1.AnnotationEC2NodeClassHashVersion: "test-hash-version-2", + v1.AnnotationEC2NodeClassHash: "test-hash-222222", + v1.AnnotationEC2NodeClassHashVersion: "test-hash-version-2", } ExpectApplied(ctx, env.Client, nodePool, nodeClass) isDrifted, err := cloudProvider.IsDrifted(ctx, nodeClaim) @@ -1041,11 +1041,11 @@ var _ = Describe("CloudProvider", func() { }) It("should not return drifted if karpenter.k8s.aws/ec2nodeclass-hash-version annotation is not present on the NodeClass", func() { nodeClass.ObjectMeta.Annotations = map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "test-hash-111111", + v1.AnnotationEC2NodeClassHash: "test-hash-111111", } nodeClaim.ObjectMeta.Annotations = map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "test-hash-222222", - v1beta1.AnnotationEC2NodeClassHashVersion: "test-hash-version-2", + v1.AnnotationEC2NodeClassHash: "test-hash-222222", + v1.AnnotationEC2NodeClassHashVersion: "test-hash-version-2", } // should trigger drift nodeClass.Spec.Tags = map[string]string{ @@ -1058,11 +1058,11 @@ var _ = Describe("CloudProvider", func() { }) It("should not return drifted if karpenter.k8s.aws/ec2nodeclass-hash-version annotation is not present on the NodeClaim", func() { nodeClass.ObjectMeta.Annotations = map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "test-hash-111111", - v1beta1.AnnotationEC2NodeClassHashVersion: "test-hash-version-1", + v1.AnnotationEC2NodeClassHash: "test-hash-111111", + v1.AnnotationEC2NodeClassHashVersion: "test-hash-version-1", } nodeClaim.ObjectMeta.Annotations = map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "test-hash-222222", + v1.AnnotationEC2NodeClassHash: "test-hash-222222", } // should trigger drift nodeClass.Spec.Tags = map[string]string{ @@ -1081,7 +1081,7 @@ var _ = Describe("CloudProvider", func() { It("should default to the cluster's subnets", func() { ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod( - coretest.PodOptions{NodeSelector: map[string]string{v1.LabelArchStable: corev1beta1.ArchitectureAmd64}}) + coretest.PodOptions{NodeSelector: map[string]string{corev1.LabelArchStable: karpv1.ArchitectureAmd64}}) ExpectProvisioned(ctx, 
env.Client, cluster, cloudProvider, prov, pod) ExpectScheduled(ctx, env.Client, pod) Expect(awsEnv.EC2API.CreateFleetBehavior.CalledWithInput.Len()).To(Equal(1)) @@ -1114,7 +1114,7 @@ var _ = Describe("CloudProvider", func() { controller := status.NewController(env.Client, awsEnv.SubnetProvider, awsEnv.SecurityGroupProvider, awsEnv.AMIProvider, awsEnv.InstanceProfileProvider, awsEnv.LaunchTemplateProvider) ExpectApplied(ctx, env.Client, nodePool, nodeClass) ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) - pod := coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1a"}}) + pod := coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: map[string]string{corev1.LabelTopologyZone: "test-zone-1a"}}) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) ExpectScheduled(ctx, env.Client, pod) createFleetInput := awsEnv.EC2API.CreateFleetBehavior.CalledWithInput.Pop() @@ -1129,18 +1129,21 @@ var _ = Describe("CloudProvider", func() { Tags: []*ec2.Tag{{Key: aws.String("Name"), Value: aws.String("test-subnet-2")}}}, }}) controller := status.NewController(env.Client, awsEnv.SubnetProvider, awsEnv.SecurityGroupProvider, awsEnv.AMIProvider, awsEnv.InstanceProfileProvider, awsEnv.LaunchTemplateProvider) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{MaxPods: aws.Int32(1)} + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ + MaxPods: aws.Int32(1), + } ExpectApplied(ctx, env.Client, nodePool, nodeClass) + nodeClass = ExpectExists(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) - pod1 := coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1a"}}) - pod2 := coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1a"}}) + pod1 := coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: map[string]string{corev1.LabelTopologyZone: "test-zone-1a"}}) + pod2 := coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: map[string]string{corev1.LabelTopologyZone: "test-zone-1a"}}) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod1, pod2) ExpectScheduled(ctx, env.Client, pod1) ExpectScheduled(ctx, env.Client, pod2) createFleetInput := awsEnv.EC2API.CreateFleetBehavior.CalledWithInput.Pop() Expect(fake.SubnetsFromFleetRequest(createFleetInput)).To(ConsistOf("test-subnet-2")) // Provision for another pod that should now use the other subnet since we've consumed some from the first launch. 
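// The assertion that follows relies on subnet choice being driven by free-IP
// accounting: the launch above drew addresses from test-subnet-2, so the next
// launch should fall back to the other zone-matching subnet. A sketch of that
// preference, assuming selection simply maximizes AvailableIpAddressCount
// (illustrative; the real logic lives in the subnet provider, and `subnets`
// here is a hypothetical []*ec2.Subnet):
best := lo.MaxBy(subnets, func(a, b *ec2.Subnet) bool {
	return aws.Int64Value(a.AvailableIpAddressCount) > aws.Int64Value(b.AvailableIpAddressCount)
})
_ = best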
- pod3 := coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1a"}}) + pod3 := coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: map[string]string{corev1.LabelTopologyZone: "test-zone-1a"}}) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod3) ExpectScheduled(ctx, env.Client, pod3) createFleetInput = awsEnv.EC2API.CreateFleetBehavior.CalledWithInput.Pop() @@ -1151,7 +1154,7 @@ var _ = Describe("CloudProvider", func() { {SubnetId: aws.String("test-subnet-1"), AvailabilityZone: aws.String("test-zone-1a"), AvailableIpAddressCount: aws.Int64(10), Tags: []*ec2.Tag{{Key: aws.String("Name"), Value: aws.String("test-subnet-1")}}}, }}) - pod1 := coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1a"}}) + pod1 := coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: map[string]string{corev1.LabelTopologyZone: "test-zone-1a"}}) ExpectApplied(ctx, env.Client, nodePool, nodeClass, pod1) awsEnv.EC2API.CreateFleetBehavior.Error.Set(fmt.Errorf("CreateFleet synthetic error")) bindings := ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod1) @@ -1164,7 +1167,7 @@ var _ = Describe("CloudProvider", func() { {SubnetId: aws.String("test-subnet-2"), AvailabilityZone: aws.String("test-zone-1b"), AvailabilityZoneId: aws.String("tstz1-1a"), AvailableIpAddressCount: aws.Int64(100), Tags: []*ec2.Tag{{Key: aws.String("Name"), Value: aws.String("test-subnet-2")}}}, }}) - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{{Tags: map[string]string{"Name": "test-subnet-1"}}} + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{{Tags: map[string]string{"Name": "test-subnet-1"}}} ExpectApplied(ctx, env.Client, nodePool, nodeClass) controller := status.NewController(env.Client, awsEnv.SubnetProvider, awsEnv.SecurityGroupProvider, awsEnv.AMIProvider, awsEnv.InstanceProfileProvider, awsEnv.LaunchTemplateProvider) ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) @@ -1174,36 +1177,36 @@ var _ = Describe("CloudProvider", func() { createFleetInput := awsEnv.EC2API.CreateFleetBehavior.CalledWithInput.Pop() Expect(fake.SubnetsFromFleetRequest(createFleetInput)).To(ConsistOf("test-subnet-1")) - nodeClass2 := test.EC2NodeClass(v1beta1.EC2NodeClass{ - Spec: v1beta1.EC2NodeClassSpec{ - SubnetSelectorTerms: []v1beta1.SubnetSelectorTerm{ + nodeClass2 := test.EC2NodeClass(v1.EC2NodeClass{ + Spec: v1.EC2NodeClassSpec{ + SubnetSelectorTerms: []v1.SubnetSelectorTerm{ { Tags: map[string]string{"Name": "test-subnet-2"}, }, }, - SecurityGroupSelectorTerms: []v1beta1.SecurityGroupSelectorTerm{ + SecurityGroupSelectorTerms: []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"*": "*"}, }, }, }, - Status: v1beta1.EC2NodeClassStatus{ + Status: v1.EC2NodeClassStatus{ AMIs: nodeClass.Status.AMIs, - SecurityGroups: []v1beta1.SecurityGroup{ + SecurityGroups: []v1.SecurityGroup{ { ID: "sg-test1", }, }, }, }) - nodePool2 := coretest.NodePool(corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass2).GroupVersion().String(), - Kind: object.GVK(nodeClass2).Kind, - Name: nodeClass2.Name, + nodePool2 := coretest.NodePool(karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: 
object.GVK(nodeClass2).Group, + Kind: object.GVK(nodeClass2).Kind, + Name: nodeClass2.Name, }, }, }, @@ -1211,43 +1214,43 @@ var _ = Describe("CloudProvider", func() { }) ExpectApplied(ctx, env.Client, nodePool2, nodeClass2) ExpectObjectReconciled(ctx, env.Client, controller, nodeClass2) - podSubnet2 := coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: map[string]string{corev1beta1.NodePoolLabelKey: nodePool2.Name}}) + podSubnet2 := coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: map[string]string{karpv1.NodePoolLabelKey: nodePool2.Name}}) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, podSubnet2) ExpectScheduled(ctx, env.Client, podSubnet2) createFleetInput = awsEnv.EC2API.CreateFleetBehavior.CalledWithInput.Pop() Expect(fake.SubnetsFromFleetRequest(createFleetInput)).To(ConsistOf("test-subnet-2")) }) It("should launch instances with an alternate NodePool when a NodeClass selects 0 subnets, security groups, or amis", func() { - misconfiguredNodeClass := test.EC2NodeClass(v1beta1.EC2NodeClass{ - Spec: v1beta1.EC2NodeClassSpec{ + misconfiguredNodeClass := test.EC2NodeClass(v1.EC2NodeClass{ + Spec: v1.EC2NodeClassSpec{ // select nothing! - SubnetSelectorTerms: []v1beta1.SubnetSelectorTerm{ + SubnetSelectorTerms: []v1.SubnetSelectorTerm{ { Tags: map[string]string{"Name": "nothing"}, }, }, // select nothing! - SecurityGroupSelectorTerms: []v1beta1.SecurityGroupSelectorTerm{ + SecurityGroupSelectorTerms: []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"Name": "nothing"}, }, }, // select nothing! - AMISelectorTerms: []v1beta1.AMISelectorTerm{ + AMISelectorTerms: []v1.AMISelectorTerm{ { Tags: map[string]string{"Name": "nothing"}, }, }, }, }) - nodePool2 := coretest.NodePool(corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(misconfiguredNodeClass).GroupVersion().String(), - Kind: object.GVK(misconfiguredNodeClass).Kind, - Name: misconfiguredNodeClass.Name, + nodePool2 := coretest.NodePool(karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(misconfiguredNodeClass).Group, + Kind: object.GVK(misconfiguredNodeClass).Kind, + Name: misconfiguredNodeClass.Name, }, }, }, @@ -1261,27 +1264,27 @@ var _ = Describe("CloudProvider", func() { }) Context("EFA", func() { It("should include vpc.amazonaws.com/efa on a nodeclaim if it requests it", func() { - nodeClaim.Spec.Requirements = []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodeClaim.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: []string{"dl1.24xlarge"}, }, }, } - nodeClaim.Spec.Resources.Requests = v1.ResourceList{v1beta1.ResourceEFA: resource.MustParse("1")} + nodeClaim.Spec.Resources.Requests = corev1.ResourceList{v1.ResourceEFA: resource.MustParse("1")} ExpectApplied(ctx, env.Client, nodePool, nodeClass, nodeClaim) cloudProviderNodeClaim, err := cloudProvider.Create(ctx, nodeClaim) Expect(err).To(BeNil()) - Expect(lo.Keys(cloudProviderNodeClaim.Status.Allocatable)).To(ContainElement(v1beta1.ResourceEFA)) + 
Expect(lo.Keys(cloudProviderNodeClaim.Status.Allocatable)).To(ContainElement(v1.ResourceEFA)) }) It("shouldn't include vpc.amazonaws.com/efa on a nodeclaim if it doesn't request it", func() { - nodeClaim.Spec.Requirements = []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodeClaim.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: []string{"dl1.24xlarge"}, }, }, @@ -1289,7 +1292,7 @@ var _ = Describe("CloudProvider", func() { ExpectApplied(ctx, env.Client, nodePool, nodeClass, nodeClaim) cloudProviderNodeClaim, err := cloudProvider.Create(ctx, nodeClaim) Expect(err).To(BeNil()) - Expect(lo.Keys(cloudProviderNodeClaim.Status.Allocatable)).ToNot(ContainElement(v1beta1.ResourceEFA)) + Expect(lo.Keys(cloudProviderNodeClaim.Status.Allocatable)).ToNot(ContainElement(v1.ResourceEFA)) }) }) }) diff --git a/pkg/controllers/controllers.go b/pkg/controllers/controllers.go index dbeaf727a282..c45f00938299 100644 --- a/pkg/controllers/controllers.go +++ b/pkg/controllers/controllers.go @@ -22,7 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/karpenter/pkg/cloudprovider" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" nodeclasshash "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass/hash" nodeclassstatus "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass/status" nodeclasstermination "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass/termination" @@ -66,7 +66,7 @@ func NewControllers(ctx context.Context, mgr manager.Manager, sess *session.Sess nodeclaimtagging.NewController(kubeClient, instanceProvider), controllerspricing.NewController(pricingProvider), controllersinstancetype.NewController(instanceTypeProvider), - status.NewController[*v1beta1.EC2NodeClass](kubeClient, mgr.GetEventRecorderFor("karpenter")), + status.NewController[*v1.EC2NodeClass](kubeClient, mgr.GetEventRecorderFor("karpenter")), } if options.FromContext(ctx).InterruptionQueue != "" { sqsapi := servicesqs.New(sess) diff --git a/pkg/controllers/interruption/controller.go b/pkg/controllers/interruption/controller.go index cabf5960b0be..4e46d371012d 100644 --- a/pkg/controllers/interruption/controller.go +++ b/pkg/controllers/interruption/controller.go @@ -24,7 +24,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/samber/lo" "go.uber.org/multierr" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" "k8s.io/utils/clock" @@ -36,7 +36,7 @@ import ( "sigs.k8s.io/karpenter/pkg/metrics" "sigs.k8s.io/karpenter/pkg/operator/injection" - "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/utils/pretty" "github.com/aws/karpenter-provider-aws/pkg/cache" @@ -146,8 +146,8 @@ func (c *Controller) parseMessage(raw *sqsapi.Message) (messages.Message, error) } // handleMessage takes an action against every node involved in the message that is owned by a NodePool -func (c *Controller) handleMessage(ctx context.Context, nodeClaimInstanceIDMap map[string]*v1beta1.NodeClaim, - nodeInstanceIDMap map[string]*v1.Node, msg messages.Message) (err error) { +func (c *Controller) handleMessage(ctx 
context.Context, nodeClaimInstanceIDMap map[string]*karpv1.NodeClaim, + nodeInstanceIDMap map[string]*corev1.Node, msg messages.Message) (err error) { ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("messageKind", msg.Kind())) receivedMessages.WithLabelValues(string(msg.Kind())).Inc() @@ -182,7 +182,7 @@ func (c *Controller) deleteMessage(ctx context.Context, msg *sqsapi.Message) err } // handleNodeClaim retrieves the action for the message and then performs the appropriate action against the node -func (c *Controller) handleNodeClaim(ctx context.Context, msg messages.Message, nodeClaim *v1beta1.NodeClaim, node *v1.Node) error { +func (c *Controller) handleNodeClaim(ctx context.Context, msg messages.Message, nodeClaim *karpv1.NodeClaim, node *corev1.Node) error { action := actionForMessage(msg) ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("NodeClaim", klog.KRef("", nodeClaim.Name), "action", string(action))) if node != nil { @@ -194,16 +194,16 @@ func (c *Controller) handleNodeClaim(ctx context.Context, msg messages.Message, actionsPerformed.With( prometheus.Labels{ actionTypeLabel: string(action), - metrics.NodePoolLabel: nodeClaim.Labels[v1beta1.NodePoolLabelKey], + metrics.NodePoolLabel: nodeClaim.Labels[karpv1.NodePoolLabelKey], }, ).Inc() // Mark the offering as unavailable in the ICE cache since we got a spot interruption warning if msg.Kind() == messages.SpotInterruptionKind { - zone := nodeClaim.Labels[v1.LabelTopologyZone] - instanceType := nodeClaim.Labels[v1.LabelInstanceTypeStable] + zone := nodeClaim.Labels[corev1.LabelTopologyZone] + instanceType := nodeClaim.Labels[corev1.LabelInstanceTypeStable] if zone != "" && instanceType != "" { - c.unavailableOfferingsCache.MarkUnavailable(ctx, string(msg.Kind()), instanceType, zone, v1beta1.CapacityTypeSpot) + c.unavailableOfferingsCache.MarkUnavailable(ctx, string(msg.Kind()), instanceType, zone, karpv1.CapacityTypeSpot) } } if action != NoAction { @@ -213,7 +213,7 @@ func (c *Controller) handleNodeClaim(ctx context.Context, msg messages.Message, } // deleteNodeClaim removes the NodeClaim from the api-server -func (c *Controller) deleteNodeClaim(ctx context.Context, nodeClaim *v1beta1.NodeClaim, node *v1.Node) error { +func (c *Controller) deleteNodeClaim(ctx context.Context, nodeClaim *karpv1.NodeClaim, node *corev1.Node) error { if !nodeClaim.DeletionTimestamp.IsZero() { return nil } @@ -224,14 +224,14 @@ func (c *Controller) deleteNodeClaim(ctx context.Context, nodeClaim *v1beta1.Nod c.recorder.Publish(interruptionevents.TerminatingOnInterruption(node, nodeClaim)...) metrics.NodeClaimsTerminatedCounter.With(prometheus.Labels{ metrics.ReasonLabel: terminationReasonLabel, - metrics.NodePoolLabel: nodeClaim.Labels[v1beta1.NodePoolLabelKey], - metrics.CapacityTypeLabel: nodeClaim.Labels[v1beta1.CapacityTypeLabelKey], + metrics.NodePoolLabel: nodeClaim.Labels[karpv1.NodePoolLabelKey], + metrics.CapacityTypeLabel: nodeClaim.Labels[karpv1.CapacityTypeLabelKey], }).Inc() return nil } // notifyForMessage publishes the relevant alert based on the message kind -func (c *Controller) notifyForMessage(msg messages.Message, nodeClaim *v1beta1.NodeClaim, n *v1.Node) { +func (c *Controller) notifyForMessage(msg messages.Message, nodeClaim *karpv1.NodeClaim, n *corev1.Node) { switch msg.Kind() { case messages.RebalanceRecommendationKind: c.recorder.Publish(interruptionevents.RebalanceRecommendation(n, nodeClaim)...) 
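// Aside from the package renames, the flow in the hunk above is unchanged: a
// spot interruption warning first takes the offering out of circulation, then
// the NodeClaim is deleted. A condensed sketch of that branch; the cache's
// concrete type is not visible in this diff, so it is abstracted behind an
// assumed interface matching the MarkUnavailable call above:
func markSpotUnavailable(ctx context.Context, cache interface {
	MarkUnavailable(ctx context.Context, reason, instanceType, zone, capacityType string)
}, nodeClaim *karpv1.NodeClaim, kind string) {
	zone := nodeClaim.Labels[corev1.LabelTopologyZone]
	instanceType := nodeClaim.Labels[corev1.LabelInstanceTypeStable]
	// A NodeClaim that never registered can be missing these labels; skip
	// the cache write rather than recording an empty offering key.
	if zone != "" && instanceType != "" {
		cache.MarkUnavailable(ctx, kind, instanceType, zone, karpv1.CapacityTypeSpot)
	}
}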
@@ -256,9 +256,9 @@ func (c *Controller) notifyForMessage(msg messages.Message, nodeClaim *v1beta1.N // makeNodeClaimInstanceIDMap builds a map between the instance id that is stored in the // NodeClaim .status.providerID and the NodeClaim -func (c *Controller) makeNodeClaimInstanceIDMap(ctx context.Context) (map[string]*v1beta1.NodeClaim, error) { - m := map[string]*v1beta1.NodeClaim{} - nodeClaimList := &v1beta1.NodeClaimList{} +func (c *Controller) makeNodeClaimInstanceIDMap(ctx context.Context) (map[string]*karpv1.NodeClaim, error) { + m := map[string]*karpv1.NodeClaim{} + nodeClaimList := &karpv1.NodeClaimList{} if err := c.kubeClient.List(ctx, nodeClaimList); err != nil { return nil, err } @@ -277,9 +277,9 @@ func (c *Controller) makeNodeClaimInstanceIDMap(ctx context.Context) (map[string // makeNodeInstanceIDMap builds a map between the instance id that is stored in the // node .spec.providerID and the node -func (c *Controller) makeNodeInstanceIDMap(ctx context.Context) (map[string]*v1.Node, error) { - m := map[string]*v1.Node{} - nodeList := &v1.NodeList{} +func (c *Controller) makeNodeInstanceIDMap(ctx context.Context) (map[string]*corev1.Node, error) { + m := map[string]*corev1.Node{} + nodeList := &corev1.NodeList{} if err := c.kubeClient.List(ctx, nodeList); err != nil { return nil, fmt.Errorf("listing nodes, %w", err) } diff --git a/pkg/controllers/interruption/events/events.go b/pkg/controllers/interruption/events/events.go index 90146e8ff7fa..fdcecabd0be0 100644 --- a/pkg/controllers/interruption/events/events.go +++ b/pkg/controllers/interruption/events/events.go @@ -15,16 +15,16 @@ limitations under the License. package events import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/events" ) -func SpotInterrupted(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts []events.Event) { +func SpotInterrupted(node *corev1.Node, nodeClaim *karpv1.NodeClaim) (evts []events.Event) { evts = append(evts, events.Event{ InvolvedObject: nodeClaim, - Type: v1.EventTypeWarning, + Type: corev1.EventTypeWarning, Reason: "SpotInterrupted", Message: "Spot interruption warning was triggered", DedupeValues: []string{string(nodeClaim.UID)}, @@ -32,7 +32,7 @@ func SpotInterrupted(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts []events if node != nil { evts = append(evts, events.Event{ InvolvedObject: node, - Type: v1.EventTypeWarning, + Type: corev1.EventTypeWarning, Reason: "SpotInterrupted", Message: "Spot interruption warning was triggered", DedupeValues: []string{string(node.UID)}, @@ -41,10 +41,10 @@ func SpotInterrupted(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts []events return evts } -func RebalanceRecommendation(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts []events.Event) { +func RebalanceRecommendation(node *corev1.Node, nodeClaim *karpv1.NodeClaim) (evts []events.Event) { evts = append(evts, events.Event{ InvolvedObject: nodeClaim, - Type: v1.EventTypeNormal, + Type: corev1.EventTypeNormal, Reason: "SpotRebalanceRecommendation", Message: "Spot rebalance recommendation was triggered", DedupeValues: []string{string(nodeClaim.UID)}, @@ -52,7 +52,7 @@ func RebalanceRecommendation(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts if node != nil { evts = append(evts, events.Event{ InvolvedObject: node, - Type: v1.EventTypeNormal, + Type: corev1.EventTypeNormal, Reason: "SpotRebalanceRecommendation", Message: "Spot rebalance recommendation was 
triggered", DedupeValues: []string{string(node.UID)}, @@ -61,10 +61,10 @@ func RebalanceRecommendation(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts return evts } -func Stopping(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts []events.Event) { +func Stopping(node *corev1.Node, nodeClaim *karpv1.NodeClaim) (evts []events.Event) { evts = append(evts, events.Event{ InvolvedObject: nodeClaim, - Type: v1.EventTypeWarning, + Type: corev1.EventTypeWarning, Reason: "InstanceStopping", Message: "Instance is stopping", DedupeValues: []string{string(nodeClaim.UID)}, @@ -72,7 +72,7 @@ func Stopping(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts []events.Event) if node != nil { evts = append(evts, events.Event{ InvolvedObject: node, - Type: v1.EventTypeWarning, + Type: corev1.EventTypeWarning, Reason: "InstanceStopping", Message: "Instance is stopping", DedupeValues: []string{string(node.UID)}, @@ -81,10 +81,10 @@ func Stopping(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts []events.Event) return evts } -func Terminating(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts []events.Event) { +func Terminating(node *corev1.Node, nodeClaim *karpv1.NodeClaim) (evts []events.Event) { evts = append(evts, events.Event{ InvolvedObject: nodeClaim, - Type: v1.EventTypeWarning, + Type: corev1.EventTypeWarning, Reason: "InstanceTerminating", Message: "Instance is terminating", DedupeValues: []string{string(nodeClaim.UID)}, @@ -92,7 +92,7 @@ func Terminating(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts []events.Eve if node != nil { evts = append(evts, events.Event{ InvolvedObject: node, - Type: v1.EventTypeWarning, + Type: corev1.EventTypeWarning, Reason: "InstanceTerminating", Message: "Instance is terminating", DedupeValues: []string{string(node.UID)}, @@ -101,10 +101,10 @@ func Terminating(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts []events.Eve return evts } -func Unhealthy(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts []events.Event) { +func Unhealthy(node *corev1.Node, nodeClaim *karpv1.NodeClaim) (evts []events.Event) { evts = append(evts, events.Event{ InvolvedObject: nodeClaim, - Type: v1.EventTypeWarning, + Type: corev1.EventTypeWarning, Reason: "InstanceUnhealthy", Message: "An unhealthy warning was triggered for the instance", DedupeValues: []string{string(nodeClaim.UID)}, @@ -112,7 +112,7 @@ func Unhealthy(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts []events.Event if node != nil { evts = append(evts, events.Event{ InvolvedObject: node, - Type: v1.EventTypeWarning, + Type: corev1.EventTypeWarning, Reason: "InstanceUnhealthy", Message: "An unhealthy warning was triggered for the instance", DedupeValues: []string{string(node.UID)}, @@ -121,10 +121,10 @@ func Unhealthy(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts []events.Event return evts } -func TerminatingOnInterruption(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evts []events.Event) { +func TerminatingOnInterruption(node *corev1.Node, nodeClaim *karpv1.NodeClaim) (evts []events.Event) { evts = append(evts, events.Event{ InvolvedObject: nodeClaim, - Type: v1.EventTypeWarning, + Type: corev1.EventTypeWarning, Reason: "TerminatingOnInterruption", Message: "Interruption triggered termination for the NodeClaim", DedupeValues: []string{string(nodeClaim.UID)}, @@ -132,7 +132,7 @@ func TerminatingOnInterruption(node *v1.Node, nodeClaim *v1beta1.NodeClaim) (evt if node != nil { evts = append(evts, events.Event{ InvolvedObject: node, - Type: v1.EventTypeWarning, + Type: corev1.EventTypeWarning, Reason: 
"TerminatingOnInterruption", Message: "Interruption triggered termination for the Node", DedupeValues: []string{string(node.UID)}, diff --git a/pkg/controllers/interruption/interruption_benchmark_test.go b/pkg/controllers/interruption/interruption_benchmark_test.go index a5aa33982e7d..534dcdc0b89f 100644 --- a/pkg/controllers/interruption/interruption_benchmark_test.go +++ b/pkg/controllers/interruption/interruption_benchmark_test.go @@ -36,14 +36,14 @@ import ( "github.com/samber/lo" "go.uber.org/multierr" "go.uber.org/zap" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/workqueue" clock "k8s.io/utils/clock/testing" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" awscache "github.com/aws/karpenter-provider-aws/pkg/cache" "github.com/aws/karpenter-provider-aws/pkg/controllers/interruption" @@ -232,7 +232,7 @@ func (p *providerSet) monitorMessagesProcessed(ctx context.Context, eventRecorde return done } -func provisionNodes(ctx context.Context, kubeClient client.Client, nodes []*v1.Node) error { +func provisionNodes(ctx context.Context, kubeClient client.Client, nodes []*corev1.Node) error { errs := make([]error, len(nodes)) workqueue.ParallelizeUntil(ctx, 20, len(nodes), func(i int) { if err := retry.Do(func() error { @@ -244,9 +244,9 @@ func provisionNodes(ctx context.Context, kubeClient client.Client, nodes []*v1.N return multierr.Combine(errs...) } -func makeDiverseMessagesAndNodes(count int) ([]interface{}, []*v1.Node) { +func makeDiverseMessagesAndNodes(count int) ([]interface{}, []*corev1.Node) { var messages []interface{} - var nodes []*v1.Node + var nodes []*corev1.Node newMessages, newNodes := makeScheduledChangeMessagesAndNodes(count / 3) messages = append(messages, newMessages...) 
@@ -265,16 +265,16 @@ func makeDiverseMessagesAndNodes(count int) ([]interface{}, []*v1.Node) { return messages, nodes } -func makeScheduledChangeMessagesAndNodes(count int) ([]interface{}, []*v1.Node) { +func makeScheduledChangeMessagesAndNodes(count int) ([]interface{}, []*corev1.Node) { var msgs []interface{} - var nodes []*v1.Node + var nodes []*corev1.Node for i := 0; i < count; i++ { instanceID := fake.InstanceID() msgs = append(msgs, scheduledChangeMessage(instanceID)) nodes = append(nodes, coretest.Node(coretest.NodeOptions{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - v1beta1.NodePoolLabelKey: "default", + karpv1.NodePoolLabelKey: "default", }, }, ProviderID: fake.ProviderID(instanceID), @@ -283,9 +283,9 @@ func makeScheduledChangeMessagesAndNodes(count int) ([]interface{}, []*v1.Node) return msgs, nodes } -func makeStateChangeMessagesAndNodes(count int, states []string) ([]interface{}, []*v1.Node) { +func makeStateChangeMessagesAndNodes(count int, states []string) ([]interface{}, []*corev1.Node) { var msgs []interface{} - var nodes []*v1.Node + var nodes []*corev1.Node for i := 0; i < count; i++ { state := states[r.Intn(len(states))] instanceID := fake.InstanceID() @@ -293,7 +293,7 @@ func makeStateChangeMessagesAndNodes(count int, states []string) ([]interface{}, nodes = append(nodes, coretest.Node(coretest.NodeOptions{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - v1beta1.NodePoolLabelKey: "default", + karpv1.NodePoolLabelKey: "default", }, }, ProviderID: fake.ProviderID(instanceID), @@ -302,16 +302,16 @@ func makeStateChangeMessagesAndNodes(count int, states []string) ([]interface{}, return msgs, nodes } -func makeSpotInterruptionMessagesAndNodes(count int) ([]interface{}, []*v1.Node) { +func makeSpotInterruptionMessagesAndNodes(count int) ([]interface{}, []*corev1.Node) { var msgs []interface{} - var nodes []*v1.Node + var nodes []*corev1.Node for i := 0; i < count; i++ { instanceID := fake.InstanceID() msgs = append(msgs, spotInterruptionMessage(instanceID)) nodes = append(nodes, coretest.Node(coretest.NodeOptions{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - v1beta1.NodePoolLabelKey: "default", + karpv1.NodePoolLabelKey: "default", }, }, ProviderID: fake.ProviderID(instanceID), diff --git a/pkg/controllers/interruption/suite_test.go b/pkg/controllers/interruption/suite_test.go index 0937343a7236..4f3ff299cb28 100644 --- a/pkg/controllers/interruption/suite_test.go +++ b/pkg/controllers/interruption/suite_test.go @@ -27,14 +27,14 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" servicesqs "github.com/aws/aws-sdk-go/service/sqs" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/client-go/tools/record" clock "k8s.io/utils/clock/testing" "sigs.k8s.io/controller-runtime/pkg/client" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/events" coreoptions "sigs.k8s.io/karpenter/pkg/operator/options" coretest "sigs.k8s.io/karpenter/pkg/test" @@ -100,16 +100,16 @@ var _ = AfterEach(func() { }) var _ = Describe("InterruptionHandling", func() { - var node *v1.Node - var nodeClaim *corev1beta1.NodeClaim + var node *corev1.Node + var nodeClaim *karpv1.NodeClaim BeforeEach(func() { - nodeClaim, node = coretest.NodeClaimAndNode(corev1beta1.NodeClaim{ + nodeClaim, node = coretest.NodeClaimAndNode(karpv1.NodeClaim{ ObjectMeta: metav1.ObjectMeta{ Labels: 
map[string]string{ - corev1beta1.NodePoolLabelKey: "default", + karpv1.NodePoolLabelKey: "default", }, }, - Status: corev1beta1.NodeClaimStatus{ + Status: karpv1.NodeClaimStatus{ ProviderID: fake.RandomProviderID(), }, }) @@ -134,17 +134,17 @@ var _ = Describe("InterruptionHandling", func() { Expect(sqsapi.DeleteMessageBehavior.SuccessfulCalls()).To(Equal(1)) }) It("should delete the NodeClaim when receiving a state change message", func() { - var nodeClaims []*corev1beta1.NodeClaim + var nodeClaims []*karpv1.NodeClaim var messages []interface{} for _, state := range []string{"terminated", "stopped", "stopping", "shutting-down"} { instanceID := fake.InstanceID() - nc, n := coretest.NodeClaimAndNode(corev1beta1.NodeClaim{ + nc, n := coretest.NodeClaimAndNode(karpv1.NodeClaim{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - corev1beta1.NodePoolLabelKey: "default", + karpv1.NodePoolLabelKey: "default", }, }, - Status: corev1beta1.NodeClaimStatus{ + Status: karpv1.NodeClaimStatus{ ProviderID: fake.ProviderID(instanceID), }, }) @@ -155,21 +155,21 @@ var _ = Describe("InterruptionHandling", func() { ExpectMessagesCreated(messages...) ExpectSingletonReconciled(ctx, controller) Expect(sqsapi.ReceiveMessageBehavior.SuccessfulCalls()).To(Equal(1)) - ExpectNotFound(ctx, env.Client, lo.Map(nodeClaims, func(nc *corev1beta1.NodeClaim, _ int) client.Object { return nc })...) + ExpectNotFound(ctx, env.Client, lo.Map(nodeClaims, func(nc *karpv1.NodeClaim, _ int) client.Object { return nc })...) Expect(sqsapi.DeleteMessageBehavior.SuccessfulCalls()).To(Equal(4)) }) It("should handle multiple messages that cause nodeClaim deletion", func() { - var nodeClaims []*corev1beta1.NodeClaim + var nodeClaims []*karpv1.NodeClaim var instanceIDs []string for i := 0; i < 100; i++ { instanceID := fake.InstanceID() - nc, n := coretest.NodeClaimAndNode(corev1beta1.NodeClaim{ + nc, n := coretest.NodeClaimAndNode(karpv1.NodeClaim{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - corev1beta1.NodePoolLabelKey: "default", + karpv1.NodePoolLabelKey: "default", }, }, - Status: corev1beta1.NodeClaimStatus{ + Status: karpv1.NodeClaimStatus{ ProviderID: fake.ProviderID(instanceID), }, }) @@ -185,7 +185,7 @@ var _ = Describe("InterruptionHandling", func() { ExpectMessagesCreated(messages...) ExpectSingletonReconciled(ctx, controller) Expect(sqsapi.ReceiveMessageBehavior.SuccessfulCalls()).To(Equal(1)) - ExpectNotFound(ctx, env.Client, lo.Map(nodeClaims, func(nc *corev1beta1.NodeClaim, _ int) client.Object { return nc })...) + ExpectNotFound(ctx, env.Client, lo.Map(nodeClaims, func(nc *karpv1.NodeClaim, _ int) client.Object { return nc })...) 
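// Reviewer note (not part of the patch): these assertions pin down the interruption
// controller's contract — one SQS poll drains the queue, every instance-state or
// spot-interruption message whose instance ID matches a NodeClaim's provider ID
// deletes that NodeClaim, and each handled message is then deleted from the queue.
// A minimal sketch of the matching step, with hypothetical names for the parts the
// diff does not show:
//
//	nodeClaim, found := lo.Find(nodeClaimList.Items, func(nc karpv1.NodeClaim) bool {
//		id, err := utils.ParseInstanceID(nc.Status.ProviderID) // real helper, used in this suite
//		return err == nil && id == msgInstanceID               // msgInstanceID: hypothetical
//	})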
Expect(sqsapi.DeleteMessageBehavior.SuccessfulCalls()).To(Equal(100)) }) It("should delete a message when the message can't be parsed", func() { @@ -214,9 +214,9 @@ var _ = Describe("InterruptionHandling", func() { }) It("should mark the ICE cache for the offering when getting a spot interruption warning", func() { nodeClaim.Labels = lo.Assign(nodeClaim.Labels, map[string]string{ - v1.LabelTopologyZone: "coretest-zone-1a", - v1.LabelInstanceTypeStable: "t3.large", - corev1beta1.CapacityTypeLabelKey: corev1beta1.CapacityTypeSpot, + corev1.LabelTopologyZone: "coretest-zone-1a", + corev1.LabelInstanceTypeStable: "t3.large", + karpv1.CapacityTypeLabelKey: karpv1.CapacityTypeSpot, }) ExpectMessagesCreated(spotInterruptionMessage(lo.Must(utils.ParseInstanceID(nodeClaim.Status.ProviderID)))) ExpectApplied(ctx, env.Client, nodeClaim, node) @@ -227,7 +227,7 @@ var _ = Describe("InterruptionHandling", func() { Expect(sqsapi.DeleteMessageBehavior.SuccessfulCalls()).To(Equal(1)) // Expect a t3.large in coretest-zone-1a to be added to the ICE cache - Expect(unavailableOfferingsCache.IsUnavailable("t3.large", "coretest-zone-1a", corev1beta1.CapacityTypeSpot)).To(BeTrue()) + Expect(unavailableOfferingsCache.IsUnavailable("t3.large", "coretest-zone-1a", karpv1.CapacityTypeSpot)).To(BeTrue()) }) }) }) diff --git a/pkg/controllers/nodeclaim/garbagecollection/controller.go b/pkg/controllers/nodeclaim/garbagecollection/controller.go index 04b27aa88af5..aeb3c70fa0e9 100644 --- a/pkg/controllers/nodeclaim/garbagecollection/controller.go +++ b/pkg/controllers/nodeclaim/garbagecollection/controller.go @@ -22,7 +22,7 @@ import ( "github.com/awslabs/operatorpkg/singleton" "github.com/samber/lo" "go.uber.org/multierr" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/karpenter/pkg/cloudprovider" "sigs.k8s.io/karpenter/pkg/operator/injection" - "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" ) type Controller struct { @@ -61,18 +61,18 @@ func (c *Controller) Reconcile(ctx context.Context) (reconcile.Result, error) { if err != nil { return reconcile.Result{}, fmt.Errorf("listing cloudprovider machines, %w", err) } - managedRetrieved := lo.Filter(retrieved, func(nc *v1beta1.NodeClaim, _ int) bool { - return nc.Annotations[v1beta1.ManagedByAnnotationKey] != "" && nc.DeletionTimestamp.IsZero() + managedRetrieved := lo.Filter(retrieved, func(nc *karpv1.NodeClaim, _ int) bool { + return nc.Annotations[karpv1.ManagedByAnnotationKey] != "" && nc.DeletionTimestamp.IsZero() }) - nodeClaimList := &v1beta1.NodeClaimList{} + nodeClaimList := &karpv1.NodeClaimList{} if err = c.kubeClient.List(ctx, nodeClaimList); err != nil { return reconcile.Result{}, err } - nodeList := &v1.NodeList{} + nodeList := &corev1.NodeList{} if err = c.kubeClient.List(ctx, nodeList); err != nil { return reconcile.Result{}, err } - resolvedProviderIDs := sets.New[string](lo.FilterMap(nodeClaimList.Items, func(n v1beta1.NodeClaim, _ int) (string, bool) { + resolvedProviderIDs := sets.New[string](lo.FilterMap(nodeClaimList.Items, func(n karpv1.NodeClaim, _ int) (string, bool) { return n.Status.ProviderID, n.Status.ProviderID != "" })...) 
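// Reviewer note (not part of the patch): the lists built above reduce garbage
// collection to a set difference — an instance is an orphan when Karpenter manages
// it (managed-by tag present, no deletion timestamp) but no NodeClaim in the
// cluster still records its provider ID. Sketched with the variables already in
// scope in this function:
//
//	orphans := lo.Filter(managedRetrieved, func(nc *karpv1.NodeClaim, _ int) bool {
//		return !resolvedProviderIDs.Has(nc.Status.ProviderID)
//	})
//
// Each orphan is handed to garbageCollect below, which deletes the cloud instance
// and, if a Node with the same provider ID exists, deletes that Node as well so
// scheduling recovers quickly.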
errs := make([]error, len(retrieved)) @@ -89,7 +89,7 @@ func (c *Controller) Reconcile(ctx context.Context) (reconcile.Result, error) { return reconcile.Result{RequeueAfter: lo.Ternary(c.successfulCount <= 20, time.Second*10, time.Minute*2)}, nil } -func (c *Controller) garbageCollect(ctx context.Context, nodeClaim *v1beta1.NodeClaim, nodeList *v1.NodeList) error { +func (c *Controller) garbageCollect(ctx context.Context, nodeClaim *karpv1.NodeClaim, nodeList *corev1.NodeList) error { ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("provider-id", nodeClaim.Status.ProviderID)) if err := c.cloudProvider.Delete(ctx, nodeClaim); err != nil { return cloudprovider.IgnoreNodeClaimNotFoundError(err) @@ -97,7 +97,7 @@ func (c *Controller) garbageCollect(ctx context.Context, nodeClaim *v1beta1.Node log.FromContext(ctx).V(1).Info("garbage collected cloudprovider instance") // Go ahead and cleanup the node if we know that it exists to make scheduling go quicker - if node, ok := lo.Find(nodeList.Items, func(n v1.Node) bool { + if node, ok := lo.Find(nodeList.Items, func(n corev1.Node) bool { return n.Spec.ProviderID == nodeClaim.Status.ProviderID }); ok { if err := c.kubeClient.Delete(ctx, &node); err != nil { diff --git a/pkg/controllers/nodeclaim/garbagecollection/suite_test.go b/pkg/controllers/nodeclaim/garbagecollection/suite_test.go index 74d950bb60b0..a6eca8cf7138 100644 --- a/pkg/controllers/nodeclaim/garbagecollection/suite_test.go +++ b/pkg/controllers/nodeclaim/garbagecollection/suite_test.go @@ -27,15 +27,15 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/awslabs/operatorpkg/object" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" - corecloudprovider "sigs.k8s.io/karpenter/pkg/cloudprovider" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" + karpcloudprovider "sigs.k8s.io/karpenter/pkg/cloudprovider" "sigs.k8s.io/karpenter/pkg/events" coretest "sigs.k8s.io/karpenter/pkg/test" "github.com/aws/karpenter-provider-aws/pkg/apis" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/cloudprovider" "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclaim/garbagecollection" "github.com/aws/karpenter-provider-aws/pkg/fake" @@ -79,21 +79,21 @@ var _ = BeforeEach(func() { var _ = Describe("GarbageCollection", func() { var instance *ec2.Instance - var nodeClass *v1beta1.EC2NodeClass + var nodeClass *v1.EC2NodeClass var providerID string BeforeEach(func() { instanceID := fake.InstanceID() providerID = fake.ProviderID(instanceID) nodeClass = test.EC2NodeClass() - nodePool := coretest.NodePool(corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + nodePool := coretest.NodePool(karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }, @@ -109,15 +109,15 @@ var _ = Describe("GarbageCollection", func() { Value: aws.String("owned"), }, { - Key: aws.String(corev1beta1.NodePoolLabelKey), + Key: 
aws.String(karpv1.NodePoolLabelKey), Value: aws.String(nodePool.Name), }, { - Key: aws.String(v1beta1.LabelNodeClass), + Key: aws.String(v1.LabelNodeClass), Value: aws.String(nodeClass.Name), }, { - Key: aws.String(corev1beta1.ManagedByAnnotationKey), + Key: aws.String(karpv1.ManagedByAnnotationKey), Value: aws.String(options.FromContext(ctx).ClusterName), }, }, @@ -141,7 +141,7 @@ var _ = Describe("GarbageCollection", func() { ExpectSingletonReconciled(ctx, garbageCollectionController) _, err := cloudProvider.Get(ctx, providerID) Expect(err).To(HaveOccurred()) - Expect(corecloudprovider.IsNodeClaimNotFoundError(err)).To(BeTrue()) + Expect(karpcloudprovider.IsNodeClaimNotFoundError(err)).To(BeTrue()) }) It("should delete an instance along with the node if there is no NodeClaim owner (to quicken scheduling)", func() { // Launch time was 1m ago @@ -156,7 +156,7 @@ var _ = Describe("GarbageCollection", func() { ExpectSingletonReconciled(ctx, garbageCollectionController) _, err := cloudProvider.Get(ctx, providerID) Expect(err).To(HaveOccurred()) - Expect(corecloudprovider.IsNodeClaimNotFoundError(err)).To(BeTrue()) + Expect(karpcloudprovider.IsNodeClaimNotFoundError(err)).To(BeTrue()) ExpectNotFound(ctx, env.Client, node) }) @@ -177,15 +177,15 @@ var _ = Describe("GarbageCollection", func() { Value: aws.String("owned"), }, { - Key: aws.String(corev1beta1.NodePoolLabelKey), + Key: aws.String(karpv1.NodePoolLabelKey), Value: aws.String("default"), }, { - Key: aws.String(v1beta1.LabelNodeClass), + Key: aws.String(v1.LabelNodeClass), Value: aws.String("default"), }, { - Key: aws.String(corev1beta1.ManagedByAnnotationKey), + Key: aws.String(karpv1.ManagedByAnnotationKey), Value: aws.String(options.FromContext(ctx).ClusterName), }, }, @@ -212,7 +212,7 @@ var _ = Describe("GarbageCollection", func() { _, err := cloudProvider.Get(ctx, fake.ProviderID(id)) Expect(err).To(HaveOccurred()) - Expect(corecloudprovider.IsNodeClaimNotFoundError(err)).To(BeTrue()) + Expect(karpcloudprovider.IsNodeClaimNotFoundError(err)).To(BeTrue()) }(id) } wg.Wait() @@ -220,7 +220,7 @@ var _ = Describe("GarbageCollection", func() { It("should not delete all instances if they all have NodeClaim owners", func() { // Generate 100 instances that have different instanceIDs var ids []string - var nodeClaims []*corev1beta1.NodeClaim + var nodeClaims []*karpv1.NodeClaim for i := 0; i < 100; i++ { instanceID := fake.InstanceID() awsEnv.EC2API.Instances.Store( @@ -245,15 +245,15 @@ var _ = Describe("GarbageCollection", func() { InstanceType: aws.String("m5.large"), }, ) - nodeClaim := coretest.NodeClaim(corev1beta1.NodeClaim{ - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + nodeClaim := coretest.NodeClaim(karpv1.NodeClaim{ + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, - Status: corev1beta1.NodeClaimStatus{ + Status: karpv1.NodeClaimStatus{ ProviderID: fake.ProviderID(instanceID), }, }) @@ -292,7 +292,7 @@ var _ = Describe("GarbageCollection", func() { It("should not delete an instance if it was not launched by a NodeClaim", func() { // Remove the "karpenter.sh/managed-by" tag (this isn't launched by a machine) instance.Tags = lo.Reject(instance.Tags, func(t *ec2.Tag, _ int) bool { - return aws.StringValue(t.Key) == corev1beta1.ManagedByAnnotationKey 
+ return aws.StringValue(t.Key) == karpv1.ManagedByAnnotationKey }) // Launch time was 1m ago @@ -308,15 +308,15 @@ var _ = Describe("GarbageCollection", func() { instance.LaunchTime = aws.Time(time.Now().Add(-time.Minute)) awsEnv.EC2API.Instances.Store(aws.StringValue(instance.InstanceId), instance) - nodeClaim := coretest.NodeClaim(corev1beta1.NodeClaim{ - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + nodeClaim := coretest.NodeClaim(karpv1.NodeClaim{ + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, - Status: corev1beta1.NodeClaimStatus{ + Status: karpv1.NodeClaimStatus{ ProviderID: providerID, }, }) @@ -333,7 +333,7 @@ var _ = Describe("GarbageCollection", func() { It("should not delete many instances or nodes if they already have NodeClaim owners that match it", func() { // Generate 100 instances that have different instanceIDs that have NodeClaims var ids []string - var nodes []*v1.Node + var nodes []*corev1.Node for i := 0; i < 100; i++ { instanceID := fake.InstanceID() awsEnv.EC2API.Instances.Store( @@ -348,11 +348,11 @@ var _ = Describe("GarbageCollection", func() { Value: aws.String("owned"), }, { - Key: aws.String(corev1beta1.NodePoolLabelKey), + Key: aws.String(karpv1.NodePoolLabelKey), Value: aws.String("default"), }, { - Key: aws.String(corev1beta1.ManagedByAnnotationKey), + Key: aws.String(karpv1.ManagedByAnnotationKey), Value: aws.String(options.FromContext(ctx).ClusterName), }, }, @@ -366,15 +366,15 @@ var _ = Describe("GarbageCollection", func() { InstanceType: aws.String("m5.large"), }, ) - nodeClaim := coretest.NodeClaim(corev1beta1.NodeClaim{ - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + nodeClaim := coretest.NodeClaim(karpv1.NodeClaim{ + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, - Status: corev1beta1.NodeClaimStatus{ + Status: karpv1.NodeClaimStatus{ ProviderID: fake.ProviderID(instanceID), }, }) @@ -390,7 +390,7 @@ var _ = Describe("GarbageCollection", func() { wg := sync.WaitGroup{} for i := range ids { wg.Add(1) - go func(id string, node *v1.Node) { + go func(id string, node *corev1.Node) { defer GinkgoRecover() defer wg.Done() diff --git a/pkg/controllers/nodeclaim/tagging/controller.go b/pkg/controllers/nodeclaim/tagging/controller.go index 6e9d4519d898..d11d97442c5b 100644 --- a/pkg/controllers/nodeclaim/tagging/controller.go +++ b/pkg/controllers/nodeclaim/tagging/controller.go @@ -33,11 +33,11 @@ import ( "github.com/awslabs/operatorpkg/reasonable" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/instance" "github.com/aws/karpenter-provider-aws/pkg/utils" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/cloudprovider" ) @@ -53,7 +53,7 @@ func NewController(kubeClient client.Client, instanceProvider instance.Provider) } } -func (c *Controller) Reconcile(ctx context.Context, nodeClaim 
*corev1beta1.NodeClaim) (reconcile.Result, error) { +func (c *Controller) Reconcile(ctx context.Context, nodeClaim *karpv1.NodeClaim) (reconcile.Result, error) { ctx = injection.WithControllerName(ctx, "nodeclaim.tagging") stored := nodeClaim.DeepCopy() @@ -70,7 +70,7 @@ func (c *Controller) Reconcile(ctx context.Context, nodeClaim *corev1beta1.NodeC if err = c.tagInstance(ctx, nodeClaim, id); err != nil { return reconcile.Result{}, cloudprovider.IgnoreNodeClaimNotFoundError(err) } - nodeClaim.Annotations = lo.Assign(nodeClaim.Annotations, map[string]string{v1beta1.AnnotationInstanceTagged: "true"}) + nodeClaim.Annotations = lo.Assign(nodeClaim.Annotations, map[string]string{v1.AnnotationInstanceTagged: "true"}) if !equality.Semantic.DeepEqual(nodeClaim, stored) { if err := c.kubeClient.Patch(ctx, nodeClaim, client.MergeFrom(stored)); err != nil { return reconcile.Result{}, client.IgnoreNotFound(err) @@ -82,9 +82,9 @@ func (c *Controller) Reconcile(ctx context.Context, nodeClaim *corev1beta1.NodeC func (c *Controller) Register(_ context.Context, m manager.Manager) error { return controllerruntime.NewControllerManagedBy(m). Named("nodeclaim.tagging"). - For(&corev1beta1.NodeClaim{}). + For(&karpv1.NodeClaim{}). WithEventFilter(predicate.NewPredicateFuncs(func(o client.Object) bool { - return isTaggable(o.(*corev1beta1.NodeClaim)) + return isTaggable(o.(*karpv1.NodeClaim)) })). // Ok with using the default MaxConcurrentReconciles of 1 to avoid throttling from CreateTag write API WithOptions(controller.Options{ @@ -93,10 +93,10 @@ func (c *Controller) Register(_ context.Context, m manager.Manager) error { Complete(reconcile.AsReconciler(m.GetClient(), c)) } -func (c *Controller) tagInstance(ctx context.Context, nc *corev1beta1.NodeClaim, id string) error { +func (c *Controller) tagInstance(ctx context.Context, nc *karpv1.NodeClaim, id string) error { tags := map[string]string{ - v1beta1.TagName: nc.Status.NodeName, - v1beta1.TagNodeClaim: nc.Name, + v1.TagName: nc.Status.NodeName, + v1.TagNodeClaim: nc.Name, } // Remove tags which have been already populated @@ -118,9 +118,9 @@ func (c *Controller) tagInstance(ctx context.Context, nc *corev1beta1.NodeClaim, return nil } -func isTaggable(nc *corev1beta1.NodeClaim) bool { +func isTaggable(nc *karpv1.NodeClaim) bool { // Instance has already been tagged - if val := nc.Annotations[v1beta1.AnnotationInstanceTagged]; val == "true" { + if val := nc.Annotations[v1.AnnotationInstanceTagged]; val == "true" { return false } // Node name is not yet known diff --git a/pkg/controllers/nodeclaim/tagging/suite_test.go b/pkg/controllers/nodeclaim/tagging/suite_test.go index 0ca930cae630..56690f4c2923 100644 --- a/pkg/controllers/nodeclaim/tagging/suite_test.go +++ b/pkg/controllers/nodeclaim/tagging/suite_test.go @@ -24,12 +24,12 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/samber/lo" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + corev1 "k8s.io/apimachinery/pkg/apis/meta/v1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" coretest "sigs.k8s.io/karpenter/pkg/test" "github.com/aws/karpenter-provider-aws/pkg/apis" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclaim/tagging" "github.com/aws/karpenter-provider-aws/pkg/fake" "github.com/aws/karpenter-provider-aws/pkg/operator/options" @@ -88,11 +88,11 @@ var _ = 
Describe("TaggingController", func() { Value: aws.String("owned"), }, { - Key: aws.String(corev1beta1.NodePoolLabelKey), + Key: aws.String(karpv1.NodePoolLabelKey), Value: aws.String("default"), }, { - Key: aws.String(corev1beta1.ManagedByAnnotationKey), + Key: aws.String(karpv1.ManagedByAnnotationKey), Value: aws.String(options.FromContext(ctx).ClusterName), }, }, @@ -108,23 +108,23 @@ var _ = Describe("TaggingController", func() { }) It("shouldn't tag instances without a Node", func() { - nodeClaim := coretest.NodeClaim(corev1beta1.NodeClaim{ - Status: corev1beta1.NodeClaimStatus{ + nodeClaim := coretest.NodeClaim(karpv1.NodeClaim{ + Status: karpv1.NodeClaimStatus{ ProviderID: fake.ProviderID(*ec2Instance.InstanceId), }, }) ExpectApplied(ctx, env.Client, nodeClaim) ExpectObjectReconciled(ctx, env.Client, taggingController, nodeClaim) - Expect(nodeClaim.Annotations).To(Not(HaveKey(v1beta1.AnnotationInstanceTagged))) + Expect(nodeClaim.Annotations).To(Not(HaveKey(v1.AnnotationInstanceTagged))) Expect(lo.ContainsBy(ec2Instance.Tags, func(tag *ec2.Tag) bool { - return *tag.Key == v1beta1.TagName + return *tag.Key == v1.TagName })).To(BeFalse()) }) It("shouldn't tag nodeclaim with a malformed provderID", func() { - nodeClaim := coretest.NodeClaim(corev1beta1.NodeClaim{ - Status: corev1beta1.NodeClaimStatus{ + nodeClaim := coretest.NodeClaim(karpv1.NodeClaim{ + Status: karpv1.NodeClaimStatus{ ProviderID: "Bad providerID", NodeName: "default", }, @@ -132,15 +132,15 @@ var _ = Describe("TaggingController", func() { ExpectApplied(ctx, env.Client, nodeClaim) ExpectObjectReconciled(ctx, env.Client, taggingController, nodeClaim) - Expect(nodeClaim.Annotations).To(Not(HaveKey(v1beta1.AnnotationInstanceTagged))) + Expect(nodeClaim.Annotations).To(Not(HaveKey(v1.AnnotationInstanceTagged))) Expect(lo.ContainsBy(ec2Instance.Tags, func(tag *ec2.Tag) bool { - return *tag.Key == v1beta1.TagName + return *tag.Key == v1.TagName })).To(BeFalse()) }) It("should gracefully handle missing NodeClaim", func() { - nodeClaim := coretest.NodeClaim(corev1beta1.NodeClaim{ - Status: corev1beta1.NodeClaimStatus{ + nodeClaim := coretest.NodeClaim(karpv1.NodeClaim{ + Status: karpv1.NodeClaimStatus{ ProviderID: fake.ProviderID(*ec2Instance.InstanceId), NodeName: "default", }, @@ -152,8 +152,8 @@ var _ = Describe("TaggingController", func() { }) It("should gracefully handle missing instance", func() { - nodeClaim := coretest.NodeClaim(corev1beta1.NodeClaim{ - Status: corev1beta1.NodeClaimStatus{ + nodeClaim := coretest.NodeClaim(karpv1.NodeClaim{ + Status: karpv1.NodeClaimStatus{ ProviderID: fake.ProviderID(*ec2Instance.InstanceId), NodeName: "default", }, @@ -162,16 +162,16 @@ var _ = Describe("TaggingController", func() { ExpectApplied(ctx, env.Client, nodeClaim) awsEnv.EC2API.Instances.Delete(*ec2Instance.InstanceId) ExpectObjectReconciled(ctx, env.Client, taggingController, nodeClaim) - Expect(nodeClaim.Annotations).To(Not(HaveKey(v1beta1.AnnotationInstanceTagged))) + Expect(nodeClaim.Annotations).To(Not(HaveKey(v1.AnnotationInstanceTagged))) }) It("shouldn't tag nodeclaim with deletion timestamp set", func() { - nodeClaim := coretest.NodeClaim(corev1beta1.NodeClaim{ - Status: corev1beta1.NodeClaimStatus{ + nodeClaim := coretest.NodeClaim(karpv1.NodeClaim{ + Status: karpv1.NodeClaimStatus{ ProviderID: fake.ProviderID(*ec2Instance.InstanceId), NodeName: "default", }, - ObjectMeta: v1.ObjectMeta{ + ObjectMeta: corev1.ObjectMeta{ Finalizers: []string{"testing/finalizer"}, }, }) @@ -179,17 +179,17 @@ var _ = 
Describe("TaggingController", func() { ExpectApplied(ctx, env.Client, nodeClaim) Expect(env.Client.Delete(ctx, nodeClaim)).To(Succeed()) ExpectObjectReconciled(ctx, env.Client, taggingController, nodeClaim) - Expect(nodeClaim.Annotations).To(Not(HaveKey(v1beta1.AnnotationInstanceTagged))) + Expect(nodeClaim.Annotations).To(Not(HaveKey(v1.AnnotationInstanceTagged))) Expect(lo.ContainsBy(ec2Instance.Tags, func(tag *ec2.Tag) bool { - return *tag.Key == v1beta1.TagName + return *tag.Key == v1.TagName })).To(BeFalse()) }) DescribeTable( "should tag taggable instances", func(customTags ...string) { - nodeClaim := coretest.NodeClaim(corev1beta1.NodeClaim{ - Status: corev1beta1.NodeClaimStatus{ + nodeClaim := coretest.NodeClaim(karpv1.NodeClaim{ + Status: karpv1.NodeClaimStatus{ ProviderID: fake.ProviderID(*ec2Instance.InstanceId), NodeName: "default", }, @@ -206,11 +206,11 @@ var _ = Describe("TaggingController", func() { ExpectApplied(ctx, env.Client, nodeClaim) ExpectObjectReconciled(ctx, env.Client, taggingController, nodeClaim) nodeClaim = ExpectExists(ctx, env.Client, nodeClaim) - Expect(nodeClaim.Annotations).To(HaveKey(v1beta1.AnnotationInstanceTagged)) + Expect(nodeClaim.Annotations).To(HaveKey(v1.AnnotationInstanceTagged)) expectedTags := map[string]string{ - v1beta1.TagName: nodeClaim.Status.NodeName, - v1beta1.TagNodeClaim: nodeClaim.Name, + v1.TagName: nodeClaim.Status.NodeName, + v1.TagNodeClaim: nodeClaim.Name, } instanceTags := instance.NewInstance(ec2Instance).Tags for tag, value := range expectedTags { @@ -220,9 +220,9 @@ var _ = Describe("TaggingController", func() { Expect(instanceTags).To(HaveKeyWithValue(tag, value)) } }, - Entry("with only karpenter.k8s.aws/nodeclaim tag", v1beta1.TagName), - Entry("with only Name tag", v1beta1.TagNodeClaim), + Entry("with only karpenter.k8s.aws/nodeclaim tag", v1.TagName), + Entry("with only Name tag", v1.TagNodeClaim), Entry("with both Name and karpenter.k8s.aws/nodeclaim tags"), - Entry("with nothing to tag", v1beta1.TagName, v1beta1.TagNodeClaim), + Entry("with nothing to tag", v1.TagName, v1.TagNodeClaim), ) }) diff --git a/pkg/controllers/nodeclass/hash/controller.go b/pkg/controllers/nodeclass/hash/controller.go index 711b8572b939..b3a87cce597c 100644 --- a/pkg/controllers/nodeclass/hash/controller.go +++ b/pkg/controllers/nodeclass/hash/controller.go @@ -28,9 +28,9 @@ import ( "sigs.k8s.io/karpenter/pkg/operator/injection" "github.com/awslabs/operatorpkg/reasonable" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" ) type Controller struct { @@ -43,19 +43,19 @@ func NewController(kubeClient client.Client) *Controller { } } -func (c *Controller) Reconcile(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) (reconcile.Result, error) { +func (c *Controller) Reconcile(ctx context.Context, nodeClass *v1.EC2NodeClass) (reconcile.Result, error) { ctx = injection.WithControllerName(ctx, "nodeclass.hash") stored := nodeClass.DeepCopy() - if nodeClass.Annotations[v1beta1.AnnotationEC2NodeClassHashVersion] != v1beta1.EC2NodeClassHashVersion { + if nodeClass.Annotations[v1.AnnotationEC2NodeClassHashVersion] != v1.EC2NodeClassHashVersion { if err := c.updateNodeClaimHash(ctx, nodeClass); err != nil { return reconcile.Result{}, err } } nodeClass.Annotations = lo.Assign(nodeClass.Annotations, map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: nodeClass.Hash(), - 
v1beta1.AnnotationEC2NodeClassHashVersion: v1beta1.EC2NodeClassHashVersion, + v1.AnnotationEC2NodeClassHash: nodeClass.Hash(), + v1.AnnotationEC2NodeClassHashVersion: v1.EC2NodeClassHashVersion, }) if !equality.Semantic.DeepEqual(stored, nodeClass) { @@ -70,7 +70,7 @@ func (c *Controller) Reconcile(ctx context.Context, nodeClass *v1beta1.EC2NodeCl func (c *Controller) Register(_ context.Context, m manager.Manager) error { return controllerruntime.NewControllerManagedBy(m). Named("nodeclass.hash"). - For(&v1beta1.EC2NodeClass{}). + For(&v1.EC2NodeClass{}). WithOptions(controller.Options{ RateLimiter: reasonable.RateLimiter(), MaxConcurrentReconciles: 10, @@ -82,8 +82,8 @@ func (c *Controller) Register(_ context.Context, m manager.Manager) error { // `ec2nodeclass-hash` annotation on the EC2NodeClass will be updated, due to the breaking change, making the `ec2nodeclass-hash` on the NodeClaim different from // EC2NodeClass. Since, we cannot rely on the `ec2nodeclass-hash` on the NodeClaims, due to the breaking change, we will need to re-calculate the hash and update the annotation. // For more information on the Drift Hash Versioning: https://github.com/kubernetes-sigs/karpenter/blob/main/designs/drift-hash-versioning.md -func (c *Controller) updateNodeClaimHash(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) error { - ncList := &corev1beta1.NodeClaimList{} +func (c *Controller) updateNodeClaimHash(ctx context.Context, nodeClass *v1.EC2NodeClass) error { + ncList := &karpv1.NodeClaimList{} if err := c.kubeClient.List(ctx, ncList, client.MatchingFields{"spec.nodeClassRef.name": nodeClass.Name}); err != nil { return err } @@ -93,16 +93,16 @@ func (c *Controller) updateNodeClaimHash(ctx context.Context, nodeClass *v1beta1 nc := ncList.Items[i] stored := nc.DeepCopy() - if nc.Annotations[v1beta1.AnnotationEC2NodeClassHashVersion] != v1beta1.EC2NodeClassHashVersion { + if nc.Annotations[v1.AnnotationEC2NodeClassHashVersion] != v1.EC2NodeClassHashVersion { nc.Annotations = lo.Assign(nc.Annotations, map[string]string{ - v1beta1.AnnotationEC2NodeClassHashVersion: v1beta1.EC2NodeClassHashVersion, + v1.AnnotationEC2NodeClassHashVersion: v1.EC2NodeClassHashVersion, }) // Any NodeClaim that is already drifted will remain drifted if the karpenter.k8s.aws/nodepool-hash-version doesn't match // Since the hashing mechanism has changed we will not be able to determine if the drifted status of the NodeClaim has changed - if nc.StatusConditions().Get(corev1beta1.ConditionTypeDrifted) == nil { + if nc.StatusConditions().Get(karpv1.ConditionTypeDrifted) == nil { nc.Annotations = lo.Assign(nc.Annotations, map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: nodeClass.Hash(), + v1.AnnotationEC2NodeClassHash: nodeClass.Hash(), }) } diff --git a/pkg/controllers/nodeclass/hash/suite_test.go b/pkg/controllers/nodeclass/hash/suite_test.go index 3d6ebc8ca738..243d40fdbd99 100644 --- a/pkg/controllers/nodeclass/hash/suite_test.go +++ b/pkg/controllers/nodeclass/hash/suite_test.go @@ -24,12 +24,12 @@ import ( "github.com/awslabs/operatorpkg/object" "github.com/imdario/mergo" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" coreoptions "sigs.k8s.io/karpenter/pkg/operator/options" coretest "sigs.k8s.io/karpenter/pkg/test" "github.com/aws/karpenter-provider-aws/pkg/apis" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" 
"github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass/hash" "github.com/aws/karpenter-provider-aws/pkg/operator/options" "github.com/aws/karpenter-provider-aws/pkg/test" @@ -74,21 +74,21 @@ var _ = AfterEach(func() { }) var _ = Describe("NodeClass Hash Controller", func() { - var nodeClass *v1beta1.EC2NodeClass + var nodeClass *v1.EC2NodeClass BeforeEach(func() { - nodeClass = test.EC2NodeClass(v1beta1.EC2NodeClass{ - Spec: v1beta1.EC2NodeClassSpec{ - SubnetSelectorTerms: []v1beta1.SubnetSelectorTerm{ + nodeClass = test.EC2NodeClass(v1.EC2NodeClass{ + Spec: v1.EC2NodeClassSpec{ + SubnetSelectorTerms: []v1.SubnetSelectorTerm{ { Tags: map[string]string{"*": "*"}, }, }, - SecurityGroupSelectorTerms: []v1beta1.SecurityGroupSelectorTerm{ + SecurityGroupSelectorTerms: []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"*": "*"}, }, }, - AMISelectorTerms: []v1beta1.AMISelectorTerm{ + AMISelectorTerms: []v1.AMISelectorTerm{ { Tags: map[string]string{"*": "*"}, }, @@ -96,13 +96,13 @@ var _ = Describe("NodeClass Hash Controller", func() { }, }) }) - DescribeTable("should update the drift hash when static field is updated", func(changes *v1beta1.EC2NodeClass) { + DescribeTable("should update the drift hash when static field is updated", func(changes *v1.EC2NodeClass) { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, hashController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) expectedHash := nodeClass.Hash() - Expect(nodeClass.ObjectMeta.Annotations[v1beta1.AnnotationEC2NodeClassHash]).To(Equal(expectedHash)) + Expect(nodeClass.ObjectMeta.Annotations[v1.AnnotationEC2NodeClassHash]).To(Equal(expectedHash)) Expect(mergo.Merge(nodeClass, changes, mergo.WithOverride)).To(Succeed()) @@ -111,17 +111,17 @@ var _ = Describe("NodeClass Hash Controller", func() { nodeClass = ExpectExists(ctx, env.Client, nodeClass) expectedHashTwo := nodeClass.Hash() - Expect(nodeClass.Annotations[v1beta1.AnnotationEC2NodeClassHash]).To(Equal(expectedHashTwo)) + Expect(nodeClass.Annotations[v1.AnnotationEC2NodeClassHash]).To(Equal(expectedHashTwo)) Expect(expectedHash).ToNot(Equal(expectedHashTwo)) }, - Entry("AMIFamily Drift", &v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{AMIFamily: aws.String(v1beta1.AMIFamilyBottlerocket)}}), - Entry("UserData Drift", &v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{UserData: aws.String("userdata-test-2")}}), - Entry("Tags Drift", &v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{Tags: map[string]string{"keyTag-test-3": "valueTag-test-3"}}}), - Entry("BlockDeviceMappings Drift", &v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{{DeviceName: aws.String("map-device-test-3")}}}}), - Entry("DetailedMonitoring Drift", &v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{DetailedMonitoring: aws.Bool(true)}}), - Entry("MetadataOptions Drift", &v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{MetadataOptions: &v1beta1.MetadataOptions{HTTPEndpoint: aws.String("disabled")}}}), - Entry("Context Drift", &v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{Context: aws.String("context-2")}}), + Entry("AMIFamily Drift", &v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{AMIFamily: aws.String(v1.AMIFamilyBottlerocket)}}), + Entry("UserData Drift", &v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{UserData: aws.String("userdata-test-2")}}), + Entry("Tags Drift", &v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{Tags: map[string]string{"keyTag-test-3": "valueTag-test-3"}}}), + 
Entry("BlockDeviceMappings Drift", &v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{BlockDeviceMappings: []*v1.BlockDeviceMapping{{DeviceName: aws.String("map-device-test-3")}}}}), + Entry("DetailedMonitoring Drift", &v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{DetailedMonitoring: aws.Bool(true)}}), + Entry("MetadataOptions Drift", &v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{MetadataOptions: &v1.MetadataOptions{HTTPEndpoint: aws.String("disabled")}}}), + Entry("Context Drift", &v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{Context: aws.String("context-2")}}), ) It("should not update the drift hash when dynamic field is updated", func() { ExpectApplied(ctx, env.Client, nodeClass) @@ -129,19 +129,19 @@ var _ = Describe("NodeClass Hash Controller", func() { nodeClass = ExpectExists(ctx, env.Client, nodeClass) expectedHash := nodeClass.Hash() - Expect(nodeClass.Annotations[v1beta1.AnnotationEC2NodeClassHash]).To(Equal(expectedHash)) + Expect(nodeClass.Annotations[v1.AnnotationEC2NodeClassHash]).To(Equal(expectedHash)) - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { ID: "subnet-test1", }, } - nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{ + nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{ { ID: "sg-test1", }, } - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { Tags: map[string]string{"ami-test-key": "ami-test-value"}, }, @@ -150,12 +150,12 @@ var _ = Describe("NodeClass Hash Controller", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, hashController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Annotations[v1beta1.AnnotationEC2NodeClassHash]).To(Equal(expectedHash)) + Expect(nodeClass.Annotations[v1.AnnotationEC2NodeClassHash]).To(Equal(expectedHash)) }) It("should update ec2nodeclass-hash-version annotation when the ec2nodeclass-hash-version on the NodeClass does not match with the controller hash version", func() { nodeClass.Annotations = map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "abceduefed", - v1beta1.AnnotationEC2NodeClassHashVersion: "test", + v1.AnnotationEC2NodeClassHash: "abceduefed", + v1.AnnotationEC2NodeClassHashVersion: "test", } ExpectApplied(ctx, env.Client, nodeClass) @@ -164,41 +164,41 @@ var _ = Describe("NodeClass Hash Controller", func() { expectedHash := nodeClass.Hash() // Expect ec2nodeclass-hash on the NodeClass to be updated - Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHash, expectedHash)) - Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHashVersion, v1beta1.EC2NodeClassHashVersion)) + Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHash, expectedHash)) + Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHashVersion, v1.EC2NodeClassHashVersion)) }) It("should update ec2nodeclass-hash-versions on all NodeClaims when the ec2nodeclass-hash-version does not match with the controller hash version", func() { nodeClass.Annotations = map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "abceduefed", - v1beta1.AnnotationEC2NodeClassHashVersion: "test", + v1.AnnotationEC2NodeClassHash: "abceduefed", + v1.AnnotationEC2NodeClassHashVersion: "test", } - nodeClaimOne := coretest.NodeClaim(corev1beta1.NodeClaim{ + nodeClaimOne := 
coretest.NodeClaim(karpv1.NodeClaim{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "123456", - v1beta1.AnnotationEC2NodeClassHashVersion: "test", + v1.AnnotationEC2NodeClassHash: "123456", + v1.AnnotationEC2NodeClassHashVersion: "test", }, }, - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }) - nodeClaimTwo := coretest.NodeClaim(corev1beta1.NodeClaim{ + nodeClaimTwo := coretest.NodeClaim(karpv1.NodeClaim{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "123456", - v1beta1.AnnotationEC2NodeClassHashVersion: "test", + v1.AnnotationEC2NodeClassHash: "123456", + v1.AnnotationEC2NodeClassHashVersion: "test", }, }, - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }) @@ -212,28 +212,28 @@ var _ = Describe("NodeClass Hash Controller", func() { expectedHash := nodeClass.Hash() // Expect ec2nodeclass-hash on the NodeClaims to be updated - Expect(nodeClaimOne.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHash, expectedHash)) - Expect(nodeClaimOne.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHashVersion, v1beta1.EC2NodeClassHashVersion)) - Expect(nodeClaimTwo.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHash, expectedHash)) - Expect(nodeClaimTwo.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHashVersion, v1beta1.EC2NodeClassHashVersion)) + Expect(nodeClaimOne.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHash, expectedHash)) + Expect(nodeClaimOne.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHashVersion, v1.EC2NodeClassHashVersion)) + Expect(nodeClaimTwo.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHash, expectedHash)) + Expect(nodeClaimTwo.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHashVersion, v1.EC2NodeClassHashVersion)) }) It("should not update ec2nodeclass-hash on all NodeClaims when the ec2nodeclass-hash-version matches the controller hash version", func() { nodeClass.Annotations = map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "abceduefed", - v1beta1.AnnotationEC2NodeClassHashVersion: "test-version", + v1.AnnotationEC2NodeClassHash: "abceduefed", + v1.AnnotationEC2NodeClassHashVersion: "test-version", } - nodeClaim := coretest.NodeClaim(corev1beta1.NodeClaim{ + nodeClaim := coretest.NodeClaim(karpv1.NodeClaim{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "1234564654", - v1beta1.AnnotationEC2NodeClassHashVersion: v1beta1.EC2NodeClassHashVersion, + v1.AnnotationEC2NodeClassHash: "1234564654", + v1.AnnotationEC2NodeClassHashVersion: v1.EC2NodeClassHashVersion, }, }, - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: 
nodeClass.Name, + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }) @@ -246,40 +246,40 @@ var _ = Describe("NodeClass Hash Controller", func() { expectedHash := nodeClass.Hash() // Expect ec2nodeclass-hash on the NodeClass to be updated - Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHash, expectedHash)) - Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHashVersion, v1beta1.EC2NodeClassHashVersion)) + Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHash, expectedHash)) + Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHashVersion, v1.EC2NodeClassHashVersion)) // Expect ec2nodeclass-hash on the NodeClaims to stay the same - Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHash, "1234564654")) - Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHashVersion, v1beta1.EC2NodeClassHashVersion)) + Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHash, "1234564654")) + Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHashVersion, v1.EC2NodeClassHashVersion)) }) It("should not update ec2nodeclass-hash on the NodeClaim if it's drifted and the ec2nodeclass-hash-version does not match the controller hash version", func() { nodeClass.Annotations = map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "abceduefed", - v1beta1.AnnotationEC2NodeClassHashVersion: "test", + v1.AnnotationEC2NodeClassHash: "abceduefed", + v1.AnnotationEC2NodeClassHashVersion: "test", } - nodeClaim := coretest.NodeClaim(corev1beta1.NodeClaim{ + nodeClaim := coretest.NodeClaim(karpv1.NodeClaim{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "123456", - v1beta1.AnnotationEC2NodeClassHashVersion: "test", + v1.AnnotationEC2NodeClassHash: "123456", + v1.AnnotationEC2NodeClassHashVersion: "test", }, }, - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }) - nodeClaim.StatusConditions().SetTrue(corev1beta1.ConditionTypeDrifted) + nodeClaim.StatusConditions().SetTrue(karpv1.ConditionTypeDrifted) ExpectApplied(ctx, env.Client, nodeClass, nodeClaim) ExpectObjectReconciled(ctx, env.Client, hashController, nodeClass) nodeClaim = ExpectExists(ctx, env.Client, nodeClaim) // Expect ec2nodeclass-hash on the NodeClaims to stay the same - Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHash, "123456")) - Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHashVersion, v1beta1.EC2NodeClassHashVersion)) + Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHash, "123456")) + Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHashVersion, v1.EC2NodeClassHashVersion)) }) }) diff --git a/pkg/controllers/nodeclass/status/ami.go b/pkg/controllers/nodeclass/status/ami.go index f8899660e0c8..faf94d2dac08 100644 --- a/pkg/controllers/nodeclass/status/ami.go +++ b/pkg/controllers/nodeclass/status/ami.go @@ -21,12 
+21,12 @@ import ( "time" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/reconcile" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily" ) @@ -34,18 +34,18 @@ type AMI struct { amiProvider amifamily.Provider } -func (a *AMI) Reconcile(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) (reconcile.Result, error) { +func (a *AMI) Reconcile(ctx context.Context, nodeClass *v1.EC2NodeClass) (reconcile.Result, error) { amis, err := a.amiProvider.List(ctx, nodeClass) if err != nil { return reconcile.Result{}, fmt.Errorf("getting amis, %w", err) } if len(amis) == 0 { nodeClass.Status.AMIs = nil - nodeClass.StatusConditions().SetFalse(v1beta1.ConditionTypeAMIsReady, "AMINotFound", "AMISelector did not match any AMIs") + nodeClass.StatusConditions().SetFalse(v1.ConditionTypeAMIsReady, "AMINotFound", "AMISelector did not match any AMIs") return reconcile.Result{}, nil } - nodeClass.Status.AMIs = lo.Map(amis, func(ami amifamily.AMI, _ int) v1beta1.AMI { - reqs := lo.Map(ami.Requirements.NodeSelectorRequirements(), func(item corev1beta1.NodeSelectorRequirementWithMinValues, _ int) v1.NodeSelectorRequirement { + nodeClass.Status.AMIs = lo.Map(amis, func(ami amifamily.AMI, _ int) v1.AMI { + reqs := lo.Map(ami.Requirements.NodeSelectorRequirements(), func(item karpv1.NodeSelectorRequirementWithMinValues, _ int) corev1.NodeSelectorRequirement { return item.NodeSelectorRequirement }) @@ -55,12 +55,12 @@ func (a *AMI) Reconcile(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) (r } return reqs[i].Key < reqs[j].Key }) - return v1beta1.AMI{ + return v1.AMI{ Name: ami.Name, ID: ami.AmiID, Requirements: reqs, } }) - nodeClass.StatusConditions().SetTrue(v1beta1.ConditionTypeAMIsReady) + nodeClass.StatusConditions().SetTrue(v1.ConditionTypeAMIsReady) return reconcile.Result{RequeueAfter: 5 * time.Minute}, nil } diff --git a/pkg/controllers/nodeclass/status/ami_test.go b/pkg/controllers/nodeclass/status/ami_test.go index f3f705ce2d6a..0ab001dd7d4c 100644 --- a/pkg/controllers/nodeclass/status/ami_test.go +++ b/pkg/controllers/nodeclass/status/ami_test.go @@ -21,10 +21,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + corev1 "k8s.io/api/core/v1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/test" . 
"github.com/onsi/ginkgo/v2" @@ -34,19 +34,19 @@ import ( var _ = Describe("NodeClass AMI Status Controller", func() { BeforeEach(func() { - nodeClass = test.EC2NodeClass(v1beta1.EC2NodeClass{ - Spec: v1beta1.EC2NodeClassSpec{ - SubnetSelectorTerms: []v1beta1.SubnetSelectorTerm{ + nodeClass = test.EC2NodeClass(v1.EC2NodeClass{ + Spec: v1.EC2NodeClassSpec{ + SubnetSelectorTerms: []v1.SubnetSelectorTerm{ { Tags: map[string]string{"*": "*"}, }, }, - SecurityGroupSelectorTerms: []v1beta1.SecurityGroupSelectorTerm{ + SecurityGroupSelectorTerms: []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"*": "*"}, }, }, - AMISelectorTerms: []v1beta1.AMISelectorTerm{ + AMISelectorTerms: []v1.AMISelectorTerm{ { Tags: map[string]string{"*": "*"}, }, @@ -92,9 +92,9 @@ var _ = Describe("NodeClass AMI Status Controller", func() { version := lo.Must(awsEnv.VersionProvider.Get(ctx)) awsEnv.SSMAPI.Parameters = map[string]string{ - fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2/recommended/image_id", version): "ami-id-123", - fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2-gpu/recommended/image_id", version): "ami-id-456", - fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2%s/recommended/image_id", version, fmt.Sprintf("-%s", corev1beta1.ArchitectureArm64)): "ami-id-789", + fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2/recommended/image_id", version): "ami-id-123", + fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2-gpu/recommended/image_id", version): "ami-id-456", + fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2%s/recommended/image_id", version, fmt.Sprintf("-%s", karpv1.ArchitectureArm64)): "ami-id-789", } awsEnv.EC2API.DescribeImagesOutput.Set(&ec2.DescribeImagesOutput{ @@ -135,77 +135,77 @@ var _ = Describe("NodeClass AMI Status Controller", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.AMIs).To(Equal([]v1beta1.AMI{ + Expect(nodeClass.Status.AMIs).To(Equal([]v1.AMI{ { Name: "test-ami-3", ID: "ami-id-789", - Requirements: []v1.NodeSelectorRequirement{ + Requirements: []corev1.NodeSelectorRequirement{ { - Key: v1.LabelArchStable, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.ArchitectureArm64}, + Key: corev1.LabelArchStable, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.ArchitectureArm64}, }, { - Key: v1beta1.LabelInstanceGPUCount, - Operator: v1.NodeSelectorOpDoesNotExist, + Key: v1.LabelInstanceGPUCount, + Operator: corev1.NodeSelectorOpDoesNotExist, }, { - Key: v1beta1.LabelInstanceAcceleratorCount, - Operator: v1.NodeSelectorOpDoesNotExist, + Key: v1.LabelInstanceAcceleratorCount, + Operator: corev1.NodeSelectorOpDoesNotExist, }, }, }, { Name: "test-ami-2", ID: "ami-id-456", - Requirements: []v1.NodeSelectorRequirement{ + Requirements: []corev1.NodeSelectorRequirement{ { - Key: v1.LabelArchStable, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.ArchitectureAmd64}, + Key: corev1.LabelArchStable, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.ArchitectureAmd64}, }, { - Key: v1beta1.LabelInstanceGPUCount, - Operator: v1.NodeSelectorOpExists, + Key: v1.LabelInstanceGPUCount, + Operator: corev1.NodeSelectorOpExists, }, }, }, { Name: "test-ami-2", ID: "ami-id-456", - Requirements: []v1.NodeSelectorRequirement{ + Requirements: []corev1.NodeSelectorRequirement{ { - Key: v1.LabelArchStable, - Operator: 
v1.NodeSelectorOpIn, - Values: []string{corev1beta1.ArchitectureAmd64}, + Key: corev1.LabelArchStable, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.ArchitectureAmd64}, }, { - Key: v1beta1.LabelInstanceAcceleratorCount, - Operator: v1.NodeSelectorOpExists, + Key: v1.LabelInstanceAcceleratorCount, + Operator: corev1.NodeSelectorOpExists, }, }, }, { Name: "test-ami-1", ID: "ami-id-123", - Requirements: []v1.NodeSelectorRequirement{ + Requirements: []corev1.NodeSelectorRequirement{ { - Key: v1.LabelArchStable, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.ArchitectureAmd64}, + Key: corev1.LabelArchStable, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.ArchitectureAmd64}, }, { - Key: v1beta1.LabelInstanceGPUCount, - Operator: v1.NodeSelectorOpDoesNotExist, + Key: v1.LabelInstanceGPUCount, + Operator: corev1.NodeSelectorOpDoesNotExist, }, { - Key: v1beta1.LabelInstanceAcceleratorCount, - Operator: v1.NodeSelectorOpDoesNotExist, + Key: v1.LabelInstanceAcceleratorCount, + Operator: corev1.NodeSelectorOpDoesNotExist, }, }, }, })) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeAMIsReady)).To(BeTrue()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeAMIsReady)).To(BeTrue()) }) It("should resolve amiSelector AMIs and requirements into status when all SSM aliases don't resolve", func() { version := lo.Must(awsEnv.VersionProvider.Get(ctx)) @@ -214,7 +214,7 @@ var _ = Describe("NodeClass AMI Status Controller", func() { fmt.Sprintf("/aws/service/bottlerocket/aws-k8s-%s/x86_64/latest/image_id", version): "ami-id-123", fmt.Sprintf("/aws/service/bottlerocket/aws-k8s-%s/arm64/latest/image_id", version): "ami-id-456", } - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket + nodeClass.Spec.AMIFamily = &v1.AMIFamilyBottlerocket nodeClass.Spec.AMISelectorTerms = nil awsEnv.EC2API.DescribeImagesOutput.Set(&ec2.DescribeImagesOutput{ Images: []*ec2.Image{ @@ -244,73 +244,73 @@ var _ = Describe("NodeClass AMI Status Controller", func() { ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.AMIs).To(Equal([]v1beta1.AMI{ + Expect(nodeClass.Status.AMIs).To(Equal([]v1.AMI{ { Name: "test-ami-2", ID: "ami-id-456", - Requirements: []v1.NodeSelectorRequirement{ + Requirements: []corev1.NodeSelectorRequirement{ { - Key: v1.LabelArchStable, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.ArchitectureArm64}, + Key: corev1.LabelArchStable, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.ArchitectureArm64}, }, { - Key: v1beta1.LabelInstanceGPUCount, - Operator: v1.NodeSelectorOpDoesNotExist, + Key: v1.LabelInstanceGPUCount, + Operator: corev1.NodeSelectorOpDoesNotExist, }, { - Key: v1beta1.LabelInstanceAcceleratorCount, - Operator: v1.NodeSelectorOpDoesNotExist, + Key: v1.LabelInstanceAcceleratorCount, + Operator: corev1.NodeSelectorOpDoesNotExist, }, }, }, { Name: "test-ami-1", ID: "ami-id-123", - Requirements: []v1.NodeSelectorRequirement{ + Requirements: []corev1.NodeSelectorRequirement{ { - Key: v1.LabelArchStable, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.ArchitectureAmd64}, + Key: corev1.LabelArchStable, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.ArchitectureAmd64}, }, { - Key: v1beta1.LabelInstanceGPUCount, - Operator: v1.NodeSelectorOpDoesNotExist, + Key: v1.LabelInstanceGPUCount, + Operator: corev1.NodeSelectorOpDoesNotExist, }, { - Key:
v1beta1.LabelInstanceAcceleratorCount, - Operator: v1.NodeSelectorOpDoesNotExist, + Key: v1.LabelInstanceAcceleratorCount, + Operator: corev1.NodeSelectorOpDoesNotExist, }, }, }, })) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeAMIsReady)).To(BeTrue()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeAMIsReady)).To(BeTrue()) }) It("Should resolve a valid AMI selector", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.AMIs).To(Equal( - []v1beta1.AMI{ + []v1.AMI{ { Name: "test-ami-3", ID: "ami-test3", - Requirements: []v1.NodeSelectorRequirement{{ - Key: v1.LabelArchStable, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.ArchitectureAmd64}, + Requirements: []corev1.NodeSelectorRequirement{{ + Key: corev1.LabelArchStable, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.ArchitectureAmd64}, }, }, }, }, )) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeAMIsReady)).To(BeTrue()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeAMIsReady)).To(BeTrue()) }) It("should get error when resolving AMIs and have status condition set to false", func() { awsEnv.EC2API.NextError.Set(fmt.Errorf("unable to resolve AMI")) ExpectApplied(ctx, env.Client, nodeClass) _ = ExpectObjectReconcileFailed(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeAMIsReady)).To(BeFalse()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeAMIsReady)).To(BeFalse()) }) }) diff --git a/pkg/controllers/nodeclass/status/controller.go b/pkg/controllers/nodeclass/status/controller.go index 8c32ff832d14..1714da820348 100644 --- a/pkg/controllers/nodeclass/status/controller.go +++ b/pkg/controllers/nodeclass/status/controller.go @@ -31,7 +31,7 @@ import ( "github.com/awslabs/operatorpkg/reasonable" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily" "github.com/aws/karpenter-provider-aws/pkg/providers/instanceprofile" "github.com/aws/karpenter-provider-aws/pkg/providers/launchtemplate" @@ -40,7 +40,7 @@ import ( ) type nodeClassStatusReconciler interface { - Reconcile(context.Context, *v1beta1.EC2NodeClass) (reconcile.Result, error) + Reconcile(context.Context, *v1.EC2NodeClass) (reconcile.Result, error) } type Controller struct { @@ -66,12 +66,12 @@ func NewController(kubeClient client.Client, subnetProvider subnet.Provider, sec } } -func (c *Controller) Reconcile(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) (reconcile.Result, error) { +func (c *Controller) Reconcile(ctx context.Context, nodeClass *v1.EC2NodeClass) (reconcile.Result, error) { ctx = injection.WithControllerName(ctx, "nodeclass.status") - if !controllerutil.ContainsFinalizer(nodeClass, v1beta1.TerminationFinalizer) { + if !controllerutil.ContainsFinalizer(nodeClass, v1.TerminationFinalizer) { stored := nodeClass.DeepCopy() - controllerutil.AddFinalizer(nodeClass, v1beta1.TerminationFinalizer) + controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) if err := c.kubeClient.Patch(ctx, nodeClass, client.MergeFrom(stored)); err != nil { return reconcile.Result{}, err } @@ -106,7 +106,7 @@ func (c *Controller) Reconcile(ctx context.Context, nodeClass *v1beta1.EC2NodeCl func (c 
*Controller) Register(_ context.Context, m manager.Manager) error { return controllerruntime.NewControllerManagedBy(m). Named("nodeclass.status"). - For(&v1beta1.EC2NodeClass{}). + For(&v1.EC2NodeClass{}). WithOptions(controller.Options{ RateLimiter: reasonable.RateLimiter(), MaxConcurrentReconciles: 10, diff --git a/pkg/controllers/nodeclass/status/instanceprofile.go b/pkg/controllers/nodeclass/status/instanceprofile.go index 882cbc5e7ed7..9ff92d299bae 100644 --- a/pkg/controllers/nodeclass/status/instanceprofile.go +++ b/pkg/controllers/nodeclass/status/instanceprofile.go @@ -21,7 +21,7 @@ import ( "github.com/samber/lo" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/instanceprofile" ) @@ -29,7 +29,7 @@ type InstanceProfile struct { instanceProfileProvider instanceprofile.Provider } -func (ip *InstanceProfile) Reconcile(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) (reconcile.Result, error) { +func (ip *InstanceProfile) Reconcile(ctx context.Context, nodeClass *v1.EC2NodeClass) (reconcile.Result, error) { if nodeClass.Spec.Role != "" { name, err := ip.instanceProfileProvider.Create(ctx, nodeClass) if err != nil { @@ -39,6 +39,6 @@ func (ip *InstanceProfile) Reconcile(ctx context.Context, nodeClass *v1beta1.EC2 } else { nodeClass.Status.InstanceProfile = lo.FromPtr(nodeClass.Spec.InstanceProfile) } - nodeClass.StatusConditions().SetTrue(v1beta1.ConditionTypeInstanceProfileReady) + nodeClass.StatusConditions().SetTrue(v1.ConditionTypeInstanceProfileReady) return reconcile.Result{}, nil } diff --git a/pkg/controllers/nodeclass/status/instanceprofile_test.go b/pkg/controllers/nodeclass/status/instanceprofile_test.go index 14c3317955cb..bf57747ce6af 100644 --- a/pkg/controllers/nodeclass/status/instanceprofile_test.go +++ b/pkg/controllers/nodeclass/status/instanceprofile_test.go @@ -19,7 +19,7 @@ import ( "github.com/aws/aws-sdk-go/service/iam" "github.com/samber/lo" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/fake" "github.com/aws/karpenter-provider-aws/pkg/operator/options" @@ -45,7 +45,7 @@ var _ = Describe("NodeClass InstanceProfile Status Controller", func() { nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.InstanceProfile).To(Equal(profileName)) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeInstanceProfileReady)).To(BeTrue()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeInstanceProfileReady)).To(BeTrue()) }) It("should add the role to the instance profile when it exists without a role", func() { awsEnv.IAMAPI.InstanceProfiles = map[string]*iam.InstanceProfile{ @@ -65,7 +65,7 @@ var _ = Describe("NodeClass InstanceProfile Status Controller", func() { nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.InstanceProfile).To(Equal(profileName)) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeInstanceProfileReady)).To(BeTrue()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeInstanceProfileReady)).To(BeTrue()) }) It("should update the role for the instance profile when the wrong role exists", func() { awsEnv.IAMAPI.InstanceProfiles = map[string]*iam.InstanceProfile{ @@ -90,7 +90,7 @@ var _ = Describe("NodeClass InstanceProfile Status Controller", func() { nodeClass = ExpectExists(ctx, 
env.Client, nodeClass) Expect(nodeClass.Status.InstanceProfile).To(Equal(profileName)) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeInstanceProfileReady)).To(BeTrue()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeInstanceProfileReady)).To(BeTrue()) }) It("should not call CreateInstanceProfile or AddRoleToInstanceProfile when instance profile exists with correct role", func() { awsEnv.IAMAPI.InstanceProfiles = map[string]*iam.InstanceProfile{ @@ -116,7 +116,7 @@ var _ = Describe("NodeClass InstanceProfile Status Controller", func() { Expect(awsEnv.IAMAPI.CreateInstanceProfileBehavior.Calls()).To(BeZero()) Expect(awsEnv.IAMAPI.AddRoleToInstanceProfileBehavior.Calls()).To(BeZero()) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeInstanceProfileReady)).To(BeTrue()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeInstanceProfileReady)).To(BeTrue()) }) It("should resolve the specified instance profile into the status when using instanceProfile field", func() { nodeClass.Spec.Role = "" @@ -126,7 +126,7 @@ var _ = Describe("NodeClass InstanceProfile Status Controller", func() { nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.InstanceProfile).To(Equal(lo.FromPtr(nodeClass.Spec.InstanceProfile))) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeInstanceProfileReady)).To(BeTrue()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeInstanceProfileReady)).To(BeTrue()) }) It("should not call the the IAM API when specifying an instance profile", func() { nodeClass.Spec.Role = "" @@ -137,6 +137,6 @@ var _ = Describe("NodeClass InstanceProfile Status Controller", func() { Expect(awsEnv.IAMAPI.CreateInstanceProfileBehavior.Calls()).To(BeZero()) Expect(awsEnv.IAMAPI.AddRoleToInstanceProfileBehavior.Calls()).To(BeZero()) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeInstanceProfileReady)).To(BeTrue()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeInstanceProfileReady)).To(BeTrue()) }) }) diff --git a/pkg/controllers/nodeclass/status/launchtemplate_test.go b/pkg/controllers/nodeclass/status/launchtemplate_test.go index 67a38fe11451..5035d03eba5f 100644 --- a/pkg/controllers/nodeclass/status/launchtemplate_test.go +++ b/pkg/controllers/nodeclass/status/launchtemplate_test.go @@ -19,7 +19,7 @@ import ( "github.com/awslabs/operatorpkg/status" "github.com/samber/lo" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/test" . 
"github.com/onsi/ginkgo/v2" @@ -29,19 +29,19 @@ import ( var _ = Describe("NodeClass Launch Template CIDR Resolution Controller", func() { BeforeEach(func() { - nodeClass = test.EC2NodeClass(v1beta1.EC2NodeClass{ - Spec: v1beta1.EC2NodeClassSpec{ - SubnetSelectorTerms: []v1beta1.SubnetSelectorTerm{ + nodeClass = test.EC2NodeClass(v1.EC2NodeClass{ + Spec: v1.EC2NodeClassSpec{ + SubnetSelectorTerms: []v1.SubnetSelectorTerm{ { Tags: map[string]string{"*": "*"}, }, }, - SecurityGroupSelectorTerms: []v1beta1.SecurityGroupSelectorTerm{ + SecurityGroupSelectorTerms: []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"*": "*"}, }, }, - AMISelectorTerms: []v1beta1.AMISelectorTerm{ + AMISelectorTerms: []v1.AMISelectorTerm{ { Tags: map[string]string{"*": "*"}, }, @@ -53,12 +53,12 @@ var _ = Describe("NodeClass Launch Template CIDR Resolution Controller", func() }) It("shouldn't resolve cluster CIDR for non-AL2023 NodeClasses", func() { for _, family := range []string{ - v1beta1.AMIFamilyAL2, - v1beta1.AMIFamilyBottlerocket, - v1beta1.AMIFamilyUbuntu, - v1beta1.AMIFamilyWindows2019, - v1beta1.AMIFamilyWindows2022, - v1beta1.AMIFamilyCustom, + v1.AMIFamilyAL2, + v1.AMIFamilyBottlerocket, + v1.AMIFamilyUbuntu, + v1.AMIFamilyWindows2019, + v1.AMIFamilyWindows2022, + v1.AMIFamilyCustom, } { nodeClass.Spec.AMIFamily = lo.ToPtr(family) ExpectApplied(ctx, env.Client, nodeClass) @@ -67,7 +67,7 @@ var _ = Describe("NodeClass Launch Template CIDR Resolution Controller", func() } }) It("should resolve cluster CIDR for IPv4 clusters", func() { - nodeClass.Spec.AMIFamily = lo.ToPtr(v1beta1.AMIFamilyAL2023) + nodeClass.Spec.AMIFamily = lo.ToPtr(v1.AMIFamilyAL2023) ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) Expect(lo.FromPtr(awsEnv.LaunchTemplateProvider.ClusterCIDR.Load())).To(Equal("10.100.0.0/16")) @@ -82,7 +82,7 @@ var _ = Describe("NodeClass Launch Template CIDR Resolution Controller", func() }, }, }) - nodeClass.Spec.AMIFamily = lo.ToPtr(v1beta1.AMIFamilyAL2023) + nodeClass.Spec.AMIFamily = lo.ToPtr(v1.AMIFamilyAL2023) ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) Expect(lo.FromPtr(awsEnv.LaunchTemplateProvider.ClusterCIDR.Load())).To(Equal("2001:db8::/64")) diff --git a/pkg/controllers/nodeclass/status/readiness.go b/pkg/controllers/nodeclass/status/readiness.go index 06ce3bad486f..1905267fbeae 100644 --- a/pkg/controllers/nodeclass/status/readiness.go +++ b/pkg/controllers/nodeclass/status/readiness.go @@ -25,18 +25,18 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" ) type Readiness struct { launchTemplateProvider launchtemplate.Provider } -func (n Readiness) Reconcile(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) (reconcile.Result, error) { +func (n Readiness) Reconcile(ctx context.Context, nodeClass *v1.EC2NodeClass) (reconcile.Result, error) { // A NodeClass that uses AL2023 requires the cluster CIDR for launching nodes. // To allow Karpenter to be used for Non-EKS clusters, resolving the Cluster CIDR // will not be done at startup but instead in a reconcile loop. 
- if lo.FromPtr(nodeClass.Spec.AMIFamily) == v1beta1.AMIFamilyAL2023 { + if lo.FromPtr(nodeClass.Spec.AMIFamily) == v1.AMIFamilyAL2023 { if err := n.launchTemplateProvider.ResolveClusterCIDR(ctx); err != nil { nodeClass.StatusConditions().SetFalse(status.ConditionReady, "NodeClassNotReady", "Failed to detect the cluster CIDR") return reconcile.Result{}, fmt.Errorf("failed to detect the cluster CIDR, %w", err) diff --git a/pkg/controllers/nodeclass/status/readiness_test.go b/pkg/controllers/nodeclass/status/readiness_test.go index 2cb807c33b6c..de1e06a7971b 100644 --- a/pkg/controllers/nodeclass/status/readiness_test.go +++ b/pkg/controllers/nodeclass/status/readiness_test.go @@ -17,7 +17,7 @@ package status_test import ( "github.com/awslabs/operatorpkg/status" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/test" . "github.com/onsi/ginkgo/v2" @@ -27,19 +27,19 @@ import ( var _ = Describe("NodeClass Status Condition Controller", func() { BeforeEach(func() { - nodeClass = test.EC2NodeClass(v1beta1.EC2NodeClass{ - Spec: v1beta1.EC2NodeClassSpec{ - SubnetSelectorTerms: []v1beta1.SubnetSelectorTerm{ + nodeClass = test.EC2NodeClass(v1.EC2NodeClass{ + Spec: v1.EC2NodeClassSpec{ + SubnetSelectorTerms: []v1.SubnetSelectorTerm{ { Tags: map[string]string{"*": "*"}, }, }, - SecurityGroupSelectorTerms: []v1beta1.SecurityGroupSelectorTerm{ + SecurityGroupSelectorTerms: []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"*": "*"}, }, }, - AMISelectorTerms: []v1beta1.AMISelectorTerm{ + AMISelectorTerms: []v1.AMISelectorTerm{ { Tags: map[string]string{"*": "*"}, }, @@ -55,7 +55,7 @@ var _ = Describe("NodeClass Status Condition Controller", func() { Expect(nodeClass.StatusConditions().Get(status.ConditionReady).IsTrue()).To(BeTrue()) }) It("should update status condition as Not Ready", func() { - nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{ + nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"foo": "invalid"}, }, diff --git a/pkg/controllers/nodeclass/status/securitygroup.go b/pkg/controllers/nodeclass/status/securitygroup.go index 59817d4544b1..f003b2dfd3ea 100644 --- a/pkg/controllers/nodeclass/status/securitygroup.go +++ b/pkg/controllers/nodeclass/status/securitygroup.go @@ -24,7 +24,7 @@ import ( "github.com/samber/lo" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/securitygroup" ) @@ -32,25 +32,25 @@ type SecurityGroup struct { securityGroupProvider securitygroup.Provider } -func (sg *SecurityGroup) Reconcile(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) (reconcile.Result, error) { +func (sg *SecurityGroup) Reconcile(ctx context.Context, nodeClass *v1.EC2NodeClass) (reconcile.Result, error) { securityGroups, err := sg.securityGroupProvider.List(ctx, nodeClass) if err != nil { return reconcile.Result{}, fmt.Errorf("getting security groups, %w", err) } if len(securityGroups) == 0 && len(nodeClass.Spec.SecurityGroupSelectorTerms) > 0 { nodeClass.Status.SecurityGroups = nil - nodeClass.StatusConditions().SetFalse(v1beta1.ConditionTypeSecurityGroupsReady, "SecurityGroupsNotFound", "SecurityGroupSelector did not match any SecurityGroups") + nodeClass.StatusConditions().SetFalse(v1.ConditionTypeSecurityGroupsReady, 
"SecurityGroupsNotFound", "SecurityGroupSelector did not match any SecurityGroups") return reconcile.Result{}, nil } sort.Slice(securityGroups, func(i, j int) bool { return *securityGroups[i].GroupId < *securityGroups[j].GroupId }) - nodeClass.Status.SecurityGroups = lo.Map(securityGroups, func(securityGroup *ec2.SecurityGroup, _ int) v1beta1.SecurityGroup { - return v1beta1.SecurityGroup{ + nodeClass.Status.SecurityGroups = lo.Map(securityGroups, func(securityGroup *ec2.SecurityGroup, _ int) v1.SecurityGroup { + return v1.SecurityGroup{ ID: *securityGroup.GroupId, Name: *securityGroup.GroupName, } }) - nodeClass.StatusConditions().SetTrue(v1beta1.ConditionTypeSecurityGroupsReady) + nodeClass.StatusConditions().SetTrue(v1.ConditionTypeSecurityGroupsReady) return reconcile.Result{RequeueAfter: 5 * time.Minute}, nil } diff --git a/pkg/controllers/nodeclass/status/securitygroup_test.go b/pkg/controllers/nodeclass/status/securitygroup_test.go index 4daf63852b6f..c43143f99010 100644 --- a/pkg/controllers/nodeclass/status/securitygroup_test.go +++ b/pkg/controllers/nodeclass/status/securitygroup_test.go @@ -15,7 +15,7 @@ limitations under the License. package status_test import ( - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/test" . "github.com/onsi/ginkgo/v2" @@ -25,19 +25,19 @@ import ( var _ = Describe("NodeClass Security Group Status Controller", func() { BeforeEach(func() { - nodeClass = test.EC2NodeClass(v1beta1.EC2NodeClass{ - Spec: v1beta1.EC2NodeClassSpec{ - SubnetSelectorTerms: []v1beta1.SubnetSelectorTerm{ + nodeClass = test.EC2NodeClass(v1.EC2NodeClass{ + Spec: v1.EC2NodeClassSpec{ + SubnetSelectorTerms: []v1.SubnetSelectorTerm{ { Tags: map[string]string{"*": "*"}, }, }, - SecurityGroupSelectorTerms: []v1beta1.SecurityGroupSelectorTerm{ + SecurityGroupSelectorTerms: []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"*": "*"}, }, }, - AMISelectorTerms: []v1beta1.AMISelectorTerm{ + AMISelectorTerms: []v1.AMISelectorTerm{ { Tags: map[string]string{"*": "*"}, }, @@ -49,7 +49,7 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1beta1.SecurityGroup{ + Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { ID: "sg-test1", Name: "securityGroup-test1", @@ -63,10 +63,10 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { Name: "securityGroup-test3", }, })) - Expect(nodeClass.StatusConditions().Get(v1beta1.ConditionTypeSecurityGroupsReady).IsTrue()).To(BeTrue()) + Expect(nodeClass.StatusConditions().Get(v1.ConditionTypeSecurityGroupsReady).IsTrue()).To(BeTrue()) }) It("Should resolve a valid selectors for Security Groups by tags", func() { - nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{ + nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"Name": "test-security-group-1"}, }, @@ -77,7 +77,7 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1beta1.SecurityGroup{ + 
Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { ID: "sg-test1", Name: "securityGroup-test1", @@ -87,10 +87,10 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { Name: "securityGroup-test2", }, })) - Expect(nodeClass.StatusConditions().Get(v1beta1.ConditionTypeSecurityGroupsReady).IsTrue()).To(BeTrue()) + Expect(nodeClass.StatusConditions().Get(v1.ConditionTypeSecurityGroupsReady).IsTrue()).To(BeTrue()) }) It("Should resolve a valid selectors for Security Groups by ids", func() { - nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{ + nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{ { ID: "sg-test1", }, @@ -98,19 +98,19 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1beta1.SecurityGroup{ + Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { ID: "sg-test1", Name: "securityGroup-test1", }, })) - Expect(nodeClass.StatusConditions().Get(v1beta1.ConditionTypeSecurityGroupsReady).IsTrue()).To(BeTrue()) + Expect(nodeClass.StatusConditions().Get(v1.ConditionTypeSecurityGroupsReady).IsTrue()).To(BeTrue()) }) It("Should update Security Groups status when the Security Groups selector gets updated by tags", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1beta1.SecurityGroup{ + Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { ID: "sg-test1", Name: "securityGroup-test1", @@ -125,7 +125,7 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { }, })) - nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{ + nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"Name": "test-security-group-1"}, }, @@ -136,7 +136,7 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1beta1.SecurityGroup{ + Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { ID: "sg-test1", Name: "securityGroup-test1", @@ -146,13 +146,13 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { Name: "securityGroup-test2", }, })) - Expect(nodeClass.StatusConditions().Get(v1beta1.ConditionTypeSecurityGroupsReady).IsTrue()).To(BeTrue()) + Expect(nodeClass.StatusConditions().Get(v1.ConditionTypeSecurityGroupsReady).IsTrue()).To(BeTrue()) }) It("Should update Security Groups status when the Security Groups selector gets updated by ids", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1beta1.SecurityGroup{ + Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { ID: "sg-test1", Name: "securityGroup-test1", @@ -167,7 +167,7 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { }, })) - 
nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{ + nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{ { ID: "sg-test1", }, @@ -175,16 +175,16 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1beta1.SecurityGroup{ + Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { ID: "sg-test1", Name: "securityGroup-test1", }, })) - Expect(nodeClass.StatusConditions().Get(v1beta1.ConditionTypeSecurityGroupsReady).IsTrue()).To(BeTrue()) + Expect(nodeClass.StatusConditions().Get(v1.ConditionTypeSecurityGroupsReady).IsTrue()).To(BeTrue()) }) It("Should not resolve a invalid selectors for Security Groups", func() { - nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{ + nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{`foo`: `invalid`}, }, @@ -193,13 +193,13 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.SecurityGroups).To(BeNil()) - Expect(nodeClass.StatusConditions().Get(v1beta1.ConditionTypeSecurityGroupsReady).IsFalse()).To(BeTrue()) + Expect(nodeClass.StatusConditions().Get(v1.ConditionTypeSecurityGroupsReady).IsFalse()).To(BeTrue()) }) It("Should not resolve a invalid selectors for an updated Security Groups selector", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1beta1.SecurityGroup{ + Expect(nodeClass.Status.SecurityGroups).To(Equal([]v1.SecurityGroup{ { ID: "sg-test1", Name: "securityGroup-test1", @@ -214,7 +214,7 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { }, })) - nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{ + nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{`foo`: `invalid`}, }, @@ -223,6 +223,6 @@ var _ = Describe("NodeClass Security Group Status Controller", func() { ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.SecurityGroups).To(BeNil()) - Expect(nodeClass.StatusConditions().Get(v1beta1.ConditionTypeSecurityGroupsReady).IsFalse()).To(BeTrue()) + Expect(nodeClass.StatusConditions().Get(v1.ConditionTypeSecurityGroupsReady).IsFalse()).To(BeTrue()) }) }) diff --git a/pkg/controllers/nodeclass/status/subnet.go b/pkg/controllers/nodeclass/status/subnet.go index 5d987eed74b0..4e71dd0384a1 100644 --- a/pkg/controllers/nodeclass/status/subnet.go +++ b/pkg/controllers/nodeclass/status/subnet.go @@ -24,7 +24,7 @@ import ( "github.com/samber/lo" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/subnet" ) @@ -32,14 +32,14 @@ type Subnet struct { subnetProvider subnet.Provider } -func (s *Subnet) Reconcile(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) (reconcile.Result, 
error) { +func (s *Subnet) Reconcile(ctx context.Context, nodeClass *v1.EC2NodeClass) (reconcile.Result, error) { subnets, err := s.subnetProvider.List(ctx, nodeClass) if err != nil { return reconcile.Result{}, fmt.Errorf("getting subnets, %w", err) } if len(subnets) == 0 { nodeClass.Status.Subnets = nil - nodeClass.StatusConditions().SetFalse(v1beta1.ConditionTypeSubnetsReady, "SubnetsNotFound", "SubnetSelector did not match any Subnets") + nodeClass.StatusConditions().SetFalse(v1.ConditionTypeSubnetsReady, "SubnetsNotFound", "SubnetSelector did not match any Subnets") return reconcile.Result{}, nil } sort.Slice(subnets, func(i, j int) bool { @@ -48,13 +48,13 @@ func (s *Subnet) Reconcile(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) } return *subnets[i].SubnetId < *subnets[j].SubnetId }) - nodeClass.Status.Subnets = lo.Map(subnets, func(ec2subnet *ec2.Subnet, _ int) v1beta1.Subnet { - return v1beta1.Subnet{ + nodeClass.Status.Subnets = lo.Map(subnets, func(ec2subnet *ec2.Subnet, _ int) v1.Subnet { + return v1.Subnet{ ID: *ec2subnet.SubnetId, Zone: *ec2subnet.AvailabilityZone, ZoneID: *ec2subnet.AvailabilityZoneId, } }) - nodeClass.StatusConditions().SetTrue(v1beta1.ConditionTypeSubnetsReady) + nodeClass.StatusConditions().SetTrue(v1.ConditionTypeSubnetsReady) return reconcile.Result{RequeueAfter: time.Minute}, nil } diff --git a/pkg/controllers/nodeclass/status/subnet_test.go b/pkg/controllers/nodeclass/status/subnet_test.go index 01c4c90e4115..1de3c799492c 100644 --- a/pkg/controllers/nodeclass/status/subnet_test.go +++ b/pkg/controllers/nodeclass/status/subnet_test.go @@ -18,7 +18,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/test" . 
"github.com/onsi/ginkgo/v2" @@ -28,19 +28,19 @@ import ( var _ = Describe("NodeClass Subnet Status Controller", func() { BeforeEach(func() { - nodeClass = test.EC2NodeClass(v1beta1.EC2NodeClass{ - Spec: v1beta1.EC2NodeClassSpec{ - SubnetSelectorTerms: []v1beta1.SubnetSelectorTerm{ + nodeClass = test.EC2NodeClass(v1.EC2NodeClass{ + Spec: v1.EC2NodeClassSpec{ + SubnetSelectorTerms: []v1.SubnetSelectorTerm{ { Tags: map[string]string{"*": "*"}, }, }, - SecurityGroupSelectorTerms: []v1beta1.SecurityGroupSelectorTerm{ + SecurityGroupSelectorTerms: []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"*": "*"}, }, }, - AMISelectorTerms: []v1beta1.AMISelectorTerm{ + AMISelectorTerms: []v1.AMISelectorTerm{ { Tags: map[string]string{"*": "*"}, }, @@ -52,7 +52,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.Subnets).To(Equal([]v1beta1.Subnet{ + Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { ID: "subnet-test1", Zone: "test-zone-1a", @@ -74,7 +74,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { ZoneID: "tstz1-1alocal", }, })) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeSubnetsReady)).To(BeTrue()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeSubnetsReady)).To(BeTrue()) }) It("Should have the correct ordering for the Subnets", func() { awsEnv.EC2API.DescribeSubnetsOutput.Set(&ec2.DescribeSubnetsOutput{Subnets: []*ec2.Subnet{ @@ -85,7 +85,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.Subnets).To(Equal([]v1beta1.Subnet{ + Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { ID: "subnet-test2", Zone: "test-zone-1b", @@ -102,10 +102,10 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { ZoneID: "tstz1-1a", }, })) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeSubnetsReady)).To(BeTrue()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeSubnetsReady)).To(BeTrue()) }) It("Should resolve a valid selectors for Subnet by tags", func() { - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { Tags: map[string]string{`Name`: `test-subnet-1`}, }, @@ -116,7 +116,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.Subnets).To(Equal([]v1beta1.Subnet{ + Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { ID: "subnet-test1", Zone: "test-zone-1a", @@ -128,10 +128,10 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { ZoneID: "tstz1-1b", }, })) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeSubnetsReady)).To(BeTrue()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeSubnetsReady)).To(BeTrue()) }) It("Should resolve a valid selectors for Subnet by ids", func() { - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { ID: "subnet-test1", }, @@ -139,20 +139,20 @@ var _ = 
Describe("NodeClass Subnet Status Controller", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.Subnets).To(Equal([]v1beta1.Subnet{ + Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { ID: "subnet-test1", Zone: "test-zone-1a", ZoneID: "tstz1-1a", }, })) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeSubnetsReady)).To(BeTrue()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeSubnetsReady)).To(BeTrue()) }) It("Should update Subnet status when the Subnet selector gets updated by tags", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.Subnets).To(Equal([]v1beta1.Subnet{ + Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { ID: "subnet-test1", Zone: "test-zone-1a", @@ -175,7 +175,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { }, })) - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { Tags: map[string]string{ "Name": "test-subnet-1", @@ -190,7 +190,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.Subnets).To(Equal([]v1beta1.Subnet{ + Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { ID: "subnet-test1", Zone: "test-zone-1a", @@ -202,13 +202,13 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { ZoneID: "tstz1-1b", }, })) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeSubnetsReady)).To(BeTrue()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeSubnetsReady)).To(BeTrue()) }) It("Should update Subnet status when the Subnet selector gets updated by ids", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.Subnets).To(Equal([]v1beta1.Subnet{ + Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { ID: "subnet-test1", Zone: "test-zone-1a", @@ -231,7 +231,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { }, })) - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { ID: "subnet-test1", }, @@ -239,17 +239,17 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.Subnets).To(Equal([]v1beta1.Subnet{ + Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { ID: "subnet-test1", Zone: "test-zone-1a", ZoneID: "tstz1-1a", }, })) - Expect(nodeClass.StatusConditions().IsTrue(v1beta1.ConditionTypeSubnetsReady)).To(BeTrue()) + Expect(nodeClass.StatusConditions().IsTrue(v1.ConditionTypeSubnetsReady)).To(BeTrue()) }) It("Should not resolve a invalid selectors for Subnet", func() { - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { Tags: map[string]string{`foo`: `invalid`}, }, @@ -258,13 +258,13 
@@ var _ = Describe("NodeClass Subnet Status Controller", func() { ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.Subnets).To(BeNil()) - Expect(nodeClass.StatusConditions().Get(v1beta1.ConditionTypeSubnetsReady).IsFalse()).To(BeTrue()) + Expect(nodeClass.StatusConditions().Get(v1.ConditionTypeSubnetsReady).IsFalse()).To(BeTrue()) }) It("Should not resolve a invalid selectors for an updated subnet selector", func() { ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) - Expect(nodeClass.Status.Subnets).To(Equal([]v1beta1.Subnet{ + Expect(nodeClass.Status.Subnets).To(Equal([]v1.Subnet{ { ID: "subnet-test1", Zone: "test-zone-1a", @@ -287,7 +287,7 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { }, })) - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { Tags: map[string]string{`foo`: `invalid`}, }, @@ -296,6 +296,6 @@ var _ = Describe("NodeClass Subnet Status Controller", func() { ExpectObjectReconciled(ctx, env.Client, statusController, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) Expect(nodeClass.Status.Subnets).To(BeNil()) - Expect(nodeClass.StatusConditions().Get(v1beta1.ConditionTypeSubnetsReady).IsFalse()).To(BeTrue()) + Expect(nodeClass.StatusConditions().Get(v1.ConditionTypeSubnetsReady).IsFalse()).To(BeTrue()) }) }) diff --git a/pkg/controllers/nodeclass/status/suite_test.go b/pkg/controllers/nodeclass/status/suite_test.go index 3d780e426e04..3eb2b1152ec5 100644 --- a/pkg/controllers/nodeclass/status/suite_test.go +++ b/pkg/controllers/nodeclass/status/suite_test.go @@ -24,7 +24,7 @@ import ( coretest "sigs.k8s.io/karpenter/pkg/test" "github.com/aws/karpenter-provider-aws/pkg/apis" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass/status" "github.com/aws/karpenter-provider-aws/pkg/operator/options" "github.com/aws/karpenter-provider-aws/pkg/test" @@ -38,7 +38,7 @@ import ( var ctx context.Context var env *coretest.Environment var awsEnv *test.Environment -var nodeClass *v1beta1.EC2NodeClass +var nodeClass *v1.EC2NodeClass var statusController *status.Controller func TestAPIs(t *testing.T) { diff --git a/pkg/controllers/nodeclass/termination/controller.go b/pkg/controllers/nodeclass/termination/controller.go index 884c3a7c8405..5785da72a43c 100644 --- a/pkg/controllers/nodeclass/termination/controller.go +++ b/pkg/controllers/nodeclass/termination/controller.go @@ -39,10 +39,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/awslabs/operatorpkg/reasonable" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/events" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/instanceprofile" ) @@ -64,7 +64,7 @@ func NewController(kubeClient client.Client, recorder events.Recorder, } } -func (c *Controller) Reconcile(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) (reconcile.Result, error) { +func (c *Controller) Reconcile(ctx context.Context, nodeClass *v1.EC2NodeClass) (reconcile.Result, error) { ctx = 
injection.WithControllerName(ctx, "nodeclass.termination") if !nodeClass.GetDeletionTimestamp().IsZero() { @@ -73,17 +73,17 @@ func (c *Controller) Reconcile(ctx context.Context, nodeClass *v1beta1.EC2NodeCl return reconcile.Result{}, nil } -func (c *Controller) finalize(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) (reconcile.Result, error) { +func (c *Controller) finalize(ctx context.Context, nodeClass *v1.EC2NodeClass) (reconcile.Result, error) { stored := nodeClass.DeepCopy() - if !controllerutil.ContainsFinalizer(nodeClass, v1beta1.TerminationFinalizer) { + if !controllerutil.ContainsFinalizer(nodeClass, v1.TerminationFinalizer) { return reconcile.Result{}, nil } - nodeClaimList := &corev1beta1.NodeClaimList{} + nodeClaimList := &karpv1.NodeClaimList{} if err := c.kubeClient.List(ctx, nodeClaimList, client.MatchingFields{"spec.nodeClassRef.name": nodeClass.Name}); err != nil { return reconcile.Result{}, fmt.Errorf("listing nodeclaims that are using nodeclass, %w", err) } if len(nodeClaimList.Items) > 0 { - c.recorder.Publish(WaitingOnNodeClaimTerminationEvent(nodeClass, lo.Map(nodeClaimList.Items, func(nc corev1beta1.NodeClaim, _ int) string { return nc.Name }))) + c.recorder.Publish(WaitingOnNodeClaimTerminationEvent(nodeClass, lo.Map(nodeClaimList.Items, func(nc karpv1.NodeClaim, _ int) string { return nc.Name }))) return reconcile.Result{RequeueAfter: time.Minute * 10}, nil // periodically fire the event } if nodeClass.Spec.Role != "" { @@ -94,7 +94,7 @@ func (c *Controller) finalize(ctx context.Context, nodeClass *v1beta1.EC2NodeCla if err := c.launchTemplateProvider.DeleteAll(ctx, nodeClass); err != nil { return reconcile.Result{}, fmt.Errorf("deleting launch templates, %w", err) } - controllerutil.RemoveFinalizer(nodeClass, v1beta1.TerminationFinalizer) + controllerutil.RemoveFinalizer(nodeClass, v1.TerminationFinalizer) if !equality.Semantic.DeepEqual(stored, nodeClass) { // We call Update() here rather than Patch() because patching a list with a JSON merge patch // can cause races due to the fact that it fully replaces the list on a change @@ -112,11 +112,11 @@ func (c *Controller) finalize(ctx context.Context, nodeClass *v1beta1.EC2NodeCla func (c *Controller) Register(_ context.Context, m manager.Manager) error { return controllerruntime.NewControllerManagedBy(m). Named("nodeclass.termination"). - For(&v1beta1.EC2NodeClass{}). + For(&v1.EC2NodeClass{}). 
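
The `Watches(` clause that follows is what lets the finalizer above make progress: it re-enqueues the owning EC2NodeClass whenever one of its NodeClaims changes. The NodeClaim guard it pairs with, condensed into a standalone sketch; the helper `waitForNodeClaims` and its boolean return are ours, while the list call and ten-minute requeue mirror the hunk above:

```go
package termination

import (
	"context"
	"fmt"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
)

// waitForNodeClaims distills the guard at the top of finalize(): while any
// NodeClaim still references this EC2NodeClass through the
// "spec.nodeClassRef.name" field index, cleanup is deferred and the reconcile
// requeued (the real code also re-publishes an event on each pass).
func waitForNodeClaims(ctx context.Context, kubeClient client.Client, nodeClassName string) (reconcile.Result, bool, error) {
	nodeClaimList := &karpv1.NodeClaimList{}
	if err := kubeClient.List(ctx, nodeClaimList, client.MatchingFields{"spec.nodeClassRef.name": nodeClassName}); err != nil {
		return reconcile.Result{}, false, fmt.Errorf("listing nodeclaims that are using nodeclass, %w", err)
	}
	if len(nodeClaimList.Items) > 0 {
		return reconcile.Result{RequeueAfter: 10 * time.Minute}, false, nil
	}
	return reconcile.Result{}, true, nil // safe to delete the instance profile, launch templates, and finalizer
}
```
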
Watches( - &corev1beta1.NodeClaim{}, + &karpv1.NodeClaim{}, handler.EnqueueRequestsFromMapFunc(func(_ context.Context, o client.Object) []reconcile.Request { - nc := o.(*corev1beta1.NodeClaim) + nc := o.(*karpv1.NodeClaim) if nc.Spec.NodeClassRef == nil { return nil } diff --git a/pkg/controllers/nodeclass/termination/events.go b/pkg/controllers/nodeclass/termination/events.go index 8da543ed2bd8..3823e871c806 100644 --- a/pkg/controllers/nodeclass/termination/events.go +++ b/pkg/controllers/nodeclass/termination/events.go @@ -17,18 +17,18 @@ package termination import ( "fmt" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "sigs.k8s.io/karpenter/pkg/events" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/utils" ) -func WaitingOnNodeClaimTerminationEvent(nodeClass *v1beta1.EC2NodeClass, names []string) events.Event { +func WaitingOnNodeClaimTerminationEvent(nodeClass *v1.EC2NodeClass, names []string) events.Event { return events.Event{ InvolvedObject: nodeClass, - Type: v1.EventTypeNormal, + Type: corev1.EventTypeNormal, Reason: "WaitingOnNodeClaimTermination", Message: fmt.Sprintf("Waiting on NodeClaim termination for %s", utils.PrettySlice(names, 5)), DedupeValues: []string{string(nodeClass.UID)}, diff --git a/pkg/controllers/nodeclass/termination/suite_test.go b/pkg/controllers/nodeclass/termination/suite_test.go index 4fdb80b28c07..4e49222999f7 100644 --- a/pkg/controllers/nodeclass/termination/suite_test.go +++ b/pkg/controllers/nodeclass/termination/suite_test.go @@ -29,13 +29,13 @@ import ( "github.com/samber/lo" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/events" coreoptions "sigs.k8s.io/karpenter/pkg/operator/options" coretest "sigs.k8s.io/karpenter/pkg/test" "github.com/aws/karpenter-provider-aws/pkg/apis" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass/termination" "github.com/aws/karpenter-provider-aws/pkg/fake" "github.com/aws/karpenter-provider-aws/pkg/operator/options" @@ -81,22 +81,22 @@ var _ = AfterEach(func() { }) var _ = Describe("NodeClass Termination", func() { - var nodeClass *v1beta1.EC2NodeClass + var nodeClass *v1.EC2NodeClass var profileName string BeforeEach(func() { - nodeClass = test.EC2NodeClass(v1beta1.EC2NodeClass{ - Spec: v1beta1.EC2NodeClassSpec{ - SubnetSelectorTerms: []v1beta1.SubnetSelectorTerm{ + nodeClass = test.EC2NodeClass(v1.EC2NodeClass{ + Spec: v1.EC2NodeClassSpec{ + SubnetSelectorTerms: []v1.SubnetSelectorTerm{ { Tags: map[string]string{"*": "*"}, }, }, - SecurityGroupSelectorTerms: []v1beta1.SecurityGroupSelectorTerm{ + SecurityGroupSelectorTerms: []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"*": "*"}, }, }, - AMISelectorTerms: []v1beta1.AMISelectorTerm{ + AMISelectorTerms: []v1.AMISelectorTerm{ { Tags: map[string]string{"*": "*"}, }, @@ -111,7 +111,7 @@ var _ = Describe("NodeClass Termination", func() { awsEnv.EC2API.LaunchTemplates.Store(launchTemplateName, &ec2.LaunchTemplate{LaunchTemplateName: launchTemplateName, LaunchTemplateId: aws.String(fake.LaunchTemplateID()), Tags: []*ec2.Tag{&ec2.Tag{Key: aws.String("karpenter.k8s.aws/cluster"), Value: aws.String("test-cluster")}}}) _, ok := 
awsEnv.EC2API.LaunchTemplates.Load(launchTemplateName) Expect(ok).To(BeTrue()) - controllerutil.AddFinalizer(nodeClass, v1beta1.TerminationFinalizer) + controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) @@ -125,7 +125,7 @@ var _ = Describe("NodeClass Termination", func() { awsEnv.EC2API.LaunchTemplates.Store(launchTemplateName, &ec2.LaunchTemplate{LaunchTemplateName: launchTemplateName, LaunchTemplateId: aws.String(fake.LaunchTemplateID()), Tags: []*ec2.Tag{&ec2.Tag{Key: aws.String("karpenter.k8s.aws/cluster"), Value: aws.String("test-cluster")}}}) _, ok := awsEnv.EC2API.LaunchTemplates.Load(launchTemplateName) Expect(ok).To(BeTrue()) - controllerutil.AddFinalizer(nodeClass, v1beta1.TerminationFinalizer) + controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) @@ -144,7 +144,7 @@ var _ = Describe("NodeClass Termination", func() { Expect(ok).To(BeTrue()) _, ok = awsEnv.EC2API.LaunchTemplates.Load(ltName2) Expect(ok).To(BeTrue()) - controllerutil.AddFinalizer(nodeClass, v1beta1.TerminationFinalizer) + controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) @@ -168,7 +168,7 @@ var _ = Describe("NodeClass Termination", func() { }, }, } - controllerutil.AddFinalizer(nodeClass, v1beta1.TerminationFinalizer) + controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(1)) @@ -184,7 +184,7 @@ var _ = Describe("NodeClass Termination", func() { InstanceProfileName: aws.String(profileName), }, } - controllerutil.AddFinalizer(nodeClass, v1beta1.TerminationFinalizer) + controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(1)) @@ -196,7 +196,7 @@ var _ = Describe("NodeClass Termination", func() { }) It("should succeed to delete the NodeClass when the instance profile doesn't exist", func() { Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(0)) - controllerutil.AddFinalizer(nodeClass, v1beta1.TerminationFinalizer) + controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) ExpectApplied(ctx, env.Client, nodeClass) Expect(env.Client.Delete(ctx, nodeClass)).To(Succeed()) @@ -205,14 +205,14 @@ var _ = Describe("NodeClass Termination", func() { ExpectNotFound(ctx, env.Client, nodeClass) }) It("should not delete the EC2NodeClass until all associated NodeClaims are terminated", func() { - var nodeClaims []*corev1beta1.NodeClaim + var nodeClaims []*karpv1.NodeClaim for i := 0; i < 2; i++ { - nc := coretest.NodeClaim(corev1beta1.NodeClaim{ - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + nc := coretest.NodeClaim(karpv1.NodeClaim{ + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }) @@ -230,7 +230,7 @@ var _ = 
Describe("NodeClass Termination", func() { }, }, } - controllerutil.AddFinalizer(nodeClass, v1beta1.TerminationFinalizer) + controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(1)) @@ -270,7 +270,7 @@ var _ = Describe("NodeClass Termination", func() { } nodeClass.Spec.Role = "" nodeClass.Spec.InstanceProfile = lo.ToPtr("test-instance-profile") - controllerutil.AddFinalizer(nodeClass, v1beta1.TerminationFinalizer) + controllerutil.AddFinalizer(nodeClass, v1.TerminationFinalizer) ExpectApplied(ctx, env.Client, nodeClass) ExpectObjectReconciled(ctx, env.Client, terminationController, nodeClass) Expect(awsEnv.IAMAPI.InstanceProfiles).To(HaveLen(1)) diff --git a/pkg/controllers/providers/instancetype/suite_test.go b/pkg/controllers/providers/instancetype/suite_test.go index b0036518272a..7f209fc15203 100644 --- a/pkg/controllers/providers/instancetype/suite_test.go +++ b/pkg/controllers/providers/instancetype/suite_test.go @@ -20,8 +20,7 @@ import ( "sigs.k8s.io/karpenter/pkg/test/v1alpha1" - v1 "k8s.io/api/core/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + corev1 "k8s.io/api/core/v1" coreoptions "sigs.k8s.io/karpenter/pkg/operator/options" coretest "sigs.k8s.io/karpenter/pkg/test" @@ -29,7 +28,7 @@ import ( "github.com/samber/lo" "github.com/aws/karpenter-provider-aws/pkg/apis" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" controllersinstancetype "github.com/aws/karpenter-provider-aws/pkg/controllers/providers/instancetype" "github.com/aws/karpenter-provider-aws/pkg/fake" "github.com/aws/karpenter-provider-aws/pkg/operator/options" @@ -90,9 +89,9 @@ var _ = Describe("InstanceType", func() { }) ExpectSingletonReconciled(ctx, controller) - instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, &corev1beta1.KubeletConfiguration{}, &v1beta1.EC2NodeClass{ - Status: v1beta1.EC2NodeClassStatus{ - Subnets: []v1beta1.Subnet{ + instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, &v1.KubeletConfiguration{}, &v1.EC2NodeClass{ + Status: v1.EC2NodeClassStatus{ + Subnets: []v1.Subnet{ { ID: "subnet-test1", Zone: "test-zone-1a", @@ -124,9 +123,9 @@ var _ = Describe("InstanceType", func() { }) ExpectSingletonReconciled(ctx, controller) - instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, &corev1beta1.KubeletConfiguration{}, &v1beta1.EC2NodeClass{ - Status: v1beta1.EC2NodeClassStatus{ - Subnets: []v1beta1.Subnet{ + instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, &v1.KubeletConfiguration{}, &v1.EC2NodeClass{ + Status: v1.EC2NodeClassStatus{ + Subnets: []v1.Subnet{ { ID: "subnet-test1", Zone: "test-zone-1a", @@ -151,7 +150,7 @@ var _ = Describe("InstanceType", func() { }) Expect(found).To(BeTrue()) for y := range instanceTypes[x].Offerings { - Expect(instanceTypes[x].Offerings[y].Requirements.Get(v1.LabelTopologyZone).Any()).To(Equal(lo.FromPtr(offering.Location))) + Expect(instanceTypes[x].Offerings[y].Requirements.Get(corev1.LabelTopologyZone).Any()).To(Equal(lo.FromPtr(offering.Location))) } } }) @@ -159,14 +158,14 @@ var _ = Describe("InstanceType", func() { awsEnv.EC2API.DescribeInstanceTypesOutput.Set(&ec2.DescribeInstanceTypesOutput{}) awsEnv.EC2API.DescribeInstanceTypeOfferingsOutput.Set(&ec2.DescribeInstanceTypeOfferingsOutput{}) ExpectSingletonReconciled(ctx, controller) - _, err := 
awsEnv.InstanceTypesProvider.List(ctx, &corev1beta1.KubeletConfiguration{}, &v1beta1.EC2NodeClass{}) + _, err := awsEnv.InstanceTypesProvider.List(ctx, &v1.KubeletConfiguration{}, &v1.EC2NodeClass{}) Expect(err).ToNot(BeNil()) }) It("should not update instance type offering date with response from the DescribeInstanceTypesOfferings API", func() { awsEnv.EC2API.DescribeInstanceTypesOutput.Set(&ec2.DescribeInstanceTypesOutput{}) awsEnv.EC2API.DescribeInstanceTypeOfferingsOutput.Set(&ec2.DescribeInstanceTypeOfferingsOutput{}) ExpectSingletonReconciled(ctx, controller) - _, err := awsEnv.InstanceTypesProvider.List(ctx, &corev1beta1.KubeletConfiguration{}, &v1beta1.EC2NodeClass{}) + _, err := awsEnv.InstanceTypesProvider.List(ctx, &v1.KubeletConfiguration{}, &v1.EC2NodeClass{}) Expect(err).ToNot(BeNil()) }) }) diff --git a/pkg/fake/cloudprovider.go b/pkg/fake/cloudprovider.go index 04e430a3a66c..98b05ad876c3 100644 --- a/pkg/fake/cloudprovider.go +++ b/pkg/fake/cloudprovider.go @@ -21,11 +21,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" corecloudprovider "sigs.k8s.io/karpenter/pkg/cloudprovider" "sigs.k8s.io/karpenter/pkg/test" - providerv1beta1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" ) const ( @@ -40,19 +40,19 @@ type CloudProvider struct { ValidAMIs []string } -func (c *CloudProvider) Create(_ context.Context, _ *v1beta1.NodeClaim) (*v1beta1.NodeClaim, error) { +func (c *CloudProvider) Create(_ context.Context, _ *karpv1.NodeClaim) (*karpv1.NodeClaim, error) { name := test.RandomName() - return &v1beta1.NodeClaim{ + return &karpv1.NodeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, - Status: v1beta1.NodeClaimStatus{ + Status: karpv1.NodeClaimStatus{ ProviderID: RandomProviderID(), }, }, nil } -func (c *CloudProvider) GetInstanceTypes(_ context.Context, _ *v1beta1.NodePool) ([]*corecloudprovider.InstanceType, error) { +func (c *CloudProvider) GetInstanceTypes(_ context.Context, _ *karpv1.NodePool) ([]*corecloudprovider.InstanceType, error) { if c.InstanceTypes != nil { return c.InstanceTypes, nil } @@ -61,19 +61,19 @@ func (c *CloudProvider) GetInstanceTypes(_ context.Context, _ *v1beta1.NodePool) }, nil } -func (c *CloudProvider) IsDrifted(_ context.Context, nodeClaim *v1beta1.NodeClaim) (corecloudprovider.DriftReason, error) { +func (c *CloudProvider) IsDrifted(_ context.Context, nodeClaim *karpv1.NodeClaim) (corecloudprovider.DriftReason, error) { return "drifted", nil } -func (c *CloudProvider) Get(context.Context, string) (*v1beta1.NodeClaim, error) { +func (c *CloudProvider) Get(context.Context, string) (*karpv1.NodeClaim, error) { return nil, nil } -func (c *CloudProvider) List(context.Context) ([]*v1beta1.NodeClaim, error) { +func (c *CloudProvider) List(context.Context) ([]*karpv1.NodeClaim, error) { return nil, nil } -func (c *CloudProvider) Delete(context.Context, *v1beta1.NodeClaim) error { +func (c *CloudProvider) Delete(context.Context, *karpv1.NodeClaim) error { return nil } @@ -83,5 +83,5 @@ func (c *CloudProvider) Name() string { } func (c *CloudProvider) GetSupportedNodeClasses() []status.Object { - return []status.Object{&providerv1beta1.EC2NodeClass{}} + return []status.Object{&v1.EC2NodeClass{}} } diff --git a/pkg/fake/ec2api.go b/pkg/fake/ec2api.go index 7ba906379501..060e0fb67134 100644 --- a/pkg/fake/ec2api.go +++ b/pkg/fake/ec2api.go @@ -31,7 +31,7 @@ import ( 
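
By this point the import-aliasing convention has repeated across nearly every file, so it is worth stating once before the ec2api.go imports continue. Each touched file converges on the same three-way scheme; the paths and aliases below are taken from the hunks themselves, and only the grouping comments are ours:

```go
import (
	corev1 "k8s.io/api/core/v1"                            // Kubernetes core types, previously imported as v1
	karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"             // karpenter core API, previously corev1beta1 (apis/v1beta1)
	v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" // AWS provider API, previously v1beta1
)
```

Freeing the bare `v1` alias for the provider's GA API is what cascades into the `corev1` and `karpv1` renames everywhere else in the patch.
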
"github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/samber/lo" "k8s.io/apimachinery/pkg/util/sets" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/test" "sigs.k8s.io/karpenter/pkg/utils/atomic" @@ -118,7 +118,7 @@ func (e *EC2API) CreateFleetWithContext(_ context.Context, input *ec2.CreateFlee var skippedPools []CapacityPool var spotInstanceRequestID *string - if aws.StringValue(input.TargetCapacitySpecification.DefaultTargetCapacityType) == corev1beta1.CapacityTypeSpot { + if aws.StringValue(input.TargetCapacitySpecification.DefaultTargetCapacityType) == karpv1.CapacityTypeSpot { spotInstanceRequestID = aws.String(test.RandomName()) } diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index b6a4ddd45508..1158569fe44c 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -48,7 +48,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" + karpv1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/operator" awscache "github.com/aws/karpenter-provider-aws/pkg/cache" @@ -65,7 +66,8 @@ import ( ) func init() { - corev1beta1.NormalizedLabels = lo.Assign(corev1beta1.NormalizedLabels, map[string]string{"topology.ebs.csi.aws.com/zone": corev1.LabelTopologyZone}) + karpv1beta1.NormalizedLabels = lo.Assign(karpv1.NormalizedLabels, map[string]string{"topology.ebs.csi.aws.com/zone": corev1.LabelTopologyZone}) + karpv1.NormalizedLabels = lo.Assign(karpv1.NormalizedLabels, map[string]string{"topology.ebs.csi.aws.com/zone": corev1.LabelTopologyZone}) } // Operator is injected into the AWS CloudProvider's factories diff --git a/pkg/providers/amifamily/al2.go b/pkg/providers/amifamily/al2.go index ad71153d0a3c..078d8e0ddae9 100644 --- a/pkg/providers/amifamily/al2.go +++ b/pkg/providers/amifamily/al2.go @@ -18,12 +18,12 @@ import ( "fmt" "github.com/aws/aws-sdk-go/aws" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/scheduling" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/cloudprovider" @@ -41,31 +41,31 @@ func (a AL2) DefaultAMIs(version string) []DefaultAMIOutput { { Query: fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2/recommended/image_id", version), Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureAmd64), - scheduling.NewRequirement(v1beta1.LabelInstanceGPUCount, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceAcceleratorCount, v1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureAmd64), + scheduling.NewRequirement(v1.LabelInstanceGPUCount, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceAcceleratorCount, corev1.NodeSelectorOpDoesNotExist), ), }, { Query: fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2-gpu/recommended/image_id", version), Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureAmd64), - 
scheduling.NewRequirement(v1beta1.LabelInstanceGPUCount, v1.NodeSelectorOpExists), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureAmd64), + scheduling.NewRequirement(v1.LabelInstanceGPUCount, corev1.NodeSelectorOpExists), ), }, { Query: fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2-gpu/recommended/image_id", version), Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureAmd64), - scheduling.NewRequirement(v1beta1.LabelInstanceAcceleratorCount, v1.NodeSelectorOpExists), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureAmd64), + scheduling.NewRequirement(v1.LabelInstanceAcceleratorCount, corev1.NodeSelectorOpExists), ), }, { - Query: fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2-%s/recommended/image_id", version, corev1beta1.ArchitectureArm64), + Query: fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2-%s/recommended/image_id", version, karpv1.ArchitectureArm64), Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureArm64), - scheduling.NewRequirement(v1beta1.LabelInstanceGPUCount, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceAcceleratorCount, v1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureArm64), + scheduling.NewRequirement(v1.LabelInstanceGPUCount, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceAcceleratorCount, corev1.NodeSelectorOpDoesNotExist), ), }, } @@ -75,7 +75,7 @@ func (a AL2) DefaultAMIs(version string) []DefaultAMIOutput { // even if elements of those inputs are in differing orders, // guaranteeing it won't cause spurious hash differences. 
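// Editorial aside, not part of this diff: the AL2 DefaultAMIs hunks above build
// one SSM alias per architecture/accelerator combination. The sketch below is a
// hypothetical distillation of that alias scheme; al2Alias and its parameters
// are assumptions for illustration, not code from this repository.
package main

import "fmt"

// al2Alias mirrors the fmt.Sprintf patterns above: GPU variants take a "-gpu"
// suffix, and non-amd64 architectures are folded into the alias name.
func al2Alias(k8sVersion, arch string, gpu bool) string {
	base := "amazon-linux-2"
	switch {
	case gpu:
		base += "-gpu"
	case arch != "amd64":
		base += "-" + arch
	}
	return fmt.Sprintf("/aws/service/eks/optimized-ami/%s/%s/recommended/image_id", k8sVersion, base)
}

func main() {
	// Prints /aws/service/eks/optimized-ami/1.30/amazon-linux-2-arm64/recommended/image_id
	fmt.Println(al2Alias("1.30", "arm64", false))
}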
// AL2 userdata also works on Ubuntu -func (a AL2) UserData(kubeletConfig *corev1beta1.KubeletConfiguration, taints []v1.Taint, labels map[string]string, caBundle *string, _ []*cloudprovider.InstanceType, customUserData *string, instanceStorePolicy *v1beta1.InstanceStorePolicy) bootstrap.Bootstrapper { +func (a AL2) UserData(kubeletConfig *v1.KubeletConfiguration, taints []corev1.Taint, labels map[string]string, caBundle *string, _ []*cloudprovider.InstanceType, customUserData *string, instanceStorePolicy *v1.InstanceStorePolicy) bootstrap.Bootstrapper { return bootstrap.EKS{ Options: bootstrap.Options{ ClusterName: a.Options.ClusterName, @@ -91,8 +91,8 @@ func (a AL2) UserData(kubeletConfig *corev1beta1.KubeletConfiguration, taints [] } // DefaultBlockDeviceMappings returns the default block device mappings for the AMI Family -func (a AL2) DefaultBlockDeviceMappings() []*v1beta1.BlockDeviceMapping { - return []*v1beta1.BlockDeviceMapping{{ +func (a AL2) DefaultBlockDeviceMappings() []*v1.BlockDeviceMapping { + return []*v1.BlockDeviceMapping{{ DeviceName: a.EphemeralBlockDevice(), EBS: &DefaultEBS, }} diff --git a/pkg/providers/amifamily/al2023.go b/pkg/providers/amifamily/al2023.go index d586d66e35fd..94f95321e23b 100644 --- a/pkg/providers/amifamily/al2023.go +++ b/pkg/providers/amifamily/al2023.go @@ -18,12 +18,12 @@ import ( "fmt" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + corev1 "k8s.io/api/core/v1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/cloudprovider" "sigs.k8s.io/karpenter/pkg/scheduling" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily/bootstrap" ) @@ -37,19 +37,19 @@ func (a AL2023) DefaultAMIs(version string) []DefaultAMIOutput { { Query: fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2023/x86_64/standard/recommended/image_id", version), Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureAmd64), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureAmd64), ), }, { Query: fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2023/arm64/standard/recommended/image_id", version), Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureArm64), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureArm64), ), }, } } -func (a AL2023) UserData(kubeletConfig *corev1beta1.KubeletConfiguration, taints []v1.Taint, labels map[string]string, caBundle *string, _ []*cloudprovider.InstanceType, customUserData *string, instanceStorePolicy *v1beta1.InstanceStorePolicy) bootstrap.Bootstrapper { +func (a AL2023) UserData(kubeletConfig *v1.KubeletConfiguration, taints []corev1.Taint, labels map[string]string, caBundle *string, _ []*cloudprovider.InstanceType, customUserData *string, instanceStorePolicy *v1.InstanceStorePolicy) bootstrap.Bootstrapper { return bootstrap.Nodeadm{ Options: bootstrap.Options{ ClusterName: a.Options.ClusterName, @@ -67,8 +67,8 @@ func (a AL2023) UserData(kubeletConfig *corev1beta1.KubeletConfiguration, taints } // DefaultBlockDeviceMappings returns the default block device mappings for the AMI Family -func (a AL2023) DefaultBlockDeviceMappings() []*v1beta1.BlockDeviceMapping { - return 
[]*v1beta1.BlockDeviceMapping{{ +func (a AL2023) DefaultBlockDeviceMappings() []*v1.BlockDeviceMapping { + return []*v1.BlockDeviceMapping{{ DeviceName: a.EphemeralBlockDevice(), EBS: &DefaultEBS, }} diff --git a/pkg/providers/amifamily/ami.go b/pkg/providers/amifamily/ami.go index 5ed0f57d78d4..90ba3a30de8b 100644 --- a/pkg/providers/amifamily/ami.go +++ b/pkg/providers/amifamily/ami.go @@ -29,10 +29,10 @@ import ( "github.com/mitchellh/hashstructure/v2" "github.com/patrickmn/go-cache" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/log" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/version" "sigs.k8s.io/karpenter/pkg/cloudprovider" @@ -41,7 +41,7 @@ import ( ) type Provider interface { - List(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) (AMIs, error) + List(ctx context.Context, nodeClass *v1.EC2NodeClass) (AMIs, error) } type DefaultProvider struct { @@ -76,7 +76,7 @@ func (a AMIs) Sort() { } // MapToInstanceTypes returns a map of AMIIDs that are the most recent on creationDate to compatible instancetypes -func MapToInstanceTypes(instanceTypes []*cloudprovider.InstanceType, amis []v1beta1.AMI) map[string][]*cloudprovider.InstanceType { +func MapToInstanceTypes(instanceTypes []*cloudprovider.InstanceType, amis []v1.AMI) map[string][]*cloudprovider.InstanceType { amiIDs := map[string][]*cloudprovider.InstanceType{} for _, instanceType := range instanceTypes { for _, ami := range amis { @@ -100,7 +100,7 @@ func NewDefaultProvider(versionProvider version.Provider, ssm ssmiface.SSMAPI, e } // Get Returning a list of AMIs with its associated requirements -func (p *DefaultProvider) List(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) (AMIs, error) { +func (p *DefaultProvider) List(ctx context.Context, nodeClass *v1.EC2NodeClass) (AMIs, error) { p.Lock() defer p.Unlock() @@ -126,7 +126,7 @@ func (p *DefaultProvider) List(ctx context.Context, nodeClass *v1beta1.EC2NodeCl return amis, nil } -func (p *DefaultProvider) getDefaultAMIs(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) (res AMIs, err error) { +func (p *DefaultProvider) getDefaultAMIs(ctx context.Context, nodeClass *v1.EC2NodeClass) (res AMIs, err error) { if images, ok := p.cache.Get(lo.FromPtr(nodeClass.Spec.AMIFamily)); ok { // Ensure what's returned from this function is a deep-copy of AMIs so alterations // to the data don't affect the original @@ -175,7 +175,7 @@ func (p *DefaultProvider) resolveSSMParameter(ctx context.Context, ssmQuery stri return ami, nil } -func (p *DefaultProvider) getAMIs(ctx context.Context, terms []v1beta1.AMISelectorTerm) (AMIs, error) { +func (p *DefaultProvider) getAMIs(ctx context.Context, terms []v1.AMISelectorTerm) (AMIs, error) { filterAndOwnerSets := GetFilterAndOwnerSets(terms) hash, err := hashstructure.Hash(filterAndOwnerSets, hashstructure.FormatV2, &hashstructure.HashOptions{SlicesAsSets: true}) if err != nil { @@ -196,7 +196,7 @@ func (p *DefaultProvider) getAMIs(ctx context.Context, terms []v1beta1.AMISelect }, func(page *ec2.DescribeImagesOutput, _ bool) bool { for i := range page.Images { reqs := p.getRequirementsFromImage(page.Images[i]) - if !v1beta1.WellKnownArchitectures.Has(reqs.Get(v1.LabelArchStable).Any()) { + if !v1.WellKnownArchitectures.Has(reqs.Get(corev1.LabelArchStable).Any()) { continue } reqsHash := lo.Must(hashstructure.Hash(reqs.NodeSelectorRequirements(), 
hashstructure.FormatV2, &hashstructure.HashOptions{SlicesAsSets: true})) @@ -232,7 +232,7 @@ type FiltersAndOwners struct { Owners []string } -func GetFilterAndOwnerSets(terms []v1beta1.AMISelectorTerm) (res []FiltersAndOwners) { +func GetFilterAndOwnerSets(terms []v1.AMISelectorTerm) (res []FiltersAndOwners) { idFilter := &ec2.Filter{Name: aws.String("image-id")} for _, term := range terms { switch { @@ -280,9 +280,9 @@ func (p *DefaultProvider) getRequirementsFromImage(ec2Image *ec2.Image) scheduli requirements := scheduling.NewRequirements() // Always add the architecture of an image as a requirement, irrespective of what's specified in EC2 tags. architecture := *ec2Image.Architecture - if value, ok := v1beta1.AWSToKubeArchitectures[architecture]; ok { + if value, ok := v1.AWSToKubeArchitectures[architecture]; ok { architecture = value } - requirements.Add(scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, architecture)) + requirements.Add(scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, architecture)) return requirements } diff --git a/pkg/providers/amifamily/bootstrap/bootstrap.go b/pkg/providers/amifamily/bootstrap/bootstrap.go index 5387c4fa665f..052ac10a2510 100644 --- a/pkg/providers/amifamily/bootstrap/bootstrap.go +++ b/pkg/providers/amifamily/bootstrap/bootstrap.go @@ -20,12 +20,10 @@ import ( "strings" "github.com/samber/lo" - core "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" - - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" ) // Options is the node bootstrapping parameters passed from Karpenter to the provisioning node @@ -33,14 +31,14 @@ type Options struct { ClusterName string ClusterEndpoint string ClusterCIDR *string - KubeletConfig *corev1beta1.KubeletConfiguration - Taints []core.Taint `hash:"set"` + KubeletConfig *v1.KubeletConfiguration + Taints []corev1.Taint `hash:"set"` Labels map[string]string `hash:"set"` CABundle *string AWSENILimitedPodDensity bool ContainerRuntime *string CustomUserData *string - InstanceStorePolicy *v1beta1.InstanceStorePolicy + InstanceStorePolicy *v1.InstanceStorePolicy } func (o Options) kubeletExtraArgs() (args []string) { diff --git a/pkg/providers/amifamily/bootstrap/eksbootstrap.go b/pkg/providers/amifamily/bootstrap/eksbootstrap.go index baf874a1133d..9c0a53ccf2fe 100644 --- a/pkg/providers/amifamily/bootstrap/eksbootstrap.go +++ b/pkg/providers/amifamily/bootstrap/eksbootstrap.go @@ -29,7 +29,7 @@ import ( "github.com/samber/lo" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" ) type EKS struct { @@ -77,7 +77,7 @@ func (e EKS) eksBootstrapScript() string { if args := e.kubeletExtraArgs(); len(args) > 0 { userData.WriteString(fmt.Sprintf(" \\\n--kubelet-extra-args '%s'", strings.Join(args, " "))) } - if lo.FromPtr(e.InstanceStorePolicy) == v1beta1.InstanceStorePolicyRAID0 { + if lo.FromPtr(e.InstanceStorePolicy) == v1.InstanceStorePolicyRAID0 { userData.WriteString(" \\\n--local-disks raid0") } return userData.String() diff --git a/pkg/providers/amifamily/bootstrap/nodeadm.go b/pkg/providers/amifamily/bootstrap/nodeadm.go index dfd0b5bd4566..c5d0ba847a63 100644 --- a/pkg/providers/amifamily/bootstrap/nodeadm.go +++ b/pkg/providers/amifamily/bootstrap/nodeadm.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/karpenter/pkg/cloudprovider" "sigs.k8s.io/yaml" - 
"github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily/bootstrap/mime" ) @@ -83,7 +83,7 @@ func (n Nodeadm) getNodeConfigYAML() (string, error) { } else { return "", cloudprovider.NewNodeClassNotReadyError(fmt.Errorf("resolving cluster CIDR")) } - if lo.FromPtr(n.InstanceStorePolicy) == v1beta1.InstanceStorePolicyRAID0 { + if lo.FromPtr(n.InstanceStorePolicy) == v1.InstanceStorePolicyRAID0 { config.Spec.Instance.LocalStorage.Strategy = admv1alpha1.LocalStorageRAID0 } inlineConfig, err := n.generateInlineKubeletConfiguration() diff --git a/pkg/providers/amifamily/bottlerocket.go b/pkg/providers/amifamily/bottlerocket.go index 5daef92e6a10..084004988697 100644 --- a/pkg/providers/amifamily/bottlerocket.go +++ b/pkg/providers/amifamily/bottlerocket.go @@ -19,16 +19,16 @@ import ( "github.com/samber/lo" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily/bootstrap" "sigs.k8s.io/karpenter/pkg/cloudprovider" "sigs.k8s.io/karpenter/pkg/scheduling" "github.com/aws/aws-sdk-go/aws" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" ) @@ -43,52 +43,52 @@ func (b Bottlerocket) DefaultAMIs(version string) []DefaultAMIOutput { { Query: fmt.Sprintf("/aws/service/bottlerocket/aws-k8s-%s/x86_64/latest/image_id", version), Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureAmd64), - scheduling.NewRequirement(v1beta1.LabelInstanceGPUCount, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceAcceleratorCount, v1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureAmd64), + scheduling.NewRequirement(v1.LabelInstanceGPUCount, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceAcceleratorCount, corev1.NodeSelectorOpDoesNotExist), ), }, { Query: fmt.Sprintf("/aws/service/bottlerocket/aws-k8s-%s-nvidia/x86_64/latest/image_id", version), Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureAmd64), - scheduling.NewRequirement(v1beta1.LabelInstanceGPUCount, v1.NodeSelectorOpExists), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureAmd64), + scheduling.NewRequirement(v1.LabelInstanceGPUCount, corev1.NodeSelectorOpExists), ), }, { Query: fmt.Sprintf("/aws/service/bottlerocket/aws-k8s-%s-nvidia/x86_64/latest/image_id", version), Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureAmd64), - scheduling.NewRequirement(v1beta1.LabelInstanceAcceleratorCount, v1.NodeSelectorOpExists), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureAmd64), + scheduling.NewRequirement(v1.LabelInstanceAcceleratorCount, corev1.NodeSelectorOpExists), ), }, { - Query: fmt.Sprintf("/aws/service/bottlerocket/aws-k8s-%s/%s/latest/image_id", version, corev1beta1.ArchitectureArm64), + Query: fmt.Sprintf("/aws/service/bottlerocket/aws-k8s-%s/%s/latest/image_id", version, karpv1.ArchitectureArm64), 
Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureArm64), - scheduling.NewRequirement(v1beta1.LabelInstanceGPUCount, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceAcceleratorCount, v1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureArm64), + scheduling.NewRequirement(v1.LabelInstanceGPUCount, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceAcceleratorCount, corev1.NodeSelectorOpDoesNotExist), ), }, { - Query: fmt.Sprintf("/aws/service/bottlerocket/aws-k8s-%s-nvidia/%s/latest/image_id", version, corev1beta1.ArchitectureArm64), + Query: fmt.Sprintf("/aws/service/bottlerocket/aws-k8s-%s-nvidia/%s/latest/image_id", version, karpv1.ArchitectureArm64), Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureArm64), - scheduling.NewRequirement(v1beta1.LabelInstanceGPUCount, v1.NodeSelectorOpExists), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureArm64), + scheduling.NewRequirement(v1.LabelInstanceGPUCount, corev1.NodeSelectorOpExists), ), }, { - Query: fmt.Sprintf("/aws/service/bottlerocket/aws-k8s-%s-nvidia/%s/latest/image_id", version, corev1beta1.ArchitectureArm64), + Query: fmt.Sprintf("/aws/service/bottlerocket/aws-k8s-%s-nvidia/%s/latest/image_id", version, karpv1.ArchitectureArm64), Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureArm64), - scheduling.NewRequirement(v1beta1.LabelInstanceAcceleratorCount, v1.NodeSelectorOpExists), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureArm64), + scheduling.NewRequirement(v1.LabelInstanceAcceleratorCount, corev1.NodeSelectorOpExists), ), }, } } // UserData returns the default userdata script for the AMI Family -func (b Bottlerocket) UserData(kubeletConfig *corev1beta1.KubeletConfiguration, taints []v1.Taint, labels map[string]string, caBundle *string, _ []*cloudprovider.InstanceType, customUserData *string, _ *v1beta1.InstanceStorePolicy) bootstrap.Bootstrapper { +func (b Bottlerocket) UserData(kubeletConfig *v1.KubeletConfiguration, taints []corev1.Taint, labels map[string]string, caBundle *string, _ []*cloudprovider.InstanceType, customUserData *string, _ *v1.InstanceStorePolicy) bootstrap.Bootstrapper { return bootstrap.Bottlerocket{ Options: bootstrap.Options{ ClusterName: b.Options.ClusterName, @@ -103,10 +103,10 @@ func (b Bottlerocket) UserData(kubeletConfig *corev1beta1.KubeletConfiguration, } // DefaultBlockDeviceMappings returns the default block device mappings for the AMI Family -func (b Bottlerocket) DefaultBlockDeviceMappings() []*v1beta1.BlockDeviceMapping { +func (b Bottlerocket) DefaultBlockDeviceMappings() []*v1.BlockDeviceMapping { xvdaEBS := DefaultEBS xvdaEBS.VolumeSize = lo.ToPtr(resource.MustParse("4Gi")) - return []*v1beta1.BlockDeviceMapping{ + return []*v1.BlockDeviceMapping{ { DeviceName: aws.String("/dev/xvda"), EBS: &xvdaEBS, diff --git a/pkg/providers/amifamily/custom.go b/pkg/providers/amifamily/custom.go index f76f71c82a32..3b15d4060bca 100644 --- a/pkg/providers/amifamily/custom.go +++ b/pkg/providers/amifamily/custom.go @@ -15,12 +15,11 @@ limitations under the License. 
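// Editorial aside, not part of this diff: the Bottlerocket DefaultAMIs hunks
// above follow a parallel alias scheme, with the Kubernetes version embedded in
// the variant name, an "-nvidia" suffix for GPU images, and the architecture as
// a path segment. bottlerocketAlias below is a hypothetical illustration only.
package main

import "fmt"

func bottlerocketAlias(k8sVersion, arch string, nvidia bool) string {
	variant := "aws-k8s-" + k8sVersion
	if nvidia {
		variant += "-nvidia"
	}
	return fmt.Sprintf("/aws/service/bottlerocket/%s/%s/latest/image_id", variant, arch)
}

func main() {
	// Prints /aws/service/bottlerocket/aws-k8s-1.30-nvidia/arm64/latest/image_id
	fmt.Println(bottlerocketAlias("1.30", "arm64", true))
}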
package amifamily import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/cloudprovider" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily/bootstrap" ) @@ -31,7 +30,7 @@ type Custom struct { } // UserData returns the default userdata script for the AMI Family -func (c Custom) UserData(_ *corev1beta1.KubeletConfiguration, _ []v1.Taint, _ map[string]string, _ *string, _ []*cloudprovider.InstanceType, customUserData *string, _ *v1beta1.InstanceStorePolicy) bootstrap.Bootstrapper { +func (c Custom) UserData(_ *v1.KubeletConfiguration, _ []corev1.Taint, _ map[string]string, _ *string, _ []*cloudprovider.InstanceType, customUserData *string, _ *v1.InstanceStorePolicy) bootstrap.Bootstrapper { return bootstrap.Custom{ Options: bootstrap.Options{ CustomUserData: customUserData, @@ -43,7 +42,7 @@ func (c Custom) DefaultAMIs(_ string) []DefaultAMIOutput { return nil } -func (c Custom) DefaultBlockDeviceMappings() []*v1beta1.BlockDeviceMapping { +func (c Custom) DefaultBlockDeviceMappings() []*v1.BlockDeviceMapping { // By returning nil, we ensure that EC2 will automatically choose the volumes defined by the AMI // and we don't need to describe the AMI ourselves. return nil diff --git a/pkg/providers/amifamily/resolver.go b/pkg/providers/amifamily/resolver.go index 836f469b69ef..0c5838af5231 100644 --- a/pkg/providers/amifamily/resolver.go +++ b/pkg/providers/amifamily/resolver.go @@ -20,21 +20,21 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/imdario/mergo" "github.com/samber/lo" - core "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily/bootstrap" + "github.com/aws/karpenter-provider-aws/pkg/utils" "sigs.k8s.io/karpenter/pkg/cloudprovider" "sigs.k8s.io/karpenter/pkg/scheduling" ) -var DefaultEBS = v1beta1.BlockDevice{ +var DefaultEBS = v1.BlockDevice{ Encrypted: aws.Bool(true), VolumeType: aws.String(ec2.VolumeTypeGp3), VolumeSize: lo.ToPtr(resource.MustParse("20Gi")), @@ -52,9 +52,9 @@ type Options struct { ClusterCIDR *string InstanceProfile string CABundle *string `hash:"ignore"` - InstanceStorePolicy *v1beta1.InstanceStorePolicy + InstanceStorePolicy *v1.InstanceStorePolicy // Level-triggered fields that may change out of sync. 
- SecurityGroups []v1beta1.SecurityGroup + SecurityGroups []v1.SecurityGroup Tags map[string]string Labels map[string]string `hash:"ignore"` KubeDNSIP net.IP @@ -66,8 +66,8 @@ type Options struct { type LaunchTemplate struct { *Options UserData bootstrap.Bootstrapper - BlockDeviceMappings []*v1beta1.BlockDeviceMapping - MetadataOptions *v1beta1.MetadataOptions + BlockDeviceMappings []*v1.BlockDeviceMapping + MetadataOptions *v1.MetadataOptions AMIID string InstanceTypes []*cloudprovider.InstanceType `hash:"ignore"` DetailedMonitoring bool @@ -78,9 +78,9 @@ type LaunchTemplate struct { // AMIFamily can be implemented to override the default logic for generating dynamic launch template parameters type AMIFamily interface { DefaultAMIs(version string) []DefaultAMIOutput - UserData(kubeletConfig *corev1beta1.KubeletConfiguration, taints []core.Taint, labels map[string]string, caBundle *string, instanceTypes []*cloudprovider.InstanceType, customUserData *string, instanceStorePolicy *v1beta1.InstanceStorePolicy) bootstrap.Bootstrapper - DefaultBlockDeviceMappings() []*v1beta1.BlockDeviceMapping - DefaultMetadataOptions() *v1beta1.MetadataOptions + UserData(kubeletConfig *v1.KubeletConfiguration, taints []corev1.Taint, labels map[string]string, caBundle *string, instanceTypes []*cloudprovider.InstanceType, customUserData *string, instanceStorePolicy *v1.InstanceStorePolicy) bootstrap.Bootstrapper + DefaultBlockDeviceMappings() []*v1.BlockDeviceMapping + DefaultMetadataOptions() *v1.MetadataOptions EphemeralBlockDevice() *string FeatureFlags() FeatureFlags } @@ -119,14 +119,14 @@ func NewResolver(amiProvider Provider) *Resolver { // Resolve generates launch templates using the static options and dynamically generates launch template parameters. // Multiple ResolvedTemplates are returned based on the instanceTypes passed in to support special AMIs for certain instance types like GPUs. 
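// Editorial aside, not part of this diff: per the Resolve comment above, launch
// templates are generated per AMI, and MapToInstanceTypes (earlier in this
// change) assigns each instance type to the newest AMI whose requirements it
// satisfies. The sketch below restates that grouping with stand-in types
// (instanceType, ami) in place of cloudprovider.InstanceType and v1.AMI.
package main

import "fmt"

type instanceType struct{ name, arch string }

// ami stands in for v1.AMI; a single arch field replaces the real
// scheduling.Requirements set.
type ami struct{ id, arch string }

// mapToInstanceTypes assumes amis is sorted newest-first, so the first
// compatible AMI claims the instance type and the inner loop breaks.
func mapToInstanceTypes(its []instanceType, amis []ami) map[string][]instanceType {
	out := map[string][]instanceType{}
	for _, it := range its {
		for _, a := range amis {
			if it.arch == a.arch {
				out[a.id] = append(out[a.id], it)
				break
			}
		}
	}
	return out
}

func main() {
	fmt.Println(mapToInstanceTypes(
		[]instanceType{{"m5.large", "amd64"}, {"c6g.large", "arm64"}},
		[]ami{{"ami-amd64", "amd64"}, {"ami-arm64", "arm64"}},
	))
}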
-func (r Resolver) Resolve(nodeClass *v1beta1.EC2NodeClass, nodeClaim *corev1beta1.NodeClaim, instanceTypes []*cloudprovider.InstanceType, capacityType string, options *Options) ([]*LaunchTemplate, error) { +func (r Resolver) Resolve(nodeClass *v1.EC2NodeClass, nodeClaim *karpv1.NodeClaim, instanceTypes []*cloudprovider.InstanceType, capacityType string, options *Options) ([]*LaunchTemplate, error) { amiFamily := GetAMIFamily(nodeClass.Spec.AMIFamily, options) if len(nodeClass.Status.AMIs) == 0 { return nil, fmt.Errorf("no amis exist given constraints") } mappedAMIs := MapToInstanceTypes(instanceTypes, nodeClass.Status.AMIs) if len(mappedAMIs) == 0 { - return nil, fmt.Errorf("no instance types satisfy requirements of amis %v", lo.Uniq(lo.Map(nodeClass.Status.AMIs, func(a v1beta1.AMI, _ int) string { return a.ID }))) + return nil, fmt.Errorf("no instance types satisfy requirements of amis %v", lo.Uniq(lo.Map(nodeClass.Status.AMIs, func(a v1.AMI, _ int) string { return a.ID }))) } var resolvedTemplates []*LaunchTemplate for amiID, instanceTypes := range mappedAMIs { @@ -142,8 +142,8 @@ func (r Resolver) Resolve(nodeClass *v1beta1.EC2NodeClass, nodeClaim *corev1beta paramsToInstanceTypes := lo.GroupBy(instanceTypes, func(instanceType *cloudprovider.InstanceType) launchTemplateParams { return launchTemplateParams{ efaCount: lo.Ternary( - lo.Contains(lo.Keys(nodeClaim.Spec.Resources.Requests), v1beta1.ResourceEFA), - int(lo.ToPtr(instanceType.Capacity[v1beta1.ResourceEFA]).Value()), + lo.Contains(lo.Keys(nodeClaim.Spec.Resources.Requests), v1.ResourceEFA), + int(lo.ToPtr(instanceType.Capacity[v1.ResourceEFA]).Value()), 0, ), maxPods: int(instanceType.Capacity.Pods().Value()), @@ -162,25 +162,25 @@ func (r Resolver) Resolve(nodeClass *v1beta1.EC2NodeClass, nodeClaim *corev1beta func GetAMIFamily(amiFamily *string, options *Options) AMIFamily { switch aws.StringValue(amiFamily) { - case v1beta1.AMIFamilyBottlerocket: + case v1.AMIFamilyBottlerocket: return &Bottlerocket{Options: options} - case v1beta1.AMIFamilyUbuntu: + case v1.AMIFamilyUbuntu: return &Ubuntu{Options: options} - case v1beta1.AMIFamilyWindows2019: - return &Windows{Options: options, Version: v1beta1.Windows2019, Build: v1beta1.Windows2019Build} - case v1beta1.AMIFamilyWindows2022: - return &Windows{Options: options, Version: v1beta1.Windows2022, Build: v1beta1.Windows2022Build} - case v1beta1.AMIFamilyCustom: + case v1.AMIFamilyWindows2019: + return &Windows{Options: options, Version: v1.Windows2019, Build: v1.Windows2019Build} + case v1.AMIFamilyWindows2022: + return &Windows{Options: options, Version: v1.Windows2022, Build: v1.Windows2022Build} + case v1.AMIFamilyCustom: return &Custom{Options: options} - case v1beta1.AMIFamilyAL2023: + case v1.AMIFamilyAL2023: return &AL2023{Options: options} default: return &AL2{Options: options} } } -func (o Options) DefaultMetadataOptions() *v1beta1.MetadataOptions { - return &v1beta1.MetadataOptions{ +func (o Options) DefaultMetadataOptions() *v1.MetadataOptions { + return &v1.MetadataOptions{ HTTPEndpoint: aws.String(ec2.LaunchTemplateInstanceMetadataEndpointStateEnabled), HTTPProtocolIPv6: aws.String(lo.Ternary(o.KubeDNSIP == nil || o.KubeDNSIP.To4() != nil, ec2.LaunchTemplateInstanceMetadataProtocolIpv6Disabled, ec2.LaunchTemplateInstanceMetadataProtocolIpv6Enabled)), HTTPPutResponseHopLimit: aws.Int64(2), @@ -188,7 +188,7 @@ func (o Options) DefaultMetadataOptions() *v1beta1.MetadataOptions { } } -func (r Resolver) defaultClusterDNS(opts *Options, kubeletConfig 
*corev1beta1.KubeletConfiguration) *corev1beta1.KubeletConfiguration { +func (r Resolver) defaultClusterDNS(opts *Options, kubeletConfig *v1.KubeletConfiguration) *v1.KubeletConfiguration { if opts.KubeDNSIP == nil { return kubeletConfig } @@ -196,7 +196,7 @@ func (r Resolver) defaultClusterDNS(opts *Options, kubeletConfig *corev1beta1.Ku return kubeletConfig } if kubeletConfig == nil { - return &corev1beta1.KubeletConfiguration{ + return &v1.KubeletConfiguration{ ClusterDNS: []string{opts.KubeDNSIP.String()}, } } @@ -205,25 +205,26 @@ func (r Resolver) defaultClusterDNS(opts *Options, kubeletConfig *corev1beta1.Ku return newKubeletConfig } -func (r Resolver) resolveLaunchTemplate(nodeClass *v1beta1.EC2NodeClass, nodeClaim *corev1beta1.NodeClaim, instanceTypes []*cloudprovider.InstanceType, capacityType string, +func (r Resolver) resolveLaunchTemplate(nodeClass *v1.EC2NodeClass, nodeClaim *karpv1.NodeClaim, instanceTypes []*cloudprovider.InstanceType, capacityType string, amiFamily AMIFamily, amiID string, maxPods int, efaCount int, options *Options) (*LaunchTemplate, error) { - kubeletConfig := &corev1beta1.KubeletConfiguration{} - if nodeClaim.Spec.Kubelet != nil { - if err := mergo.Merge(kubeletConfig, nodeClaim.Spec.Kubelet); err != nil { - return nil, err - } + kubeletConfig, err := utils.GetKubeletConfigurationWithNodeClaim(nodeClaim, nodeClass) + if err != nil { + return nil, fmt.Errorf("resolving kubelet configuration, %w", err) + } + if kubeletConfig == nil { + kubeletConfig = &v1.KubeletConfiguration{} } if kubeletConfig.MaxPods == nil { kubeletConfig.MaxPods = lo.ToPtr(int32(maxPods)) } - taints := lo.Flatten([][]core.Taint{ + taints := lo.Flatten([][]corev1.Taint{ nodeClaim.Spec.Taints, nodeClaim.Spec.StartupTaints, }) - if _, found := lo.Find(taints, func(t core.Taint) bool { - return t.MatchTaint(&core.Taint{Key: "karpenter.sh/unregistered", Effect: core.TaintEffectNoExecute}) + if _, found := lo.Find(taints, func(t corev1.Taint) bool { + return t.MatchTaint(&corev1.Taint{Key: "karpenter.sh/unregistered", Effect: corev1.TaintEffectNoExecute}) }); !found { - taints = append(taints, core.Taint{Key: "karpenter.sh/unregistered", Effect: core.TaintEffectNoExecute}) + taints = append(taints, corev1.Taint{Key: "karpenter.sh/unregistered", Effect: corev1.TaintEffectNoExecute}) } resolved := &LaunchTemplate{ diff --git a/pkg/providers/amifamily/suite_test.go b/pkg/providers/amifamily/suite_test.go index 13bb2506bfad..57e1f2f0c8dd 100644 --- a/pkg/providers/amifamily/suite_test.go +++ b/pkg/providers/amifamily/suite_test.go @@ -32,15 +32,15 @@ import ( . 
"sigs.k8s.io/karpenter/pkg/utils/testing" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" coreoptions "sigs.k8s.io/karpenter/pkg/operator/options" "sigs.k8s.io/karpenter/pkg/scheduling" coretest "sigs.k8s.io/karpenter/pkg/test" "github.com/aws/karpenter-provider-aws/pkg/apis" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/operator/options" "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily" "github.com/aws/karpenter-provider-aws/pkg/test" @@ -49,7 +49,7 @@ import ( var ctx context.Context var env *coretest.Environment var awsEnv *test.Environment -var nodeClass *v1beta1.EC2NodeClass +var nodeClass *v1.EC2NodeClass func TestAWS(t *testing.T) { ctx = TestContextWithLogger(t) @@ -134,7 +134,7 @@ var _ = Describe("AMIProvider", func() { nodeClass = test.EC2NodeClass() }) It("should succeed to resolve AMIs (AL2)", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2 awsEnv.SSMAPI.Parameters = map[string]string{ fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2/recommended/image_id", version): amd64AMI, fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2-gpu/recommended/image_id", version): amd64NvidiaAMI, @@ -145,7 +145,7 @@ var _ = Describe("AMIProvider", func() { Expect(amis).To(HaveLen(4)) }) It("should succeed to resolve AMIs (AL2023)", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2023 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2023 awsEnv.SSMAPI.Parameters = map[string]string{ fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2023/x86_64/standard/recommended/image_id", version): amd64AMI, fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2023/arm64/standard/recommended/image_id", version): arm64AMI, @@ -155,7 +155,7 @@ var _ = Describe("AMIProvider", func() { Expect(amis).To(HaveLen(2)) }) It("should succeed to resolve AMIs (Bottlerocket)", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket + nodeClass.Spec.AMIFamily = &v1.AMIFamilyBottlerocket awsEnv.SSMAPI.Parameters = map[string]string{ fmt.Sprintf("/aws/service/bottlerocket/aws-k8s-%s/x86_64/latest/image_id", version): amd64AMI, fmt.Sprintf("/aws/service/bottlerocket/aws-k8s-%s-nvidia/x86_64/latest/image_id", version): amd64NvidiaAMI, @@ -167,7 +167,7 @@ var _ = Describe("AMIProvider", func() { Expect(amis).To(HaveLen(6)) }) It("should succeed to resolve AMIs (Ubuntu)", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyUbuntu + nodeClass.Spec.AMIFamily = &v1.AMIFamilyUbuntu awsEnv.SSMAPI.Parameters = map[string]string{ fmt.Sprintf("/aws/service/canonical/ubuntu/eks/20.04/%s/stable/current/amd64/hvm/ebs-gp2/ami-id", version): amd64AMI, fmt.Sprintf("/aws/service/canonical/ubuntu/eks/20.04/%s/stable/current/arm64/hvm/ebs-gp2/ami-id", version): arm64AMI, @@ -177,7 +177,7 @@ var _ = Describe("AMIProvider", func() { Expect(amis).To(HaveLen(2)) }) It("should succeed to resolve AMIs (Windows2019)", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyWindows2019 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyWindows2019 awsEnv.SSMAPI.Parameters = map[string]string{ fmt.Sprintf("/aws/service/ami-windows-latest/Windows_Server-2019-English-Core-EKS_Optimized-%s/image_id", version): amd64AMI, } @@ -186,7 +186,7 @@ var _ = Describe("AMIProvider", func() { 
Expect(amis).To(HaveLen(1)) }) It("should succeed to resolve AMIs (Windows2022)", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyWindows2022 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyWindows2022 awsEnv.SSMAPI.Parameters = map[string]string{ fmt.Sprintf("/aws/service/ami-windows-latest/Windows_Server-2022-English-Core-EKS_Optimized-%s/image_id", version): amd64AMI, } @@ -195,13 +195,13 @@ var _ = Describe("AMIProvider", func() { Expect(amis).To(HaveLen(1)) }) It("should succeed to resolve AMIs (Custom)", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyCustom + nodeClass.Spec.AMIFamily = &v1.AMIFamilyCustom amis, err := awsEnv.AMIProvider.List(ctx, nodeClass) Expect(err).ToNot(HaveOccurred()) Expect(amis).To(HaveLen(0)) }) It("should not cause data races when calling Get() simultaneously", func() { - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { ID: "amd64-ami-id", }, @@ -227,7 +227,7 @@ var _ = Describe("AMIProvider", func() { AmiID: "arm64-ami-id", CreationDate: time.Time{}.Add(time.Minute).Format(time.RFC3339), Requirements: scheduling.NewLabelRequirements(map[string]string{ - v1.LabelArchStable: corev1beta1.ArchitectureArm64, + corev1.LabelArchStable: karpv1.ArchitectureArm64, }), }, { @@ -235,7 +235,7 @@ var _ = Describe("AMIProvider", func() { AmiID: "amd64-ami-id", CreationDate: time.Time{}.Format(time.RFC3339), Requirements: scheduling.NewLabelRequirements(map[string]string{ - v1.LabelArchStable: corev1beta1.ArchitectureAmd64, + corev1.LabelArchStable: karpv1.ArchitectureAmd64, }), }, })) @@ -245,7 +245,7 @@ var _ = Describe("AMIProvider", func() { }) Context("SSM Alias Missing", func() { It("should succeed to partially resolve AMIs if all SSM aliases don't exist (Al2)", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2 // No GPU AMI exists here awsEnv.SSMAPI.Parameters = map[string]string{ fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2/recommended/image_id", version): amd64AMI, @@ -257,7 +257,7 @@ var _ = Describe("AMIProvider", func() { Expect(amis).To(HaveLen(2)) }) It("should succeed to partially resolve AMIs if all SSM aliases don't exist (AL2023)", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2023 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2023 awsEnv.SSMAPI.Parameters = map[string]string{ fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2023/x86_64/standard/recommended/image_id", version): amd64AMI, } @@ -266,7 +266,7 @@ var _ = Describe("AMIProvider", func() { Expect(amis).To(HaveLen(1)) }) It("should succeed to partially resolve AMIs if all SSM aliases don't exist (Bottlerocket)", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket + nodeClass.Spec.AMIFamily = &v1.AMIFamilyBottlerocket // No GPU AMI exists for AM64 here awsEnv.SSMAPI.Parameters = map[string]string{ fmt.Sprintf("/aws/service/bottlerocket/aws-k8s-%s/x86_64/latest/image_id", version): amd64AMI, @@ -279,7 +279,7 @@ var _ = Describe("AMIProvider", func() { Expect(amis).To(HaveLen(4)) }) It("should succeed to partially resolve AMIs if all SSM aliases don't exist (Ubuntu)", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyUbuntu + nodeClass.Spec.AMIFamily = &v1.AMIFamilyUbuntu // No AMD64 AMI exists here awsEnv.SSMAPI.Parameters = map[string]string{ fmt.Sprintf("/aws/service/canonical/ubuntu/eks/20.04/%s/stable/current/arm64/hvm/ebs-gp2/ami-id", version): arm64AMI, @@ -301,8 +301,8 @@ var _ = 
Describe("AMIProvider", func() { Tags: []*ec2.Tag{ {Key: aws.String("Name"), Value: aws.String(amd64AMI)}, {Key: aws.String("foo"), Value: aws.String("bar")}, - {Key: aws.String(v1.LabelInstanceTypeStable), Value: aws.String("m5.large")}, - {Key: aws.String(v1.LabelTopologyZone), Value: aws.String("test-zone-1a")}, + {Key: aws.String(corev1.LabelInstanceTypeStable), Value: aws.String("m5.large")}, + {Key: aws.String(corev1.LabelTopologyZone), Value: aws.String("test-zone-1a")}, }, } awsEnv.EC2API.DescribeImagesOutput.Set(&ec2.DescribeImagesOutput{ @@ -312,7 +312,7 @@ var _ = Describe("AMIProvider", func() { }) }) It("should succeed to not resolve tags as requirements for NodeClasses", func() { - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { Tags: map[string]string{"*": "*"}, }, @@ -325,7 +325,7 @@ var _ = Describe("AMIProvider", func() { AmiID: aws.StringValue(img.ImageId), CreationDate: aws.StringValue(img.CreationDate), Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureAmd64), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureAmd64), ), })) }) @@ -334,7 +334,7 @@ var _ = Describe("AMIProvider", func() { // When you tag public or shared resources, the tags you assign are available only to your AWS account; no other AWS account will have access to those tags // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions It("should have empty owners and use tags when prefixes aren't set", func() { - amiSelectorTerms := []v1beta1.AMISelectorTerm{ + amiSelectorTerms := []v1.AMISelectorTerm{ { Tags: map[string]string{ "Name": "my-ami", @@ -355,7 +355,7 @@ var _ = Describe("AMIProvider", func() { }, filterAndOwnersSets) }) It("should have default owners and use name when prefixed", func() { - amiSelectorTerms := []v1beta1.AMISelectorTerm{ + amiSelectorTerms := []v1.AMISelectorTerm{ { Name: "my-ami", }, @@ -377,7 +377,7 @@ var _ = Describe("AMIProvider", func() { }, filterAndOwnersSets) }) It("should not set owners when legacy ids are passed", func() { - amiSelectorTerms := []v1beta1.AMISelectorTerm{ + amiSelectorTerms := []v1.AMISelectorTerm{ { ID: "ami-abcd1234", }, @@ -398,7 +398,7 @@ var _ = Describe("AMIProvider", func() { }, filterAndOwnersSets) }) It("should allow only specifying owners", func() { - amiSelectorTerms := []v1beta1.AMISelectorTerm{ + amiSelectorTerms := []v1.AMISelectorTerm{ { Owner: "abcdef", }, @@ -417,7 +417,7 @@ var _ = Describe("AMIProvider", func() { }, filterAndOwnersSets) }) It("should allow prefixed name and prefixed owners", func() { - amiSelectorTerms := []v1beta1.AMISelectorTerm{ + amiSelectorTerms := []v1.AMISelectorTerm{ { Name: "my-name", Owner: "0123456789", diff --git a/pkg/providers/amifamily/ubuntu.go b/pkg/providers/amifamily/ubuntu.go index fc90203b3ff7..5731bf889f54 100644 --- a/pkg/providers/amifamily/ubuntu.go +++ b/pkg/providers/amifamily/ubuntu.go @@ -18,11 +18,11 @@ import ( "fmt" "github.com/aws/aws-sdk-go/aws" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily/bootstrap" "sigs.k8s.io/karpenter/pkg/cloudprovider" @@ -38,22 +38,22 @@ type 
Ubuntu struct { func (u Ubuntu) DefaultAMIs(version string) []DefaultAMIOutput { return []DefaultAMIOutput{ { - Query: fmt.Sprintf("/aws/service/canonical/ubuntu/eks/20.04/%s/stable/current/%s/hvm/ebs-gp2/ami-id", version, corev1beta1.ArchitectureAmd64), + Query: fmt.Sprintf("/aws/service/canonical/ubuntu/eks/20.04/%s/stable/current/%s/hvm/ebs-gp2/ami-id", version, karpv1.ArchitectureAmd64), Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureAmd64), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureAmd64), ), }, { - Query: fmt.Sprintf("/aws/service/canonical/ubuntu/eks/20.04/%s/stable/current/%s/hvm/ebs-gp2/ami-id", version, corev1beta1.ArchitectureArm64), + Query: fmt.Sprintf("/aws/service/canonical/ubuntu/eks/20.04/%s/stable/current/%s/hvm/ebs-gp2/ami-id", version, karpv1.ArchitectureArm64), Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureArm64), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureArm64), ), }, } } // UserData returns the default userdata script for the AMI Family -func (u Ubuntu) UserData(kubeletConfig *corev1beta1.KubeletConfiguration, taints []v1.Taint, labels map[string]string, caBundle *string, _ []*cloudprovider.InstanceType, customUserData *string, _ *v1beta1.InstanceStorePolicy) bootstrap.Bootstrapper { +func (u Ubuntu) UserData(kubeletConfig *v1.KubeletConfiguration, taints []corev1.Taint, labels map[string]string, caBundle *string, _ []*cloudprovider.InstanceType, customUserData *string, _ *v1.InstanceStorePolicy) bootstrap.Bootstrapper { return bootstrap.EKS{ Options: bootstrap.Options{ ClusterName: u.Options.ClusterName, @@ -68,8 +68,8 @@ func (u Ubuntu) UserData(kubeletConfig *corev1beta1.KubeletConfiguration, taints } // DefaultBlockDeviceMappings returns the default block device mappings for the AMI Family -func (u Ubuntu) DefaultBlockDeviceMappings() []*v1beta1.BlockDeviceMapping { - return []*v1beta1.BlockDeviceMapping{{ +func (u Ubuntu) DefaultBlockDeviceMappings() []*v1.BlockDeviceMapping { + return []*v1.BlockDeviceMapping{{ DeviceName: u.EphemeralBlockDevice(), EBS: &DefaultEBS, }} diff --git a/pkg/providers/amifamily/windows.go b/pkg/providers/amifamily/windows.go index 33cf9266b844..e92c801bcf0b 100644 --- a/pkg/providers/amifamily/windows.go +++ b/pkg/providers/amifamily/windows.go @@ -17,10 +17,10 @@ package amifamily import ( "fmt" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/scheduling" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/samber/lo" "k8s.io/apimachinery/pkg/api/resource" @@ -29,7 +29,7 @@ import ( "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily/bootstrap" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "sigs.k8s.io/karpenter/pkg/cloudprovider" ) @@ -44,18 +44,18 @@ type Windows struct { func (w Windows) DefaultAMIs(version string) []DefaultAMIOutput { return []DefaultAMIOutput{ { - Query: fmt.Sprintf("/aws/service/ami-windows-latest/Windows_Server-%s-English-%s-EKS_Optimized-%s/image_id", w.Version, v1beta1.WindowsCore, version), + Query: fmt.Sprintf("/aws/service/ami-windows-latest/Windows_Server-%s-English-%s-EKS_Optimized-%s/image_id", w.Version, v1.WindowsCore, version), Requirements: 
scheduling.NewRequirements( - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, corev1beta1.ArchitectureAmd64), - scheduling.NewRequirement(v1.LabelOSStable, v1.NodeSelectorOpIn, string(v1.Windows)), - scheduling.NewRequirement(v1.LabelWindowsBuild, v1.NodeSelectorOpIn, w.Build), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, karpv1.ArchitectureAmd64), + scheduling.NewRequirement(corev1.LabelOSStable, corev1.NodeSelectorOpIn, string(corev1.Windows)), + scheduling.NewRequirement(corev1.LabelWindowsBuild, corev1.NodeSelectorOpIn, w.Build), ), }, } } // UserData returns the default userdata script for the AMI Family -func (w Windows) UserData(kubeletConfig *corev1beta1.KubeletConfiguration, taints []v1.Taint, labels map[string]string, caBundle *string, _ []*cloudprovider.InstanceType, customUserData *string, _ *v1beta1.InstanceStorePolicy) bootstrap.Bootstrapper { +func (w Windows) UserData(kubeletConfig *v1.KubeletConfiguration, taints []corev1.Taint, labels map[string]string, caBundle *string, _ []*cloudprovider.InstanceType, customUserData *string, _ *v1.InstanceStorePolicy) bootstrap.Bootstrapper { return bootstrap.Windows{ Options: bootstrap.Options{ ClusterName: w.Options.ClusterName, @@ -70,10 +70,10 @@ func (w Windows) UserData(kubeletConfig *corev1beta1.KubeletConfiguration, taint } // DefaultBlockDeviceMappings returns the default block device mappings for the AMI Family -func (w Windows) DefaultBlockDeviceMappings() []*v1beta1.BlockDeviceMapping { +func (w Windows) DefaultBlockDeviceMappings() []*v1.BlockDeviceMapping { sda1EBS := DefaultEBS sda1EBS.VolumeSize = lo.ToPtr(resource.MustParse("50Gi")) - return []*v1beta1.BlockDeviceMapping{{ + return []*v1.BlockDeviceMapping{{ DeviceName: w.EphemeralBlockDevice(), EBS: &sda1EBS, }} diff --git a/pkg/providers/instance/instance.go b/pkg/providers/instance/instance.go index 95c17cefb206..f0152e5bb32e 100644 --- a/pkg/providers/instance/instance.go +++ b/pkg/providers/instance/instance.go @@ -28,14 +28,14 @@ import ( "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/samber/lo" "go.uber.org/multierr" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/log" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/utils/resources" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/batcher" "github.com/aws/karpenter-provider-aws/pkg/cache" awserrors "github.com/aws/karpenter-provider-aws/pkg/errors" @@ -62,7 +62,7 @@ var ( ) type Provider interface { - Create(context.Context, *v1beta1.EC2NodeClass, *corev1beta1.NodeClaim, []*cloudprovider.InstanceType) (*Instance, error) + Create(context.Context, *v1.EC2NodeClass, *karpv1.NodeClaim, []*cloudprovider.InstanceType) (*Instance, error) Get(context.Context, string) (*Instance, error) List(context.Context) ([]*Instance, error) Delete(context.Context, string) error @@ -92,7 +92,7 @@ func NewDefaultProvider(ctx context.Context, region string, ec2api ec2iface.EC2A } } -func (p *DefaultProvider) Create(ctx context.Context, nodeClass *v1beta1.EC2NodeClass, nodeClaim *corev1beta1.NodeClaim, instanceTypes []*cloudprovider.InstanceType) (*Instance, error) { +func (p *DefaultProvider) Create(ctx context.Context, nodeClass *v1.EC2NodeClass, nodeClaim *karpv1.NodeClaim, instanceTypes 
[]*cloudprovider.InstanceType) (*Instance, error) { schedulingRequirements := scheduling.NewNodeSelectorRequirementsWithMinValues(nodeClaim.Spec.Requirements...) // Only filter the instances if there are no minValues in the requirement. if !schedulingRequirements.HasMinValues() { @@ -112,7 +112,7 @@ func (p *DefaultProvider) Create(ctx context.Context, nodeClass *v1beta1.EC2Node if err != nil { return nil, err } - efaEnabled := lo.Contains(lo.Keys(nodeClaim.Spec.Resources.Requests), v1beta1.ResourceEFA) + efaEnabled := lo.Contains(lo.Keys(nodeClaim.Spec.Resources.Requests), v1.ResourceEFA) return NewInstanceFromFleet(fleetInstance, tags, efaEnabled), nil } @@ -143,11 +143,11 @@ func (p *DefaultProvider) List(ctx context.Context) ([]*Instance, error) { Filters: []*ec2.Filter{ { Name: aws.String("tag-key"), - Values: aws.StringSlice([]string{corev1beta1.NodePoolLabelKey}), + Values: aws.StringSlice([]string{karpv1.NodePoolLabelKey}), }, { Name: aws.String("tag-key"), - Values: aws.StringSlice([]string{v1beta1.LabelNodeClass}), + Values: aws.StringSlice([]string{v1.LabelNodeClass}), }, { Name: aws.String("tag-key"), @@ -200,7 +200,7 @@ func (p *DefaultProvider) CreateTags(ctx context.Context, id string, tags map[st return nil } -func (p *DefaultProvider) launchInstance(ctx context.Context, nodeClass *v1beta1.EC2NodeClass, nodeClaim *corev1beta1.NodeClaim, instanceTypes []*cloudprovider.InstanceType, tags map[string]string) (*ec2.CreateFleetInstance, error) { +func (p *DefaultProvider) launchInstance(ctx context.Context, nodeClass *v1.EC2NodeClass, nodeClaim *karpv1.NodeClaim, instanceTypes []*cloudprovider.InstanceType, tags map[string]string) (*ec2.CreateFleetInstance, error) { capacityType := p.getCapacityType(nodeClaim, instanceTypes) zonalSubnets, err := p.subnetProvider.ZonalSubnetsForLaunch(ctx, nodeClass, instanceTypes, capacityType) if err != nil { @@ -230,7 +230,7 @@ func (p *DefaultProvider) launchInstance(ctx context.Context, nodeClass *v1beta1 {ResourceType: aws.String(ec2.ResourceTypeFleet), Tags: utils.MergeTags(tags)}, }, } - if capacityType == corev1beta1.CapacityTypeSpot { + if capacityType == karpv1.CapacityTypeSpot { createFleetInput.SpotOptions = &ec2.SpotOptionsRequest{AllocationStrategy: aws.String(ec2.SpotAllocationStrategyPriceCapacityOptimized)} } else { createFleetInput.OnDemandOptions = &ec2.OnDemandOptionsRequest{AllocationStrategy: aws.String(ec2.FleetOnDemandAllocationStrategyLowestPrice)} @@ -258,19 +258,19 @@ func (p *DefaultProvider) launchInstance(ctx context.Context, nodeClass *v1beta1 return createFleetOutput.Instances[0], nil } -func getTags(ctx context.Context, nodeClass *v1beta1.EC2NodeClass, nodeClaim *corev1beta1.NodeClaim) map[string]string { +func getTags(ctx context.Context, nodeClass *v1.EC2NodeClass, nodeClaim *karpv1.NodeClaim) map[string]string { staticTags := map[string]string{ fmt.Sprintf("kubernetes.io/cluster/%s", options.FromContext(ctx).ClusterName): "owned", - corev1beta1.NodePoolLabelKey: nodeClaim.Labels[corev1beta1.NodePoolLabelKey], - corev1beta1.ManagedByAnnotationKey: options.FromContext(ctx).ClusterName, - v1beta1.LabelNodeClass: nodeClass.Name, + karpv1.NodePoolLabelKey: nodeClaim.Labels[karpv1.NodePoolLabelKey], + karpv1.ManagedByAnnotationKey: options.FromContext(ctx).ClusterName, + v1.LabelNodeClass: nodeClass.Name, } return lo.Assign(nodeClass.Spec.Tags, staticTags) } -func (p *DefaultProvider) checkODFallback(nodeClaim *corev1beta1.NodeClaim, instanceTypes []*cloudprovider.InstanceType, launchTemplateConfigs 
[]*ec2.FleetLaunchTemplateConfigRequest) error { +func (p *DefaultProvider) checkODFallback(nodeClaim *karpv1.NodeClaim, instanceTypes []*cloudprovider.InstanceType, launchTemplateConfigs []*ec2.FleetLaunchTemplateConfigRequest) error { // only evaluate for on-demand fallback if the capacity type for the request is OD and both OD and spot are allowed in requirements - if p.getCapacityType(nodeClaim, instanceTypes) != corev1beta1.CapacityTypeOnDemand || !scheduling.NewNodeSelectorRequirementsWithMinValues(nodeClaim.Spec.Requirements...).Get(corev1beta1.CapacityTypeLabelKey).Has(corev1beta1.CapacityTypeSpot) { + if p.getCapacityType(nodeClaim, instanceTypes) != karpv1.CapacityTypeOnDemand || !scheduling.NewNodeSelectorRequirementsWithMinValues(nodeClaim.Spec.Requirements...).Get(karpv1.CapacityTypeLabelKey).Has(karpv1.CapacityTypeSpot) { return nil } @@ -291,7 +291,7 @@ func (p *DefaultProvider) checkODFallback(nodeClaim *corev1beta1.NodeClaim, inst return nil } -func (p *DefaultProvider) getLaunchTemplateConfigs(ctx context.Context, nodeClass *v1beta1.EC2NodeClass, nodeClaim *corev1beta1.NodeClaim, +func (p *DefaultProvider) getLaunchTemplateConfigs(ctx context.Context, nodeClass *v1.EC2NodeClass, nodeClaim *karpv1.NodeClaim, instanceTypes []*cloudprovider.InstanceType, zonalSubnets map[string]*subnet.Subnet, capacityType string, tags map[string]string) ([]*ec2.FleetLaunchTemplateConfigRequest, error) { var launchTemplateConfigs []*ec2.FleetLaunchTemplateConfigRequest launchTemplates, err := p.launchTemplateProvider.EnsureAll(ctx, nodeClass, nodeClaim, instanceTypes, capacityType, tags) @@ -299,7 +299,7 @@ func (p *DefaultProvider) getLaunchTemplateConfigs(ctx context.Context, nodeClas return nil, fmt.Errorf("getting launch templates, %w", err) } requirements := scheduling.NewNodeSelectorRequirementsWithMinValues(nodeClaim.Spec.Requirements...) - requirements[corev1beta1.CapacityTypeLabelKey] = scheduling.NewRequirement(corev1beta1.CapacityTypeLabelKey, v1.NodeSelectorOpIn, capacityType) + requirements[karpv1.CapacityTypeLabelKey] = scheduling.NewRequirement(karpv1.CapacityTypeLabelKey, corev1.NodeSelectorOpIn, capacityType) for _, launchTemplate := range launchTemplates { launchTemplateConfig := &ec2.FleetLaunchTemplateConfigRequest{ Overrides: p.getOverrides(launchTemplate.InstanceTypes, zonalSubnets, requirements, launchTemplate.ImageID), @@ -342,7 +342,7 @@ func (p *DefaultProvider) getOverrides(instanceTypes []*cloudprovider.InstanceTy if reqs.Compatible(offering.Requirements, scheduling.AllowUndefinedWellKnownLabels) != nil { continue } - subnet, ok := zonalSubnets[offering.Requirements.Get(v1.LabelTopologyZone).Any()] + subnet, ok := zonalSubnets[offering.Requirements.Get(corev1.LabelTopologyZone).Any()] if !ok { continue } @@ -369,24 +369,24 @@ func (p *DefaultProvider) updateUnavailableOfferingsCache(ctx context.Context, e // getCapacityType selects spot if both constraints are flexible and there is an // available offering. The AWS Cloud Provider defaults to [ on-demand ], so spot // must be explicitly included in capacity type requirements. -func (p *DefaultProvider) getCapacityType(nodeClaim *corev1beta1.NodeClaim, instanceTypes []*cloudprovider.InstanceType) string { +func (p *DefaultProvider) getCapacityType(nodeClaim *karpv1.NodeClaim, instanceTypes []*cloudprovider.InstanceType) string { requirements := scheduling.NewNodeSelectorRequirementsWithMinValues(nodeClaim.Spec.Requirements...) 
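// Editorial aside, not part of this diff: the getCapacityType body that
// continues below prefers spot only when the NodeClaim's requirements allow it
// and at least one compatible spot offering is actually available; otherwise it
// falls back to on-demand. The stand-in sketch below restates that decision;
// offering and the string literals are simplifications, not repo code.
package main

import "fmt"

type offering struct {
	capacityType string
	available    bool
	compatible   bool // stands in for requirements.Compatible(...) == nil
}

func getCapacityType(spotAllowed bool, offerings []offering) string {
	if spotAllowed {
		for _, o := range offerings {
			if o.available && o.compatible && o.capacityType == "spot" {
				return "spot"
			}
		}
	}
	return "on-demand"
}

func main() {
	// Spot is allowed but no spot offering is available, so on-demand wins.
	fmt.Println(getCapacityType(true, []offering{{"on-demand", true, true}}))
}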
- if requirements.Get(corev1beta1.CapacityTypeLabelKey).Has(corev1beta1.CapacityTypeSpot) { - requirements[corev1beta1.CapacityTypeLabelKey] = scheduling.NewRequirement(corev1beta1.CapacityTypeLabelKey, v1.NodeSelectorOpIn, corev1beta1.CapacityTypeSpot) + if requirements.Get(karpv1.CapacityTypeLabelKey).Has(karpv1.CapacityTypeSpot) { + requirements[karpv1.CapacityTypeLabelKey] = scheduling.NewRequirement(karpv1.CapacityTypeLabelKey, corev1.NodeSelectorOpIn, karpv1.CapacityTypeSpot) for _, instanceType := range instanceTypes { for _, offering := range instanceType.Offerings.Available() { if requirements.Compatible(offering.Requirements, scheduling.AllowUndefinedWellKnownLabels) == nil { - return corev1beta1.CapacityTypeSpot + return karpv1.CapacityTypeSpot } } } } - return corev1beta1.CapacityTypeOnDemand + return karpv1.CapacityTypeOnDemand } // filterInstanceTypes is used to provide filtering on the list of potential instance types to further limit it to those // that make the most sense given our specific AWS cloudprovider. -func (p *DefaultProvider) filterInstanceTypes(nodeClaim *corev1beta1.NodeClaim, instanceTypes []*cloudprovider.InstanceType) []*cloudprovider.InstanceType { +func (p *DefaultProvider) filterInstanceTypes(nodeClaim *karpv1.NodeClaim, instanceTypes []*cloudprovider.InstanceType) []*cloudprovider.InstanceType { instanceTypes = filterExoticInstanceTypes(instanceTypes) // If we could potentially launch either a spot or on-demand node, we want to filter out the spot instance types that // are more expensive than the cheapest on-demand type. @@ -398,22 +398,22 @@ func (p *DefaultProvider) filterInstanceTypes(nodeClaim *corev1beta1.NodeClaim, // isMixedCapacityLaunch returns true if nodepools and available offerings could potentially allow either a spot or // an on-demand node to launch -func (p *DefaultProvider) isMixedCapacityLaunch(nodeClaim *corev1beta1.NodeClaim, instanceTypes []*cloudprovider.InstanceType) bool { +func (p *DefaultProvider) isMixedCapacityLaunch(nodeClaim *karpv1.NodeClaim, instanceTypes []*cloudprovider.InstanceType) bool { requirements := scheduling.NewNodeSelectorRequirementsWithMinValues(nodeClaim.Spec.Requirements...)
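// Editorial aside, not part of this diff: when isMixedCapacityLaunch (below)
// reports that both capacity types are in play, filterUnwantedSpot (later in
// this hunk) drops spot instance types priced above the cheapest on-demand
// offering. The sketch restates that first step, finding the cheapest
// on-demand price, with stand-in types rather than cloudprovider.InstanceType.
package main

import (
	"fmt"
	"math"
)

type offering struct {
	capacityType string
	price        float64
}

type instanceType struct {
	name      string
	offerings []offering
}

// cheapestOnDemand scans offerings for the lowest on-demand price; any spot
// offering above this price could never be the better launch choice.
func cheapestOnDemand(its []instanceType) float64 {
	cheapest := math.MaxFloat64
	for _, it := range its {
		for _, o := range it.offerings {
			if o.capacityType == "on-demand" && o.price < cheapest {
				cheapest = o.price
			}
		}
	}
	return cheapest
}

func main() {
	its := []instanceType{{"m5.large", []offering{{"on-demand", 0.096}, {"spot", 0.035}}}}
	fmt.Println(cheapestOnDemand(its)) // 0.096
}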
// requirements must allow both - if !requirements.Get(corev1beta1.CapacityTypeLabelKey).Has(corev1beta1.CapacityTypeSpot) || - !requirements.Get(corev1beta1.CapacityTypeLabelKey).Has(corev1beta1.CapacityTypeOnDemand) { + if !requirements.Get(karpv1.CapacityTypeLabelKey).Has(karpv1.CapacityTypeSpot) || + !requirements.Get(karpv1.CapacityTypeLabelKey).Has(karpv1.CapacityTypeOnDemand) { return false } hasSpotOfferings := false hasODOffering := false - if requirements.Get(corev1beta1.CapacityTypeLabelKey).Has(corev1beta1.CapacityTypeSpot) { + if requirements.Get(karpv1.CapacityTypeLabelKey).Has(karpv1.CapacityTypeSpot) { for _, instanceType := range instanceTypes { for _, offering := range instanceType.Offerings.Available() { if requirements.Compatible(offering.Requirements, scheduling.AllowUndefinedWellKnownLabels) != nil { continue } - if offering.Requirements.Get(corev1beta1.CapacityTypeLabelKey).Any() == corev1beta1.CapacityTypeSpot { + if offering.Requirements.Get(karpv1.CapacityTypeLabelKey).Any() == karpv1.CapacityTypeSpot { hasSpotOfferings = true } else { hasODOffering = true @@ -431,7 +431,7 @@ func filterUnwantedSpot(instanceTypes []*cloudprovider.InstanceType) []*cloudpro // first, find the price of our cheapest available on-demand instance type that could support this node for _, it := range instanceTypes { for _, o := range it.Offerings.Available() { - if o.Requirements.Get(corev1beta1.CapacityTypeLabelKey).Any() == corev1beta1.CapacityTypeOnDemand && o.Price < cheapestOnDemand { + if o.Requirements.Get(karpv1.CapacityTypeLabelKey).Any() == karpv1.CapacityTypeOnDemand && o.Price < cheapestOnDemand { cheapestOnDemand = o.Price } } @@ -458,13 +458,13 @@ func filterExoticInstanceTypes(instanceTypes []*cloudprovider.InstanceType) []*c for _, it := range instanceTypes { // deprioritize metal even if our opinionated filter isn't applied due to something like an instance family // requirement - if _, ok := lo.Find(it.Requirements.Get(v1beta1.LabelInstanceSize).Values(), func(size string) bool { return strings.Contains(size, "metal") }); ok { + if _, ok := lo.Find(it.Requirements.Get(v1.LabelInstanceSize).Values(), func(size string) bool { return strings.Contains(size, "metal") }); ok { continue } - if !resources.IsZero(it.Capacity[v1beta1.ResourceAWSNeuron]) || - !resources.IsZero(it.Capacity[v1beta1.ResourceAMDGPU]) || - !resources.IsZero(it.Capacity[v1beta1.ResourceNVIDIAGPU]) || - !resources.IsZero(it.Capacity[v1beta1.ResourceHabanaGaudi]) { + if !resources.IsZero(it.Capacity[v1.ResourceAWSNeuron]) || + !resources.IsZero(it.Capacity[v1.ResourceAMDGPU]) || + !resources.IsZero(it.Capacity[v1.ResourceNVIDIAGPU]) || + !resources.IsZero(it.Capacity[v1.ResourceHabanaGaudi]) { continue } genericInstanceTypes = append(genericInstanceTypes, it) diff --git a/pkg/providers/instance/suite_test.go b/pkg/providers/instance/suite_test.go index 80b60b437009..86b72d523e89 100644 --- a/pkg/providers/instance/suite_test.go +++ b/pkg/providers/instance/suite_test.go @@ -29,14 +29,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" corecloudprovider "sigs.k8s.io/karpenter/pkg/cloudprovider" "sigs.k8s.io/karpenter/pkg/events" coreoptions "sigs.k8s.io/karpenter/pkg/operator/options" coretest "sigs.k8s.io/karpenter/pkg/test" "github.com/aws/karpenter-provider-aws/pkg/apis" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + 
v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/cloudprovider" "github.com/aws/karpenter-provider-aws/pkg/fake" "github.com/aws/karpenter-provider-aws/pkg/operator/options" @@ -80,35 +80,35 @@ var _ = BeforeEach(func() { }) var _ = Describe("InstanceProvider", func() { - var nodeClass *v1beta1.EC2NodeClass - var nodePool *corev1beta1.NodePool - var nodeClaim *corev1beta1.NodeClaim + var nodeClass *v1.EC2NodeClass + var nodePool *karpv1.NodePool + var nodeClaim *karpv1.NodeClaim BeforeEach(func() { nodeClass = test.EC2NodeClass() - nodePool = coretest.NodePool(corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + nodePool = coretest.NodePool(karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }, }, }) - nodeClaim = coretest.NodeClaim(corev1beta1.NodeClaim{ + nodeClaim = coretest.NodeClaim(karpv1.NodeClaim{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - corev1beta1.NodePoolLabelKey: nodePool.Name, + karpv1.NodePoolLabelKey: nodePool.Name, }, }, - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }) @@ -121,10 +121,10 @@ var _ = Describe("InstanceProvider", func() { ExpectApplied(ctx, env.Client, nodeClaim, nodePool, nodeClass) nodeClass = ExpectExists(ctx, env.Client, nodeClass) awsEnv.EC2API.InsufficientCapacityPools.Set([]fake.CapacityPool{ - {CapacityType: corev1beta1.CapacityTypeOnDemand, InstanceType: "m5.xlarge", Zone: "test-zone-1a"}, - {CapacityType: corev1beta1.CapacityTypeOnDemand, InstanceType: "m5.xlarge", Zone: "test-zone-1b"}, - {CapacityType: corev1beta1.CapacityTypeSpot, InstanceType: "m5.xlarge", Zone: "test-zone-1a"}, - {CapacityType: corev1beta1.CapacityTypeSpot, InstanceType: "m5.xlarge", Zone: "test-zone-1b"}, + {CapacityType: karpv1.CapacityTypeOnDemand, InstanceType: "m5.xlarge", Zone: "test-zone-1a"}, + {CapacityType: karpv1.CapacityTypeOnDemand, InstanceType: "m5.xlarge", Zone: "test-zone-1b"}, + {CapacityType: karpv1.CapacityTypeSpot, InstanceType: "m5.xlarge", Zone: "test-zone-1a"}, + {CapacityType: karpv1.CapacityTypeSpot, InstanceType: "m5.xlarge", Zone: "test-zone-1b"}, }) instanceTypes, err := cloudProvider.GetInstanceTypes(ctx, nodePool) Expect(err).ToNot(HaveOccurred()) @@ -154,15 +154,15 @@ var _ = Describe("InstanceProvider", func() { Value: aws.String("owned"), }, { - Key: aws.String(corev1beta1.NodePoolLabelKey), + Key: aws.String(karpv1.NodePoolLabelKey), Value: aws.String("default"), }, { - Key: aws.String(v1beta1.LabelNodeClass), + Key: aws.String(v1.LabelNodeClass), Value: aws.String("default"), }, { - Key: aws.String(corev1beta1.ManagedByAnnotationKey), + Key: aws.String(karpv1.ManagedByAnnotationKey), Value: aws.String(options.FromContext(ctx).ClusterName), }, }, diff --git a/pkg/providers/instance/types.go 
b/pkg/providers/instance/types.go index 5f3804f2d004..5ee8e3b3a930 100644 --- a/pkg/providers/instance/types.go +++ b/pkg/providers/instance/types.go @@ -21,7 +21,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/samber/lo" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" ) // Instance is an internal data representation of either an ec2.Instance or an ec2.FleetInstance @@ -48,7 +48,7 @@ func NewInstance(out *ec2.Instance) *Instance { ImageID: aws.StringValue(out.ImageId), Type: aws.StringValue(out.InstanceType), Zone: aws.StringValue(out.Placement.AvailabilityZone), - CapacityType: lo.Ternary(out.SpotInstanceRequestId != nil, corev1beta1.CapacityTypeSpot, corev1beta1.CapacityTypeOnDemand), + CapacityType: lo.Ternary(out.SpotInstanceRequestId != nil, karpv1.CapacityTypeSpot, karpv1.CapacityTypeOnDemand), SecurityGroupIDs: lo.Map(out.SecurityGroups, func(securitygroup *ec2.GroupIdentifier, _ int) string { return aws.StringValue(securitygroup.GroupId) }), diff --git a/pkg/providers/instanceprofile/instanceprofile.go b/pkg/providers/instanceprofile/instanceprofile.go index d8b361a8fd23..9e62bfb83227 100644 --- a/pkg/providers/instanceprofile/instanceprofile.go +++ b/pkg/providers/instanceprofile/instanceprofile.go @@ -23,7 +23,7 @@ import ( "github.com/aws/aws-sdk-go/service/iam/iamiface" "github.com/patrickmn/go-cache" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" awserrors "github.com/aws/karpenter-provider-aws/pkg/errors" @@ -59,7 +59,7 @@ func NewDefaultProvider(region string, iamapi iamiface.IAMAPI, cache *cache.Cach func (p *DefaultProvider) Create(ctx context.Context, m ResourceOwner) (string, error) { profileName := m.InstanceProfileName(options.FromContext(ctx).ClusterName, p.region) - tags := lo.Assign(m.InstanceProfileTags(options.FromContext(ctx).ClusterName), map[string]string{v1.LabelTopologyRegion: p.region}) + tags := lo.Assign(m.InstanceProfileTags(options.FromContext(ctx).ClusterName), map[string]string{corev1.LabelTopologyRegion: p.region}) // An instance profile exists for this NodeClass if _, ok := p.cache.Get(string(m.GetUID())); ok { diff --git a/pkg/providers/instancetype/instancetype.go b/pkg/providers/instancetype/instancetype.go index 82bbcb099faa..f34e2c25c039 100644 --- a/pkg/providers/instancetype/instancetype.go +++ b/pkg/providers/instancetype/instancetype.go @@ -26,17 +26,17 @@ import ( "github.com/prometheus/client_golang/prometheus" "sigs.k8s.io/controller-runtime/pkg/log" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/scheduling" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" awscache "github.com/aws/karpenter-provider-aws/pkg/cache" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily" @@ -49,7 +49,7 @@ import ( type Provider interface { LivenessProbe(*http.Request) error - List(context.Context, *corev1beta1.KubeletConfiguration, *v1beta1.EC2NodeClass) ([]*cloudprovider.InstanceType, error) + List(context.Context, *v1.KubeletConfiguration, *v1.EC2NodeClass) ([]*cloudprovider.InstanceType, error) UpdateInstanceTypes(ctx context.Context) 
error UpdateInstanceTypeOfferings(ctx context.Context) error } @@ -97,14 +97,14 @@ func NewDefaultProvider(region string, instanceTypesCache *cache.Cache, ec2api e } } -func (p *DefaultProvider) List(ctx context.Context, kc *corev1beta1.KubeletConfiguration, nodeClass *v1beta1.EC2NodeClass) ([]*cloudprovider.InstanceType, error) { +func (p *DefaultProvider) List(ctx context.Context, kc *v1.KubeletConfiguration, nodeClass *v1.EC2NodeClass) ([]*cloudprovider.InstanceType, error) { p.muInstanceTypeInfo.RLock() p.muInstanceTypeOfferings.RLock() defer p.muInstanceTypeInfo.RUnlock() defer p.muInstanceTypeOfferings.RUnlock() if kc == nil { - kc = &corev1beta1.KubeletConfiguration{} + kc = &v1.KubeletConfiguration{} } if len(p.instanceTypesInfo) == 0 { return nil, fmt.Errorf("no instance types found") @@ -116,7 +116,7 @@ func (p *DefaultProvider) List(ctx context.Context, kc *corev1beta1.KubeletConfi return nil, fmt.Errorf("no subnets found") } - subnetZones := sets.New(lo.Map(nodeClass.Status.Subnets, func(s v1beta1.Subnet, _ int) string { + subnetZones := sets.New(lo.Map(nodeClass.Status.Subnets, func(s v1.Subnet, _ int) string { return aws.StringValue(&s.Zone) })...) @@ -261,7 +261,7 @@ func (p *DefaultProvider) UpdateInstanceTypeOfferings(ctx context.Context) error // offering, you can do the following thanks to this invariant: // // offering.Requirements.Get(v1.TopologyLabelZone).Any() -func (p *DefaultProvider) createOfferings(ctx context.Context, instanceType *ec2.InstanceTypeInfo, zones, instanceTypeZones sets.Set[string], subnets []v1beta1.Subnet) []cloudprovider.Offering { +func (p *DefaultProvider) createOfferings(ctx context.Context, instanceType *ec2.InstanceTypeInfo, zones, instanceTypeZones sets.Set[string], subnets []v1.Subnet) []cloudprovider.Offering { var offerings []cloudprovider.Offering for zone := range zones { // while usage classes should be a distinct set, there's no guarantee of that @@ -283,20 +283,20 @@ func (p *DefaultProvider) createOfferings(ctx context.Context, instanceType *ec2 continue } - subnet, hasSubnet := lo.Find(subnets, func(s v1beta1.Subnet) bool { + subnet, hasSubnet := lo.Find(subnets, func(s v1.Subnet) bool { return s.Zone == zone }) available := !isUnavailable && ok && instanceTypeZones.Has(zone) && hasSubnet offering := cloudprovider.Offering{ Requirements: scheduling.NewRequirements( - scheduling.NewRequirement(corev1beta1.CapacityTypeLabelKey, v1.NodeSelectorOpIn, capacityType), - scheduling.NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpIn, zone), + scheduling.NewRequirement(karpv1.CapacityTypeLabelKey, corev1.NodeSelectorOpIn, capacityType), + scheduling.NewRequirement(corev1.LabelTopologyZone, corev1.NodeSelectorOpIn, zone), ), Price: price, Available: available, } if subnet.ZoneID != "" { - offering.Requirements.Add(scheduling.NewRequirement(v1beta1.LabelTopologyZoneID, v1.NodeSelectorOpIn, subnet.ZoneID)) + offering.Requirements.Add(scheduling.NewRequirement(v1.LabelTopologyZoneID, corev1.NodeSelectorOpIn, subnet.ZoneID)) } offerings = append(offerings, offering) instanceTypeOfferingAvailable.With(prometheus.Labels{ diff --git a/pkg/providers/instancetype/suite_test.go b/pkg/providers/instancetype/suite_test.go index ad434cc85967..569647e2b99a 100644 --- a/pkg/providers/instancetype/suite_test.go +++ b/pkg/providers/instancetype/suite_test.go @@ -35,13 +35,14 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" clock "k8s.io/utils/clock/testing" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" corecloudprovider "sigs.k8s.io/karpenter/pkg/cloudprovider" "sigs.k8s.io/karpenter/pkg/controllers/provisioning" "sigs.k8s.io/karpenter/pkg/controllers/state" @@ -56,7 +57,7 @@ import ( "sigs.k8s.io/karpenter/pkg/utils/resources" "github.com/aws/karpenter-provider-aws/pkg/apis" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/cloudprovider" "github.com/aws/karpenter-provider-aws/pkg/fake" "github.com/aws/karpenter-provider-aws/pkg/operator/options" @@ -109,14 +110,14 @@ var _ = AfterEach(func() { }) var _ = Describe("InstanceTypeProvider", func() { - var nodeClass, windowsNodeClass *v1beta1.EC2NodeClass - var nodePool, windowsNodePool *corev1beta1.NodePool + var nodeClass, windowsNodeClass *v1.EC2NodeClass + var nodePool, windowsNodePool *karpv1.NodePool BeforeEach(func() { nodeClass = test.EC2NodeClass( - v1beta1.EC2NodeClass{ - Status: v1beta1.EC2NodeClassStatus{ + v1.EC2NodeClass{ + Status: v1.EC2NodeClassStatus{ InstanceProfile: "test-profile", - SecurityGroups: []v1beta1.SecurityGroup{ + SecurityGroups: []v1.SecurityGroup{ { ID: "sg-test1", }, @@ -127,7 +128,7 @@ var _ = Describe("InstanceTypeProvider", func() { ID: "sg-test3", }, }, - Subnets: []v1beta1.Subnet{ + Subnets: []v1.Subnet{ { ID: "subnet-test1", Zone: "test-zone-1a", @@ -145,62 +146,61 @@ var _ = Describe("InstanceTypeProvider", func() { }, ) nodeClass.StatusConditions().SetTrue(status.ConditionReady) - nodePool = coretest.NodePool(corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodePool = coretest.NodePool(karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeOnDemand}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeOnDemand}, }, }, }, - Kubelet: &corev1beta1.KubeletConfiguration{}, - NodeClassRef: &corev1beta1.NodeClassReference{ + NodeClassRef: &karpv1.NodeClassReference{ Name: nodeClass.Name, }, }, }, }, }) - windowsNodeClass = test.EC2NodeClass(v1beta1.EC2NodeClass{ - Spec: v1beta1.EC2NodeClassSpec{ - AMIFamily: &v1beta1.AMIFamilyWindows2022, + windowsNodeClass = test.EC2NodeClass(v1.EC2NodeClass{ + Spec: v1.EC2NodeClassSpec{ + AMIFamily: &v1.AMIFamilyWindows2022, }, - Status: v1beta1.EC2NodeClassStatus{ + Status: v1.EC2NodeClassStatus{ InstanceProfile: "test-profile", SecurityGroups: nodeClass.Status.SecurityGroups, Subnets: nodeClass.Status.Subnets, - AMIs: []v1beta1.AMI{ + AMIs: []v1.AMI{ { ID: "ami-window-test1", - Requirements: []v1.NodeSelectorRequirement{ - {Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: 
[]string{corev1beta1.ArchitectureAmd64}}, - {Key: v1.LabelOSStable, Operator: v1.NodeSelectorOpIn, Values: []string{string(v1.Windows)}}, - {Key: v1.LabelWindowsBuild, Operator: v1.NodeSelectorOpIn, Values: []string{v1beta1.Windows2022Build}}, + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.ArchitectureAmd64}}, + {Key: corev1.LabelOSStable, Operator: corev1.NodeSelectorOpIn, Values: []string{string(corev1.Windows)}}, + {Key: corev1.LabelWindowsBuild, Operator: corev1.NodeSelectorOpIn, Values: []string{v1.Windows2022Build}}, }, }, }, }, }) windowsNodeClass.StatusConditions().SetTrue(status.ConditionReady) - windowsNodePool = coretest.NodePool(corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + windowsNodePool = coretest.NodePool(karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeOnDemand}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeOnDemand}, }, }, }, - NodeClassRef: &corev1beta1.NodeClassReference{ + NodeClassRef: &karpv1.NodeClassReference{ Name: windowsNodeClass.Name, }, }, @@ -218,48 +218,48 @@ var _ = Describe("InstanceTypeProvider", func() { nodeSelector := map[string]string{ // Well known - corev1beta1.NodePoolLabelKey: nodePool.Name, - v1.LabelTopologyRegion: fake.DefaultRegion, - v1.LabelTopologyZone: "test-zone-1a", - v1.LabelInstanceTypeStable: "g4dn.8xlarge", - v1.LabelOSStable: "linux", - v1.LabelArchStable: "amd64", - corev1beta1.CapacityTypeLabelKey: "on-demand", + karpv1.NodePoolLabelKey: nodePool.Name, + corev1.LabelTopologyRegion: fake.DefaultRegion, + corev1.LabelTopologyZone: "test-zone-1a", + corev1.LabelInstanceTypeStable: "g4dn.8xlarge", + corev1.LabelOSStable: "linux", + corev1.LabelArchStable: "amd64", + karpv1.CapacityTypeLabelKey: "on-demand", // Well Known to AWS - v1beta1.LabelInstanceHypervisor: "nitro", - v1beta1.LabelInstanceEncryptionInTransitSupported: "true", - v1beta1.LabelInstanceCategory: "g", - v1beta1.LabelInstanceGeneration: "4", - v1beta1.LabelInstanceFamily: "g4dn", - v1beta1.LabelInstanceSize: "8xlarge", - v1beta1.LabelInstanceCPU: "32", - v1beta1.LabelInstanceCPUManufacturer: "intel", - v1beta1.LabelInstanceMemory: "131072", - v1beta1.LabelInstanceEBSBandwidth: "9500", - v1beta1.LabelInstanceNetworkBandwidth: "50000", - v1beta1.LabelInstanceGPUName: "t4", - v1beta1.LabelInstanceGPUManufacturer: "nvidia", - v1beta1.LabelInstanceGPUCount: "1", - v1beta1.LabelInstanceGPUMemory: "16384", - v1beta1.LabelInstanceLocalNVME: "900", - v1beta1.LabelInstanceAcceleratorName: "inferentia", - v1beta1.LabelInstanceAcceleratorManufacturer: "aws", - v1beta1.LabelInstanceAcceleratorCount: "1", - v1beta1.LabelTopologyZoneID: "tstz1-1a", + v1.LabelInstanceHypervisor: "nitro", + v1.LabelInstanceEncryptionInTransitSupported: "true", + v1.LabelInstanceCategory: "g", + v1.LabelInstanceGeneration: "4", + v1.LabelInstanceFamily: "g4dn", + v1.LabelInstanceSize: "8xlarge", + v1.LabelInstanceCPU: "32", + 
v1.LabelInstanceCPUManufacturer: "intel", + v1.LabelInstanceMemory: "131072", + v1.LabelInstanceEBSBandwidth: "9500", + v1.LabelInstanceNetworkBandwidth: "50000", + v1.LabelInstanceGPUName: "t4", + v1.LabelInstanceGPUManufacturer: "nvidia", + v1.LabelInstanceGPUCount: "1", + v1.LabelInstanceGPUMemory: "16384", + v1.LabelInstanceLocalNVME: "900", + v1.LabelInstanceAcceleratorName: "inferentia", + v1.LabelInstanceAcceleratorManufacturer: "aws", + v1.LabelInstanceAcceleratorCount: "1", + v1.LabelTopologyZoneID: "tstz1-1a", // Deprecated Labels - v1.LabelFailureDomainBetaRegion: fake.DefaultRegion, - v1.LabelFailureDomainBetaZone: "test-zone-1a", - "beta.kubernetes.io/arch": "amd64", - "beta.kubernetes.io/os": "linux", - v1.LabelInstanceType: "g4dn.8xlarge", - "topology.ebs.csi.aws.com/zone": "test-zone-1a", - v1.LabelWindowsBuild: v1beta1.Windows2022Build, + corev1.LabelFailureDomainBetaRegion: fake.DefaultRegion, + corev1.LabelFailureDomainBetaZone: "test-zone-1a", + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + corev1.LabelInstanceType: "g4dn.8xlarge", + "topology.ebs.csi.aws.com/zone": "test-zone-1a", + corev1.LabelWindowsBuild: v1.Windows2022Build, } // Ensure that we're exercising all well known labels - Expect(lo.Keys(nodeSelector)).To(ContainElements(append(corev1beta1.WellKnownLabels.UnsortedList(), lo.Keys(corev1beta1.NormalizedLabels)...))) + Expect(lo.Keys(nodeSelector)).To(ContainElements(append(karpv1.WellKnownLabels.UnsortedList(), lo.Keys(karpv1.NormalizedLabels)...))) - var pods []*v1.Pod + var pods []*corev1.Pod for key, value := range nodeSelector { pods = append(pods, coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: map[string]string{key: value}})) } @@ -273,49 +273,49 @@ var _ = Describe("InstanceTypeProvider", func() { nodeSelector := map[string]string{ // Well known - corev1beta1.NodePoolLabelKey: nodePool.Name, - v1.LabelTopologyRegion: fake.DefaultRegion, - v1.LabelTopologyZone: "test-zone-1a", - v1.LabelInstanceTypeStable: "g4dn.8xlarge", - v1.LabelOSStable: "linux", - v1.LabelArchStable: "amd64", - corev1beta1.CapacityTypeLabelKey: "on-demand", + karpv1.NodePoolLabelKey: nodePool.Name, + corev1.LabelTopologyRegion: fake.DefaultRegion, + corev1.LabelTopologyZone: "test-zone-1a", + corev1.LabelInstanceTypeStable: "g4dn.8xlarge", + corev1.LabelOSStable: "linux", + corev1.LabelArchStable: "amd64", + karpv1.CapacityTypeLabelKey: "on-demand", // Well Known to AWS - v1beta1.LabelInstanceHypervisor: "nitro", - v1beta1.LabelInstanceEncryptionInTransitSupported: "true", - v1beta1.LabelInstanceCategory: "g", - v1beta1.LabelInstanceGeneration: "4", - v1beta1.LabelInstanceFamily: "g4dn", - v1beta1.LabelInstanceSize: "8xlarge", - v1beta1.LabelInstanceCPU: "32", - v1beta1.LabelInstanceCPUManufacturer: "intel", - v1beta1.LabelInstanceMemory: "131072", - v1beta1.LabelInstanceEBSBandwidth: "9500", - v1beta1.LabelInstanceNetworkBandwidth: "50000", - v1beta1.LabelInstanceGPUName: "t4", - v1beta1.LabelInstanceGPUManufacturer: "nvidia", - v1beta1.LabelInstanceGPUCount: "1", - v1beta1.LabelInstanceGPUMemory: "16384", - v1beta1.LabelInstanceLocalNVME: "900", - v1beta1.LabelTopologyZoneID: "tstz1-1a", + v1.LabelInstanceHypervisor: "nitro", + v1.LabelInstanceEncryptionInTransitSupported: "true", + v1.LabelInstanceCategory: "g", + v1.LabelInstanceGeneration: "4", + v1.LabelInstanceFamily: "g4dn", + v1.LabelInstanceSize: "8xlarge", + v1.LabelInstanceCPU: "32", + v1.LabelInstanceCPUManufacturer: "intel", + v1.LabelInstanceMemory: "131072", + 
v1.LabelInstanceEBSBandwidth: "9500", + v1.LabelInstanceNetworkBandwidth: "50000", + v1.LabelInstanceGPUName: "t4", + v1.LabelInstanceGPUManufacturer: "nvidia", + v1.LabelInstanceGPUCount: "1", + v1.LabelInstanceGPUMemory: "16384", + v1.LabelInstanceLocalNVME: "900", + v1.LabelTopologyZoneID: "tstz1-1a", // Deprecated Labels - v1.LabelFailureDomainBetaRegion: fake.DefaultRegion, - v1.LabelFailureDomainBetaZone: "test-zone-1a", - "beta.kubernetes.io/arch": "amd64", - "beta.kubernetes.io/os": "linux", - v1.LabelInstanceType: "g4dn.8xlarge", - "topology.ebs.csi.aws.com/zone": "test-zone-1a", + corev1.LabelFailureDomainBetaRegion: fake.DefaultRegion, + corev1.LabelFailureDomainBetaZone: "test-zone-1a", + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + corev1.LabelInstanceType: "g4dn.8xlarge", + "topology.ebs.csi.aws.com/zone": "test-zone-1a", } // Ensure that we're exercising all well known labels except for accelerator labels Expect(lo.Keys(nodeSelector)).To(ContainElements( append( - corev1beta1.WellKnownLabels.Difference(sets.New( - v1beta1.LabelInstanceAcceleratorCount, - v1beta1.LabelInstanceAcceleratorName, - v1beta1.LabelInstanceAcceleratorManufacturer, - v1.LabelWindowsBuild, - )).UnsortedList(), lo.Keys(corev1beta1.NormalizedLabels)...))) + karpv1.WellKnownLabels.Difference(sets.New( + v1.LabelInstanceAcceleratorCount, + v1.LabelInstanceAcceleratorName, + v1.LabelInstanceAcceleratorManufacturer, + corev1.LabelWindowsBuild, + )).UnsortedList(), lo.Keys(karpv1.NormalizedLabels)...))) pod := coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: nodeSelector}) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) @@ -326,47 +326,47 @@ var _ = Describe("InstanceTypeProvider", func() { nodeSelector := map[string]string{ // Well known - corev1beta1.NodePoolLabelKey: nodePool.Name, - v1.LabelTopologyRegion: fake.DefaultRegion, - v1.LabelTopologyZone: "test-zone-1a", - v1.LabelInstanceTypeStable: "inf1.2xlarge", - v1.LabelOSStable: "linux", - v1.LabelArchStable: "amd64", - corev1beta1.CapacityTypeLabelKey: "on-demand", + karpv1.NodePoolLabelKey: nodePool.Name, + corev1.LabelTopologyRegion: fake.DefaultRegion, + corev1.LabelTopologyZone: "test-zone-1a", + corev1.LabelInstanceTypeStable: "inf1.2xlarge", + corev1.LabelOSStable: "linux", + corev1.LabelArchStable: "amd64", + karpv1.CapacityTypeLabelKey: "on-demand", // Well Known to AWS - v1beta1.LabelInstanceHypervisor: "nitro", - v1beta1.LabelInstanceEncryptionInTransitSupported: "true", - v1beta1.LabelInstanceCategory: "inf", - v1beta1.LabelInstanceGeneration: "1", - v1beta1.LabelInstanceFamily: "inf1", - v1beta1.LabelInstanceSize: "2xlarge", - v1beta1.LabelInstanceCPU: "8", - v1beta1.LabelInstanceCPUManufacturer: "intel", - v1beta1.LabelInstanceMemory: "16384", - v1beta1.LabelInstanceEBSBandwidth: "4750", - v1beta1.LabelInstanceNetworkBandwidth: "5000", - v1beta1.LabelInstanceAcceleratorName: "inferentia", - v1beta1.LabelInstanceAcceleratorManufacturer: "aws", - v1beta1.LabelInstanceAcceleratorCount: "1", - v1beta1.LabelTopologyZoneID: "tstz1-1a", + v1.LabelInstanceHypervisor: "nitro", + v1.LabelInstanceEncryptionInTransitSupported: "true", + v1.LabelInstanceCategory: "inf", + v1.LabelInstanceGeneration: "1", + v1.LabelInstanceFamily: "inf1", + v1.LabelInstanceSize: "2xlarge", + v1.LabelInstanceCPU: "8", + v1.LabelInstanceCPUManufacturer: "intel", + v1.LabelInstanceMemory: "16384", + v1.LabelInstanceEBSBandwidth: "4750", + v1.LabelInstanceNetworkBandwidth: "5000", + 
v1.LabelInstanceAcceleratorName: "inferentia", + v1.LabelInstanceAcceleratorManufacturer: "aws", + v1.LabelInstanceAcceleratorCount: "1", + v1.LabelTopologyZoneID: "tstz1-1a", // Deprecated Labels - v1.LabelFailureDomainBetaRegion: fake.DefaultRegion, - v1.LabelFailureDomainBetaZone: "test-zone-1a", - "beta.kubernetes.io/arch": "amd64", - "beta.kubernetes.io/os": "linux", - v1.LabelInstanceType: "inf1.2xlarge", - "topology.ebs.csi.aws.com/zone": "test-zone-1a", + corev1.LabelFailureDomainBetaRegion: fake.DefaultRegion, + corev1.LabelFailureDomainBetaZone: "test-zone-1a", + "beta.kubernetes.io/arch": "amd64", + "beta.kubernetes.io/os": "linux", + corev1.LabelInstanceType: "inf1.2xlarge", + "topology.ebs.csi.aws.com/zone": "test-zone-1a", } // Ensure that we're exercising all well known labels except for gpu labels and nvme - expectedLabels := append(corev1beta1.WellKnownLabels.Difference(sets.New( - v1beta1.LabelInstanceGPUCount, - v1beta1.LabelInstanceGPUName, - v1beta1.LabelInstanceGPUManufacturer, - v1beta1.LabelInstanceGPUMemory, - v1beta1.LabelInstanceLocalNVME, - v1.LabelWindowsBuild, - )).UnsortedList(), lo.Keys(corev1beta1.NormalizedLabels)...) + expectedLabels := append(karpv1.WellKnownLabels.Difference(sets.New( + v1.LabelInstanceGPUCount, + v1.LabelInstanceGPUName, + v1.LabelInstanceGPUManufacturer, + v1.LabelInstanceGPUMemory, + v1.LabelInstanceLocalNVME, + corev1.LabelWindowsBuild, + )).UnsortedList(), lo.Keys(karpv1.NormalizedLabels)...) Expect(lo.Keys(nodeSelector)).To(ContainElements(expectedLabels)) pod := coretest.UnschedulablePod(coretest.PodOptions{NodeSelector: nodeSelector}) @@ -377,11 +377,11 @@ var _ = Describe("InstanceTypeProvider", func() { ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod(coretest.PodOptions{ NodeSelector: map[string]string{ - v1.LabelInstanceTypeStable: "t3.large", + corev1.LabelInstanceTypeStable: "t3.large", }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceAWSPodENI: resource.MustParse("1")}, - Limits: v1.ResourceList{v1beta1.ResourceAWSPodENI: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceAWSPodENI: resource.MustParse("1")}, + Limits: corev1.ResourceList{v1.ResourceAWSPodENI: resource.MustParse("1")}, }, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) @@ -399,9 +399,9 @@ var _ = Describe("InstanceTypeProvider", func() { Expect(awsEnv.InstanceTypesProvider.UpdateInstanceTypeOfferings(ctx)).To(Succeed()) ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, - Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, + Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, }, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) @@ -443,14 +443,14 @@ var _ = Describe("InstanceTypeProvider", func() { Expect(awsEnv.InstanceTypesProvider.UpdateInstanceTypes(ctx)).To(Succeed()) Expect(awsEnv.InstanceTypesProvider.UpdateInstanceTypeOfferings(ctx)).To(Succeed()) - nodePool.Spec.Template.Spec.Requirements = []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodePool.Spec.Template.Spec.Requirements = 
[]karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, Values: []string{ - corev1beta1.CapacityTypeSpot, - corev1beta1.CapacityTypeOnDemand, + karpv1.CapacityTypeSpot, + karpv1.CapacityTypeOnDemand, }, }, }, @@ -462,9 +462,9 @@ var _ = Describe("InstanceTypeProvider", func() { Expect(awsEnv.InstanceTypesProvider.UpdateInstanceTypeOfferings(ctx)).To(Succeed()) pod := coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, - Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, + Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, }, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) @@ -506,12 +506,12 @@ var _ = Describe("InstanceTypeProvider", func() { }) It("should not remove expensive metal instanceTypeOptions if any of the requirement with minValues is provided", func() { // Construct requirements with minValues for capacityType requirement. - nodePool.Spec.Template.Spec.Requirements = []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodePool.Spec.Template.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeSpot}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeSpot}, }, MinValues: lo.ToPtr(1), }, @@ -520,9 +520,9 @@ var _ = Describe("InstanceTypeProvider", func() { // Apply requirements and schedule pods. 
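For reviewers new to minValues: the v1 requirement type wraps a plain corev1.NodeSelectorRequirement and adds a floor on how many distinct values must survive filtering. A sketch of the shape this test constructs (fixture values as above):

```go
nodePool.Spec.Template.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{{
	NodeSelectorRequirement: corev1.NodeSelectorRequirement{
		Key:      karpv1.CapacityTypeLabelKey,
		Operator: corev1.NodeSelectorOpIn,
		Values:   []string{karpv1.CapacityTypeSpot},
	},
	// At least this many distinct values must remain for the key after
	// instance-type filtering, which is why the presence of minValues keeps
	// the expensive metal options from being pruned in this test.
	MinValues: lo.ToPtr(1),
}}
```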
ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, - Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, + Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, }, }) @@ -544,9 +544,9 @@ var _ = Describe("InstanceTypeProvider", func() { It("should de-prioritize metal", func() { ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, - Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, + Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, }, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) @@ -563,9 +563,9 @@ var _ = Describe("InstanceTypeProvider", func() { It("should de-prioritize gpu types", func() { ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, - Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, + Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, }, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) @@ -581,20 +581,20 @@ var _ = Describe("InstanceTypeProvider", func() { }) It("should launch on metal", func() { // add a nodePool requirement for instance type exists to remove our default filter for metal sizes - nodePool.Spec.Template.Spec.Requirements = append(nodePool.Spec.Template.Spec.Requirements, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpExists, + nodePool.Spec.Template.Spec.Requirements = append(nodePool.Spec.Template.Spec.Requirements, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpExists, }, }) ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod(coretest.PodOptions{ NodeSelector: map[string]string{ - v1beta1.LabelInstanceSize: "metal", + v1.LabelInstanceSize: "metal", }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, - Limits: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, + Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, }, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) @@ -603,33 +603,33 @@ var _ = Describe("InstanceTypeProvider", func() { It("should launch vpc.amazonaws.com/pod-eni on a compatible instance type", func() { ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := 
coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceAWSPodENI: resource.MustParse("1")}, - Limits: v1.ResourceList{v1beta1.ResourceAWSPodENI: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceAWSPodENI: resource.MustParse("1")}, + Limits: corev1.ResourceList{v1.ResourceAWSPodENI: resource.MustParse("1")}, }, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKey(v1.LabelInstanceTypeStable)) + Expect(node.Labels).To(HaveKey(corev1.LabelInstanceTypeStable)) supportsPodENI := func() bool { - limits, ok := instancetype.Limits[node.Labels[v1.LabelInstanceTypeStable]] + limits, ok := instancetype.Limits[node.Labels[corev1.LabelInstanceTypeStable]] return ok && limits.IsTrunkingCompatible } Expect(supportsPodENI()).To(Equal(true)) }) It("should launch vpc.amazonaws.com/PrivateIPv4Address on a compatible instance type", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyWindows2022 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyWindows2022 ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourcePrivateIPv4Address: resource.MustParse("1")}, - Limits: v1.ResourceList{v1beta1.ResourcePrivateIPv4Address: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourcePrivateIPv4Address: resource.MustParse("1")}, + Limits: corev1.ResourceList{v1.ResourcePrivateIPv4Address: resource.MustParse("1")}, }, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKey(v1.LabelInstanceTypeStable)) - limits, ok := instancetype.Limits[node.Labels[v1.LabelInstanceTypeStable]] + Expect(node.Labels).To(HaveKey(corev1.LabelInstanceTypeStable)) + limits, ok := instancetype.Limits[node.Labels[corev1.LabelInstanceTypeStable]] Expect(ok).To(BeTrue()) Expect(limits.IPv4PerInterface).ToNot(BeZero()) }) @@ -672,19 +672,19 @@ var _ = Describe("InstanceTypeProvider", func() { Expect(awsEnv.InstanceTypesProvider.UpdateInstanceTypes(ctx)).To(Succeed()) Expect(awsEnv.InstanceTypesProvider.UpdateInstanceTypeOfferings(ctx)).To(Succeed()) - nodePool.Spec.Template.Spec.Requirements = append(nodePool.Spec.Template.Spec.Requirements, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + nodePool.Spec.Template.Spec.Requirements = append(nodePool.Spec.Template.Spec.Requirements, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: []string{"test"}, }, }) - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyWindows2022 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyWindows2022 ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourcePrivateIPv4Address: resource.MustParse("1")}, - Limits: v1.ResourceList{v1beta1.ResourcePrivateIPv4Address: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + 
Requests: corev1.ResourceList{v1.ResourcePrivateIPv4Address: resource.MustParse("1")}, + Limits: corev1.ResourceList{v1.ResourcePrivateIPv4Address: resource.MustParse("1")}, }, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) @@ -693,32 +693,32 @@ var _ = Describe("InstanceTypeProvider", func() { It("should launch instances for nvidia.com/gpu resource requests", func() { nodeNames := sets.NewString() ExpectApplied(ctx, env.Client, nodePool, nodeClass) - pods := []*v1.Pod{ + pods := []*corev1.Pod{ coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceNVIDIAGPU: resource.MustParse("1")}, - Limits: v1.ResourceList{v1beta1.ResourceNVIDIAGPU: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceNVIDIAGPU: resource.MustParse("1")}, + Limits: corev1.ResourceList{v1.ResourceNVIDIAGPU: resource.MustParse("1")}, }, }), // Should pack onto same instance coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceNVIDIAGPU: resource.MustParse("2")}, - Limits: v1.ResourceList{v1beta1.ResourceNVIDIAGPU: resource.MustParse("2")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceNVIDIAGPU: resource.MustParse("2")}, + Limits: corev1.ResourceList{v1.ResourceNVIDIAGPU: resource.MustParse("2")}, }, }), // Should pack onto a separate instance coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceNVIDIAGPU: resource.MustParse("4")}, - Limits: v1.ResourceList{v1beta1.ResourceNVIDIAGPU: resource.MustParse("4")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceNVIDIAGPU: resource.MustParse("4")}, + Limits: corev1.ResourceList{v1.ResourceNVIDIAGPU: resource.MustParse("4")}, }, }), } ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...) 
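The arithmetic behind the assertion that follows: Karpenter lands all three pods on p3.8xlarge, which carries four nvidia.com/gpu per node (the test fixtures mirror real instance data), so the 1-GPU and 2-GPU pods bin-pack onto one node while the 4-GPU pod forces a second, giving exactly two nodes.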
for _, pod := range pods { node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, "p3.8xlarge")) + Expect(node.Labels).To(HaveKeyWithValue(corev1.LabelInstanceTypeStable, "p3.8xlarge")) nodeNames.Insert(node.Name) } Expect(nodeNames.Len()).To(Equal(2)) @@ -726,30 +726,30 @@ var _ = Describe("InstanceTypeProvider", func() { It("should launch instances for habana.ai/gaudi resource requests", func() { nodeNames := sets.NewString() ExpectApplied(ctx, env.Client, nodePool, nodeClass) - pods := []*v1.Pod{ + pods := []*corev1.Pod{ coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceHabanaGaudi: resource.MustParse("1")}, - Limits: v1.ResourceList{v1beta1.ResourceHabanaGaudi: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceHabanaGaudi: resource.MustParse("1")}, + Limits: corev1.ResourceList{v1.ResourceHabanaGaudi: resource.MustParse("1")}, }, }), coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceHabanaGaudi: resource.MustParse("2")}, - Limits: v1.ResourceList{v1beta1.ResourceHabanaGaudi: resource.MustParse("2")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceHabanaGaudi: resource.MustParse("2")}, + Limits: corev1.ResourceList{v1.ResourceHabanaGaudi: resource.MustParse("2")}, }, }), coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceHabanaGaudi: resource.MustParse("4")}, - Limits: v1.ResourceList{v1beta1.ResourceHabanaGaudi: resource.MustParse("4")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceHabanaGaudi: resource.MustParse("4")}, + Limits: corev1.ResourceList{v1.ResourceHabanaGaudi: resource.MustParse("4")}, }, }), } ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...) 
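Same pattern, different arithmetic for the Gaudi case below: dl1.24xlarge offers eight habana.ai/gaudi accelerators, so the 1 + 2 + 4 requests all fit on a single node, which is why this test expects one node where the GPU test above expects two.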
for _, pod := range pods { node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, "dl1.24xlarge")) + Expect(node.Labels).To(HaveKeyWithValue(corev1.LabelInstanceTypeStable, "dl1.24xlarge")) nodeNames.Insert(node.Name) } Expect(nodeNames.Len()).To(Equal(1)) @@ -757,86 +757,86 @@ var _ = Describe("InstanceTypeProvider", func() { It("should launch instances for aws.amazon.com/neuron resource requests", func() { nodeNames := sets.NewString() ExpectApplied(ctx, env.Client, nodePool, nodeClass) - pods := []*v1.Pod{ + pods := []*corev1.Pod{ coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceAWSNeuron: resource.MustParse("1")}, - Limits: v1.ResourceList{v1beta1.ResourceAWSNeuron: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceAWSNeuron: resource.MustParse("1")}, + Limits: corev1.ResourceList{v1.ResourceAWSNeuron: resource.MustParse("1")}, }, }), // Should pack onto same instance coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceAWSNeuron: resource.MustParse("2")}, - Limits: v1.ResourceList{v1beta1.ResourceAWSNeuron: resource.MustParse("2")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceAWSNeuron: resource.MustParse("2")}, + Limits: corev1.ResourceList{v1.ResourceAWSNeuron: resource.MustParse("2")}, }, }), // Should pack onto a separate instance coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceAWSNeuron: resource.MustParse("4")}, - Limits: v1.ResourceList{v1beta1.ResourceAWSNeuron: resource.MustParse("4")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceAWSNeuron: resource.MustParse("4")}, + Limits: corev1.ResourceList{v1.ResourceAWSNeuron: resource.MustParse("4")}, }, }), } ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...) 
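And again for Neuron: inf1.6xlarge exposes four aws.amazon.com/neuron devices, so the 1- and 2-chip pods share a node and the 4-chip pod gets its own, hence the two-node expectation. The trn1 case that follows instead pins the instance type explicitly with a NodeSelectorOpIn requirement on trn1.2xlarge.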
for _, pod := range pods { node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, "inf1.6xlarge")) + Expect(node.Labels).To(HaveKeyWithValue(corev1.LabelInstanceTypeStable, "inf1.6xlarge")) nodeNames.Insert(node.Name) } Expect(nodeNames.Len()).To(Equal(2)) }) It("should launch trn1 instances for aws.amazon.com/neuron resource requests", func() { nodeNames := sets.NewString() - nodePool.Spec.Template.Spec.Requirements = []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodePool.Spec.Template.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: []string{"trn1.2xlarge"}, }, }, } ExpectApplied(ctx, env.Client, nodePool, nodeClass) - pods := []*v1.Pod{ + pods := []*corev1.Pod{ coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceAWSNeuron: resource.MustParse("1")}, - Limits: v1.ResourceList{v1beta1.ResourceAWSNeuron: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceAWSNeuron: resource.MustParse("1")}, + Limits: corev1.ResourceList{v1.ResourceAWSNeuron: resource.MustParse("1")}, }, }), } ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...) for _, pod := range pods { node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, "trn1.2xlarge")) + Expect(node.Labels).To(HaveKeyWithValue(corev1.LabelInstanceTypeStable, "trn1.2xlarge")) nodeNames.Insert(node.Name) } Expect(nodeNames.Len()).To(Equal(1)) }) It("should launch instances for vpc.amazonaws.com/efa resource requests", func() { - nodePool.Spec.Template.Spec.Requirements = []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodePool.Spec.Template.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: []string{"dl1.24xlarge"}, }, }, } ExpectApplied(ctx, env.Client, nodePool, nodeClass) - pods := []*v1.Pod{ + pods := []*corev1.Pod{ coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceEFA: resource.MustParse("1")}, - Limits: v1.ResourceList{v1beta1.ResourceEFA: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceEFA: resource.MustParse("1")}, + Limits: corev1.ResourceList{v1.ResourceEFA: resource.MustParse("1")}, }, }), coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceEFA: resource.MustParse("2")}, - Limits: v1.ResourceList{v1beta1.ResourceEFA: resource.MustParse("2")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceEFA: resource.MustParse("2")}, + Limits: corev1.ResourceList{v1.ResourceEFA: resource.MustParse("2")}, }, }), } @@ -844,7 +844,7 @@ var _ = Describe("InstanceTypeProvider", func() { nodes := 
sets.NewString() for _, pod := range pods { node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, "dl1.24xlarge")) + Expect(node.Labels).To(HaveKeyWithValue(corev1.LabelInstanceTypeStable, "dl1.24xlarge")) nodes.Insert(node.Name) } Expect(nodes.Len()).To(Equal(1)) @@ -852,32 +852,32 @@ var _ = Describe("InstanceTypeProvider", func() { It("should launch instances for amd.com/gpu resource requests", func() { nodeNames := sets.NewString() ExpectApplied(ctx, env.Client, nodePool, nodeClass) - pods := []*v1.Pod{ + pods := []*corev1.Pod{ coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceAMDGPU: resource.MustParse("1")}, - Limits: v1.ResourceList{v1beta1.ResourceAMDGPU: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceAMDGPU: resource.MustParse("1")}, + Limits: corev1.ResourceList{v1.ResourceAMDGPU: resource.MustParse("1")}, }, }), // Should pack onto same instance coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceAMDGPU: resource.MustParse("2")}, - Limits: v1.ResourceList{v1beta1.ResourceAMDGPU: resource.MustParse("2")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceAMDGPU: resource.MustParse("2")}, + Limits: corev1.ResourceList{v1.ResourceAMDGPU: resource.MustParse("2")}, }, }), // Should pack onto a separate instance coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceAMDGPU: resource.MustParse("4")}, - Limits: v1.ResourceList{v1beta1.ResourceAMDGPU: resource.MustParse("4")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceAMDGPU: resource.MustParse("4")}, + Limits: corev1.ResourceList{v1.ResourceAMDGPU: resource.MustParse("4")}, }, }), } ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...) 
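The amd.com/gpu case mirrors the NVIDIA one: g4ad.16xlarge carries four AMD GPUs in the fixture data, so the 1- and 2-GPU requests pack together and the 4-GPU pod lands on a second node, again yielding two nodes.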
for _, pod := range pods { node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, "g4ad.16xlarge")) + Expect(node.Labels).To(HaveKeyWithValue(corev1.LabelInstanceTypeStable, "g4ad.16xlarge")) nodeNames.Insert(node.Name) } Expect(nodeNames.Len()).To(Equal(2)) @@ -885,29 +885,30 @@ var _ = Describe("InstanceTypeProvider", func() { It("should not launch instances w/ instance storage for ephemeral storage resource requests when exceeding blockDeviceMapping", func() { ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceEphemeralStorage: resource.MustParse("5000Gi")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceEphemeralStorage: resource.MustParse("5000Gi")}, }, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should launch instances w/ instance storage for ephemeral storage resource requests when disks are mounted for ephemeral-storage", func() { - nodeClass.Spec.InstanceStorePolicy = lo.ToPtr(v1beta1.InstanceStorePolicyRAID0) + nodeClass.Spec.InstanceStorePolicy = lo.ToPtr(v1.InstanceStorePolicyRAID0) ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceEphemeralStorage: resource.MustParse("5000Gi")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceEphemeralStorage: resource.MustParse("5000Gi")}, }, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("m6idn.32xlarge")) + Expect(node.Labels[corev1.LabelInstanceTypeStable]).To(Equal("m6idn.32xlarge")) Expect(*node.Status.Capacity.StorageEphemeral()).To(Equal(resource.MustParse("7600G"))) }) It("should not set pods to 110 if using ENI-based pod density", func() { instanceInfo, err := awsEnv.EC2API.DescribeInstanceTypesWithContext(ctx, &ec2.DescribeInstanceTypesInput{}) Expect(err).To(BeNil()) + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{} for _, info := range instanceInfo.InstanceTypes { amiFamily := amifamily.GetAMIFamily(nodeClass.Spec.AMIFamily, &amifamily.Options{}) it := instancetype.NewInstanceType(ctx, @@ -915,12 +916,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -930,7 +931,7 @@ var _ = Describe("InstanceTypeProvider", func() { It("should set pods to 110 if AMI Family doesn't support", func() { instanceInfo, err := awsEnv.EC2API.DescribeInstanceTypesWithContext(ctx, &ec2.DescribeInstanceTypesInput{}) Expect(err).To(BeNil()) - + 
nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{} for _, info := range instanceInfo.InstanceTypes { amiFamily := amifamily.GetAMIFamily(windowsNodeClass.Spec.AMIFamily, &amifamily.Options{}) it := instancetype.NewInstanceType(ctx, @@ -938,12 +939,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, windowsNodeClass.Spec.BlockDeviceMappings, windowsNodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -952,7 +953,7 @@ var _ = Describe("InstanceTypeProvider", func() { }) Context("Metrics", func() { It("should expose vcpu metrics for instance types", func() { - instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, nodePool.Spec.Template.Spec.Kubelet, nodeClass) + instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, nodeClass.Spec.Kubelet, nodeClass) Expect(err).To(BeNil()) Expect(len(instanceTypes)).To(BeNumerically(">", 0)) for _, it := range instanceTypes { @@ -966,7 +967,7 @@ var _ = Describe("InstanceTypeProvider", func() { } }) It("should expose memory metrics for instance types", func() { - instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, nodePool.Spec.Template.Spec.Kubelet, nodeClass) + instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, nodeClass.Spec.Kubelet, nodeClass) Expect(err).To(BeNil()) Expect(len(instanceTypes)).To(BeNumerically(">", 0)) for _, it := range instanceTypes { @@ -980,15 +981,15 @@ var _ = Describe("InstanceTypeProvider", func() { } }) It("should expose availability metrics for instance types", func() { - instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, nodePool.Spec.Template.Spec.Kubelet, nodeClass) + instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, nodeClass.Spec.Kubelet, nodeClass) Expect(err).To(BeNil()) Expect(len(instanceTypes)).To(BeNumerically(">", 0)) for _, it := range instanceTypes { for _, of := range it.Offerings { metric, ok := FindMetricWithLabelValues("karpenter_cloudprovider_instance_type_offering_available", map[string]string{ "instance_type": it.Name, - "capacity_type": of.Requirements.Get(corev1beta1.CapacityTypeLabelKey).Any(), - "zone": of.Requirements.Get(v1.LabelTopologyZone).Any(), + "capacity_type": of.Requirements.Get(karpv1.CapacityTypeLabelKey).Any(), + "zone": of.Requirements.Get(corev1.LabelTopologyZone).Any(), }) Expect(ok).To(BeTrue()) Expect(metric).To(Not(BeNil())) @@ -998,15 +999,15 @@ var _ = Describe("InstanceTypeProvider", func() { } }) It("should expose pricing metrics for instance types", func() { - instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, nodePool.Spec.Template.Spec.Kubelet, nodeClass) + instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, nodeClass.Spec.Kubelet, nodeClass) Expect(err).To(BeNil()) Expect(len(instanceTypes)).To(BeNumerically(">", 0)) for _, it := range instanceTypes { for _, of := range it.Offerings { metric, ok := FindMetricWithLabelValues("karpenter_cloudprovider_instance_type_offering_price_estimate", map[string]string{ "instance_type": it.Name, - 
"capacity_type": of.Requirements.Get(corev1beta1.CapacityTypeLabelKey).Any(), - "zone": of.Requirements.Get(v1.LabelTopologyZone).Any(), + "capacity_type": of.Requirements.Get(karpv1.CapacityTypeLabelKey).Any(), + "zone": of.Requirements.Get(corev1.LabelTopologyZone).Any(), }) Expect(ok).To(BeTrue()) Expect(metric).To(Not(BeNil())) @@ -1017,7 +1018,7 @@ var _ = Describe("InstanceTypeProvider", func() { }) }) It("should launch instances in local zones", func() { - nodeClass.Status.Subnets = []v1beta1.Subnet{ + nodeClass.Status.Subnets = []v1.Subnet{ { ID: "subnet-test1", Zone: "test-zone-1a-local", @@ -1025,9 +1026,9 @@ var _ = Describe("InstanceTypeProvider", func() { } ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod(coretest.PodOptions{ - NodeRequirements: []v1.NodeSelectorRequirement{{ - Key: v1.LabelTopologyZone, - Operator: v1.NodeSelectorOpIn, + NodeRequirements: []corev1.NodeSelectorRequirement{{ + Key: corev1.LabelTopologyZone, + Operator: corev1.NodeSelectorOpIn, Values: []string{"test-zone-1a-local"}, }}, }) @@ -1052,17 +1053,18 @@ var _ = Describe("InstanceTypeProvider", func() { Context("System Reserved Resources", func() { It("should use defaults when no kubelet is specified", func() { amiFamily := amifamily.GetAMIFamily(nodeClass.Spec.AMIFamily, &amifamily.Options{}) + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{} it := instancetype.NewInstanceType(ctx, info, fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1071,11 +1073,11 @@ var _ = Describe("InstanceTypeProvider", func() { Expect(it.Overhead.SystemReserved.StorageEphemeral().String()).To(Equal("0")) }) It("should override system reserved cpus when specified", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ SystemReserved: map[string]string{ - string(v1.ResourceCPU): "2", - string(v1.ResourceMemory): "20Gi", - string(v1.ResourceEphemeralStorage): "10Gi", + string(corev1.ResourceCPU): "2", + string(corev1.ResourceMemory): "20Gi", + string(corev1.ResourceEphemeralStorage): "10Gi", }, } amiFamily := amifamily.GetAMIFamily(nodeClass.Spec.AMIFamily, &amifamily.Options{}) @@ -1084,12 +1086,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1100,19 +1102,19 @@ var _ 
= Describe("InstanceTypeProvider", func() { }) Context("Kube Reserved Resources", func() { It("should use defaults when no kubelet is specified", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{} + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{} amiFamily := amifamily.GetAMIFamily(nodeClass.Spec.AMIFamily, &amifamily.Options{}) it := instancetype.NewInstanceType(ctx, info, fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1121,16 +1123,16 @@ var _ = Describe("InstanceTypeProvider", func() { Expect(it.Overhead.KubeReserved.StorageEphemeral().String()).To(Equal("1Gi")) }) It("should override kube reserved when specified", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ SystemReserved: map[string]string{ - string(v1.ResourceCPU): "1", - string(v1.ResourceMemory): "20Gi", - string(v1.ResourceEphemeralStorage): "1Gi", + string(corev1.ResourceCPU): "1", + string(corev1.ResourceMemory): "20Gi", + string(corev1.ResourceEphemeralStorage): "1Gi", }, KubeReserved: map[string]string{ - string(v1.ResourceCPU): "2", - string(v1.ResourceMemory): "10Gi", - string(v1.ResourceEphemeralStorage): "2Gi", + string(corev1.ResourceCPU): "2", + string(corev1.ResourceMemory): "10Gi", + string(corev1.ResourceEphemeralStorage): "2Gi", }, } amiFamily := amifamily.GetAMIFamily(nodeClass.Spec.AMIFamily, &amifamily.Options{}) @@ -1139,12 +1141,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1161,12 +1163,12 @@ var _ = Describe("InstanceTypeProvider", func() { }) Context("Eviction Hard", func() { It("should override eviction threshold when specified as a quantity", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ SystemReserved: map[string]string{ - string(v1.ResourceMemory): "20Gi", + string(corev1.ResourceMemory): "20Gi", }, KubeReserved: map[string]string{ - string(v1.ResourceMemory): "10Gi", + string(corev1.ResourceMemory): "10Gi", }, EvictionHard: map[string]string{ instancetype.MemoryAvailable: "500Mi", @@ -1178,24 +1180,24 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, 
- nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) Expect(it.Overhead.EvictionThreshold.Memory().String()).To(Equal("500Mi")) }) It("should override eviction threshold when specified as a percentage value", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ SystemReserved: map[string]string{ - string(v1.ResourceMemory): "20Gi", + string(corev1.ResourceMemory): "20Gi", }, KubeReserved: map[string]string{ - string(v1.ResourceMemory): "10Gi", + string(corev1.ResourceMemory): "10Gi", }, EvictionHard: map[string]string{ instancetype.MemoryAvailable: "10%", @@ -1207,24 +1209,24 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) Expect(it.Overhead.EvictionThreshold.Memory().Value()).To(BeNumerically("~", float64(it.Capacity.Memory().Value())*0.1, 10)) }) It("should consider the eviction threshold disabled when specified as 100%", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ SystemReserved: map[string]string{ - string(v1.ResourceMemory): "20Gi", + string(corev1.ResourceMemory): "20Gi", }, KubeReserved: map[string]string{ - string(v1.ResourceMemory): "10Gi", + string(corev1.ResourceMemory): "10Gi", }, EvictionHard: map[string]string{ instancetype.MemoryAvailable: "100%", @@ -1236,24 +1238,24 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) Expect(it.Overhead.EvictionThreshold.Memory().String()).To(Equal("0")) }) It("should use default eviction threshold for memory when evictionHard not specified", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ SystemReserved: 
map[string]string{ - string(v1.ResourceMemory): "20Gi", + string(corev1.ResourceMemory): "20Gi", }, KubeReserved: map[string]string{ - string(v1.ResourceMemory): "10Gi", + string(corev1.ResourceMemory): "10Gi", }, EvictionSoft: map[string]string{ instancetype.MemoryAvailable: "50Mi", @@ -1265,12 +1267,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1279,12 +1281,12 @@ var _ = Describe("InstanceTypeProvider", func() { }) Context("Eviction Soft", func() { It("should override eviction threshold when specified as a quantity", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ SystemReserved: map[string]string{ - string(v1.ResourceMemory): "20Gi", + string(corev1.ResourceMemory): "20Gi", }, KubeReserved: map[string]string{ - string(v1.ResourceMemory): "10Gi", + string(corev1.ResourceMemory): "10Gi", }, EvictionSoft: map[string]string{ instancetype.MemoryAvailable: "500Mi", @@ -1296,24 +1298,24 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) Expect(it.Overhead.EvictionThreshold.Memory().String()).To(Equal("500Mi")) }) It("should override eviction threshold when specified as a percentage value", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ SystemReserved: map[string]string{ - string(v1.ResourceMemory): "20Gi", + string(corev1.ResourceMemory): "20Gi", }, KubeReserved: map[string]string{ - string(v1.ResourceMemory): "10Gi", + string(corev1.ResourceMemory): "10Gi", }, EvictionHard: map[string]string{ instancetype.MemoryAvailable: "5%", @@ -1328,24 +1330,24 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + 
nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) Expect(it.Overhead.EvictionThreshold.Memory().Value()).To(BeNumerically("~", float64(it.Capacity.Memory().Value())*0.1, 10)) }) It("should consider the eviction threshold disabled when specified as 100%", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ SystemReserved: map[string]string{ - string(v1.ResourceMemory): "20Gi", + string(corev1.ResourceMemory): "20Gi", }, KubeReserved: map[string]string{ - string(v1.ResourceMemory): "10Gi", + string(corev1.ResourceMemory): "10Gi", }, EvictionSoft: map[string]string{ instancetype.MemoryAvailable: "100%", @@ -1357,25 +1359,25 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) Expect(it.Overhead.EvictionThreshold.Memory().String()).To(Equal("0")) }) It("should ignore eviction threshold when using Bottlerocket AMI", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.AMIFamily = &v1.AMIFamilyBottlerocket + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ SystemReserved: map[string]string{ - string(v1.ResourceMemory): "20Gi", + string(corev1.ResourceMemory): "20Gi", }, KubeReserved: map[string]string{ - string(v1.ResourceMemory): "10Gi", + string(corev1.ResourceMemory): "10Gi", }, EvictionHard: map[string]string{ instancetype.MemoryAvailable: "1Gi", @@ -1390,12 +1392,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1403,19 +1405,19 @@ var _ = Describe("InstanceTypeProvider", func() { }) }) It("should take the default eviction threshold when none is specified", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{} + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{} amiFamily := amifamily.GetAMIFamily(nodeClass.Spec.AMIFamily, &amifamily.Options{}) it := instancetype.NewInstanceType(ctx, info, fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - 
nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1424,12 +1426,12 @@ var _ = Describe("InstanceTypeProvider", func() { Expect(it.Overhead.EvictionThreshold.StorageEphemeral().AsApproximateFloat64()).To(BeNumerically("~", resources.Quantity("2Gi").AsApproximateFloat64())) }) It("should take the greater of evictionHard and evictionSoft for overhead as a value", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ SystemReserved: map[string]string{ - string(v1.ResourceMemory): "20Gi", + string(corev1.ResourceMemory): "20Gi", }, KubeReserved: map[string]string{ - string(v1.ResourceMemory): "10Gi", + string(corev1.ResourceMemory): "10Gi", }, EvictionSoft: map[string]string{ instancetype.MemoryAvailable: "3Gi", @@ -1444,24 +1446,24 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) Expect(it.Overhead.EvictionThreshold.Memory().String()).To(Equal("3Gi")) }) It("should take the greater of evictionHard and evictionSoft for overhead as a percentage", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ SystemReserved: map[string]string{ - string(v1.ResourceMemory): "20Gi", + string(corev1.ResourceMemory): "20Gi", }, KubeReserved: map[string]string{ - string(v1.ResourceMemory): "10Gi", + string(corev1.ResourceMemory): "10Gi", }, EvictionSoft: map[string]string{ instancetype.MemoryAvailable: "2%", @@ -1476,24 +1478,24 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) Expect(it.Overhead.EvictionThreshold.Memory().Value()).To(BeNumerically("~", float64(it.Capacity.Memory().Value())*0.05, 10)) }) It("should take the greater of evictionHard and evictionSoft for overhead with mixed percentage/value", func() { - nodePool.Spec.Template.Spec.Kubelet = 
&corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ SystemReserved: map[string]string{ - string(v1.ResourceMemory): "20Gi", + string(corev1.ResourceMemory): "20Gi", }, KubeReserved: map[string]string{ - string(v1.ResourceMemory): "10Gi", + string(corev1.ResourceMemory): "10Gi", }, EvictionSoft: map[string]string{ instancetype.MemoryAvailable: "10%", @@ -1508,12 +1510,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1523,6 +1525,7 @@ var _ = Describe("InstanceTypeProvider", func() { It("should default max pods based off of network interfaces", func() { instanceInfo, err := awsEnv.EC2API.DescribeInstanceTypesWithContext(ctx, &ec2.DescribeInstanceTypesInput{}) Expect(err).To(BeNil()) + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{} for _, info := range instanceInfo.InstanceTypes { if *info.InstanceType == "t3.large" { amiFamily := amifamily.GetAMIFamily(nodeClass.Spec.AMIFamily, &amifamily.Options{}) @@ -1531,12 +1534,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1549,12 +1552,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1565,7 +1568,7 @@ var _ = Describe("InstanceTypeProvider", func() { It("should set max-pods to user-defined value if specified", func() { instanceInfo, err := awsEnv.EC2API.DescribeInstanceTypesWithContext(ctx, &ec2.DescribeInstanceTypesInput{}) Expect(err).To(BeNil()) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ MaxPods: lo.ToPtr(int32(10)), } for _, info := range instanceInfo.InstanceTypes { @@ -1575,12 
+1578,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1590,7 +1593,7 @@ var _ = Describe("InstanceTypeProvider", func() { It("should override max-pods value", func() { instanceInfo, err := awsEnv.EC2API.DescribeInstanceTypesWithContext(ctx, &ec2.DescribeInstanceTypesInput{}) Expect(err).To(BeNil()) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ MaxPods: lo.ToPtr(int32(10)), } for _, info := range instanceInfo.InstanceTypes { @@ -1600,12 +1603,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1624,17 +1627,18 @@ var _ = Describe("InstanceTypeProvider", func() { }) Expect(ok).To(Equal(true)) amiFamily := amifamily.GetAMIFamily(nodeClass.Spec.AMIFamily, &amifamily.Options{}) + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{} it := instancetype.NewInstanceType(ctx, t3Large, fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1658,17 +1662,18 @@ var _ = Describe("InstanceTypeProvider", func() { }) Expect(ok).To(Equal(true)) amiFamily := amifamily.GetAMIFamily(nodeClass.Spec.AMIFamily, &amifamily.Options{}) + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{} it := instancetype.NewInstanceType(ctx, t3Large, fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + 
nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1684,7 +1689,7 @@ var _ = Describe("InstanceTypeProvider", func() { It("should override pods-per-core value", func() { instanceInfo, err := awsEnv.EC2API.DescribeInstanceTypesWithContext(ctx, &ec2.DescribeInstanceTypesInput{}) Expect(err).To(BeNil()) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ PodsPerCore: lo.ToPtr(int32(1)), } for _, info := range instanceInfo.InstanceTypes { @@ -1694,12 +1699,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1709,7 +1714,7 @@ var _ = Describe("InstanceTypeProvider", func() { It("should take the minimum of pods-per-core and max-pods", func() { instanceInfo, err := awsEnv.EC2API.DescribeInstanceTypesWithContext(ctx, &ec2.DescribeInstanceTypesInput{}) Expect(err).To(BeNil()) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ PodsPerCore: lo.ToPtr(int32(4)), MaxPods: lo.ToPtr(int32(20)), } @@ -1720,12 +1725,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1735,8 +1740,8 @@ var _ = Describe("InstanceTypeProvider", func() { It("should ignore pods-per-core when using Bottlerocket AMI", func() { instanceInfo, err := awsEnv.EC2API.DescribeInstanceTypesWithContext(ctx, &ec2.DescribeInstanceTypesInput{}) Expect(err).To(BeNil()) - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.AMIFamily = &v1.AMIFamilyBottlerocket + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ PodsPerCore: lo.ToPtr(int32(1)), } for _, info := range instanceInfo.InstanceTypes { @@ -1746,12 +1751,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - 
nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1762,7 +1767,7 @@ var _ = Describe("InstanceTypeProvider", func() { It("should take limited pod density to be the default pods number when pods-per-core is 0", func() { instanceInfo, err := awsEnv.EC2API.DescribeInstanceTypesWithContext(ctx, &ec2.DescribeInstanceTypesInput{}) Expect(err).To(BeNil()) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ PodsPerCore: lo.ToPtr(int32(0)), } for _, info := range instanceInfo.InstanceTypes { @@ -1773,12 +1778,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1791,12 +1796,12 @@ var _ = Describe("InstanceTypeProvider", func() { fake.DefaultRegion, nodeClass.Spec.BlockDeviceMappings, nodeClass.Spec.InstanceStorePolicy, - nodePool.Spec.Template.Spec.Kubelet.MaxPods, - nodePool.Spec.Template.Spec.Kubelet.PodsPerCore, - nodePool.Spec.Template.Spec.Kubelet.KubeReserved, - nodePool.Spec.Template.Spec.Kubelet.SystemReserved, - nodePool.Spec.Template.Spec.Kubelet.EvictionHard, - nodePool.Spec.Template.Spec.Kubelet.EvictionSoft, + nodeClass.Spec.Kubelet.MaxPods, + nodeClass.Spec.Kubelet.PodsPerCore, + nodeClass.Spec.Kubelet.KubeReserved, + nodeClass.Spec.Kubelet.SystemReserved, + nodeClass.Spec.Kubelet.EvictionHard, + nodeClass.Spec.Kubelet.EvictionSoft, amiFamily, nil, ) @@ -1894,21 +1899,21 @@ var _ = Describe("InstanceTypeProvider", func() { }) Context("Insufficient Capacity Error Cache", func() { It("should launch instances of different type on second reconciliation attempt with Insufficient Capacity Error Cache fallback", func() { - awsEnv.EC2API.InsufficientCapacityPools.Set([]fake.CapacityPool{{CapacityType: corev1beta1.CapacityTypeOnDemand, InstanceType: "inf1.6xlarge", Zone: "test-zone-1a"}}) + awsEnv.EC2API.InsufficientCapacityPools.Set([]fake.CapacityPool{{CapacityType: karpv1.CapacityTypeOnDemand, InstanceType: "inf1.6xlarge", Zone: "test-zone-1a"}}) ExpectApplied(ctx, env.Client, nodePool, nodeClass) - pods := []*v1.Pod{ + pods := []*corev1.Pod{ coretest.UnschedulablePod(coretest.PodOptions{ - NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1a"}, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceAWSNeuron: resource.MustParse("1")}, - Limits: v1.ResourceList{v1beta1.ResourceAWSNeuron: resource.MustParse("1")}, + NodeSelector: map[string]string{corev1.LabelTopologyZone: "test-zone-1a"}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: 
corev1.ResourceList{v1.ResourceAWSNeuron: resource.MustParse("1")}, + Limits: corev1.ResourceList{v1.ResourceAWSNeuron: resource.MustParse("1")}, }, }), coretest.UnschedulablePod(coretest.PodOptions{ - NodeSelector: map[string]string{v1.LabelTopologyZone: "test-zone-1a"}, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceAWSNeuron: resource.MustParse("1")}, - Limits: v1.ResourceList{v1beta1.ResourceAWSNeuron: resource.MustParse("1")}, + NodeSelector: map[string]string{corev1.LabelTopologyZone: "test-zone-1a"}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceAWSNeuron: resource.MustParse("1")}, + Limits: corev1.ResourceList{v1.ResourceAWSNeuron: resource.MustParse("1")}, }, }), } @@ -1921,24 +1926,24 @@ var _ = Describe("InstanceTypeProvider", func() { ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...) for _, pod := range pods { node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKeyWithValue(v1beta1.LabelInstanceAcceleratorName, "inferentia")) + Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceAcceleratorName, "inferentia")) nodeNames.Insert(node.Name) } Expect(nodeNames.Len()).To(Equal(2)) }) It("should launch instances in a different zone on second reconciliation attempt with Insufficient Capacity Error Cache fallback", func() { - awsEnv.EC2API.InsufficientCapacityPools.Set([]fake.CapacityPool{{CapacityType: corev1beta1.CapacityTypeOnDemand, InstanceType: "p3.8xlarge", Zone: "test-zone-1a"}}) + awsEnv.EC2API.InsufficientCapacityPools.Set([]fake.CapacityPool{{CapacityType: karpv1.CapacityTypeOnDemand, InstanceType: "p3.8xlarge", Zone: "test-zone-1a"}}) pod := coretest.UnschedulablePod(coretest.PodOptions{ - NodeSelector: map[string]string{v1.LabelInstanceTypeStable: "p3.8xlarge"}, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceNVIDIAGPU: resource.MustParse("1")}, - Limits: v1.ResourceList{v1beta1.ResourceNVIDIAGPU: resource.MustParse("1")}, + NodeSelector: map[string]string{corev1.LabelInstanceTypeStable: "p3.8xlarge"}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceNVIDIAGPU: resource.MustParse("1")}, + Limits: corev1.ResourceList{v1.ResourceNVIDIAGPU: resource.MustParse("1")}, }, }) - pod.Spec.Affinity = &v1.Affinity{NodeAffinity: &v1.NodeAffinity{PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{ + pod.Spec.Affinity = &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{ { - Weight: 1, Preference: v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{ - {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1a"}}, + Weight: 1, Preference: corev1.NodeSelectorTerm{MatchExpressions: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelTopologyZone, Operator: corev1.NodeSelectorOpIn, Values: []string{"test-zone-1a"}}, }}, }, }}} @@ -1950,28 +1955,28 @@ var _ = Describe("InstanceTypeProvider", func() { ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(SatisfyAll( - HaveKeyWithValue(v1.LabelInstanceTypeStable, "p3.8xlarge"), - HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-1b"))) + HaveKeyWithValue(corev1.LabelInstanceTypeStable, "p3.8xlarge"), + HaveKeyWithValue(corev1.LabelTopologyZone, "test-zone-1b"))) }) 
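The hunks above and below all exercise Karpenter's insufficient-capacity-error (ICE) cache. A minimal sketch of the shared flow, using only identifiers that appear in this diff (`awsEnv.EC2API.InsufficientCapacityPools`, `awsEnv.UnavailableOfferingsCache`, `fake.CapacityPool`); the surrounding Ginkgo scaffolding is elided, so this is illustrative rather than a complete test:

```go
// Sketch of the ICE-cache flow these tests rely on; identifiers come from the
// hunks in this diff, everything else is elided test setup.

// 1. Seed the fake EC2 API with an exhausted capacity pool.
awsEnv.EC2API.InsufficientCapacityPools.Set([]fake.CapacityPool{
	{CapacityType: karpv1.CapacityTypeOnDemand, InstanceType: "inf1.6xlarge", Zone: "test-zone-1a"},
})
// 2. The first provisioning pass hits the ICE and records the offering as
//    unavailable, so the pod either stays pending (ExpectNotScheduled) or
//    falls back to another instance type, zone, or capacity type.
// 3. Once capacity returns, clear the pool and expire the cached offering:
awsEnv.EC2API.InsufficientCapacityPools.Set([]fake.CapacityPool{})
awsEnv.UnavailableOfferingsCache.Delete("inf1.6xlarge", "test-zone-1a", karpv1.CapacityTypeOnDemand)
// 4. The next provisioning pass schedules onto the previously unavailable offering.
```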
It("should launch smaller instances than optimal if larger instance launch results in Insufficient Capacity Error", func() { awsEnv.EC2API.InsufficientCapacityPools.Set([]fake.CapacityPool{ - {CapacityType: corev1beta1.CapacityTypeOnDemand, InstanceType: "m5.xlarge", Zone: "test-zone-1a"}, + {CapacityType: karpv1.CapacityTypeOnDemand, InstanceType: "m5.xlarge", Zone: "test-zone-1a"}, }) - nodePool.Spec.Template.Spec.Requirements = append(nodePool.Spec.Template.Spec.Requirements, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceType, - Operator: v1.NodeSelectorOpIn, + nodePool.Spec.Template.Spec.Requirements = append(nodePool.Spec.Template.Spec.Requirements, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceType, + Operator: corev1.NodeSelectorOpIn, Values: []string{"m5.large", "m5.xlarge"}, }, }) - pods := []*v1.Pod{} + pods := []*corev1.Pod{} for i := 0; i < 2; i++ { pods = append(pods, coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, }, NodeSelector: map[string]string{ - v1.LabelTopologyZone: "test-zone-1a", + corev1.LabelTopologyZone: "test-zone-1a", }, })) } @@ -1984,41 +1989,41 @@ var _ = Describe("InstanceTypeProvider", func() { ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pods...) for _, pod := range pods { node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("m5.large")) + Expect(node.Labels[corev1.LabelInstanceTypeStable]).To(Equal("m5.large")) } }) It("should launch instances on later reconciliation attempt with Insufficient Capacity Error Cache expiry", func() { - awsEnv.EC2API.InsufficientCapacityPools.Set([]fake.CapacityPool{{CapacityType: corev1beta1.CapacityTypeOnDemand, InstanceType: "inf1.6xlarge", Zone: "test-zone-1a"}}) + awsEnv.EC2API.InsufficientCapacityPools.Set([]fake.CapacityPool{{CapacityType: karpv1.CapacityTypeOnDemand, InstanceType: "inf1.6xlarge", Zone: "test-zone-1a"}}) ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod(coretest.PodOptions{ - NodeSelector: map[string]string{v1.LabelInstanceTypeStable: "inf1.6xlarge"}, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceAWSNeuron: resource.MustParse("2")}, - Limits: v1.ResourceList{v1beta1.ResourceAWSNeuron: resource.MustParse("2")}, + NodeSelector: map[string]string{corev1.LabelInstanceTypeStable: "inf1.6xlarge"}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceAWSNeuron: resource.MustParse("2")}, + Limits: corev1.ResourceList{v1.ResourceAWSNeuron: resource.MustParse("2")}, }, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) // capacity shortage is over - expire the item from the cache and try again awsEnv.EC2API.InsufficientCapacityPools.Set([]fake.CapacityPool{}) - awsEnv.UnavailableOfferingsCache.Delete("inf1.6xlarge", "test-zone-1a", corev1beta1.CapacityTypeOnDemand) + awsEnv.UnavailableOfferingsCache.Delete("inf1.6xlarge", "test-zone-1a", karpv1.CapacityTypeOnDemand) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) node := 
ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceTypeStable, "inf1.6xlarge")) + Expect(node.Labels).To(HaveKeyWithValue(corev1.LabelInstanceTypeStable, "inf1.6xlarge")) }) It("should launch instances in a different zone on second reconciliation attempt with Insufficient Capacity Error Cache fallback (Habana)", func() { - awsEnv.EC2API.InsufficientCapacityPools.Set([]fake.CapacityPool{{CapacityType: corev1beta1.CapacityTypeOnDemand, InstanceType: "dl1.24xlarge", Zone: "test-zone-1a"}}) + awsEnv.EC2API.InsufficientCapacityPools.Set([]fake.CapacityPool{{CapacityType: karpv1.CapacityTypeOnDemand, InstanceType: "dl1.24xlarge", Zone: "test-zone-1a"}}) pod := coretest.UnschedulablePod(coretest.PodOptions{ - NodeSelector: map[string]string{v1.LabelInstanceTypeStable: "dl1.24xlarge"}, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceHabanaGaudi: resource.MustParse("1")}, - Limits: v1.ResourceList{v1beta1.ResourceHabanaGaudi: resource.MustParse("1")}, + NodeSelector: map[string]string{corev1.LabelInstanceTypeStable: "dl1.24xlarge"}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceHabanaGaudi: resource.MustParse("1")}, + Limits: corev1.ResourceList{v1.ResourceHabanaGaudi: resource.MustParse("1")}, }, }) - pod.Spec.Affinity = &v1.Affinity{NodeAffinity: &v1.NodeAffinity{PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{ + pod.Spec.Affinity = &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{ { - Weight: 1, Preference: v1.NodeSelectorTerm{MatchExpressions: []v1.NodeSelectorRequirement{ - {Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1a"}}, + Weight: 1, Preference: corev1.NodeSelectorTerm{MatchExpressions: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelTopologyZone, Operator: corev1.NodeSelectorOpIn, Values: []string{"test-zone-1a"}}, }}, }, }}} @@ -2030,19 +2035,19 @@ var _ = Describe("InstanceTypeProvider", func() { ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(SatisfyAll( - HaveKeyWithValue(v1.LabelInstanceTypeStable, "dl1.24xlarge"), - HaveKeyWithValue(v1.LabelTopologyZone, "test-zone-1b"))) + HaveKeyWithValue(corev1.LabelInstanceTypeStable, "dl1.24xlarge"), + HaveKeyWithValue(corev1.LabelTopologyZone, "test-zone-1b"))) }) It("should launch on-demand capacity if flexible to both spot and on-demand, but spot is unavailable", func() { Expect(awsEnv.EC2API.DescribeInstanceTypesPagesWithContext(ctx, &ec2.DescribeInstanceTypesInput{}, func(dito *ec2.DescribeInstanceTypesOutput, b bool) bool { for _, it := range dito.InstanceTypes { - awsEnv.EC2API.InsufficientCapacityPools.Add(fake.CapacityPool{CapacityType: corev1beta1.CapacityTypeSpot, InstanceType: aws.StringValue(it.InstanceType), Zone: "test-zone-1a"}) + awsEnv.EC2API.InsufficientCapacityPools.Add(fake.CapacityPool{CapacityType: karpv1.CapacityTypeSpot, InstanceType: aws.StringValue(it.InstanceType), Zone: "test-zone-1a"}) } return true })).To(Succeed()) - nodePool.Spec.Template.Spec.Requirements = []corev1beta1.NodeSelectorRequirementWithMinValues{ - {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: corev1beta1.CapacityTypeLabelKey, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.CapacityTypeSpot, corev1beta1.CapacityTypeOnDemand}}}, - 
{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1a"}}}, + nodePool.Spec.Template.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: karpv1.CapacityTypeLabelKey, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.CapacityTypeSpot, karpv1.CapacityTypeOnDemand}}}, + {NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: corev1.LabelTopologyZone, Operator: corev1.NodeSelectorOpIn, Values: []string{"test-zone-1a"}}}, } // Spot Unavailable ExpectApplied(ctx, env.Client, nodePool, nodeClass) @@ -2053,43 +2058,43 @@ var _ = Describe("InstanceTypeProvider", func() { ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) // Fallback to OD node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKeyWithValue(corev1beta1.CapacityTypeLabelKey, corev1beta1.CapacityTypeOnDemand)) + Expect(node.Labels).To(HaveKeyWithValue(karpv1.CapacityTypeLabelKey, karpv1.CapacityTypeOnDemand)) }) It("should return all instance types, even though with no offerings due to Insufficient Capacity Error", func() { awsEnv.EC2API.InsufficientCapacityPools.Set([]fake.CapacityPool{ - {CapacityType: corev1beta1.CapacityTypeOnDemand, InstanceType: "m5.xlarge", Zone: "test-zone-1a"}, - {CapacityType: corev1beta1.CapacityTypeOnDemand, InstanceType: "m5.xlarge", Zone: "test-zone-1b"}, - {CapacityType: corev1beta1.CapacityTypeSpot, InstanceType: "m5.xlarge", Zone: "test-zone-1a"}, - {CapacityType: corev1beta1.CapacityTypeSpot, InstanceType: "m5.xlarge", Zone: "test-zone-1b"}, + {CapacityType: karpv1.CapacityTypeOnDemand, InstanceType: "m5.xlarge", Zone: "test-zone-1a"}, + {CapacityType: karpv1.CapacityTypeOnDemand, InstanceType: "m5.xlarge", Zone: "test-zone-1b"}, + {CapacityType: karpv1.CapacityTypeSpot, InstanceType: "m5.xlarge", Zone: "test-zone-1a"}, + {CapacityType: karpv1.CapacityTypeSpot, InstanceType: "m5.xlarge", Zone: "test-zone-1b"}, }) nodePool.Spec.Template.Spec.Requirements = nil - nodePool.Spec.Template.Spec.Requirements = append(nodePool.Spec.Template.Spec.Requirements, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceType, - Operator: v1.NodeSelectorOpIn, + nodePool.Spec.Template.Spec.Requirements = append(nodePool.Spec.Template.Spec.Requirements, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceType, + Operator: corev1.NodeSelectorOpIn, Values: []string{"m5.xlarge"}, }, }, ) - nodePool.Spec.Template.Spec.Requirements = append(nodePool.Spec.Template.Spec.Requirements, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, + nodePool.Spec.Template.Spec.Requirements = append(nodePool.Spec.Template.Spec.Requirements, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, Values: []string{"spot", "on-demand"}, }, }) ExpectApplied(ctx, env.Client, nodePool, nodeClass) - for _, ct := range []string{corev1beta1.CapacityTypeOnDemand, corev1beta1.CapacityTypeSpot} { + for _, ct := range []string{karpv1.CapacityTypeOnDemand, karpv1.CapacityTypeSpot} { for _, zone := range []string{"test-zone-1a", "test-zone-1b"} { 
ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, coretest.UnschedulablePod(coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, }, NodeSelector: map[string]string{ - corev1beta1.CapacityTypeLabelKey: ct, - v1.LabelTopologyZone: zone, + karpv1.CapacityTypeLabelKey: ct, + corev1.LabelTopologyZone: zone, }, })) } @@ -2114,16 +2119,16 @@ var _ = Describe("InstanceTypeProvider", func() { pod := coretest.UnschedulablePod() ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKeyWithValue(corev1beta1.CapacityTypeLabelKey, corev1beta1.CapacityTypeOnDemand)) + Expect(node.Labels).To(HaveKeyWithValue(karpv1.CapacityTypeLabelKey, karpv1.CapacityTypeOnDemand)) }) It("should launch spot capacity if flexible to both spot and on demand", func() { - nodePool.Spec.Template.Spec.Requirements = []corev1beta1.NodeSelectorRequirementWithMinValues{ - {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: corev1beta1.CapacityTypeLabelKey, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.CapacityTypeSpot, corev1beta1.CapacityTypeOnDemand}}}} + nodePool.Spec.Template.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: karpv1.CapacityTypeLabelKey, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.CapacityTypeSpot, karpv1.CapacityTypeOnDemand}}}} ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod() ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKeyWithValue(corev1beta1.CapacityTypeLabelKey, corev1beta1.CapacityTypeSpot)) + Expect(node.Labels).To(HaveKeyWithValue(karpv1.CapacityTypeLabelKey, karpv1.CapacityTypeSpot)) }) It("should fail to launch capacity when there is no zonal availability for spot", func() { now := time.Now() @@ -2139,10 +2144,10 @@ var _ = Describe("InstanceTypeProvider", func() { }) Expect(awsEnv.PricingProvider.UpdateSpotPricing(ctx)).To(Succeed()) - nodePool.Spec.Template.Spec.Requirements = []corev1beta1.NodeSelectorRequirementWithMinValues{ - {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: corev1beta1.CapacityTypeLabelKey, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.CapacityTypeSpot}}}, - {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"m5.large"}}}, - {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelTopologyZone, Operator: v1.NodeSelectorOpIn, Values: []string{"test-zone-1b"}}}, + nodePool.Spec.Template.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: karpv1.CapacityTypeLabelKey, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.CapacityTypeSpot}}}, + {NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: corev1.LabelInstanceTypeStable, Operator: corev1.NodeSelectorOpIn, Values: []string{"m5.large"}}}, + {NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: corev1.LabelTopologyZone, Operator: corev1.NodeSelectorOpIn, Values: []string{"test-zone-1b"}}}, } // Instance type with no zonal availability for spot 
shouldn't be scheduled @@ -2166,33 +2171,33 @@ var _ = Describe("InstanceTypeProvider", func() { Expect(awsEnv.PricingProvider.UpdateSpotPricing(ctx)).To(Succeed()) // not restricting to the zone so we can get any zone - nodePool.Spec.Template.Spec.Requirements = []corev1beta1.NodeSelectorRequirementWithMinValues{ - {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: corev1beta1.CapacityTypeLabelKey, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.CapacityTypeSpot}}}, - {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelInstanceTypeStable, Operator: v1.NodeSelectorOpIn, Values: []string{"m5.large"}}}, + nodePool.Spec.Template.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: karpv1.CapacityTypeLabelKey, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.CapacityTypeSpot}}}, + {NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: corev1.LabelInstanceTypeStable, Operator: corev1.NodeSelectorOpIn, Values: []string{"m5.large"}}}, } ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod() ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKeyWithValue(corev1beta1.NodePoolLabelKey, nodePool.Name)) + Expect(node.Labels).To(HaveKeyWithValue(karpv1.NodePoolLabelKey, nodePool.Name)) }) }) Context("Ephemeral Storage", func() { BeforeEach(func() { - nodeClass.Spec.AMIFamily = aws.String(v1beta1.AMIFamilyAL2) - nodeClass.Spec.BlockDeviceMappings = []*v1beta1.BlockDeviceMapping{ + nodeClass.Spec.AMIFamily = aws.String(v1.AMIFamilyAL2) + nodeClass.Spec.BlockDeviceMappings = []*v1.BlockDeviceMapping{ { DeviceName: aws.String("/dev/xvda"), - EBS: &v1beta1.BlockDevice{ + EBS: &v1.BlockDevice{ SnapshotID: aws.String("snap-xxxxxxxx"), }, }, } }) It("should default to EBS defaults when volumeSize is not defined in blockDeviceMappings for custom AMIs", func() { - nodeClass.Spec.AMIFamily = aws.String(v1beta1.AMIFamilyCustom) - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMIFamily = aws.String(v1.AMIFamilyCustom) + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { Tags: map[string]string{ "*": "*", @@ -2225,7 +2230,7 @@ var _ = Describe("InstanceTypeProvider", func() { }) }) It("should default to EBS defaults when volumeSize is not defined in blockDeviceMappings for AL2023 Root volume", func() { - nodeClass.Spec.AMIFamily = aws.String(v1beta1.AMIFamilyAL2023) + nodeClass.Spec.AMIFamily = aws.String(v1.AMIFamilyAL2023) awsEnv.LaunchTemplateProvider.CABundle = lo.ToPtr("Y2EtYnVuZGxlCg==") awsEnv.LaunchTemplateProvider.ClusterCIDR.Store(lo.ToPtr("10.100.0.0/16")) ExpectApplied(ctx, env.Client, nodePool, nodeClass) @@ -2241,7 +2246,7 @@ var _ = Describe("InstanceTypeProvider", func() { }) }) It("should default to EBS defaults when volumeSize is not defined in blockDeviceMappings for Bottlerocket Root volume", func() { - nodeClass.Spec.AMIFamily = aws.String(v1beta1.AMIFamilyBottlerocket) + nodeClass.Spec.AMIFamily = aws.String(v1.AMIFamilyBottlerocket) nodeClass.Spec.BlockDeviceMappings[0].DeviceName = aws.String("/dev/xvdb") ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod() @@ -2257,7 +2262,7 @@ var _ = Describe("InstanceTypeProvider", func() { }) }) It("should default to EBS defaults when volumeSize is not defined in blockDeviceMappings for Ubuntu Root volume", func() { - 
nodeClass.Spec.AMIFamily = aws.String(v1beta1.AMIFamilyUbuntu) + nodeClass.Spec.AMIFamily = aws.String(v1.AMIFamilyUbuntu) nodeClass.Spec.BlockDeviceMappings[0].DeviceName = aws.String("/dev/sda1") ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod() @@ -2287,7 +2292,7 @@ var _ = Describe("InstanceTypeProvider", func() { }) }) It("should set metadata options on generated launch template from nodePool configuration", func() { - nodeClass.Spec.MetadataOptions = &v1beta1.MetadataOptions{ + nodeClass.Spec.MetadataOptions = &v1.MetadataOptions{ HTTPEndpoint: aws.String(ec2.LaunchTemplateInstanceMetadataEndpointStateDisabled), HTTPProtocolIPv6: aws.String(ec2.LaunchTemplateInstanceMetadataProtocolIpv6Enabled), HTTPPutResponseHopLimit: aws.Int64(1), @@ -2315,17 +2320,20 @@ var _ = Describe("InstanceTypeProvider", func() { // kubelet.evictionHard // kubelet.evictionSoft // kubelet.maxPods - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ - KubeReserved: map[string]string{string(v1.ResourceCPU): "1"}, - SystemReserved: map[string]string{string(v1.ResourceCPU): "1"}, + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ + KubeReserved: map[string]string{string(corev1.ResourceCPU): "1"}, + SystemReserved: map[string]string{string(corev1.ResourceCPU): "1"}, EvictionHard: map[string]string{"memory.available": "5%"}, EvictionSoft: map[string]string{"nodefs.available": "10%"}, - MaxPods: aws.Int32(10), + EvictionSoftGracePeriod: map[string]metav1.Duration{ + "nodefs.available": {Duration: time.Minute}, + }, + MaxPods: aws.Int32(10), } - kubeletChanges := []*corev1beta1.KubeletConfiguration{ + kubeletChanges := []*v1.KubeletConfiguration{ {}, // Testing the base case blank EC2NodeClass - {KubeReserved: map[string]string{string(v1.ResourceCPU): "20"}}, - {SystemReserved: map[string]string{string(v1.ResourceMemory): "10Gi"}}, + {KubeReserved: map[string]string{string(corev1.ResourceCPU): "20"}}, + {SystemReserved: map[string]string{string(corev1.ResourceMemory): "10Gi"}}, {EvictionHard: map[string]string{"memory.available": "52%"}}, {EvictionSoft: map[string]string{"nodefs.available": "132%"}}, {MaxPods: aws.Int32(20)}, @@ -2342,12 +2350,12 @@ var _ = Describe("InstanceTypeProvider", func() { sorted := nodePool.DeepCopy() for _, change := range kubeletChanges { nodePool = sorted.DeepCopy() - Expect(mergo.Merge(nodePool.Spec.Template.Spec.Kubelet, change, mergo.WithOverride, mergo.WithSliceDeepCopy)).To(BeNil()) + Expect(mergo.Merge(nodeClass.Spec.Kubelet, change, mergo.WithOverride, mergo.WithSliceDeepCopy)).To(BeNil()) // Calling the provider and storing the instance type list to the instancetype provider cache - _, err := awsEnv.InstanceTypesProvider.List(ctx, nodePool.Spec.Template.Spec.Kubelet, nodeClass) + _, err := awsEnv.InstanceTypesProvider.List(ctx, nodeClass.Spec.Kubelet, nodeClass) Expect(err).To(BeNil()) // We are making sure to pull from the cache - instancetypes, err := awsEnv.InstanceTypesProvider.List(ctx, nodePool.Spec.Template.Spec.Kubelet, nodeClass) + instancetypes, err := awsEnv.InstanceTypesProvider.List(ctx, nodeClass.Spec.Kubelet, nodeClass) Expect(err).To(BeNil()) sort.Slice(instancetypes, func(x int, y int) bool { return instancetypes[x].Name < instancetypes[y].Name @@ -2365,40 +2373,40 @@ var _ = Describe("InstanceTypeProvider", func() { // nodeClass.blockDeviceMapping.rootVolume // nodeClass.blockDeviceMapping.volumeSize // nodeClass.blockDeviceMapping.deviceName - nodeClass.Spec.BlockDeviceMappings = 
[]*v1beta1.BlockDeviceMapping{ + nodeClass.Spec.BlockDeviceMappings = []*v1.BlockDeviceMapping{ { DeviceName: lo.ToPtr("/dev/xvda"), - EBS: &v1beta1.BlockDevice{VolumeSize: resource.NewScaledQuantity(10, resource.Giga)}, + EBS: &v1.BlockDevice{VolumeSize: resource.NewScaledQuantity(10, resource.Giga)}, RootVolume: false, }, } - nodeClassChanges := []*v1beta1.EC2NodeClass{ + nodeClassChanges := []*v1.EC2NodeClass{ {}, // Testing the base case black EC2NodeClass - {Spec: v1beta1.EC2NodeClassSpec{InstanceStorePolicy: lo.ToPtr(v1beta1.InstanceStorePolicyRAID0)}}, - {Spec: v1beta1.EC2NodeClassSpec{AMIFamily: &v1beta1.AMIFamilyUbuntu}}, + {Spec: v1.EC2NodeClassSpec{InstanceStorePolicy: lo.ToPtr(v1.InstanceStorePolicyRAID0)}}, + {Spec: v1.EC2NodeClassSpec{AMIFamily: &v1.AMIFamilyUbuntu}}, { - Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{ + Spec: v1.EC2NodeClassSpec{BlockDeviceMappings: []*v1.BlockDeviceMapping{ { DeviceName: lo.ToPtr("/dev/sda1"), - EBS: &v1beta1.BlockDevice{VolumeSize: resource.NewScaledQuantity(10, resource.Giga)}, + EBS: &v1.BlockDevice{VolumeSize: resource.NewScaledQuantity(10, resource.Giga)}, RootVolume: true, }, }, }}, { - Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{ + Spec: v1.EC2NodeClassSpec{BlockDeviceMappings: []*v1.BlockDeviceMapping{ { DeviceName: lo.ToPtr("/dev/xvda"), - EBS: &v1beta1.BlockDevice{VolumeSize: resource.NewScaledQuantity(10, resource.Giga)}, + EBS: &v1.BlockDevice{VolumeSize: resource.NewScaledQuantity(10, resource.Giga)}, RootVolume: true, }, }, }}, { - Spec: v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{ + Spec: v1.EC2NodeClassSpec{BlockDeviceMappings: []*v1.BlockDeviceMapping{ { DeviceName: lo.ToPtr("/dev/xvda"), - EBS: &v1beta1.BlockDevice{VolumeSize: resource.NewScaledQuantity(20, resource.Giga)}, + EBS: &v1.BlockDevice{VolumeSize: resource.NewScaledQuantity(20, resource.Giga)}, RootVolume: false, }, }, @@ -2419,10 +2427,10 @@ var _ = Describe("InstanceTypeProvider", func() { nodeClass = sorted.DeepCopy() Expect(mergo.Merge(nodeClass, change, mergo.WithOverride)).To(BeNil()) // Calling the provider and storing the instance type list to the instancetype provider cache - _, err := awsEnv.InstanceTypesProvider.List(ctx, nodePool.Spec.Template.Spec.Kubelet, nodeClass) + _, err := awsEnv.InstanceTypesProvider.List(ctx, nodeClass.Spec.Kubelet, nodeClass) Expect(err).To(BeNil()) // We are making sure to pull from the cache - instanetypes, err := awsEnv.InstanceTypesProvider.List(ctx, nodePool.Spec.Template.Spec.Kubelet, nodeClass) + instanetypes, err := awsEnv.InstanceTypesProvider.List(ctx, nodeClass.Spec.Kubelet, nodeClass) Expect(err).To(BeNil()) sort.Slice(instanetypes, func(x int, y int) bool { return instanetypes[x].Name < instanetypes[y].Name @@ -2443,7 +2451,7 @@ var _ = Describe("InstanceTypeProvider", func() { go func() { defer wg.Done() defer GinkgoRecover() - instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, &corev1beta1.KubeletConfiguration{}, nodeClass) + instanceTypes, err := awsEnv.InstanceTypesProvider.List(ctx, &v1.KubeletConfiguration{}, nodeClass) Expect(err).ToNot(HaveOccurred()) // Sort everything in parallel and ensure that we don't get data races @@ -2484,7 +2492,7 @@ func uniqueInstanceTypeList(instanceTypesLists [][]*corecloudprovider.InstanceTy // generateSpotPricing creates a spot price history output for use in a mock that has all spot offerings discounted by 50% // vs the on-demand offering. 
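Reviewer note: the test above also captures the other structural change in this migration — kubelet configuration now lives on the EC2NodeClass (nodeClass.Spec.Kubelet) instead of the NodePool template, and the instance-type provider's List is keyed off it. A sketch of the merge-then-list pattern the test exercises; the lister interface is a stand-in for the suite's InstanceTypesProvider, and the mergo import path is assumed (match the module's go.mod):

package sketch

import (
	"context"

	"github.com/imdario/mergo" // import path assumed; match the module's go.mod

	"sigs.k8s.io/karpenter/pkg/cloudprovider"

	v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1"
)

// lister is a stand-in for the test environment's InstanceTypesProvider.
type lister interface {
	List(context.Context, *v1.KubeletConfiguration, *v1.EC2NodeClass) ([]*cloudprovider.InstanceType, error)
}

// listTwice merges a kubelet override into the node class, lists once to warm
// the instance-type cache, then lists again so the result is served from it.
func listTwice(ctx context.Context, p lister, nc *v1.EC2NodeClass, change *v1.KubeletConfiguration) ([]*cloudprovider.InstanceType, error) {
	if err := mergo.Merge(nc.Spec.Kubelet, change, mergo.WithOverride, mergo.WithSliceDeepCopy); err != nil {
		return nil, err
	}
	if _, err := p.List(ctx, nc.Spec.Kubelet, nc); err != nil { // fills the cache
		return nil, err
	}
	return p.List(ctx, nc.Spec.Kubelet, nc) // read back from the cache
}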
-func generateSpotPricing(cp *cloudprovider.CloudProvider, nodePool *corev1beta1.NodePool) *ec2.DescribeSpotPriceHistoryOutput { +func generateSpotPricing(cp *cloudprovider.CloudProvider, nodePool *karpv1.NodePool) *ec2.DescribeSpotPriceHistoryOutput { rsp := &ec2.DescribeSpotPriceHistoryOutput{} instanceTypes, err := cp.GetInstanceTypes(ctx, nodePool) awsEnv.InstanceTypesProvider.Reset() @@ -2495,16 +2503,16 @@ func generateSpotPricing(cp *cloudprovider.CloudProvider, nodePool *corev1beta1. instanceType := it onDemandPrice := 1.00 for _, o := range it.Offerings { - if o.Requirements.Get(corev1beta1.CapacityTypeLabelKey).Any() == corev1beta1.CapacityTypeOnDemand { + if o.Requirements.Get(karpv1.CapacityTypeLabelKey).Any() == karpv1.CapacityTypeOnDemand { onDemandPrice = o.Price } } for _, o := range instanceType.Offerings { o := o - if o.Requirements.Get(corev1beta1.CapacityTypeLabelKey).Any() != corev1beta1.CapacityTypeSpot { + if o.Requirements.Get(karpv1.CapacityTypeLabelKey).Any() != karpv1.CapacityTypeSpot { continue } - zone := o.Requirements.Get(v1.LabelTopologyZone).Any() + zone := o.Requirements.Get(corev1.LabelTopologyZone).Any() spotPrice := fmt.Sprintf("%0.3f", onDemandPrice*0.5) rsp.SpotPriceHistory = append(rsp.SpotPriceHistory, &ec2.SpotPrice{ AvailabilityZone: &zone, diff --git a/pkg/providers/instancetype/types.go b/pkg/providers/instancetype/types.go index 43062cc470ca..bce08d2be7ed 100644 --- a/pkg/providers/instancetype/types.go +++ b/pkg/providers/instancetype/types.go @@ -25,12 +25,12 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/operator/options" "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily" @@ -49,7 +49,7 @@ var ( ) func NewInstanceType(ctx context.Context, info *ec2.InstanceTypeInfo, region string, - blockDeviceMappings []*v1beta1.BlockDeviceMapping, instanceStorePolicy *v1beta1.InstanceStorePolicy, maxPods *int32, podsPerCore *int32, + blockDeviceMappings []*v1.BlockDeviceMapping, instanceStorePolicy *v1.InstanceStorePolicy, maxPods *int32, podsPerCore *int32, kubeReserved map[string]string, systemReserved map[string]string, evictionHard map[string]string, evictionSoft map[string]string, amiFamily amifamily.AMIFamily, offerings cloudprovider.Offerings) *cloudprovider.InstanceType { @@ -64,8 +64,8 @@ func NewInstanceType(ctx context.Context, info *ec2.InstanceTypeInfo, region str EvictionThreshold: evictionThreshold(memory(ctx, info), ephemeralStorage(info, amiFamily, blockDeviceMappings, instanceStorePolicy), amiFamily, evictionHard, evictionSoft), }, } - if it.Requirements.Compatible(scheduling.NewRequirements(scheduling.NewRequirement(v1.LabelOSStable, v1.NodeSelectorOpIn, string(v1.Windows)))) == nil { - it.Capacity[v1beta1.ResourcePrivateIPv4Address] = *privateIPv4Address(aws.StringValue(info.InstanceType)) + if it.Requirements.Compatible(scheduling.NewRequirements(scheduling.NewRequirement(corev1.LabelOSStable, corev1.NodeSelectorOpIn, string(corev1.Windows)))) == nil { + it.Capacity[v1.ResourcePrivateIPv4Address] = *privateIPv4Address(aws.StringValue(info.InstanceType)) } return it } @@ -74,116 +74,116 @@ func NewInstanceType(ctx 
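Reviewer note: generateSpotPricing above derives every mocked spot price from the instance type's on-demand offering. A condensed sketch of that derivation under the new aliases (field names follow the hunk; the 1.00 fallback matches the test's default):

package sketch

import (
	"fmt"

	karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
	"sigs.k8s.io/karpenter/pkg/cloudprovider"
)

// spotPriceFor condenses the loop in generateSpotPricing: find the on-demand
// price among the type's offerings and discount it by 50% for every spot entry.
func spotPriceFor(it *cloudprovider.InstanceType) string {
	onDemand := 1.00 // fallback used when no on-demand offering exists
	for _, o := range it.Offerings {
		if o.Requirements.Get(karpv1.CapacityTypeLabelKey).Any() == karpv1.CapacityTypeOnDemand {
			onDemand = o.Price
		}
	}
	return fmt.Sprintf("%0.3f", onDemand*0.5) // 50% discount vs. on-demand
}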
context.Context, info *ec2.InstanceTypeInfo, region str func computeRequirements(info *ec2.InstanceTypeInfo, offerings cloudprovider.Offerings, region string, amiFamily amifamily.AMIFamily) scheduling.Requirements { requirements := scheduling.NewRequirements( // Well Known Upstream - scheduling.NewRequirement(v1.LabelInstanceTypeStable, v1.NodeSelectorOpIn, aws.StringValue(info.InstanceType)), - scheduling.NewRequirement(v1.LabelArchStable, v1.NodeSelectorOpIn, getArchitecture(info)), - scheduling.NewRequirement(v1.LabelOSStable, v1.NodeSelectorOpIn, getOS(info, amiFamily)...), - scheduling.NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpIn, lo.Map(offerings.Available(), func(o cloudprovider.Offering, _ int) string { - return o.Requirements.Get(v1.LabelTopologyZone).Any() + scheduling.NewRequirement(corev1.LabelInstanceTypeStable, corev1.NodeSelectorOpIn, aws.StringValue(info.InstanceType)), + scheduling.NewRequirement(corev1.LabelArchStable, corev1.NodeSelectorOpIn, getArchitecture(info)), + scheduling.NewRequirement(corev1.LabelOSStable, corev1.NodeSelectorOpIn, getOS(info, amiFamily)...), + scheduling.NewRequirement(corev1.LabelTopologyZone, corev1.NodeSelectorOpIn, lo.Map(offerings.Available(), func(o cloudprovider.Offering, _ int) string { + return o.Requirements.Get(corev1.LabelTopologyZone).Any() })...), - scheduling.NewRequirement(v1.LabelTopologyRegion, v1.NodeSelectorOpIn, region), - scheduling.NewRequirement(v1.LabelWindowsBuild, v1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(corev1.LabelTopologyRegion, corev1.NodeSelectorOpIn, region), + scheduling.NewRequirement(corev1.LabelWindowsBuild, corev1.NodeSelectorOpDoesNotExist), // Well Known to Karpenter - scheduling.NewRequirement(corev1beta1.CapacityTypeLabelKey, v1.NodeSelectorOpIn, lo.Map(offerings.Available(), func(o cloudprovider.Offering, _ int) string { - return o.Requirements.Get(corev1beta1.CapacityTypeLabelKey).Any() + scheduling.NewRequirement(karpv1.CapacityTypeLabelKey, corev1.NodeSelectorOpIn, lo.Map(offerings.Available(), func(o cloudprovider.Offering, _ int) string { + return o.Requirements.Get(karpv1.CapacityTypeLabelKey).Any() })...), // Well Known to AWS - scheduling.NewRequirement(v1beta1.LabelInstanceCPU, v1.NodeSelectorOpIn, fmt.Sprint(aws.Int64Value(info.VCpuInfo.DefaultVCpus))), - scheduling.NewRequirement(v1beta1.LabelInstanceCPUManufacturer, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceMemory, v1.NodeSelectorOpIn, fmt.Sprint(aws.Int64Value(info.MemoryInfo.SizeInMiB))), - scheduling.NewRequirement(v1beta1.LabelInstanceEBSBandwidth, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceNetworkBandwidth, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceCategory, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceFamily, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceGeneration, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceLocalNVME, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceSize, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceGPUName, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceGPUManufacturer, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceGPUCount, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceGPUMemory, 
v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceAcceleratorName, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceAcceleratorManufacturer, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceAcceleratorCount, v1.NodeSelectorOpDoesNotExist), - scheduling.NewRequirement(v1beta1.LabelInstanceHypervisor, v1.NodeSelectorOpIn, aws.StringValue(info.Hypervisor)), - scheduling.NewRequirement(v1beta1.LabelInstanceEncryptionInTransitSupported, v1.NodeSelectorOpIn, fmt.Sprint(aws.BoolValue(info.NetworkInfo.EncryptionInTransitSupported))), + scheduling.NewRequirement(v1.LabelInstanceCPU, corev1.NodeSelectorOpIn, fmt.Sprint(aws.Int64Value(info.VCpuInfo.DefaultVCpus))), + scheduling.NewRequirement(v1.LabelInstanceCPUManufacturer, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceMemory, corev1.NodeSelectorOpIn, fmt.Sprint(aws.Int64Value(info.MemoryInfo.SizeInMiB))), + scheduling.NewRequirement(v1.LabelInstanceEBSBandwidth, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceNetworkBandwidth, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceCategory, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceFamily, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceGeneration, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceLocalNVME, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceSize, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceGPUName, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceGPUManufacturer, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceGPUCount, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceGPUMemory, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceAcceleratorName, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceAcceleratorManufacturer, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceAcceleratorCount, corev1.NodeSelectorOpDoesNotExist), + scheduling.NewRequirement(v1.LabelInstanceHypervisor, corev1.NodeSelectorOpIn, aws.StringValue(info.Hypervisor)), + scheduling.NewRequirement(v1.LabelInstanceEncryptionInTransitSupported, corev1.NodeSelectorOpIn, fmt.Sprint(aws.BoolValue(info.NetworkInfo.EncryptionInTransitSupported))), ) // Only add zone-id label when available in offerings. It may not be available if a user has upgraded from a // previous version of Karpenter w/o zone-id support and the nodeclass subnet status has not yet updated. 
if zoneIDs := lo.FilterMap(offerings.Available(), func(o cloudprovider.Offering, _ int) (string, bool) { - zoneID := o.Requirements.Get(v1beta1.LabelTopologyZoneID).Any() + zoneID := o.Requirements.Get(v1.LabelTopologyZoneID).Any() return zoneID, zoneID != "" }); len(zoneIDs) != 0 { - requirements.Add(scheduling.NewRequirement(v1beta1.LabelTopologyZoneID, v1.NodeSelectorOpIn, zoneIDs...)) + requirements.Add(scheduling.NewRequirement(v1.LabelTopologyZoneID, corev1.NodeSelectorOpIn, zoneIDs...)) } // Instance Type Labels instanceFamilyParts := instanceTypeScheme.FindStringSubmatch(aws.StringValue(info.InstanceType)) if len(instanceFamilyParts) == 4 { - requirements[v1beta1.LabelInstanceCategory].Insert(instanceFamilyParts[1]) - requirements[v1beta1.LabelInstanceGeneration].Insert(instanceFamilyParts[3]) + requirements[v1.LabelInstanceCategory].Insert(instanceFamilyParts[1]) + requirements[v1.LabelInstanceGeneration].Insert(instanceFamilyParts[3]) } instanceTypeParts := strings.Split(aws.StringValue(info.InstanceType), ".") if len(instanceTypeParts) == 2 { - requirements.Get(v1beta1.LabelInstanceFamily).Insert(instanceTypeParts[0]) - requirements.Get(v1beta1.LabelInstanceSize).Insert(instanceTypeParts[1]) + requirements.Get(v1.LabelInstanceFamily).Insert(instanceTypeParts[0]) + requirements.Get(v1.LabelInstanceSize).Insert(instanceTypeParts[1]) } if info.InstanceStorageInfo != nil && aws.StringValue(info.InstanceStorageInfo.NvmeSupport) != ec2.EphemeralNvmeSupportUnsupported { - requirements[v1beta1.LabelInstanceLocalNVME].Insert(fmt.Sprint(aws.Int64Value(info.InstanceStorageInfo.TotalSizeInGB))) + requirements[v1.LabelInstanceLocalNVME].Insert(fmt.Sprint(aws.Int64Value(info.InstanceStorageInfo.TotalSizeInGB))) } // Network bandwidth if bandwidth, ok := InstanceTypeBandwidthMegabits[aws.StringValue(info.InstanceType)]; ok { - requirements[v1beta1.LabelInstanceNetworkBandwidth].Insert(fmt.Sprint(bandwidth)) + requirements[v1.LabelInstanceNetworkBandwidth].Insert(fmt.Sprint(bandwidth)) } // GPU Labels if info.GpuInfo != nil && len(info.GpuInfo.Gpus) == 1 { gpu := info.GpuInfo.Gpus[0] - requirements.Get(v1beta1.LabelInstanceGPUName).Insert(lowerKabobCase(aws.StringValue(gpu.Name))) - requirements.Get(v1beta1.LabelInstanceGPUManufacturer).Insert(lowerKabobCase(aws.StringValue(gpu.Manufacturer))) - requirements.Get(v1beta1.LabelInstanceGPUCount).Insert(fmt.Sprint(aws.Int64Value(gpu.Count))) - requirements.Get(v1beta1.LabelInstanceGPUMemory).Insert(fmt.Sprint(aws.Int64Value(gpu.MemoryInfo.SizeInMiB))) + requirements.Get(v1.LabelInstanceGPUName).Insert(lowerKabobCase(aws.StringValue(gpu.Name))) + requirements.Get(v1.LabelInstanceGPUManufacturer).Insert(lowerKabobCase(aws.StringValue(gpu.Manufacturer))) + requirements.Get(v1.LabelInstanceGPUCount).Insert(fmt.Sprint(aws.Int64Value(gpu.Count))) + requirements.Get(v1.LabelInstanceGPUMemory).Insert(fmt.Sprint(aws.Int64Value(gpu.MemoryInfo.SizeInMiB))) } // Accelerators if info.InferenceAcceleratorInfo != nil && len(info.InferenceAcceleratorInfo.Accelerators) == 1 { accelerator := info.InferenceAcceleratorInfo.Accelerators[0] - requirements.Get(v1beta1.LabelInstanceAcceleratorName).Insert(lowerKabobCase(aws.StringValue(accelerator.Name))) - requirements.Get(v1beta1.LabelInstanceAcceleratorManufacturer).Insert(lowerKabobCase(aws.StringValue(accelerator.Manufacturer))) - requirements.Get(v1beta1.LabelInstanceAcceleratorCount).Insert(fmt.Sprint(aws.Int64Value(accelerator.Count))) + 
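Reviewer note: the instance-type labels above are derived by taking the EC2 name apart twice — once with the instanceTypeScheme regex for category and generation, once with a plain split for family and size. A standalone sketch of that parsing; the regex is an assumed reading of instanceTypeScheme (which this diff does not change), chosen so it yields the four submatches the hunk indexes into:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Assumed to behave like the provider's instanceTypeScheme: four submatches,
// where index 1 is the instance category and index 3 the generation digit.
var instanceTypeScheme = regexp.MustCompile(`(^[a-z]+)(\-[0-9]+tb)?([0-9]+).*\.`)

func main() {
	name := "m6idn.32xlarge"
	if p := instanceTypeScheme.FindStringSubmatch(name); len(p) == 4 {
		fmt.Println("category:", p[1], "generation:", p[3]) // category: m generation: 6
	}
	if p := strings.Split(name, "."); len(p) == 2 {
		fmt.Println("family:", p[0], "size:", p[1]) // family: m6idn size: 32xlarge
	}
}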
requirements.Get(v1.LabelInstanceAcceleratorName).Insert(lowerKabobCase(aws.StringValue(accelerator.Name))) + requirements.Get(v1.LabelInstanceAcceleratorManufacturer).Insert(lowerKabobCase(aws.StringValue(accelerator.Manufacturer))) + requirements.Get(v1.LabelInstanceAcceleratorCount).Insert(fmt.Sprint(aws.Int64Value(accelerator.Count))) } // Windows Build Version Labels if family, ok := amiFamily.(*amifamily.Windows); ok { - requirements.Get(v1.LabelWindowsBuild).Insert(family.Build) + requirements.Get(corev1.LabelWindowsBuild).Insert(family.Build) } // Trn1 Accelerators // TODO: remove function once DescribeInstanceTypes contains the accelerator data // Values found from: https://aws.amazon.com/ec2/instance-types/trn1/ if strings.HasPrefix(*info.InstanceType, "trn1") { - requirements.Get(v1beta1.LabelInstanceAcceleratorName).Insert(lowerKabobCase("Inferentia")) - requirements.Get(v1beta1.LabelInstanceAcceleratorManufacturer).Insert(lowerKabobCase("AWS")) - requirements.Get(v1beta1.LabelInstanceAcceleratorCount).Insert(fmt.Sprint(awsNeurons(info))) + requirements.Get(v1.LabelInstanceAcceleratorName).Insert(lowerKabobCase("Inferentia")) + requirements.Get(v1.LabelInstanceAcceleratorManufacturer).Insert(lowerKabobCase("AWS")) + requirements.Get(v1.LabelInstanceAcceleratorCount).Insert(fmt.Sprint(awsNeurons(info))) } // CPU Manufacturer, valid options: aws, intel, amd if info.ProcessorInfo != nil { - requirements.Get(v1beta1.LabelInstanceCPUManufacturer).Insert(lowerKabobCase(aws.StringValue(info.ProcessorInfo.Manufacturer))) + requirements.Get(v1.LabelInstanceCPUManufacturer).Insert(lowerKabobCase(aws.StringValue(info.ProcessorInfo.Manufacturer))) } // EBS Max Bandwidth if info.EbsInfo != nil && aws.StringValue(info.EbsInfo.EbsOptimizedSupport) == ec2.EbsOptimizedSupportDefault { - requirements.Get(v1beta1.LabelInstanceEBSBandwidth).Insert(fmt.Sprint(aws.Int64Value(info.EbsInfo.EbsOptimizedInfo.MaximumBandwidthInMbps))) + requirements.Get(v1.LabelInstanceEBSBandwidth).Insert(fmt.Sprint(aws.Int64Value(info.EbsInfo.EbsOptimizedInfo.MaximumBandwidthInMbps))) } return requirements } func getOS(info *ec2.InstanceTypeInfo, amiFamily amifamily.AMIFamily) []string { if _, ok := amiFamily.(*amifamily.Windows); ok { - if getArchitecture(info) == corev1beta1.ArchitectureAmd64 { - return []string{string(v1.Windows)} + if getArchitecture(info) == karpv1.ArchitectureAmd64 { + return []string{string(corev1.Windows)} } return []string{} } - return []string{string(v1.Linux)} + return []string{string(corev1.Linux)} } func getArchitecture(info *ec2.InstanceTypeInfo) string { for _, architecture := range info.ProcessorInfo.SupportedArchitectures { - if value, ok := v1beta1.AWSToKubeArchitectures[aws.StringValue(architecture)]; ok { + if value, ok := v1.AWSToKubeArchitectures[aws.StringValue(architecture)]; ok { return value } } @@ -191,20 +191,20 @@ func getArchitecture(info *ec2.InstanceTypeInfo) string { } func computeCapacity(ctx context.Context, info *ec2.InstanceTypeInfo, amiFamily amifamily.AMIFamily, - blockDeviceMapping []*v1beta1.BlockDeviceMapping, instanceStorePolicy *v1beta1.InstanceStorePolicy, - maxPods *int32, podsPerCore *int32) v1.ResourceList { - - resourceList := v1.ResourceList{ - v1.ResourceCPU: *cpu(info), - v1.ResourceMemory: *memory(ctx, info), - v1.ResourceEphemeralStorage: *ephemeralStorage(info, amiFamily, blockDeviceMapping, instanceStorePolicy), - v1.ResourcePods: *pods(ctx, info, amiFamily, maxPods, podsPerCore), - v1beta1.ResourceAWSPodENI: 
*awsPodENI(aws.StringValue(info.InstanceType)), - v1beta1.ResourceNVIDIAGPU: *nvidiaGPUs(info), - v1beta1.ResourceAMDGPU: *amdGPUs(info), - v1beta1.ResourceAWSNeuron: *awsNeurons(info), - v1beta1.ResourceHabanaGaudi: *habanaGaudis(info), - v1beta1.ResourceEFA: *efas(info), + blockDeviceMapping []*v1.BlockDeviceMapping, instanceStorePolicy *v1.InstanceStorePolicy, + maxPods *int32, podsPerCore *int32) corev1.ResourceList { + + resourceList := corev1.ResourceList{ + corev1.ResourceCPU: *cpu(info), + corev1.ResourceMemory: *memory(ctx, info), + corev1.ResourceEphemeralStorage: *ephemeralStorage(info, amiFamily, blockDeviceMapping, instanceStorePolicy), + corev1.ResourcePods: *pods(ctx, info, amiFamily, maxPods, podsPerCore), + v1.ResourceAWSPodENI: *awsPodENI(aws.StringValue(info.InstanceType)), + v1.ResourceNVIDIAGPU: *nvidiaGPUs(info), + v1.ResourceAMDGPU: *amdGPUs(info), + v1.ResourceAWSNeuron: *awsNeurons(info), + v1.ResourceHabanaGaudi: *habanaGaudis(info), + v1.ResourceEFA: *efas(info), } return resourceList } @@ -226,16 +226,16 @@ func memory(ctx context.Context, info *ec2.InstanceTypeInfo) *resource.Quantity } // Setting ephemeral-storage to be either the default value, what is defined in blockDeviceMappings, or the combined size of local store volumes. -func ephemeralStorage(info *ec2.InstanceTypeInfo, amiFamily amifamily.AMIFamily, blockDeviceMappings []*v1beta1.BlockDeviceMapping, instanceStorePolicy *v1beta1.InstanceStorePolicy) *resource.Quantity { +func ephemeralStorage(info *ec2.InstanceTypeInfo, amiFamily amifamily.AMIFamily, blockDeviceMappings []*v1.BlockDeviceMapping, instanceStorePolicy *v1.InstanceStorePolicy) *resource.Quantity { // If local store disks have been configured for node ephemeral-storage, use the total size of the disks. - if lo.FromPtr(instanceStorePolicy) == v1beta1.InstanceStorePolicyRAID0 { + if lo.FromPtr(instanceStorePolicy) == v1.InstanceStorePolicyRAID0 { if info.InstanceStorageInfo != nil && info.InstanceStorageInfo.TotalSizeInGB != nil { return resources.Quantity(fmt.Sprintf("%dG", *info.InstanceStorageInfo.TotalSizeInGB)) } } if len(blockDeviceMappings) != 0 { // First check if there's a root volume configured in blockDeviceMappings. - if blockDeviceMapping, ok := lo.Find(blockDeviceMappings, func(bdm *v1beta1.BlockDeviceMapping) bool { + if blockDeviceMapping, ok := lo.Find(blockDeviceMappings, func(bdm *v1.BlockDeviceMapping) bool { return bdm.RootVolume }); ok && blockDeviceMapping.EBS.VolumeSize != nil { return blockDeviceMapping.EBS.VolumeSize @@ -247,7 +247,7 @@ func ephemeralStorage(info *ec2.InstanceTypeInfo, amiFamily amifamily.AMIFamily, return lo.Ternary(volumeSize != nil, volumeSize, amifamily.DefaultEBS.VolumeSize) default: // If a block device mapping exists in the provider for the root volume, use the volume size specified in the provider. 
If not, use the default - if blockDeviceMapping, ok := lo.Find(blockDeviceMappings, func(bdm *v1beta1.BlockDeviceMapping) bool { + if blockDeviceMapping, ok := lo.Find(blockDeviceMappings, func(bdm *v1.BlockDeviceMapping) bool { return *bdm.DeviceName == *amiFamily.EphemeralBlockDevice() }); ok && blockDeviceMapping.EBS.VolumeSize != nil { return blockDeviceMapping.EBS.VolumeSize @@ -255,7 +255,7 @@ func ephemeralStorage(info *ec2.InstanceTypeInfo, amiFamily amifamily.AMIFamily, } } //Return the ephemeralBlockDevice size if defined in ami - if ephemeralBlockDevice, ok := lo.Find(amiFamily.DefaultBlockDeviceMappings(), func(item *v1beta1.BlockDeviceMapping) bool { + if ephemeralBlockDevice, ok := lo.Find(amiFamily.DefaultBlockDeviceMappings(), func(item *v1.BlockDeviceMapping) bool { return *amiFamily.EphemeralBlockDevice() == *item.DeviceName }); ok { return ephemeralBlockDevice.EBS.VolumeSize @@ -360,19 +360,19 @@ func privateIPv4Address(instanceTypeName string) *resource.Quantity { return resources.Quantity(fmt.Sprint(limits.IPv4PerInterface - 1)) } -func systemReservedResources(systemReserved map[string]string) v1.ResourceList { - return lo.MapEntries(systemReserved, func(k string, v string) (v1.ResourceName, resource.Quantity) { - return v1.ResourceName(k), resource.MustParse(v) +func systemReservedResources(systemReserved map[string]string) corev1.ResourceList { + return lo.MapEntries(systemReserved, func(k string, v string) (corev1.ResourceName, resource.Quantity) { + return corev1.ResourceName(k), resource.MustParse(v) }) } -func kubeReservedResources(cpus, pods, eniLimitedPods *resource.Quantity, amiFamily amifamily.AMIFamily, kubeReserved map[string]string) v1.ResourceList { +func kubeReservedResources(cpus, pods, eniLimitedPods *resource.Quantity, amiFamily amifamily.AMIFamily, kubeReserved map[string]string) corev1.ResourceList { if amiFamily.FeatureFlags().UsesENILimitedMemoryOverhead { pods = eniLimitedPods } - resources := v1.ResourceList{ - v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dMi", (11*pods.Value())+255)), - v1.ResourceEphemeralStorage: resource.MustParse("1Gi"), // default kube-reserved ephemeral-storage + resources := corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse(fmt.Sprintf("%dMi", (11*pods.Value())+255)), + corev1.ResourceEphemeralStorage: resource.MustParse("1Gi"), // default kube-reserved ephemeral-storage } // kube-reserved Computed from // https://github.com/bottlerocket-os/bottlerocket/pull/1388/files#diff-bba9e4e3e46203be2b12f22e0d654ebd270f0b478dd34f40c31d7aa695620f2fR611 @@ -393,21 +393,21 @@ func kubeReservedResources(cpus, pods, eniLimitedPods *resource.Quantity, amiFam } cpuOverhead := resources.Cpu() cpuOverhead.Add(*resource.NewMilliQuantity(int64(r*cpuRange.percentage), resource.DecimalSI)) - resources[v1.ResourceCPU] = *cpuOverhead + resources[corev1.ResourceCPU] = *cpuOverhead } } - return lo.Assign(resources, lo.MapEntries(kubeReserved, func(k string, v string) (v1.ResourceName, resource.Quantity) { - return v1.ResourceName(k), resource.MustParse(v) + return lo.Assign(resources, lo.MapEntries(kubeReserved, func(k string, v string) (corev1.ResourceName, resource.Quantity) { + return corev1.ResourceName(k), resource.MustParse(v) })) } -func evictionThreshold(memory *resource.Quantity, storage *resource.Quantity, amiFamily amifamily.AMIFamily, evictionHard map[string]string, evictionSoft map[string]string) v1.ResourceList { - overhead := v1.ResourceList{ - v1.ResourceMemory: resource.MustParse("100Mi"), - 
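Reviewer note: the memory portion of kube-reserved above follows the familiar 11 MiB-per-pod-plus-255 MiB heuristic. A worked sketch of the arithmetic (the pod count of 29 is illustrative, e.g. an ENI-limited m5.large):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// kubeReservedMemory mirrors the formula in kubeReservedResources above:
// 11 MiB per supported pod plus a flat 255 MiB.
func kubeReservedMemory(pods int64) resource.Quantity {
	return resource.MustParse(fmt.Sprintf("%dMi", (11*pods)+255))
}

func main() {
	q := kubeReservedMemory(29)
	fmt.Println(q.String()) // 574Mi
}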
v1.ResourceEphemeralStorage: resource.MustParse(fmt.Sprint(math.Ceil(float64(storage.Value()) / 100 * 10))), +func evictionThreshold(memory *resource.Quantity, storage *resource.Quantity, amiFamily amifamily.AMIFamily, evictionHard map[string]string, evictionSoft map[string]string) corev1.ResourceList { + overhead := corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("100Mi"), + corev1.ResourceEphemeralStorage: resource.MustParse(fmt.Sprint(math.Ceil(float64(storage.Value()) / 100 * 10))), } - override := v1.ResourceList{} + override := corev1.ResourceList{} var evictionSignals []map[string]string if evictionHard != nil { evictionSignals = append(evictionSignals, evictionHard) @@ -417,12 +417,12 @@ func evictionThreshold(memory *resource.Quantity, storage *resource.Quantity, am } for _, m := range evictionSignals { - temp := v1.ResourceList{} + temp := corev1.ResourceList{} if v, ok := m[MemoryAvailable]; ok { - temp[v1.ResourceMemory] = computeEvictionSignal(*memory, v) + temp[corev1.ResourceMemory] = computeEvictionSignal(*memory, v) } if v, ok := m[NodeFSAvailable]; ok { - temp[v1.ResourceEphemeralStorage] = computeEvictionSignal(*storage, v) + temp[corev1.ResourceEphemeralStorage] = computeEvictionSignal(*storage, v) } override = resources.MaxResources(override, temp) } diff --git a/pkg/providers/launchtemplate/launchtemplate.go b/pkg/providers/launchtemplate/launchtemplate.go index 8a14daaf04a6..8007df20cb34 100644 --- a/pkg/providers/launchtemplate/launchtemplate.go +++ b/pkg/providers/launchtemplate/launchtemplate.go @@ -36,12 +36,12 @@ import ( "github.com/mitchellh/hashstructure/v2" "github.com/patrickmn/go-cache" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/apis" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" awserrors "github.com/aws/karpenter-provider-aws/pkg/errors" "github.com/aws/karpenter-provider-aws/pkg/operator/options" "github.com/aws/karpenter-provider-aws/pkg/providers/amifamily" @@ -54,9 +54,9 @@ import ( ) type Provider interface { - EnsureAll(context.Context, *v1beta1.EC2NodeClass, *corev1beta1.NodeClaim, + EnsureAll(context.Context, *v1.EC2NodeClass, *karpv1.NodeClaim, []*cloudprovider.InstanceType, string, map[string]string) ([]*LaunchTemplate, error) - DeleteAll(context.Context, *v1beta1.EC2NodeClass) error + DeleteAll(context.Context, *v1.EC2NodeClass) error InvalidateCache(context.Context, string, string) ResolveClusterCIDR(context.Context) error } @@ -110,13 +110,13 @@ func NewDefaultProvider(ctx context.Context, cache *cache.Cache, ec2api ec2iface return l } -func (p *DefaultProvider) EnsureAll(ctx context.Context, nodeClass *v1beta1.EC2NodeClass, nodeClaim *corev1beta1.NodeClaim, +func (p *DefaultProvider) EnsureAll(ctx context.Context, nodeClass *v1.EC2NodeClass, nodeClaim *karpv1.NodeClaim, instanceTypes []*cloudprovider.InstanceType, capacityType string, tags map[string]string) ([]*LaunchTemplate, error) { p.Lock() defer p.Unlock() - options, err := p.createAMIOptions(ctx, nodeClass, lo.Assign(nodeClaim.Labels, map[string]string{corev1beta1.CapacityTypeLabelKey: capacityType}), tags) + options, err := p.createAMIOptions(ctx, nodeClass, lo.Assign(nodeClaim.Labels, map[string]string{karpv1.CapacityTypeLabelKey: capacityType}), tags) if err != nil { return nil, 
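Reviewer note: computeEvictionSignal itself is not part of this diff; the sketch below is an assumed reading of the behavior evictionThreshold relies on — percentage signals resolve as a fraction of capacity, anything else parses as an absolute quantity:

package sketch

import (
	"strconv"
	"strings"

	"k8s.io/apimachinery/pkg/api/resource"
)

// computeEvictionSignalSketch resolves a signal such as "10%" or "100Mi"
// against a capacity, the way evictionThreshold above consumes one. This is
// an assumption about the unshown helper, not a copy of it.
func computeEvictionSignalSketch(capacity resource.Quantity, signal string) resource.Quantity {
	if strings.HasSuffix(signal, "%") {
		pct, _ := strconv.ParseFloat(strings.TrimSuffix(signal, "%"), 64) // "10%" -> 10
		return *resource.NewQuantity(int64(float64(capacity.Value())*pct/100), resource.DecimalSI)
	}
	return resource.MustParse(signal)
}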
err } @@ -151,12 +151,12 @@ func LaunchTemplateName(options *amifamily.LaunchTemplate) string { return fmt.Sprintf("%s/%d", apis.Group, lo.Must(hashstructure.Hash(options, hashstructure.FormatV2, &hashstructure.HashOptions{SlicesAsSets: true}))) } -func (p *DefaultProvider) createAMIOptions(ctx context.Context, nodeClass *v1beta1.EC2NodeClass, labels, tags map[string]string) (*amifamily.Options, error) { +func (p *DefaultProvider) createAMIOptions(ctx context.Context, nodeClass *v1.EC2NodeClass, labels, tags map[string]string) (*amifamily.Options, error) { // Remove any labels passed into userData that are prefixed with "node-restriction.kubernetes.io" or "kops.k8s.io" since the kubelet can't // register the node with any labels from this domain: https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction for k := range labels { - labelDomain := corev1beta1.GetLabelDomain(k) - if strings.HasSuffix(labelDomain, v1.LabelNamespaceNodeRestriction) || strings.HasSuffix(labelDomain, "kops.k8s.io") { + labelDomain := karpv1.GetLabelDomain(k) + if strings.HasSuffix(labelDomain, corev1.LabelNamespaceNodeRestriction) || strings.HasSuffix(labelDomain, "kops.k8s.io") { delete(labels, k) } } @@ -232,7 +232,7 @@ func (p *DefaultProvider) createLaunchTemplate(ctx context.Context, options *ami {ResourceType: aws.String(ec2.ResourceTypeNetworkInterface), Tags: utils.MergeTags(options.Tags)}, } // Add the spot-instances-request tag if trying to launch spot capacity - if options.CapacityType == corev1beta1.CapacityTypeSpot { + if options.CapacityType == karpv1.CapacityTypeSpot { launchTemplateDataTags = append(launchTemplateDataTags, &ec2.LaunchTemplateTagSpecificationRequest{ResourceType: aws.String(ec2.ResourceTypeSpotInstancesRequest), Tags: utils.MergeTags(options.Tags)}) } networkInterfaces := p.generateNetworkInterfaces(options) @@ -247,7 +247,7 @@ func (p *DefaultProvider) createLaunchTemplate(ctx context.Context, options *ami Enabled: aws.Bool(options.DetailedMonitoring), }, // If the network interface is defined, the security groups are defined within it - SecurityGroupIds: lo.Ternary(networkInterfaces != nil, nil, lo.Map(options.SecurityGroups, func(s v1beta1.SecurityGroup, _ int) *string { return aws.String(s.ID) })), + SecurityGroupIds: lo.Ternary(networkInterfaces != nil, nil, lo.Map(options.SecurityGroups, func(s v1.SecurityGroup, _ int) *string { return aws.String(s.ID) })), UserData: aws.String(userData), ImageId: aws.String(options.AMIID), MetadataOptions: &ec2.LaunchTemplateInstanceMetadataOptionsRequest{ @@ -262,7 +262,7 @@ func (p *DefaultProvider) createLaunchTemplate(ctx context.Context, options *ami TagSpecifications: []*ec2.TagSpecification{ { ResourceType: aws.String(ec2.ResourceTypeLaunchTemplate), - Tags: utils.MergeTags(options.Tags, map[string]string{v1beta1.TagManagedLaunchTemplate: options.ClusterName, v1beta1.LabelNodeClass: options.NodeClassName}), + Tags: utils.MergeTags(options.Tags, map[string]string{v1.TagManagedLaunchTemplate: options.ClusterName, v1.LabelNodeClass: options.NodeClassName}), }, }, }) @@ -282,7 +282,7 @@ func (p *DefaultProvider) generateNetworkInterfaces(options *amifamily.LaunchTem // Some networking magic to ensure that one network card has higher priority than all the others (important if an instance needs a public IP w/o adding an EIP to every network card) DeviceIndex: lo.ToPtr(lo.Ternary[int64](i == 0, 0, 1)), InterfaceType: lo.ToPtr(ec2.NetworkInterfaceTypeEfa), - Groups: lo.Map(options.SecurityGroups, func(s 
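Reviewer note: createAMIOptions above drops kubelet-forbidden label domains before rendering userData. A standalone sketch of that filter under the new aliases (the sample labels are illustrative):

package main

import (
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"
	karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
)

func main() {
	labels := map[string]string{
		"node-restriction.kubernetes.io/team": "a", // dropped: NodeRestriction domain
		"instance.kops.k8s.io/group":          "b", // dropped: kops domain
		"team":                                "c", // kept
	}
	for k := range labels {
		domain := karpv1.GetLabelDomain(k)
		if strings.HasSuffix(domain, corev1.LabelNamespaceNodeRestriction) || strings.HasSuffix(domain, "kops.k8s.io") {
			delete(labels, k)
		}
	}
	fmt.Println(labels) // map[team:c]
}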
v1beta1.SecurityGroup, _ int) *string { return aws.String(s.ID) }), + Groups: lo.Map(options.SecurityGroups, func(s v1.SecurityGroup, _ int) *string { return aws.String(s.ID) }), // Instances launched with multiple pre-configured network interfaces cannot set AssociatePublicIPAddress to true. This is an EC2 limitation. However, this does not apply for instances // with a single EFA network interface, and we should support those use cases. Launch failures with multiple enis should be considered user misconfiguration. AssociatePublicIpAddress: options.AssociatePublicIPAddress, @@ -295,14 +295,14 @@ func (p *DefaultProvider) generateNetworkInterfaces(options *amifamily.LaunchTem { AssociatePublicIpAddress: options.AssociatePublicIPAddress, DeviceIndex: aws.Int64(0), - Groups: lo.Map(options.SecurityGroups, func(s v1beta1.SecurityGroup, _ int) *string { return aws.String(s.ID) }), + Groups: lo.Map(options.SecurityGroups, func(s v1.SecurityGroup, _ int) *string { return aws.String(s.ID) }), }, } } return nil } -func (p *DefaultProvider) blockDeviceMappings(blockDeviceMappings []*v1beta1.BlockDeviceMapping) []*ec2.LaunchTemplateBlockDeviceMappingRequest { +func (p *DefaultProvider) blockDeviceMappings(blockDeviceMappings []*v1.BlockDeviceMapping) []*ec2.LaunchTemplateBlockDeviceMappingRequest { if len(blockDeviceMappings) == 0 { // The EC2 API fails with empty slices and expects nil. return nil @@ -339,9 +339,9 @@ func (p *DefaultProvider) volumeSize(quantity *resource.Quantity) *int64 { // Any error during hydration will result in a panic func (p *DefaultProvider) hydrateCache(ctx context.Context) { clusterName := options.FromContext(ctx).ClusterName - ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("tag-key", v1beta1.TagManagedLaunchTemplate, "tag-value", clusterName)) + ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("tag-key", v1.TagManagedLaunchTemplate, "tag-value", clusterName)) if err := p.ec2api.DescribeLaunchTemplatesPagesWithContext(ctx, &ec2.DescribeLaunchTemplatesInput{ - Filters: []*ec2.Filter{{Name: aws.String(fmt.Sprintf("tag:%s", v1beta1.TagManagedLaunchTemplate)), Values: []*string{aws.String(clusterName)}}}, + Filters: []*ec2.Filter{{Name: aws.String(fmt.Sprintf("tag:%s", v1.TagManagedLaunchTemplate)), Values: []*string{aws.String(clusterName)}}}, }, func(output *ec2.DescribeLaunchTemplatesOutput, _ bool) bool { for _, lt := range output.LaunchTemplates { p.cache.SetDefault(*lt.LaunchTemplateName, lt) @@ -373,7 +373,7 @@ func (p *DefaultProvider) cachedEvictedFunc(ctx context.Context) func(string, in } } -func (p *DefaultProvider) getInstanceProfile(nodeClass *v1beta1.EC2NodeClass) (string, error) { +func (p *DefaultProvider) getInstanceProfile(nodeClass *v1.EC2NodeClass) (string, error) { if nodeClass.Spec.InstanceProfile != nil { return aws.StringValue(nodeClass.Spec.InstanceProfile), nil } @@ -386,13 +386,13 @@ func (p *DefaultProvider) getInstanceProfile(nodeClass *v1beta1.EC2NodeClass) (s return "", errors.New("neither spec.instanceProfile or spec.role is specified") } -func (p *DefaultProvider) DeleteAll(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) error { +func (p *DefaultProvider) DeleteAll(ctx context.Context, nodeClass *v1.EC2NodeClass) error { clusterName := options.FromContext(ctx).ClusterName var ltNames []*string if err := p.ec2api.DescribeLaunchTemplatesPagesWithContext(ctx, &ec2.DescribeLaunchTemplatesInput{ Filters: []*ec2.Filter{ - {Name: aws.String(fmt.Sprintf("tag:%s", v1beta1.TagManagedLaunchTemplate)), Values: 
[]*string{aws.String(clusterName)}}, - {Name: aws.String(fmt.Sprintf("tag:%s", v1beta1.LabelNodeClass)), Values: []*string{aws.String(nodeClass.Name)}}, + {Name: aws.String(fmt.Sprintf("tag:%s", v1.TagManagedLaunchTemplate)), Values: []*string{aws.String(clusterName)}}, + {Name: aws.String(fmt.Sprintf("tag:%s", v1.LabelNodeClass)), Values: []*string{aws.String(nodeClass.Name)}}, }, }, func(output *ec2.DescribeLaunchTemplatesOutput, _ bool) bool { for _, lt := range output.LaunchTemplates { diff --git a/pkg/providers/launchtemplate/suite_test.go b/pkg/providers/launchtemplate/suite_test.go index aac85d53a014..cf724f665b69 100644 --- a/pkg/providers/launchtemplate/suite_test.go +++ b/pkg/providers/launchtemplate/suite_test.go @@ -37,7 +37,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -47,7 +47,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" corecloudprovider "sigs.k8s.io/karpenter/pkg/cloudprovider" "sigs.k8s.io/karpenter/pkg/controllers/provisioning" "sigs.k8s.io/karpenter/pkg/controllers/state" @@ -59,7 +59,7 @@ import ( . "sigs.k8s.io/karpenter/pkg/utils/testing" "github.com/aws/karpenter-provider-aws/pkg/apis" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/cloudprovider" "github.com/aws/karpenter-provider-aws/pkg/controllers/nodeclass/status" "github.com/aws/karpenter-provider-aws/pkg/fake" @@ -122,14 +122,14 @@ var _ = AfterEach(func() { }) var _ = Describe("LaunchTemplate Provider", func() { - var nodePool *corev1beta1.NodePool - var nodeClass *v1beta1.EC2NodeClass + var nodePool *karpv1.NodePool + var nodeClass *v1.EC2NodeClass BeforeEach(func() { nodeClass = test.EC2NodeClass( - v1beta1.EC2NodeClass{ - Status: v1beta1.EC2NodeClassStatus{ + v1.EC2NodeClass{ + Status: v1.EC2NodeClassStatus{ InstanceProfile: "test-profile", - SecurityGroups: []v1beta1.SecurityGroup{ + SecurityGroups: []v1.SecurityGroup{ { ID: "sg-test1", }, @@ -140,7 +140,7 @@ var _ = Describe("LaunchTemplate Provider", func() { ID: "sg-test3", }, }, - Subnets: []v1beta1.Subnet{ + Subnets: []v1.Subnet{ { ID: "subnet-test1", Zone: "test-zone-1a", @@ -158,28 +158,27 @@ var _ = Describe("LaunchTemplate Provider", func() { }, ) nodeClass.StatusConditions().SetTrue(opstatus.ConditionReady) - nodePool = coretest.NodePool(corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Template: corev1beta1.NodeClaimTemplate{ - ObjectMeta: corev1beta1.ObjectMeta{ + nodePool = coretest.NodePool(karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Template: karpv1.NodeClaimTemplate{ + ObjectMeta: karpv1.ObjectMeta{ // TODO @joinnis: Move this into the coretest.NodePool function Labels: map[string]string{coretest.DiscoveryLabel: "unspecified"}, }, - Spec: corev1beta1.NodeClaimSpec{ - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + Spec: karpv1.NodeClaimSpec{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeOnDemand}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + 
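Reviewer note: DeleteAll above scopes launch-template deletion with two tag filters so only templates owned by this cluster and this node class are touched. A sketch of the filter construction on its own:

package sketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"

	v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1"
)

// managedLTFilters builds the two tag filters DeleteAll passes to
// DescribeLaunchTemplatesPagesWithContext.
func managedLTFilters(clusterName, nodeClassName string) []*ec2.Filter {
	return []*ec2.Filter{
		{Name: aws.String(fmt.Sprintf("tag:%s", v1.TagManagedLaunchTemplate)), Values: []*string{aws.String(clusterName)}},
		{Name: aws.String(fmt.Sprintf("tag:%s", v1.LabelNodeClass)), Values: []*string{aws.String(nodeClassName)}},
	}
}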
Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeOnDemand}, }, }, }, - Kubelet: &corev1beta1.KubeletConfiguration{}, - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }, @@ -191,8 +190,8 @@ var _ = Describe("LaunchTemplate Provider", func() { Expect(awsEnv.InstanceTypesProvider.UpdateInstanceTypeOfferings(ctx)).To(Succeed()) }) It("should create unique launch templates for multiple identical nodeClasses", func() { - nodeClass2 := test.EC2NodeClass(v1beta1.EC2NodeClass{ - Status: v1beta1.EC2NodeClassStatus{ + nodeClass2 := test.EC2NodeClass(v1.EC2NodeClass{ + Status: v1.EC2NodeClassStatus{ InstanceProfile: "test-profile", Subnets: nodeClass.Status.Subnets, SecurityGroups: nodeClass.Status.SecurityGroups, @@ -201,29 +200,29 @@ var _ = Describe("LaunchTemplate Provider", func() { }) _, err := awsEnv.SubnetProvider.List(ctx, nodeClass2) // Hydrate the subnet cache Expect(err).To(BeNil()) - nodePool2 := coretest.NodePool(corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodePool2 := coretest.NodePool(karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeSpot}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeSpot}, }, }, }, - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass2).GroupVersion().String(), - Kind: object.GVK(nodeClass2).Kind, - Name: nodeClass2.Name, + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass2).Group, + Kind: object.GVK(nodeClass2).Kind, + Name: nodeClass2.Name, }, }, }, }, }) - nodeClass2.Status.SecurityGroups = []v1beta1.SecurityGroup{ + nodeClass2.Status.SecurityGroups = []v1.SecurityGroup{ { ID: "sg-test1", }, @@ -234,7 +233,7 @@ var _ = Describe("LaunchTemplate Provider", func() { ID: "sg-test3", }, } - nodeClass2.Status.Subnets = []v1beta1.Subnet{ + nodeClass2.Status.Subnets = []v1.Subnet{ { ID: "subnet-test1", Zone: "test-zone-1a", @@ -250,20 +249,20 @@ var _ = Describe("LaunchTemplate Provider", func() { } nodeClass2.StatusConditions().SetTrue(opstatus.ConditionReady) - pods := []*v1.Pod{ - coretest.UnschedulablePod(coretest.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ + pods := []*corev1.Pod{ + coretest.UnschedulablePod(coretest.PodOptions{NodeRequirements: []corev1.NodeSelectorRequirement{ { - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeSpot}, + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeSpot}, }, }, }), - coretest.UnschedulablePod(coretest.PodOptions{NodeRequirements: []v1.NodeSelectorRequirement{ + coretest.UnschedulablePod(coretest.PodOptions{NodeRequirements: 
[]corev1.NodeSelectorRequirement{ { - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeOnDemand}, + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeOnDemand}, }, }, }), @@ -275,7 +274,7 @@ var _ = Describe("LaunchTemplate Provider", func() { nodeClasses := [2]string{nodeClass.Name, nodeClass2.Name} awsEnv.EC2API.CalledWithCreateLaunchTemplateInput.ForEach(func(ltInput *ec2.CreateLaunchTemplateInput) { for _, value := range ltInput.LaunchTemplateData.TagSpecifications[0].Tags { - if *value.Key == v1beta1.LabelNodeClass { + if *value.Key == v1.LabelNodeClass { Expect(*value.Value).To(BeElementOf(nodeClasses)) } } @@ -321,19 +320,19 @@ var _ = Describe("LaunchTemplate Provider", func() { }) Context("Cache", func() { It("should use same launch template for equivalent constraints", func() { - t1 := v1.Toleration{ + t1 := corev1.Toleration{ Key: "Abacus", Operator: "Equal", Value: "Zebra", Effect: "NoSchedule", } - t2 := v1.Toleration{ + t2 := corev1.Toleration{ Key: "Zebra", Operator: "Equal", Value: "Abacus", Effect: "NoSchedule", } - t3 := v1.Toleration{ + t3 := corev1.Toleration{ Key: "Boar", Operator: "Equal", Value: "Abacus", @@ -341,17 +340,17 @@ var _ = Describe("LaunchTemplate Provider", func() { } // constrain the packer to a single launch template type - rr := v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("24"), - v1beta1.ResourceNVIDIAGPU: resource.MustParse("1"), + rr := corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("24"), + v1.ResourceNVIDIAGPU: resource.MustParse("1"), }, - Limits: v1.ResourceList{v1beta1.ResourceNVIDIAGPU: resource.MustParse("1")}, + Limits: corev1.ResourceList{v1.ResourceNVIDIAGPU: resource.MustParse("1")}, } ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod1 := coretest.UnschedulablePod(coretest.PodOptions{ - Tolerations: []v1.Toleration{t1, t2, t3}, + Tolerations: []corev1.Toleration{t1, t2, t3}, ResourceRequirements: rr, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod1) @@ -364,7 +363,7 @@ var _ = Describe("LaunchTemplate Provider", func() { } pod2 := coretest.UnschedulablePod(coretest.PodOptions{ - Tolerations: []v1.Toleration{t2, t3, t1}, + Tolerations: []corev1.Toleration{t2, t3, t1}, ResourceRequirements: rr, }) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod2) @@ -379,7 +378,7 @@ var _ = Describe("LaunchTemplate Provider", func() { Expect(lts1.Equal(lts2)).To(BeTrue()) }) It("should recover from an out-of-sync launch template cache", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{MaxPods: aws.Int32(1)} + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{MaxPods: aws.Int32(1)} ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod() ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) @@ -409,8 +408,8 @@ var _ = Describe("LaunchTemplate Provider", func() { {ClusterEndpoint: "test-endpoint"}, {ClusterCIDR: lo.ToPtr("test-cidr")}, {InstanceProfile: "test-profile"}, - {InstanceStorePolicy: lo.ToPtr(v1beta1.InstanceStorePolicyRAID0)}, - {SecurityGroups: []v1beta1.SecurityGroup{{Name: "test-sg"}}}, + {InstanceStorePolicy: lo.ToPtr(v1.InstanceStorePolicyRAID0)}, + {SecurityGroups: []v1.SecurityGroup{{Name: "test-sg"}}}, {Tags: map[string]string{"test-key": "test-value"}}, {KubeDNSIP: 
net.ParseIP("192.0.0.2")}, {AssociatePublicIPAddress: lo.ToPtr(true)}, @@ -439,10 +438,10 @@ var _ = Describe("LaunchTemplate Provider", func() { Expect(lo.Uniq(launchtemplateResult)[0]).To(Equal(launchtemplate.LaunchTemplateName(&amifamily.LaunchTemplate{Options: &amifamily.Options{}}))) }) It("should generate different launch template names based on kubelet configuration", func() { - kubeletChanges := []*corev1beta1.KubeletConfiguration{ + kubeletChanges := []*v1.KubeletConfiguration{ {}, - {KubeReserved: map[string]string{string(v1.ResourceCPU): "20"}}, - {SystemReserved: map[string]string{string(v1.ResourceMemory): "10Gi"}}, + {KubeReserved: map[string]string{string(corev1.ResourceCPU): "20"}}, + {SystemReserved: map[string]string{string(corev1.ResourceMemory): "10Gi"}}, {EvictionHard: map[string]string{"memory.available": "52%"}}, {EvictionSoft: map[string]string{"nodefs.available": "132%"}}, {MaxPods: aws.Int32(20)}, @@ -461,7 +460,7 @@ var _ = Describe("LaunchTemplate Provider", func() { {ClusterName: "test-name"}, {ClusterEndpoint: "test-endpoint"}, {ClusterCIDR: lo.ToPtr("test-cidr")}, - {Taints: []v1.Taint{{Key: "test-key", Value: "test-value"}}}, + {Taints: []corev1.Taint{{Key: "test-key", Value: "test-value"}}}, {Labels: map[string]string{"test-key": "test-value"}}, {CABundle: lo.ToPtr("test-bundle")}, {AWSENILimitedPodDensity: true}, @@ -479,7 +478,7 @@ var _ = Describe("LaunchTemplate Provider", func() { It("should generate different launch template names based on launchtemplate option configuration", func() { launchtemplates := []*amifamily.LaunchTemplate{ {}, - {BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{{DeviceName: lo.ToPtr("test-block")}}}, + {BlockDeviceMappings: []*v1.BlockDeviceMapping{{DeviceName: lo.ToPtr("test-block")}}}, {AMIID: "test-ami"}, {DetailedMonitoring: true}, {EFACount: 12}, @@ -511,9 +510,9 @@ var _ = Describe("LaunchTemplate Provider", func() { pod := coretest.UnschedulablePod() ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) - Expect(node.Labels).To(HaveKey(v1.LabelOSStable)) - Expect(node.Labels).To(HaveKey(v1.LabelArchStable)) - Expect(node.Labels).To(HaveKey(v1.LabelInstanceTypeStable)) + Expect(node.Labels).To(HaveKey(corev1.LabelOSStable)) + Expect(node.Labels).To(HaveKey(corev1.LabelArchStable)) + Expect(node.Labels).To(HaveKey(corev1.LabelInstanceTypeStable)) }) }) Context("Tags", func() { @@ -545,12 +544,12 @@ var _ = Describe("LaunchTemplate Provider", func() { "tag1": "tag1value", "tag2": "tag2value", } - nodePool.Spec.Template.Spec.Requirements = []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodePool.Spec.Template.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeSpot}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeSpot}, }, }, } @@ -595,7 +594,7 @@ var _ = Describe("LaunchTemplate Provider", func() { }) Context("Block Device Mappings", func() { It("should default AL2 block device mappings", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2 ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod() ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) 
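Reviewer note: the naming tests above lean on LaunchTemplateName being a pure function of its options — a FormatV2 hashstructure hash prefixed with the API group, per the hunk in launchtemplate.go. A minimal sketch of the determinism property they assert; the options struct here is illustrative, not the real amifamily type:

package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure/v2"
)

type options struct {
	AMIID   string
	MaxPods int32
}

func main() {
	// SlicesAsSets mirrors the HashOptions used by LaunchTemplateName, which is
	// why reordered tolerations (t1,t2,t3 vs t2,t3,t1) reuse one template above.
	opts := &hashstructure.HashOptions{SlicesAsSets: true}
	h1, _ := hashstructure.Hash(options{AMIID: "ami-123", MaxPods: 10}, hashstructure.FormatV2, opts)
	h2, _ := hashstructure.Hash(options{AMIID: "ami-123", MaxPods: 20}, hashstructure.FormatV2, opts)
	fmt.Println(h1 != h2) // true: a kubelet-level change yields a new template name
}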
@@ -609,7 +608,7 @@ var _ = Describe("LaunchTemplate Provider", func() { }) }) It("should default AL2023 block device mappings", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2023 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2023 awsEnv.LaunchTemplateProvider.CABundle = lo.ToPtr("Y2EtYnVuZGxlCg==") awsEnv.LaunchTemplateProvider.ClusterCIDR.Store(lo.ToPtr("10.100.0.0/16")) ExpectApplied(ctx, env.Client, nodePool, nodeClass) @@ -625,11 +624,11 @@ var _ = Describe("LaunchTemplate Provider", func() { }) }) It("should use custom block device mapping", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2 - nodeClass.Spec.BlockDeviceMappings = []*v1beta1.BlockDeviceMapping{ + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2 + nodeClass.Spec.BlockDeviceMappings = []*v1.BlockDeviceMapping{ { DeviceName: aws.String("/dev/xvda"), - EBS: &v1beta1.BlockDevice{ + EBS: &v1.BlockDevice{ DeleteOnTermination: aws.Bool(true), Encrypted: aws.Bool(true), VolumeType: aws.String("io2"), @@ -640,7 +639,7 @@ var _ = Describe("LaunchTemplate Provider", func() { }, { DeviceName: aws.String("/dev/xvdb"), - EBS: &v1beta1.BlockDevice{ + EBS: &v1.BlockDevice{ DeleteOnTermination: aws.Bool(true), Encrypted: aws.Bool(true), VolumeType: aws.String("io2"), @@ -675,11 +674,11 @@ var _ = Describe("LaunchTemplate Provider", func() { }) }) It("should round up for custom block device mappings when specified in gigabytes", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2 - nodeClass.Spec.BlockDeviceMappings = []*v1beta1.BlockDeviceMapping{ + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2 + nodeClass.Spec.BlockDeviceMappings = []*v1.BlockDeviceMapping{ { DeviceName: aws.String("/dev/xvda"), - EBS: &v1beta1.BlockDevice{ + EBS: &v1.BlockDevice{ DeleteOnTermination: aws.Bool(true), Encrypted: aws.Bool(true), VolumeType: aws.String("io2"), @@ -690,7 +689,7 @@ var _ = Describe("LaunchTemplate Provider", func() { }, { DeviceName: aws.String("/dev/xvdb"), - EBS: &v1beta1.BlockDevice{ + EBS: &v1.BlockDevice{ DeleteOnTermination: aws.Bool(true), Encrypted: aws.Bool(true), VolumeType: aws.String("io2"), @@ -712,7 +711,7 @@ var _ = Describe("LaunchTemplate Provider", func() { }) }) It("should default bottlerocket second volume with root volume size", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket + nodeClass.Spec.AMIFamily = &v1.AMIFamilyBottlerocket ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod() ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) @@ -731,8 +730,8 @@ var _ = Describe("LaunchTemplate Provider", func() { }) }) It("should not default block device mappings for custom AMIFamilies", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyCustom - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}} + nodeClass.Spec.AMIFamily = &v1.AMIFamilyCustom + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}} ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod() ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) @@ -743,12 +742,12 @@ var _ = Describe("LaunchTemplate Provider", func() { }) }) It("should use custom block device mapping for custom AMIFamilies", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyCustom - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}} - nodeClass.Spec.BlockDeviceMappings = []*v1beta1.BlockDeviceMapping{ + 
nodeClass.Spec.AMIFamily = &v1.AMIFamilyCustom + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}} + nodeClass.Spec.BlockDeviceMappings = []*v1.BlockDeviceMapping{ { DeviceName: aws.String("/dev/xvda"), - EBS: &v1beta1.BlockDevice{ + EBS: &v1.BlockDevice{ DeleteOnTermination: aws.Bool(true), Encrypted: aws.Bool(true), VolumeType: aws.String("io2"), @@ -778,10 +777,10 @@ var _ = Describe("LaunchTemplate Provider", func() { It("should pack pods when a daemonset has an ephemeral-storage request", func() { ExpectApplied(ctx, env.Client, nodePool, nodeClass, coretest.DaemonSet( coretest.DaemonSetOptions{PodOptions: coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), - v1.ResourceMemory: resource.MustParse("1Gi"), - v1.ResourceEphemeralStorage: resource.MustParse("1Gi")}}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + corev1.ResourceEphemeralStorage: resource.MustParse("1Gi")}}, }}, )) pod := coretest.UnschedulablePod() @@ -790,57 +789,57 @@ var _ = Describe("LaunchTemplate Provider", func() { }) It("should pack pods with any ephemeral-storage request", func() { ExpectApplied(ctx, env.Client, nodePool, nodeClass) - pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: v1.ResourceRequirements{ - Requests: map[v1.ResourceName]resource.Quantity{ - v1.ResourceEphemeralStorage: resource.MustParse("1G"), + pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceEphemeralStorage: resource.MustParse("1G"), }}}) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) ExpectScheduled(ctx, env.Client, pod) }) It("should pack pods with large ephemeral-storage request", func() { ExpectApplied(ctx, env.Client, nodePool, nodeClass) - pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: v1.ResourceRequirements{ - Requests: map[v1.ResourceName]resource.Quantity{ - v1.ResourceEphemeralStorage: resource.MustParse("10Gi"), + pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"), }}}) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) ExpectScheduled(ctx, env.Client, pod) }) It("should not pack pods if the sum of pod ephemeral-storage and overhead exceeds node capacity", func() { ExpectApplied(ctx, env.Client, nodePool, nodeClass) - pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: v1.ResourceRequirements{ - Requests: map[v1.ResourceName]resource.Quantity{ - v1.ResourceEphemeralStorage: resource.MustParse("19Gi"), + pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceEphemeralStorage: resource.MustParse("19Gi"), }}}) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) ExpectNotScheduled(ctx, env.Client, pod) }) It("should pack pods if the pod's ephemeral-storage exceeds node capacity and instance storage is mounted", func() { - nodeClass.Spec.InstanceStorePolicy = lo.ToPtr(v1beta1.InstanceStorePolicyRAID0) + 
+				nodeClass.Spec.InstanceStorePolicy = lo.ToPtr(v1.InstanceStorePolicyRAID0)
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
-				pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: v1.ResourceRequirements{
-					Requests: map[v1.ResourceName]resource.Quantity{
+				pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: corev1.ResourceRequirements{
+					Requests: map[corev1.ResourceName]resource.Quantity{
 						// Default node ephemeral-storage capacity is 20Gi
-						v1.ResourceEphemeralStorage: resource.MustParse("5000Gi"),
+						corev1.ResourceEphemeralStorage: resource.MustParse("5000Gi"),
 					}}})
 				ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
 				node := ExpectScheduled(ctx, env.Client, pod)
-				Expect(node.Labels[v1.LabelInstanceTypeStable]).To(Equal("m6idn.32xlarge"))
+				Expect(node.Labels[corev1.LabelInstanceTypeStable]).To(Equal("m6idn.32xlarge"))
 				Expect(*node.Status.Capacity.StorageEphemeral()).To(Equal(resource.MustParse("7600G")))
 			})
 			It("should launch multiple nodes if sum of pod ephemeral-storage requests exceeds a single nodes capacity", func() {
-				var nodes []*v1.Node
+				var nodes []*corev1.Node
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
-				pods := []*v1.Pod{
-					coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: v1.ResourceRequirements{
-						Requests: map[v1.ResourceName]resource.Quantity{
-							v1.ResourceEphemeralStorage: resource.MustParse("10Gi"),
+				pods := []*corev1.Pod{
+					coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: corev1.ResourceRequirements{
+						Requests: map[corev1.ResourceName]resource.Quantity{
+							corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"),
 						},
 					},
 					}),
-					coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: v1.ResourceRequirements{
-						Requests: map[v1.ResourceName]resource.Quantity{
-							v1.ResourceEphemeralStorage: resource.MustParse("10Gi"),
+					coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: corev1.ResourceRequirements{
+						Requests: map[corev1.ResourceName]resource.Quantity{
+							corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"),
 						},
 					},
 					}),
@@ -853,16 +852,16 @@ var _ = Describe("LaunchTemplate Provider", func() {
 			})
 			It("should only pack pods with ephemeral-storage requests that will fit on an available node", func() {
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
-				pods := []*v1.Pod{
-					coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: v1.ResourceRequirements{
-						Requests: map[v1.ResourceName]resource.Quantity{
-							v1.ResourceEphemeralStorage: resource.MustParse("10Gi"),
+				pods := []*corev1.Pod{
+					coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: corev1.ResourceRequirements{
+						Requests: map[corev1.ResourceName]resource.Quantity{
+							corev1.ResourceEphemeralStorage: resource.MustParse("10Gi"),
 						},
 					},
 					}),
-					coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: v1.ResourceRequirements{
-						Requests: map[v1.ResourceName]resource.Quantity{
-							v1.ResourceEphemeralStorage: resource.MustParse("150Gi"),
+					coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: corev1.ResourceRequirements{
+						Requests: map[corev1.ResourceName]resource.Quantity{
+							corev1.ResourceEphemeralStorage: resource.MustParse("150Gi"),
 						},
 					},
 					}),
@@ -873,9 +872,9 @@ var _ = Describe("LaunchTemplate Provider", func() {
 			})
 			It("should not pack pod if no available instance types have enough storage", func() {
 				ExpectApplied(ctx, env.Client, nodePool)
-				pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: v1.ResourceRequirements{
-					Requests: map[v1.ResourceName]resource.Quantity{
-						v1.ResourceEphemeralStorage: resource.MustParse("150Gi"),
+				pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: corev1.ResourceRequirements{
+					Requests: map[corev1.ResourceName]resource.Quantity{
+						corev1.ResourceEphemeralStorage: resource.MustParse("150Gi"),
 					},
 				},
 				})
@@ -883,24 +882,24 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				ExpectNotScheduled(ctx, env.Client, pod)
 			})
 			It("should pack pods using the blockdevicemappings from the provider spec when defined", func() {
-				nodeClass.Spec.BlockDeviceMappings = []*v1beta1.BlockDeviceMapping{
+				nodeClass.Spec.BlockDeviceMappings = []*v1.BlockDeviceMapping{
 					{
 						DeviceName: aws.String("/dev/xvda"),
-						EBS: &v1beta1.BlockDevice{
+						EBS: &v1.BlockDevice{
 							VolumeSize: resource.NewScaledQuantity(50, resource.Giga),
 						},
 					},
 					{
 						DeviceName: aws.String("/dev/xvdb"),
-						EBS: &v1beta1.BlockDevice{
+						EBS: &v1.BlockDevice{
 							VolumeSize: resource.NewScaledQuantity(20, resource.Giga),
 						},
 					},
 				}
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
-				pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: v1.ResourceRequirements{
-					Requests: map[v1.ResourceName]resource.Quantity{
-						v1.ResourceEphemeralStorage: resource.MustParse("25Gi"),
+				pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: corev1.ResourceRequirements{
+					Requests: map[corev1.ResourceName]resource.Quantity{
+						corev1.ResourceEphemeralStorage: resource.MustParse("25Gi"),
 					},
 				},
 				})
@@ -910,27 +909,27 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				ExpectScheduled(ctx, env.Client, pod)
 			})
 			It("should pack pods using blockdevicemappings for Custom AMIFamily", func() {
-				nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyCustom
-				nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}}
-				nodeClass.Spec.BlockDeviceMappings = []*v1beta1.BlockDeviceMapping{
+				nodeClass.Spec.AMIFamily = &v1.AMIFamilyCustom
+				nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}}
+				nodeClass.Spec.BlockDeviceMappings = []*v1.BlockDeviceMapping{
 					{
 						DeviceName: aws.String("/dev/xvda"),
-						EBS: &v1beta1.BlockDevice{
+						EBS: &v1.BlockDevice{
 							VolumeSize: resource.NewScaledQuantity(20, resource.Giga),
 						},
 					},
 					{
 						DeviceName: aws.String("/dev/xvdb"),
-						EBS: &v1beta1.BlockDevice{
+						EBS: &v1.BlockDevice{
 							VolumeSize: resource.NewScaledQuantity(40, resource.Giga),
 						},
 					},
 				}
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
-				pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: v1.ResourceRequirements{
-					Requests: map[v1.ResourceName]resource.Quantity{
+				pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: corev1.ResourceRequirements{
+					Requests: map[corev1.ResourceName]resource.Quantity{
 						// this pod can only be satisfied if `/dev/xvdb` will house all the pods.
-						v1.ResourceEphemeralStorage: resource.MustParse("25Gi"),
+						corev1.ResourceEphemeralStorage: resource.MustParse("25Gi"),
 					},
 				},
 				})
@@ -940,34 +939,34 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				ExpectScheduled(ctx, env.Client, pod)
 			})
 			It("should pack pods using the configured root volume in blockdevicemappings", func() {
-				nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyCustom
-				nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}}
-				nodeClass.Spec.BlockDeviceMappings = []*v1beta1.BlockDeviceMapping{
+				nodeClass.Spec.AMIFamily = &v1.AMIFamilyCustom
+				nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}}
+				nodeClass.Spec.BlockDeviceMappings = []*v1.BlockDeviceMapping{
 					{
 						DeviceName: aws.String("/dev/xvda"),
-						EBS: &v1beta1.BlockDevice{
+						EBS: &v1.BlockDevice{
 							VolumeSize: resource.NewScaledQuantity(20, resource.Giga),
 						},
 					},
 					{
 						DeviceName: aws.String("/dev/xvdb"),
-						EBS: &v1beta1.BlockDevice{
+						EBS: &v1.BlockDevice{
 							VolumeSize: resource.NewScaledQuantity(40, resource.Giga),
 						},
 						RootVolume: true,
 					},
 					{
 						DeviceName: aws.String("/dev/xvdc"),
-						EBS: &v1beta1.BlockDevice{
+						EBS: &v1.BlockDevice{
 							VolumeSize: resource.NewScaledQuantity(20, resource.Giga),
 						},
 					},
 				}
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
-				pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: v1.ResourceRequirements{
-					Requests: map[v1.ResourceName]resource.Quantity{
+				pod := coretest.UnschedulablePod(coretest.PodOptions{ResourceRequirements: corev1.ResourceRequirements{
+					Requests: map[corev1.ResourceName]resource.Quantity{
 						// this pod can only be satisfied if `/dev/xvdb` will house all the pods.
-						v1.ResourceEphemeralStorage: resource.MustParse("25Gi"),
+						corev1.ResourceEphemeralStorage: resource.MustParse("25Gi"),
 					},
 				},
 				})
@@ -1009,19 +1008,20 @@ var _ = Describe("LaunchTemplate Provider", func() {
 					VMMemoryOverheadPercent: lo.ToPtr[float64](0),
 				}))
-				nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2
+				nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{}
 				amiFamily := amifamily.GetAMIFamily(nodeClass.Spec.AMIFamily, &amifamily.Options{})
 				it := instancetype.NewInstanceType(ctx,
 					info,
 					"",
 					nodeClass.Spec.BlockDeviceMappings,
 					nodeClass.Spec.InstanceStorePolicy,
-					nodePool.Spec.Template.Spec.Kubelet.MaxPods,
-					nodePool.Spec.Template.Spec.Kubelet.PodsPerCore,
-					nodePool.Spec.Template.Spec.Kubelet.KubeReserved,
-					nodePool.Spec.Template.Spec.Kubelet.SystemReserved,
-					nodePool.Spec.Template.Spec.Kubelet.EvictionHard,
-					nodePool.Spec.Template.Spec.Kubelet.EvictionSoft,
+					nodeClass.Spec.Kubelet.MaxPods,
+					nodeClass.Spec.Kubelet.PodsPerCore,
+					nodeClass.Spec.Kubelet.KubeReserved,
+					nodeClass.Spec.Kubelet.SystemReserved,
+					nodeClass.Spec.Kubelet.EvictionHard,
+					nodeClass.Spec.Kubelet.EvictionSoft,
 					amiFamily,
 					nil,
 				)
@@ -1062,19 +1062,20 @@ var _ = Describe("LaunchTemplate Provider", func() {
 					VMMemoryOverheadPercent: lo.ToPtr[float64](0),
 				}))
-				nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket
+				nodeClass.Spec.AMIFamily = &v1.AMIFamilyBottlerocket
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{}
 				amiFamily := amifamily.GetAMIFamily(nodeClass.Spec.AMIFamily, &amifamily.Options{})
 				it := instancetype.NewInstanceType(ctx,
 					info,
 					"",
 					nodeClass.Spec.BlockDeviceMappings,
 					nodeClass.Spec.InstanceStorePolicy,
-					nodePool.Spec.Template.Spec.Kubelet.MaxPods,
-					nodePool.Spec.Template.Spec.Kubelet.PodsPerCore,
-					nodePool.Spec.Template.Spec.Kubelet.KubeReserved,
-					nodePool.Spec.Template.Spec.Kubelet.SystemReserved,
-					nodePool.Spec.Template.Spec.Kubelet.EvictionHard,
-					nodePool.Spec.Template.Spec.Kubelet.EvictionSoft,
+					nodeClass.Spec.Kubelet.MaxPods,
+					nodeClass.Spec.Kubelet.PodsPerCore,
+					nodeClass.Spec.Kubelet.KubeReserved,
+					nodeClass.Spec.Kubelet.SystemReserved,
+					nodeClass.Spec.Kubelet.EvictionHard,
+					nodeClass.Spec.Kubelet.EvictionSoft,
 					amiFamily,
 					nil,
 				)
@@ -1087,20 +1088,20 @@ var _ = Describe("LaunchTemplate Provider", func() {
 					VMMemoryOverheadPercent: lo.ToPtr[float64](0),
 				}))
-				nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{MaxPods: lo.ToPtr[int32](110)}
+				nodeClass.Spec.AMIFamily = &v1.AMIFamilyBottlerocket
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{MaxPods: lo.ToPtr[int32](110)}
 				amiFamily := amifamily.GetAMIFamily(nodeClass.Spec.AMIFamily, &amifamily.Options{})
 				it := instancetype.NewInstanceType(ctx,
 					info,
 					"",
 					nodeClass.Spec.BlockDeviceMappings,
 					nodeClass.Spec.InstanceStorePolicy,
-					nodePool.Spec.Template.Spec.Kubelet.MaxPods,
-					nodePool.Spec.Template.Spec.Kubelet.PodsPerCore,
-					nodePool.Spec.Template.Spec.Kubelet.KubeReserved,
-					nodePool.Spec.Template.Spec.Kubelet.SystemReserved,
-					nodePool.Spec.Template.Spec.Kubelet.EvictionHard,
-					nodePool.Spec.Template.Spec.Kubelet.EvictionSoft,
+					nodeClass.Spec.Kubelet.MaxPods,
+					nodeClass.Spec.Kubelet.PodsPerCore,
+					nodeClass.Spec.Kubelet.KubeReserved,
+					nodeClass.Spec.Kubelet.SystemReserved,
+					nodeClass.Spec.Kubelet.EvictionHard,
+					nodeClass.Spec.Kubelet.EvictionSoft,
 					amiFamily,
 					nil,
 				)
@@ -1117,7 +1118,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				ExpectLaunchTemplatesCreatedWithUserDataContaining("--use-max-pods false")
 			})
 			It("should specify --use-max-pods=false and --max-pods user value when user specifies maxPods in NodePool", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{MaxPods: aws.Int32(10)}
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{MaxPods: aws.Int32(10)}
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
 				pod := coretest.UnschedulablePod()
 				ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
@@ -1125,11 +1126,11 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				ExpectLaunchTemplatesCreatedWithUserDataContaining("--use-max-pods false", "--max-pods=10")
 			})
 			It("should specify --system-reserved when overriding system reserved values", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					SystemReserved: map[string]string{
-						string(v1.ResourceCPU): "500m",
-						string(v1.ResourceMemory): "1Gi",
-						string(v1.ResourceEphemeralStorage): "2Gi",
+						string(corev1.ResourceCPU): "500m",
+						string(corev1.ResourceMemory): "1Gi",
+						string(corev1.ResourceEphemeralStorage): "2Gi",
 					},
 				}
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
@@ -1146,17 +1147,17 @@ var _ = Describe("LaunchTemplate Provider", func() {
 					i := strings.Index(string(userData), arg)
 					rem := string(userData)[(i + len(arg)):]
 					i = strings.Index(rem, "'")
-					for k, v := range nodePool.Spec.Template.Spec.Kubelet.SystemReserved {
+					for k, v := range nodeClass.Spec.Kubelet.SystemReserved {
 						Expect(rem[:i]).To(ContainSubstring(fmt.Sprintf("%v=%v", k, v)))
 					}
 				})
 			})
 			It("should specify --kube-reserved when overriding kube reserved values", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					KubeReserved: map[string]string{
-						string(v1.ResourceCPU): "500m",
-						string(v1.ResourceMemory): "1Gi",
-						string(v1.ResourceEphemeralStorage): "2Gi",
+						string(corev1.ResourceCPU): "500m",
+						string(corev1.ResourceMemory): "1Gi",
+						string(corev1.ResourceEphemeralStorage): "2Gi",
 					},
 				}
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
@@ -1173,13 +1174,13 @@ var _ = Describe("LaunchTemplate Provider", func() {
 					i := strings.Index(string(userData), arg)
 					rem := string(userData)[(i + len(arg)):]
 					i = strings.Index(rem, "'")
-					for k, v := range nodePool.Spec.Template.Spec.Kubelet.KubeReserved {
+					for k, v := range nodeClass.Spec.Kubelet.KubeReserved {
 						Expect(rem[:i]).To(ContainSubstring(fmt.Sprintf("%v=%v", k, v)))
 					}
 				})
 			})
 			It("should pass eviction hard threshold values when specified", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					EvictionHard: map[string]string{
 						"memory.available": "10%",
 						"nodefs.available": "15%",
@@ -1200,13 +1201,13 @@ var _ = Describe("LaunchTemplate Provider", func() {
 					i := strings.Index(string(userData), arg)
 					rem := string(userData)[(i + len(arg)):]
 					i = strings.Index(rem, "'")
-					for k, v := range nodePool.Spec.Template.Spec.Kubelet.EvictionHard {
+					for k, v := range nodeClass.Spec.Kubelet.EvictionHard {
 						Expect(rem[:i]).To(ContainSubstring(fmt.Sprintf("%v<%v", k, v)))
 					}
 				})
 			})
 			It("should pass eviction soft threshold values when specified", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					EvictionSoft: map[string]string{
 						"memory.available": "10%",
 						"nodefs.available": "15%",
@@ -1232,13 +1233,13 @@ var _ = Describe("LaunchTemplate Provider", func() {
 					i := strings.Index(string(userData), arg)
 					rem := string(userData)[(i + len(arg)):]
 					i = strings.Index(rem, "'")
-					for k, v := range nodePool.Spec.Template.Spec.Kubelet.EvictionSoft {
+					for k, v := range nodeClass.Spec.Kubelet.EvictionSoft {
 						Expect(rem[:i]).To(ContainSubstring(fmt.Sprintf("%v<%v", k, v)))
 					}
 				})
 			})
 			It("should pass eviction soft grace period values when specified", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					EvictionSoftGracePeriod: map[string]metav1.Duration{
 						"memory.available": {Duration: time.Minute},
 						"nodefs.available": {Duration: time.Second * 180},
@@ -1264,13 +1265,13 @@ var _ = Describe("LaunchTemplate Provider", func() {
 					i := strings.Index(string(userData), arg)
 					rem := string(userData)[(i + len(arg)):]
 					i = strings.Index(rem, "'")
-					for k, v := range nodePool.Spec.Template.Spec.Kubelet.EvictionSoftGracePeriod {
+					for k, v := range nodeClass.Spec.Kubelet.EvictionSoftGracePeriod {
 						Expect(rem[:i]).To(ContainSubstring(fmt.Sprintf("%v=%v", k, v.Duration.String())))
 					}
 				})
 			})
 			It("should pass eviction max pod grace period when specified", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					EvictionMaxPodGracePeriod: aws.Int32(300),
 				}
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
@@ -1280,7 +1281,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				ExpectLaunchTemplatesCreatedWithUserDataContaining(fmt.Sprintf("--eviction-max-pod-grace-period=%d", 300))
 			})
 			It("should specify --pods-per-core", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					PodsPerCore: aws.Int32(2),
 				}
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
@@ -1290,7 +1291,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				ExpectLaunchTemplatesCreatedWithUserDataContaining(fmt.Sprintf("--pods-per-core=%d", 2))
 			})
 			It("should specify --pods-per-core with --max-pods enabled", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					PodsPerCore: aws.Int32(2),
 					MaxPods: aws.Int32(100),
 				}
@@ -1317,7 +1318,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				ExpectLaunchTemplatesCreatedWithUserDataContaining("--dns-cluster-ip '10.0.100.10'")
 			})
 			It("should pass ImageGCHighThresholdPercent when specified", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					ImageGCHighThresholdPercent: aws.Int32(50),
 				}
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
@@ -1327,7 +1328,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				ExpectLaunchTemplatesCreatedWithUserDataContaining("--image-gc-high-threshold=50")
 			})
 			It("should pass ImageGCLowThresholdPercent when specified", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					ImageGCLowThresholdPercent: aws.Int32(50),
 				}
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
@@ -1337,7 +1338,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				ExpectLaunchTemplatesCreatedWithUserDataContaining("--image-gc-low-threshold=50")
 			})
 			It("should pass --cpu-cfs-quota when specified", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					CPUCFSQuota: aws.Bool(false),
 				}
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
@@ -1348,19 +1349,19 @@ var _ = Describe("LaunchTemplate Provider", func() {
 			})
 			It("should not pass any labels prefixed with the node-restriction.kubernetes.io domain", func() {
 				nodePool.Spec.Template.Labels = lo.Assign(nodePool.Spec.Template.Labels, map[string]string{
-					v1.LabelNamespaceNodeRestriction + "/team": "team-1",
-					v1.LabelNamespaceNodeRestriction + "/custom-label": "custom-value",
-					"subdomain." + v1.LabelNamespaceNodeRestriction + "/custom-label": "custom-value",
+					corev1.LabelNamespaceNodeRestriction + "/team": "team-1",
+					corev1.LabelNamespaceNodeRestriction + "/custom-label": "custom-value",
+					"subdomain." + corev1.LabelNamespaceNodeRestriction + "/custom-label": "custom-value",
 				})
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
 				pod := coretest.UnschedulablePod()
 				ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
 				ExpectScheduled(ctx, env.Client, pod)
-				ExpectLaunchTemplatesCreatedWithUserDataNotContaining(v1.LabelNamespaceNodeRestriction)
+				ExpectLaunchTemplatesCreatedWithUserDataNotContaining(corev1.LabelNamespaceNodeRestriction)
 			})
 			It("should specify --local-disks raid0 when instance-store policy is set on AL2", func() {
-				nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2
-				nodeClass.Spec.InstanceStorePolicy = lo.ToPtr(v1beta1.InstanceStorePolicyRAID0)
+				nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2
+				nodeClass.Spec.InstanceStorePolicy = lo.ToPtr(v1.InstanceStorePolicyRAID0)
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
 				pod := coretest.UnschedulablePod()
 				ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
@@ -1369,44 +1370,44 @@ var _ = Describe("LaunchTemplate Provider", func() {
 		})
 		Context("Bottlerocket", func() {
 			BeforeEach(func() {
-				nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{MaxPods: lo.ToPtr[int32](110)}
+				nodeClass.Spec.AMIFamily = &v1.AMIFamilyBottlerocket
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{MaxPods: lo.ToPtr[int32](110)}
 			})
 			It("should merge in custom user data", func() {
 				content, err := os.ReadFile("testdata/br_userdata_input.golden")
 				Expect(err).To(BeNil())
-				nodeClass.Spec.UserData = aws.String(fmt.Sprintf(string(content), corev1beta1.NodePoolLabelKey))
-				nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Key: "foo", Value: "bar", Effect: v1.TaintEffectNoExecute}}
-				nodePool.Spec.Template.Spec.StartupTaints = []v1.Taint{{Key: "baz", Value: "bin", Effect: v1.TaintEffectNoExecute}}
+				nodeClass.Spec.UserData = aws.String(fmt.Sprintf(string(content), karpv1.NodePoolLabelKey))
+				nodePool.Spec.Template.Spec.Taints = []corev1.Taint{{Key: "foo", Value: "bar", Effect: corev1.TaintEffectNoExecute}}
+				nodePool.Spec.Template.Spec.StartupTaints = []corev1.Taint{{Key: "baz", Value: "bin", Effect: corev1.TaintEffectNoExecute}}
 				ExpectApplied(ctx, env.Client, nodeClass, nodePool)
 				pod := coretest.UnschedulablePod(coretest.PodOptions{
-					Tolerations: []v1.Toleration{{Operator: v1.TolerationOpExists}},
+					Tolerations: []corev1.Toleration{{Operator: corev1.TolerationOpExists}},
 				})
 				ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
 				ExpectScheduled(ctx, env.Client, pod)
 				content, err = os.ReadFile("testdata/br_userdata_merged.golden")
 				Expect(err).To(BeNil())
-				ExpectLaunchTemplatesCreatedWithUserData(fmt.Sprintf(string(content), corev1beta1.NodePoolLabelKey, nodePool.Name))
+				ExpectLaunchTemplatesCreatedWithUserData(fmt.Sprintf(string(content), karpv1.NodePoolLabelKey, nodePool.Name))
 			})
 			It("should bootstrap when custom user data is empty", func() {
-				nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Key: "foo", Value: "bar", Effect: v1.TaintEffectNoExecute}}
-				nodePool.Spec.Template.Spec.StartupTaints = []v1.Taint{{Key: "baz", Value: "bin", Effect: v1.TaintEffectNoExecute}}
+				nodePool.Spec.Template.Spec.Taints = []corev1.Taint{{Key: "foo", Value: "bar", Effect: corev1.TaintEffectNoExecute}}
+				nodePool.Spec.Template.Spec.StartupTaints = []corev1.Taint{{Key: "baz", Value: "bin", Effect: corev1.TaintEffectNoExecute}}
 				ExpectApplied(ctx, env.Client, nodeClass, nodePool)
 				Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(nodePool), nodePool)).To(Succeed())
 				pod := coretest.UnschedulablePod(coretest.PodOptions{
-					Tolerations: []v1.Toleration{{Operator: v1.TolerationOpExists}},
+					Tolerations: []corev1.Toleration{{Operator: corev1.TolerationOpExists}},
 				})
 				ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
 				ExpectScheduled(ctx, env.Client, pod)
 				content, err := os.ReadFile("testdata/br_userdata_unmerged.golden")
 				Expect(err).To(BeNil())
-				ExpectLaunchTemplatesCreatedWithUserData(fmt.Sprintf(string(content), corev1beta1.NodePoolLabelKey, nodePool.Name))
+				ExpectLaunchTemplatesCreatedWithUserData(fmt.Sprintf(string(content), karpv1.NodePoolLabelKey, nodePool.Name))
 			})
 			It("should not bootstrap when provider ref points to a non-existent EC2NodeClass resource", func() {
-				nodePool.Spec.Template.Spec.NodeClassRef = &corev1beta1.NodeClassReference{
-					APIVersion: "doesnotexist",
-					Kind: "doesnotexist",
-					Name: "doesnotexist",
+				nodePool.Spec.Template.Spec.NodeClassRef = &karpv1.NodeClassReference{
+					Group: "doesnotexist",
+					Kind: "doesnotexist",
+					Name: "doesnotexist",
 				}
 				ExpectApplied(ctx, env.Client, nodePool)
 				pod := coretest.UnschedulablePod()
@@ -1423,15 +1424,14 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				ExpectNotScheduled(ctx, env.Client, pod)
 			})
 			It("should override system reserved values in user data", func() {
-				ExpectApplied(ctx, env.Client, nodeClass)
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					SystemReserved: map[string]string{
-						string(v1.ResourceCPU): "2",
-						string(v1.ResourceMemory): "3Gi",
-						string(v1.ResourceEphemeralStorage): "10Gi",
+						string(corev1.ResourceCPU): "2",
+						string(corev1.ResourceMemory): "3Gi",
+						string(corev1.ResourceEphemeralStorage): "10Gi",
 					},
 				}
-				ExpectApplied(ctx, env.Client, nodePool)
+				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
 				pod := coretest.UnschedulablePod()
 				ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
 				ExpectScheduled(ctx, env.Client, pod)
@@ -1442,21 +1442,20 @@ var _ = Describe("LaunchTemplate Provider", func() {
 					config := &bootstrap.BottlerocketConfig{}
 					Expect(config.UnmarshalTOML(userData)).To(Succeed())
 					Expect(len(config.Settings.Kubernetes.SystemReserved)).To(Equal(3))
-					Expect(config.Settings.Kubernetes.SystemReserved[v1.ResourceCPU.String()]).To(Equal("2"))
-					Expect(config.Settings.Kubernetes.SystemReserved[v1.ResourceMemory.String()]).To(Equal("3Gi"))
-					Expect(config.Settings.Kubernetes.SystemReserved[v1.ResourceEphemeralStorage.String()]).To(Equal("10Gi"))
+					Expect(config.Settings.Kubernetes.SystemReserved[corev1.ResourceCPU.String()]).To(Equal("2"))
+					Expect(config.Settings.Kubernetes.SystemReserved[corev1.ResourceMemory.String()]).To(Equal("3Gi"))
+					Expect(config.Settings.Kubernetes.SystemReserved[corev1.ResourceEphemeralStorage.String()]).To(Equal("10Gi"))
 				})
 			})
 			It("should override kube reserved values in user data", func() {
-				ExpectApplied(ctx, env.Client, nodeClass)
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					KubeReserved: map[string]string{
-						string(v1.ResourceCPU): "2",
-						string(v1.ResourceMemory): "3Gi",
-						string(v1.ResourceEphemeralStorage): "10Gi",
+						string(corev1.ResourceCPU): "2",
+						string(corev1.ResourceMemory): "3Gi",
+						string(corev1.ResourceEphemeralStorage): "10Gi",
 					},
 				}
-				ExpectApplied(ctx, env.Client, nodePool)
+				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
 				pod := coretest.UnschedulablePod()
 				ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
 				ExpectScheduled(ctx, env.Client, pod)
@@ -1467,21 +1466,20 @@ var _ = Describe("LaunchTemplate Provider", func() {
 					config := &bootstrap.BottlerocketConfig{}
 					Expect(config.UnmarshalTOML(userData)).To(Succeed())
 					Expect(len(config.Settings.Kubernetes.KubeReserved)).To(Equal(3))
-					Expect(config.Settings.Kubernetes.KubeReserved[v1.ResourceCPU.String()]).To(Equal("2"))
-					Expect(config.Settings.Kubernetes.KubeReserved[v1.ResourceMemory.String()]).To(Equal("3Gi"))
-					Expect(config.Settings.Kubernetes.KubeReserved[v1.ResourceEphemeralStorage.String()]).To(Equal("10Gi"))
+					Expect(config.Settings.Kubernetes.KubeReserved[corev1.ResourceCPU.String()]).To(Equal("2"))
+					Expect(config.Settings.Kubernetes.KubeReserved[corev1.ResourceMemory.String()]).To(Equal("3Gi"))
+					Expect(config.Settings.Kubernetes.KubeReserved[corev1.ResourceEphemeralStorage.String()]).To(Equal("10Gi"))
 				})
 			})
 			It("should override eviction hard values in user data", func() {
-				ExpectApplied(ctx, env.Client, nodeClass)
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					EvictionHard: map[string]string{
 						"memory.available": "10%",
 						"nodefs.available": "15%",
 						"nodefs.inodesFree": "5%",
 					},
 				}
-				ExpectApplied(ctx, env.Client, nodePool)
+				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
 				pod := coretest.UnschedulablePod()
 				ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
 				ExpectScheduled(ctx, env.Client, pod)
@@ -1498,7 +1496,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				})
 			})
 			It("should specify max pods value when passing maxPods in configuration", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					MaxPods: aws.Int32(10),
 				}
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
@@ -1516,7 +1514,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				})
 			})
 			It("should pass ImageGCHighThresholdPercent when specified", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					ImageGCHighThresholdPercent: aws.Int32(50),
 				}
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
@@ -1536,7 +1534,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				})
 			})
 			It("should pass ImageGCLowThresholdPercent when specified", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					ImageGCLowThresholdPercent: aws.Int32(50),
 				}
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
@@ -1571,7 +1569,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				})
 			})
 			It("should pass CPUCFSQuota when specified", func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
 					CPUCFSQuota: aws.Bool(false),
 				}
 				ExpectApplied(ctx, env.Client, nodePool, nodeClass)
@@ -1591,7 +1589,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 		})
 		Context("AL2 Custom UserData", func() {
 			BeforeEach(func() {
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{MaxPods: lo.ToPtr[int32](110)}
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{MaxPods: lo.ToPtr[int32](110)}
 			})
 			It("should merge in custom user data", func() {
 				content, err := os.ReadFile("testdata/al2_userdata_input.golden")
@@ -1603,7 +1601,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				ExpectScheduled(ctx, env.Client, pod)
 				content, err = os.ReadFile("testdata/al2_userdata_merged.golden")
 				Expect(err).To(BeNil())
-				expectedUserData := fmt.Sprintf(string(content), corev1beta1.NodePoolLabelKey, nodePool.Name)
+				expectedUserData := fmt.Sprintf(string(content), karpv1.NodePoolLabelKey, nodePool.Name)
 				ExpectLaunchTemplatesCreatedWithUserData(expectedUserData)
 			})
 			It("should merge in custom user data when Content-Type is before MIME-Version", func() {
@@ -1616,7 +1614,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				ExpectScheduled(ctx, env.Client, pod)
 				content, err = os.ReadFile("testdata/al2_userdata_merged.golden")
 				Expect(err).To(BeNil())
-				expectedUserData := fmt.Sprintf(string(content), corev1beta1.NodePoolLabelKey, nodePool.Name)
+				expectedUserData := fmt.Sprintf(string(content), karpv1.NodePoolLabelKey, nodePool.Name)
 				ExpectLaunchTemplatesCreatedWithUserData(expectedUserData)
 			})
 			It("should merge in custom user data not in multi-part mime format", func() {
@@ -1629,7 +1627,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				ExpectScheduled(ctx, env.Client, pod)
 				content, err = os.ReadFile("testdata/al2_userdata_merged.golden")
 				Expect(err).To(BeNil())
-				expectedUserData := fmt.Sprintf(string(content), corev1beta1.NodePoolLabelKey, nodePool.Name)
+				expectedUserData := fmt.Sprintf(string(content), karpv1.NodePoolLabelKey, nodePool.Name)
 				ExpectLaunchTemplatesCreatedWithUserData(expectedUserData)
 			})
 			It("should handle empty custom user data", func() {
@@ -1640,13 +1638,13 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				ExpectScheduled(ctx, env.Client, pod)
 				content, err := os.ReadFile("testdata/al2_userdata_unmerged.golden")
 				Expect(err).To(BeNil())
-				expectedUserData := fmt.Sprintf(string(content), corev1beta1.NodePoolLabelKey, nodePool.Name)
+				expectedUserData := fmt.Sprintf(string(content), karpv1.NodePoolLabelKey, nodePool.Name)
 				ExpectLaunchTemplatesCreatedWithUserData(expectedUserData)
 			})
 		})
 		Context("AL2023", func() {
 			BeforeEach(func() {
-				nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2023
+				nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2023
 				// base64 encoded version of "ca-bundle" to ensure the nodeadm bootstrap provider can decode successfully
 				awsEnv.LaunchTemplateProvider.CABundle = lo.ToPtr("Y2EtYnVuZGxlCg==")
@@ -1654,21 +1652,21 @@ var _ = Describe("LaunchTemplate Provider", func() {
 			})
 			Context("Kubelet", func() {
 				It("should specify taints in the KubeletConfiguration when specified in NodePool", func() {
-					desiredTaints := []v1.Taint{
+					desiredTaints := []corev1.Taint{
 						{
 							Key: "test-taint-1",
-							Effect: v1.TaintEffectNoSchedule,
+							Effect: corev1.TaintEffectNoSchedule,
 						},
 						{
 							Key: "test-taint-2",
-							Effect: v1.TaintEffectNoExecute,
+							Effect: corev1.TaintEffectNoExecute,
 						},
 					}
 					nodePool.Spec.Template.Spec.Taints = desiredTaints
 					ExpectApplied(ctx, env.Client, nodePool, nodeClass)
 					pod := coretest.UnschedulablePod(coretest.UnscheduleablePodOptions(coretest.PodOptions{
-						Tolerations: []v1.Toleration{{
-							Operator: v1.TolerationOpExists,
+						Tolerations: []corev1.Toleration{{
+							Operator: corev1.TolerationOpExists,
 						}},
 					}))
 					ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
@@ -1678,10 +1676,10 @@ var _ = Describe("LaunchTemplate Provider", func() {
 					Expect(len(configs)).To(Equal(1))
 					taintsRaw, ok := configs[0].Spec.Kubelet.Config["registerWithTaints"]
 					Expect(ok).To(BeTrue())
-					taints := []v1.Taint{}
+					taints := []corev1.Taint{}
 					Expect(yaml.Unmarshal(taintsRaw.Raw, &taints)).To(Succeed())
 					Expect(len(taints)).To(Equal(3))
-					Expect(taints).To(ContainElements(lo.Map(desiredTaints, func(t v1.Taint, _ int) interface{} {
+					Expect(taints).To(ContainElements(lo.Map(desiredTaints, func(t corev1.Taint, _ int) interface{} {
 						return interface{}(t)
 					})))
 				}
@@ -1711,8 +1709,8 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				})
 				DescribeTable(
 					"should specify KubeletConfiguration field when specified in NodePool",
-					func(field string, kc corev1beta1.KubeletConfiguration) {
-						nodePool.Spec.Template.Spec.Kubelet = &kc
+					func(field string, kc v1.KubeletConfiguration) {
+						nodeClass.Spec.Kubelet = &kc
 						ExpectApplied(ctx, env.Client, nodePool, nodeClass)
 						pod := coretest.UnschedulablePod()
 						ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
@@ -1736,28 +1734,28 @@ var _ = Describe("LaunchTemplate Provider", func() {
 							Expect(configs[0].Spec.Kubelet.Config[field]).To(Equal(inlineConfig[field]))
 						}
 					},
-					Entry("systemReserved", "systemReserved", corev1beta1.KubeletConfiguration{
+					Entry("systemReserved", "systemReserved", v1.KubeletConfiguration{
 						SystemReserved: map[string]string{
-							string(v1.ResourceCPU): "500m",
-							string(v1.ResourceMemory): "1Gi",
-							string(v1.ResourceEphemeralStorage): "2Gi",
+							string(corev1.ResourceCPU): "500m",
+							string(corev1.ResourceMemory): "1Gi",
+							string(corev1.ResourceEphemeralStorage): "2Gi",
 						},
 					}),
-					Entry("kubeReserved", "kubeReserved", corev1beta1.KubeletConfiguration{
+					Entry("kubeReserved", "kubeReserved", v1.KubeletConfiguration{
 						KubeReserved: map[string]string{
-							string(v1.ResourceCPU): "500m",
-							string(v1.ResourceMemory): "1Gi",
-							string(v1.ResourceEphemeralStorage): "2Gi",
+							string(corev1.ResourceCPU): "500m",
+							string(corev1.ResourceMemory): "1Gi",
+							string(corev1.ResourceEphemeralStorage): "2Gi",
 						},
 					}),
-					Entry("evictionHard", "evictionHard", corev1beta1.KubeletConfiguration{
+					Entry("evictionHard", "evictionHard", v1.KubeletConfiguration{
 						EvictionHard: map[string]string{
 							"memory.available": "10%",
 							"nodefs.available": "15%",
 							"nodefs.inodesFree": "5%",
 						},
 					}),
-					Entry("evictionSoft", "evictionSoft", corev1beta1.KubeletConfiguration{
+					Entry("evictionSoft", "evictionSoft", v1.KubeletConfiguration{
 						EvictionSoft: map[string]string{
 							"memory.available": "10%",
 							"nodefs.available": "15%",
@@ -1769,7 +1767,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 							"nodefs.inodesFree": {Duration: time.Minute * 5},
 						},
 					}),
-					Entry("evictionSoftGracePeriod", "evictionSoftGracePeriod", corev1beta1.KubeletConfiguration{
+					Entry("evictionSoftGracePeriod", "evictionSoftGracePeriod", v1.KubeletConfiguration{
 						EvictionSoft: map[string]string{
 							"memory.available": "10%",
 							"nodefs.available": "15%",
@@ -1781,28 +1779,28 @@ var _ = Describe("LaunchTemplate Provider", func() {
 							"nodefs.inodesFree": {Duration: time.Minute * 5},
 						},
 					}),
-					Entry("evictionMaxPodGracePeriod", "evictionMaxPodGracePeriod", corev1beta1.KubeletConfiguration{
+					Entry("evictionMaxPodGracePeriod", "evictionMaxPodGracePeriod", v1.KubeletConfiguration{
 						EvictionMaxPodGracePeriod: lo.ToPtr[int32](300),
 					}),
-					Entry("podsPerCore", "podsPerCore", corev1beta1.KubeletConfiguration{
+					Entry("podsPerCore", "podsPerCore", v1.KubeletConfiguration{
 						PodsPerCore: lo.ToPtr[int32](2),
 					}),
-					Entry("clusterDNS", "clusterDNS", corev1beta1.KubeletConfiguration{
+					Entry("clusterDNS", "clusterDNS", v1.KubeletConfiguration{
 						ClusterDNS: []string{"10.0.100.0"},
 					}),
-					Entry("imageGCHighThresholdPercent", "imageGCHighThresholdPercent", corev1beta1.KubeletConfiguration{
+					Entry("imageGCHighThresholdPercent", "imageGCHighThresholdPercent", v1.KubeletConfiguration{
 						ImageGCHighThresholdPercent: lo.ToPtr[int32](50),
 					}),
-					Entry("imageGCLowThresholdPercent", "imageGCLowThresholdPercent", corev1beta1.KubeletConfiguration{
+					Entry("imageGCLowThresholdPercent", "imageGCLowThresholdPercent", v1.KubeletConfiguration{
 						ImageGCLowThresholdPercent: lo.ToPtr[int32](50),
 					}),
-					Entry("cpuCFSQuota", "cpuCFSQuota", corev1beta1.KubeletConfiguration{
+					Entry("cpuCFSQuota", "cpuCFSQuota", v1.KubeletConfiguration{
 						CPUCFSQuota: lo.ToPtr(false),
 					}),
 				)
 			})
 			It("should set LocalDiskStrategy to Raid0 when specified by the InstanceStorePolicy", func() {
-				nodeClass.Spec.InstanceStorePolicy = lo.ToPtr(v1beta1.InstanceStorePolicyRAID0)
+				nodeClass.Spec.InstanceStorePolicy = lo.ToPtr(v1.InstanceStorePolicyRAID0)
 				ExpectApplied(ctx, env.Client, nodeClass, nodePool)
 				pod := coretest.UnschedulablePod()
 				ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
@@ -1821,14 +1819,14 @@ var _ = Describe("LaunchTemplate Provider", func() {
 					Expect(err).To(BeNil())
 					nodeClass.Spec.UserData = lo.ToPtr(string(content))
 				}
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{MaxPods: lo.ToPtr[int32](110)}
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{MaxPods: lo.ToPtr[int32](110)}
 				ExpectApplied(ctx, env.Client, nodeClass, nodePool)
 				pod := coretest.UnschedulablePod()
 				ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
 				ExpectScheduled(ctx, env.Client, pod)
 				content, err := os.ReadFile("testdata/" + mergedFile)
 				Expect(err).To(BeNil())
-				expectedUserData := fmt.Sprintf(string(content), corev1beta1.NodePoolLabelKey, nodePool.Name)
+				expectedUserData := fmt.Sprintf(string(content), karpv1.NodePoolLabelKey, nodePool.Name)
 				ExpectLaunchTemplatesCreatedWithUserData(expectedUserData)
 			},
 				Entry("MIME", lo.ToPtr("al2023_mime_userdata_input.golden"), "al2023_mime_userdata_merged.golden"),
@@ -1847,12 +1845,12 @@ var _ = Describe("LaunchTemplate Provider", func() {
 		})
 		Context("Custom AMI Selector", func() {
 			It("should use ami selector specified in EC2NodeClass", func() {
-				nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}}
-				nodeClass.Status.AMIs = []v1beta1.AMI{
+				nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}}
+				nodeClass.Status.AMIs = []v1.AMI{
 					{
 						ID: "ami-123",
-						Requirements: []v1.NodeSelectorRequirement{
-							{Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.ArchitectureAmd64}},
+						Requirements: []corev1.NodeSelectorRequirement{
+							{Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.ArchitectureAmd64}},
 						},
 					},
 				}
@@ -1867,13 +1865,13 @@ var _ = Describe("LaunchTemplate Provider", func() {
 			})
 			It("should copy over userData untouched when AMIFamily is Custom", func() {
 				nodeClass.Spec.UserData = aws.String("special user data")
-				nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}}
-				nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyCustom
-				nodeClass.Status.AMIs = []v1beta1.AMI{
+				nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}}
+				nodeClass.Spec.AMIFamily = &v1.AMIFamilyCustom
+				nodeClass.Status.AMIs = []v1.AMI{
 					{
 						ID: "ami-123",
-						Requirements: []v1.NodeSelectorRequirement{
-							{Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.ArchitectureAmd64}},
+						Requirements: []corev1.NodeSelectorRequirement{
+							{Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.ArchitectureAmd64}},
 						},
 					},
 				}
@@ -1884,20 +1882,20 @@ var _ = Describe("LaunchTemplate Provider", func() {
Describe("LaunchTemplate Provider", func() { ExpectLaunchTemplatesCreatedWithUserData("special user data") }) It("should correctly use ami selector with specific IDs in EC2NodeClass", func() { - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{ID: "ami-123"}, {ID: "ami-456"}} + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{ID: "ami-123"}, {ID: "ami-456"}} awsEnv.EC2API.DescribeImagesOutput.Set(&ec2.DescribeImagesOutput{Images: []*ec2.Image{ { Name: aws.String(coretest.RandomName()), ImageId: aws.String("ami-123"), Architecture: aws.String("x86_64"), - Tags: []*ec2.Tag{{Key: aws.String(v1.LabelInstanceTypeStable), Value: aws.String("m5.large")}}, + Tags: []*ec2.Tag{{Key: aws.String(corev1.LabelInstanceTypeStable), Value: aws.String("m5.large")}}, CreationDate: aws.String("2022-08-15T12:00:00Z"), }, { Name: aws.String(coretest.RandomName()), ImageId: aws.String("ami-456"), Architecture: aws.String("x86_64"), - Tags: []*ec2.Tag{{Key: aws.String(v1.LabelInstanceTypeStable), Value: aws.String("m5.xlarge")}}, + Tags: []*ec2.Tag{{Key: aws.String(corev1.LabelInstanceTypeStable), Value: aws.String("m5.xlarge")}}, CreationDate: aws.String("2022-08-15T12:00:00Z"), }, }}) @@ -1918,18 +1916,18 @@ var _ = Describe("LaunchTemplate Provider", func() { Expect(actualFilter).To(Equal(expectedFilter)) }) It("should create multiple launch templates when multiple amis are discovered with non-equivalent requirements", func() { - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}} - nodeClass.Status.AMIs = []v1beta1.AMI{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}} + nodeClass.Status.AMIs = []v1.AMI{ { ID: "ami-123", - Requirements: []v1.NodeSelectorRequirement{ - {Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.ArchitectureAmd64}}, + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.ArchitectureAmd64}}, }, }, { ID: "ami-456", - Requirements: []v1.NodeSelectorRequirement{ - {Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.ArchitectureArm64}}, + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.ArchitectureArm64}}, }, }, } @@ -1967,16 +1965,16 @@ var _ = Describe("LaunchTemplate Provider", func() { CreationDate: aws.String("2022-01-01T12:00:00Z"), }, }}) - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}} + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}} ExpectApplied(ctx, env.Client, nodeClass) controller := status.NewController(env.Client, awsEnv.SubnetProvider, awsEnv.SecurityGroupProvider, awsEnv.AMIProvider, awsEnv.InstanceProfileProvider, awsEnv.LaunchTemplateProvider) ExpectObjectReconciled(ctx, env.Client, controller, nodeClass) - nodePool.Spec.Template.Spec.Requirements = []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodePool.Spec.Template.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelArchStable, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.ArchitectureAmd64}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelArchStable, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.ArchitectureAmd64}, }, }, } 
@@ -1992,8 +1990,8 @@ var _ = Describe("LaunchTemplate Provider", func() { It("should fail if no amis match selector.", func() { awsEnv.EC2API.DescribeImagesOutput.Set(&ec2.DescribeImagesOutput{Images: []*ec2.Image{}}) - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}} - nodeClass.Status.AMIs = []v1beta1.AMI{} + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}} + nodeClass.Status.AMIs = []v1.AMI{} ExpectApplied(ctx, env.Client, nodeClass, nodePool) pod := coretest.UnschedulablePod() ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) @@ -2003,12 +2001,12 @@ var _ = Describe("LaunchTemplate Provider", func() { It("should fail if no instanceType matches ami requirements.", func() { awsEnv.EC2API.DescribeImagesOutput.Set(&ec2.DescribeImagesOutput{Images: []*ec2.Image{ {Name: aws.String(coretest.RandomName()), ImageId: aws.String("ami-123"), Architecture: aws.String("newnew"), CreationDate: aws.String("2022-01-01T12:00:00Z")}}}) - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}} - nodeClass.Status.AMIs = []v1beta1.AMI{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{Tags: map[string]string{"*": "*"}}} + nodeClass.Status.AMIs = []v1.AMI{ { ID: "ami-123", - Requirements: []v1.NodeSelectorRequirement{ - {Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{"newnew"}}, + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{"newnew"}}, }, }, } @@ -2023,11 +2021,11 @@ var _ = Describe("LaunchTemplate Provider", func() { awsEnv.SSMAPI.Parameters = map[string]string{ fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2/recommended/image_id", version): "test-ami-123", } - nodeClass.Status.AMIs = []v1beta1.AMI{ + nodeClass.Status.AMIs = []v1.AMI{ { ID: "test-ami-123", - Requirements: []v1.NodeSelectorRequirement{ - {Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{string(corev1beta1.ArchitectureAmd64)}}, + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{string(karpv1.ArchitectureAmd64)}}, }, }, } @@ -2047,9 +2045,9 @@ var _ = Describe("LaunchTemplate Provider", func() { nodeClass.Spec.AssociatePublicIPAddress = lo.ToPtr(setValue) ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod(lo.Ternary(isEFA, coretest.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1beta1.ResourceEFA: resource.MustParse("2")}, - Limits: v1.ResourceList{v1beta1.ResourceEFA: resource.MustParse("2")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{v1.ResourceEFA: resource.MustParse("2")}, + Limits: corev1.ResourceList{v1.ResourceEFA: resource.MustParse("2")}, }, }, coretest.PodOptions{})) ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) @@ -2065,7 +2063,7 @@ var _ = Describe("LaunchTemplate Provider", func() { }) Context("Kubelet Args", func() { It("should specify the --dns-cluster-ip flag when clusterDNSIP is set", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ClusterDNS: []string{"10.0.10.100"}} + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ClusterDNS: []string{"10.0.10.100"}} ExpectApplied(ctx, env.Client, nodePool, nodeClass) pod := coretest.UnschedulablePod() 
 				ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
@@ -2075,9 +2073,9 @@ var _ = Describe("LaunchTemplate Provider", func() {
 		})
 		Context("Windows Custom UserData", func() {
 			BeforeEach(func() {
-				nodePool.Spec.Template.Spec.Requirements = []corev1beta1.NodeSelectorRequirementWithMinValues{{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1.LabelOSStable, Operator: v1.NodeSelectorOpIn, Values: []string{string(v1.Windows)}}}}
-				nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyWindows2022
-				nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{MaxPods: lo.ToPtr[int32](110)}
+				nodePool.Spec.Template.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{{NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: corev1.LabelOSStable, Operator: corev1.NodeSelectorOpIn, Values: []string{string(corev1.Windows)}}}}
+				nodeClass.Spec.AMIFamily = &v1.AMIFamilyWindows2022
+				nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{MaxPods: lo.ToPtr[int32](110)}
 			})
 			It("should merge and bootstrap with custom user data", func() {
 				content, err := os.ReadFile("testdata/windows_userdata_input.golden")
@@ -2087,36 +2085,36 @@ var _ = Describe("LaunchTemplate Provider", func() {
 				Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(nodePool), nodePool)).To(Succeed())
 				pod := coretest.UnschedulablePod(coretest.PodOptions{
 					NodeSelector: map[string]string{
-						v1.LabelOSStable: string(v1.Windows),
-						v1.LabelWindowsBuild: "10.0.20348",
+						corev1.LabelOSStable: string(corev1.Windows),
+						corev1.LabelWindowsBuild: "10.0.20348",
 					},
 				})
 				ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
 				ExpectScheduled(ctx, env.Client, pod)
 				content, err = os.ReadFile("testdata/windows_userdata_merged.golden")
 				Expect(err).To(BeNil())
-				ExpectLaunchTemplatesCreatedWithUserData(fmt.Sprintf(string(content), corev1beta1.NodePoolLabelKey, nodePool.Name))
+				ExpectLaunchTemplatesCreatedWithUserData(fmt.Sprintf(string(content), karpv1.NodePoolLabelKey, nodePool.Name))
 			})
 			It("should bootstrap when custom user data is empty", func() {
 				ExpectApplied(ctx, env.Client, nodeClass, nodePool)
 				Expect(env.Client.Get(ctx, client.ObjectKeyFromObject(nodePool), nodePool)).To(Succeed())
 				pod := coretest.UnschedulablePod(coretest.PodOptions{
 					NodeSelector: map[string]string{
-						v1.LabelOSStable: string(v1.Windows),
-						v1.LabelWindowsBuild: "10.0.20348",
+						corev1.LabelOSStable: string(corev1.Windows),
+						corev1.LabelWindowsBuild: "10.0.20348",
 					},
 				})
 				ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
 				ExpectScheduled(ctx, env.Client, pod)
 				content, err := os.ReadFile("testdata/windows_userdata_unmerged.golden")
 				Expect(err).To(BeNil())
-				ExpectLaunchTemplatesCreatedWithUserData(fmt.Sprintf(string(content), corev1beta1.NodePoolLabelKey, nodePool.Name))
+				ExpectLaunchTemplatesCreatedWithUserData(fmt.Sprintf(string(content), karpv1.NodePoolLabelKey, nodePool.Name))
 			})
 		})
 	})
 	Context("Detailed Monitoring", func() {
 		It("should default detailed monitoring to off", func() {
-			nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2
+			nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2
 			ExpectApplied(ctx, env.Client, nodePool, nodeClass)
 			pod := coretest.UnschedulablePod()
 			ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
@@ -2127,7 +2125,7 @@ var _ = Describe("LaunchTemplate Provider", func() {
 		})
 		It("should pass detailed monitoring setting to the launch template at creation", func() {
-			nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2
+			nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2
 			nodeClass.Spec.DetailedMonitoring = aws.Bool(true)
 			ExpectApplied(ctx, env.Client, nodePool, nodeClass)
 			pod := coretest.UnschedulablePod()
diff --git a/pkg/providers/securitygroup/securitygroup.go b/pkg/providers/securitygroup/securitygroup.go
index 6ebb6a83f69b..998498277f32 100644
--- a/pkg/providers/securitygroup/securitygroup.go
+++ b/pkg/providers/securitygroup/securitygroup.go
@@ -29,11 +29,11 @@ import (
 	"sigs.k8s.io/karpenter/pkg/utils/pretty"
 
-	"github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1"
+	v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1"
 )
 
 type Provider interface {
-	List(context.Context, *v1beta1.EC2NodeClass) ([]*ec2.SecurityGroup, error)
+	List(context.Context, *v1.EC2NodeClass) ([]*ec2.SecurityGroup, error)
 }
 
 type DefaultProvider struct {
@@ -52,7 +52,7 @@ func NewDefaultProvider(ec2api ec2iface.EC2API, cache *cache.Cache) *DefaultProv
 	}
 }
 
-func (p *DefaultProvider) List(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) ([]*ec2.SecurityGroup, error) {
+func (p *DefaultProvider) List(ctx context.Context, nodeClass *v1.EC2NodeClass) ([]*ec2.SecurityGroup, error) {
 	p.Lock()
 	defer p.Unlock()
@@ -95,7 +95,7 @@ func (p *DefaultProvider) getSecurityGroups(ctx context.Context, filterSets [][]
 	return lo.Values(securityGroups), nil
 }
 
-func getFilterSets(terms []v1beta1.SecurityGroupSelectorTerm) (res [][]*ec2.Filter) {
+func getFilterSets(terms []v1.SecurityGroupSelectorTerm) (res [][]*ec2.Filter) {
 	idFilter := &ec2.Filter{Name: aws.String("group-id")}
 	nameFilter := &ec2.Filter{Name: aws.String("group-name")}
 	for _, term := range terms {
diff --git a/pkg/providers/securitygroup/suite_test.go b/pkg/providers/securitygroup/suite_test.go
index 8b414ce63f52..7adcbc95cb8e 100644
--- a/pkg/providers/securitygroup/suite_test.go
+++ b/pkg/providers/securitygroup/suite_test.go
@@ -27,7 +27,7 @@ import (
 	"github.com/samber/lo"
 
 	"github.com/aws/karpenter-provider-aws/pkg/apis"
-	"github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1"
+	v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1"
 	"github.com/aws/karpenter-provider-aws/pkg/operator/options"
 	"github.com/aws/karpenter-provider-aws/pkg/test"
@@ -44,7 +44,7 @@ var ctx context.Context
 var stop context.CancelFunc
 var env *coretest.Environment
 var awsEnv *test.Environment
-var nodeClass *v1beta1.EC2NodeClass
+var nodeClass *v1.EC2NodeClass
 
 func TestAWS(t *testing.T) {
 	ctx = TestContextWithLogger(t)
@@ -68,17 +68,17 @@ var _ = AfterSuite(func() {
 var _ = BeforeEach(func() {
 	ctx = coreoptions.ToContext(ctx, coretest.Options())
 	ctx = options.ToContext(ctx, test.Options())
-	nodeClass = test.EC2NodeClass(v1beta1.EC2NodeClass{
-		Spec: v1beta1.EC2NodeClassSpec{
-			AMIFamily: aws.String(v1beta1.AMIFamilyAL2),
-			SubnetSelectorTerms: []v1beta1.SubnetSelectorTerm{
+	nodeClass = test.EC2NodeClass(v1.EC2NodeClass{
+		Spec: v1.EC2NodeClassSpec{
+			AMIFamily: aws.String(v1.AMIFamilyAL2),
+			SubnetSelectorTerms: []v1.SubnetSelectorTerm{
 				{
 					Tags: map[string]string{
 						"*": "*",
 					},
 				},
 			},
-			SecurityGroupSelectorTerms: []v1beta1.SecurityGroupSelectorTerm{
+			SecurityGroupSelectorTerms: []v1.SecurityGroupSelectorTerm{
 				{
 					Tags: map[string]string{
 						"*": "*",
@@ -132,7 +132,7 @@ var _ = Describe("SecurityGroupProvider", func() {
 		}, securityGroups)
 	})
 	It("should discover security groups by multiple tag values", func() {
-		nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{
+		nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{
 			{
 				Tags: map[string]string{"Name": "test-security-group-1"},
 			},
@@ -154,7 +154,7 @@ var _ = Describe("SecurityGroupProvider", func() {
 		}, securityGroups)
 	})
 	It("should discover security groups by ID", func() {
-		nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{
+		nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{
 			{
 				ID: "sg-test1",
 			},
@@ -169,7 +169,7 @@ var _ = Describe("SecurityGroupProvider", func() {
 		}, securityGroups)
 	})
 	It("should discover security groups by IDs", func() {
-		nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{
+		nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{
 			{
 				ID: "sg-test1",
 			},
@@ -191,7 +191,7 @@ var _ = Describe("SecurityGroupProvider", func() {
 		}, securityGroups)
 	})
 	It("should discover security groups by IDs and tags", func() {
-		nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{
+		nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{
 			{
 				ID: "sg-test1",
 				Tags: map[string]string{"foo": "bar"},
@@ -215,7 +215,7 @@ var _ = Describe("SecurityGroupProvider", func() {
 		}, securityGroups)
 	})
 	It("should discover security groups by IDs intersected with tags", func() {
-		nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{
+		nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{
 			{
 				ID: "sg-test2",
 				Tags: map[string]string{"foo": "bar"},
@@ -231,7 +231,7 @@ var _ = Describe("SecurityGroupProvider", func() {
 		}, securityGroups)
 	})
 	It("should discover security groups by names", func() {
-		nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{
+		nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{
 			{
 				Name: "securityGroup-test2",
 			},
@@ -253,7 +253,7 @@ var _ = Describe("SecurityGroupProvider", func() {
 		}, securityGroups)
 	})
 	It("should discover security groups by names intersected with tags", func() {
-		nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{
+		nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{
 			{
 				Name: "securityGroup-test3",
 				Tags: map[string]string{"TestTag": "*"},
@@ -272,7 +272,7 @@ var _ = Describe("SecurityGroupProvider", func() {
 	It("should resolve security groups from cache that are filtered by id", func() {
 		expectedSecurityGroups := awsEnv.EC2API.DescribeSecurityGroupsOutput.Clone().SecurityGroups
 		for _, sg := range expectedSecurityGroups {
-			nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{
+			nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{
 				{
 					ID: *sg.GroupId,
 				},
@@ -291,7 +291,7 @@ var _ = Describe("SecurityGroupProvider", func() {
 	It("should resolve security groups from cache that are filtered by Name", func() {
 		expectedSecurityGroups := awsEnv.EC2API.DescribeSecurityGroupsOutput.Clone().SecurityGroups
 		for _, sg := range expectedSecurityGroups {
-			nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{
+			nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{
 				{
 					Name: *sg.GroupName,
 				},
@@ -316,7 +316,7 @@ var _ = Describe("SecurityGroupProvider", func() {
 			return map[string]string{"Name": lo.FromPtr(tag.Value)}
 		})
 		for _, tag := range tagSet {
-			nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{
+			nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{
 				{
 					Tags: tag,
 				},
diff --git a/pkg/providers/subnet/subnet.go b/pkg/providers/subnet/subnet.go
index c80be9c07b59..58fc1776804c 100644
--- a/pkg/providers/subnet/subnet.go
+++ b/pkg/providers/subnet/subnet.go
@@ -26,12 +26,12 @@ import (
 	"github.com/mitchellh/hashstructure/v2"
 	"github.com/patrickmn/go-cache"
 	"github.com/samber/lo"
-	v1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 
-	"github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1"
+	v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1"
 
-	corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1"
+	karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
 	"sigs.k8s.io/karpenter/pkg/cloudprovider"
 	"sigs.k8s.io/karpenter/pkg/scheduling"
 	"sigs.k8s.io/karpenter/pkg/utils/pretty"
@@ -39,8 +39,8 @@ import (
 type Provider interface {
 	LivenessProbe(*http.Request) error
-	List(context.Context, *v1beta1.EC2NodeClass) ([]*ec2.Subnet, error)
-	ZonalSubnetsForLaunch(context.Context, *v1beta1.EC2NodeClass, []*cloudprovider.InstanceType, string) (map[string]*Subnet, error)
+	List(context.Context, *v1.EC2NodeClass) ([]*ec2.Subnet, error)
+	ZonalSubnetsForLaunch(context.Context, *v1.EC2NodeClass, []*cloudprovider.InstanceType, string) (map[string]*Subnet, error)
 	UpdateInflightIPs(*ec2.CreateFleetInput, *ec2.CreateFleetOutput, []*cloudprovider.InstanceType, []*Subnet, string)
 }
@@ -75,7 +75,7 @@ func NewDefaultProvider(ec2api ec2iface.EC2API, cache *cache.Cache, availableIPA
 	}
 }
 
-func (p *DefaultProvider) List(ctx context.Context, nodeClass *v1beta1.EC2NodeClass) ([]*ec2.Subnet, error) {
+func (p *DefaultProvider) List(ctx context.Context, nodeClass *v1.EC2NodeClass) ([]*ec2.Subnet, error) {
 	p.Lock()
 	defer p.Unlock()
 	filterSets := getFilterSets(nodeClass.Spec.SubnetSelectorTerms)
@@ -111,8 +111,8 @@ func (p *DefaultProvider) List(ctx context.Context, nodeClass *v1beta1.EC2NodeCl
 	p.cache.SetDefault(fmt.Sprint(hash), lo.Values(subnets))
 	if p.cm.HasChanged(fmt.Sprintf("subnets/%s", nodeClass.Name), lo.Keys(subnets)) {
 		log.FromContext(ctx).
-			WithValues("subnets", lo.Map(lo.Values(subnets), func(s *ec2.Subnet, _ int) v1beta1.Subnet {
-				return v1beta1.Subnet{
+			WithValues("subnets", lo.Map(lo.Values(subnets), func(s *ec2.Subnet, _ int) v1.Subnet {
+				return v1.Subnet{
 					ID: lo.FromPtr(s.SubnetId),
 					Zone: lo.FromPtr(s.AvailabilityZone),
 					ZoneID: lo.FromPtr(s.AvailabilityZoneId),
@@ -123,7 +123,7 @@ func (p *DefaultProvider) List(ctx context.Context, nodeClass *v1beta1.EC2NodeCl
 }
 
 // ZonalSubnetsForLaunch returns a mapping of zone to the subnet with the most available IP addresses and deducts the passed ips from the available count
-func (p *DefaultProvider) ZonalSubnetsForLaunch(ctx context.Context, nodeClass *v1beta1.EC2NodeClass, instanceTypes []*cloudprovider.InstanceType, capacityType string) (map[string]*Subnet, error) {
+func (p *DefaultProvider) ZonalSubnetsForLaunch(ctx context.Context, nodeClass *v1.EC2NodeClass, instanceTypes []*cloudprovider.InstanceType, capacityType string) (map[string]*Subnet, error) {
 	if len(nodeClass.Status.Subnets) == 0 {
 		return nil, fmt.Errorf("no subnets matched selector %v", nodeClass.Spec.SubnetSelectorTerms)
 	}
@@ -159,8 +159,8 @@ func (p *DefaultProvider) ZonalSubnetsForLaunch(ctx context.Context, nodeClass *
 	for _, subnet := range zonalSubnets {
 		predictedIPsUsed := p.minPods(instanceTypes, scheduling.NewRequirements(
-			scheduling.NewRequirement(corev1beta1.CapacityTypeLabelKey, v1.NodeSelectorOpIn, capacityType),
-			scheduling.NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpIn, subnet.Zone),
+			scheduling.NewRequirement(karpv1.CapacityTypeLabelKey, corev1.NodeSelectorOpIn, capacityType),
+			scheduling.NewRequirement(corev1.LabelTopologyZone, corev1.NodeSelectorOpIn, subnet.Zone),
 		))
 		prevIPs := subnet.AvailableIPAddressCount
 		if trackedIPs, ok := p.inflightIPs[subnet.ID]; ok {
@@ -224,8 +224,8 @@ func (p *DefaultProvider) UpdateInflightIPs(createFleetInput *ec2.CreateFleetInp
 			// other IPs deducted were opportunistic and need to be readded since Fleet didn't pick those subnets to launch into
 			if ips, ok := p.inflightIPs[originalSubnet.ID]; ok {
 				minPods := p.minPods(instanceTypes, scheduling.NewRequirements(
-					scheduling.NewRequirement(corev1beta1.CapacityTypeLabelKey, v1.NodeSelectorOpIn, capacityType),
-					scheduling.NewRequirement(v1.LabelTopologyZone, v1.NodeSelectorOpIn, originalSubnet.Zone),
+					scheduling.NewRequirement(karpv1.CapacityTypeLabelKey, corev1.NodeSelectorOpIn, capacityType),
+					scheduling.NewRequirement(corev1.LabelTopologyZone, corev1.NodeSelectorOpIn, originalSubnet.Zone),
 				))
 				p.inflightIPs[originalSubnet.ID] = ips + minPods
 			}
@@ -255,7 +255,7 @@ func (p *DefaultProvider) minPods(instanceTypes []*cloudprovider.InstanceType, r
 	return pods
 }
 
-func getFilterSets(terms []v1beta1.SubnetSelectorTerm) (res [][]*ec2.Filter) {
+func getFilterSets(terms []v1.SubnetSelectorTerm) (res [][]*ec2.Filter) {
 	idFilter := &ec2.Filter{Name: aws.String("subnet-id")}
 	for _, term := range terms {
 		switch {
diff --git a/pkg/providers/subnet/suite_test.go b/pkg/providers/subnet/suite_test.go
index 1ce1c48af5bb..068fb93f4676 100644
--- a/pkg/providers/subnet/suite_test.go
+++ b/pkg/providers/subnet/suite_test.go
@@ -27,7 +27,7 @@ import (
 	"github.com/samber/lo"
 
 	"github.com/aws/karpenter-provider-aws/pkg/apis"
-	"github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1"
+	v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1"
 	"github.com/aws/karpenter-provider-aws/pkg/operator/options"
 	"github.com/aws/karpenter-provider-aws/pkg/test"
@@ -44,7 +44,7 @@ var ctx context.Context
 var stop context.CancelFunc
 var
env *coretest.Environment var awsEnv *test.Environment -var nodeClass *v1beta1.EC2NodeClass +var nodeClass *v1.EC2NodeClass func TestAWS(t *testing.T) { ctx = TestContextWithLogger(t) @@ -68,17 +68,17 @@ var _ = AfterSuite(func() { var _ = BeforeEach(func() { ctx = coreoptions.ToContext(ctx, coretest.Options()) ctx = options.ToContext(ctx, test.Options()) - nodeClass = test.EC2NodeClass(v1beta1.EC2NodeClass{ - Spec: v1beta1.EC2NodeClassSpec{ - AMIFamily: aws.String(v1beta1.AMIFamilyAL2), - SubnetSelectorTerms: []v1beta1.SubnetSelectorTerm{ + nodeClass = test.EC2NodeClass(v1.EC2NodeClass{ + Spec: v1.EC2NodeClassSpec{ + AMIFamily: aws.String(v1.AMIFamilyAL2), + SubnetSelectorTerms: []v1.SubnetSelectorTerm{ { Tags: map[string]string{ "*": "*", }, }, }, - SecurityGroupSelectorTerms: []v1beta1.SecurityGroupSelectorTerm{ + SecurityGroupSelectorTerms: []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{ "*": "*", @@ -97,7 +97,7 @@ var _ = AfterEach(func() { var _ = Describe("SubnetProvider", func() { Context("List", func() { It("should discover subnet by ID", func() { - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { ID: "subnet-test1", }, @@ -114,7 +114,7 @@ var _ = Describe("SubnetProvider", func() { }, subnets) }) It("should discover subnets by IDs", func() { - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { ID: "subnet-test1", }, @@ -140,7 +140,7 @@ var _ = Describe("SubnetProvider", func() { }, subnets) }) It("should discover subnets by IDs and tags", func() { - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { ID: "subnet-test1", Tags: map[string]string{"foo": "bar"}, @@ -168,7 +168,7 @@ var _ = Describe("SubnetProvider", func() { }, subnets) }) It("should discover subnets by a single tag", func() { - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { Tags: map[string]string{"Name": "test-subnet-1"}, }, @@ -185,7 +185,7 @@ var _ = Describe("SubnetProvider", func() { }, subnets) }) It("should discover subnets by multiple tag values", func() { - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { Tags: map[string]string{"Name": "test-subnet-1"}, }, @@ -211,7 +211,7 @@ var _ = Describe("SubnetProvider", func() { }, subnets) }) It("should discover subnets by IDs intersected with tags", func() { - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { ID: "subnet-test2", Tags: map[string]string{"foo": "bar"}, @@ -233,7 +233,7 @@ var _ = Describe("SubnetProvider", func() { It("should resolve subnets from cache that are filtered by id", func() { expectedSubnets := awsEnv.EC2API.DescribeSubnetsOutput.Clone().Subnets for _, subnet := range expectedSubnets { - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { ID: *subnet.SubnetId, }, @@ -258,7 +258,7 @@ var _ = Describe("SubnetProvider", func() { return map[string]string{"Name": lo.FromPtr(tag.Value)} }) for _, tag := range tagSet { - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { Tags: tag, }, 
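Note: the `getFilterSets` change above only swaps the selector type from `v1beta1.SubnetSelectorTerm` to `v1.SubnetSelectorTerm`; the term-to-EC2-filter translation these tests exercise is unchanged. For readers following the selector semantics, here is a minimal sketch of that translation, using a simplified stand-in for the selector type (not the real API type): ID terms collapse into one `subnet-id` filter, each tag term becomes its own filter set, filters within a set are ANDed, and the sets are ORed.

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// SubnetSelectorTerm is an illustrative stand-in for the v1 selector term,
// keeping only the fields relevant to filtering.
type SubnetSelectorTerm struct {
	ID   string
	Tags map[string]string
}

// filterSets sketches how selector terms fan out into EC2 filter sets.
func filterSets(terms []SubnetSelectorTerm) (res [][]*ec2.Filter) {
	idFilter := &ec2.Filter{Name: aws.String("subnet-id")}
	for _, term := range terms {
		if term.ID != "" {
			// All ID terms are collected into a single subnet-id filter.
			idFilter.Values = append(idFilter.Values, aws.String(term.ID))
			continue
		}
		var filters []*ec2.Filter
		for k, v := range term.Tags {
			if v == "*" {
				// A wildcard value matches on tag key presence alone.
				filters = append(filters, &ec2.Filter{Name: aws.String("tag-key"), Values: []*string{aws.String(k)}})
			} else {
				filters = append(filters, &ec2.Filter{Name: aws.String(fmt.Sprintf("tag:%s", k)), Values: []*string{aws.String(v)}})
			}
		}
		res = append(res, filters)
	}
	if len(idFilter.Values) > 0 {
		res = append(res, []*ec2.Filter{idFilter})
	}
	return res
}
```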
diff --git a/pkg/test/environment.go b/pkg/test/environment.go index bed6bd4728cc..015d5ae9a5eb 100644 --- a/pkg/test/environment.go +++ b/pkg/test/environment.go @@ -22,7 +22,8 @@ import ( "github.com/samber/lo" corev1 "k8s.io/api/core/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" + karpv1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" awscache "github.com/aws/karpenter-provider-aws/pkg/cache" "github.com/aws/karpenter-provider-aws/pkg/fake" @@ -42,7 +43,8 @@ import ( ) func init() { - corev1beta1.NormalizedLabels = lo.Assign(corev1beta1.NormalizedLabels, map[string]string{"topology.ebs.csi.aws.com/zone": corev1.LabelTopologyZone}) + karpv1beta1.NormalizedLabels = lo.Assign(karpv1beta1.NormalizedLabels, map[string]string{"topology.ebs.csi.aws.com/zone": corev1.LabelTopologyZone}) + karpv1.NormalizedLabels = lo.Assign(karpv1.NormalizedLabels, map[string]string{"topology.ebs.csi.aws.com/zone": corev1.LabelTopologyZone}) } type Environment struct { diff --git a/pkg/test/nodeclass.go b/pkg/test/nodeclass.go index 13f457bdddea..6b8858832a29 100644 --- a/pkg/test/nodeclass.go +++ b/pkg/test/nodeclass.go @@ -19,17 +19,118 @@ import ( "fmt" "github.com/imdario/mergo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" + karpv1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/test" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" ) -func EC2NodeClass(overrides ...v1beta1.EC2NodeClass) *v1beta1.EC2NodeClass { +func EC2NodeClass(overrides ...v1.EC2NodeClass) *v1.EC2NodeClass { + options := v1.EC2NodeClass{} + for _, override := range overrides { + if err := mergo.Merge(&options, override, mergo.WithOverride); err != nil { + panic(fmt.Sprintf("Failed to merge settings: %s", err)) + } + } + if options.Spec.AMIFamily == nil { + options.Spec.AMIFamily = &v1.AMIFamilyAL2 + options.Status.AMIs = []v1.AMI{ + { + ID: "ami-test1", + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.ArchitectureAmd64}}, + {Key: v1.LabelInstanceGPUCount, Operator: corev1.NodeSelectorOpDoesNotExist}, + {Key: v1.LabelInstanceAcceleratorCount, Operator: corev1.NodeSelectorOpDoesNotExist}, + }, + }, + { + ID: "ami-test2", + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.ArchitectureAmd64}}, + {Key: v1.LabelInstanceGPUCount, Operator: corev1.NodeSelectorOpExists}, + }, + }, + { + ID: "ami-test3", + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.ArchitectureAmd64}}, + {Key: v1.LabelInstanceAcceleratorCount, Operator: corev1.NodeSelectorOpExists}, + }, + }, + { + ID: "ami-test4", + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.ArchitectureArm64}}, + {Key: v1.LabelInstanceGPUCount, Operator: corev1.NodeSelectorOpDoesNotExist}, + {Key: v1.LabelInstanceAcceleratorCount, Operator: corev1.NodeSelectorOpDoesNotExist}, + }, + }, + } + } + if options.Spec.Role == "" { + options.Spec.Role = "test-role" + options.Status.InstanceProfile = "test-profile" + } +
if len(options.Spec.SecurityGroupSelectorTerms) == 0 { + options.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{ + { + Tags: map[string]string{ + "*": "*", + }, + }, + } + options.Status.SecurityGroups = []v1.SecurityGroup{ + { + ID: "sg-test1", + }, + { + ID: "sg-test2", + }, + { + ID: "sg-test3", + }, + } + } + if len(options.Spec.SubnetSelectorTerms) == 0 { + options.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ + { + Tags: map[string]string{ + "*": "*", + }, + }, + } + options.Status.Subnets = []v1.Subnet{ + { + ID: "subnet-test1", + Zone: "test-zone-1a", + ZoneID: "tstz1-1a", + }, + { + ID: "subnet-test2", + Zone: "test-zone-1b", + ZoneID: "tstz1-1b", + }, + { + ID: "subnet-test3", + Zone: "test-zone-1c", + ZoneID: "tstz1-1c", + }, + } + } + return &v1.EC2NodeClass{ + ObjectMeta: test.ObjectMeta(options.ObjectMeta), + Spec: options.Spec, + Status: options.Status, + } +} + +func BetaEC2NodeClass(overrides ...v1beta1.EC2NodeClass) *v1beta1.EC2NodeClass { options := v1beta1.EC2NodeClass{} for _, override := range overrides { if err := mergo.Merge(&options, override, mergo.WithOverride); err != nil { @@ -41,32 +142,32 @@ func EC2NodeClass(overrides ...v1beta1.EC2NodeClass) *v1beta1.EC2NodeClass { options.Status.AMIs = []v1beta1.AMI{ { ID: "ami-test1", - Requirements: []v1.NodeSelectorRequirement{ - {Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.ArchitectureAmd64}}, - {Key: v1beta1.LabelInstanceGPUCount, Operator: v1.NodeSelectorOpDoesNotExist}, - {Key: v1beta1.LabelInstanceAcceleratorCount, Operator: v1.NodeSelectorOpDoesNotExist}, + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1beta1.ArchitectureAmd64}}, + {Key: v1beta1.LabelInstanceGPUCount, Operator: corev1.NodeSelectorOpDoesNotExist}, + {Key: v1beta1.LabelInstanceAcceleratorCount, Operator: corev1.NodeSelectorOpDoesNotExist}, }, }, { ID: "ami-test2", - Requirements: []v1.NodeSelectorRequirement{ - {Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.ArchitectureAmd64}}, - {Key: v1beta1.LabelInstanceGPUCount, Operator: v1.NodeSelectorOpExists}, + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1beta1.ArchitectureAmd64}}, + {Key: v1beta1.LabelInstanceGPUCount, Operator: corev1.NodeSelectorOpExists}, }, }, { ID: "ami-test3", - Requirements: []v1.NodeSelectorRequirement{ - {Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.ArchitectureAmd64}}, - {Key: v1beta1.LabelInstanceAcceleratorCount, Operator: v1.NodeSelectorOpExists}, + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1beta1.ArchitectureAmd64}}, + {Key: v1beta1.LabelInstanceAcceleratorCount, Operator: corev1.NodeSelectorOpExists}, }, }, { ID: "ami-test4", - Requirements: []v1.NodeSelectorRequirement{ - {Key: v1.LabelArchStable, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.ArchitectureArm64}}, - {Key: v1beta1.LabelInstanceGPUCount, Operator: v1.NodeSelectorOpDoesNotExist}, - {Key: v1beta1.LabelInstanceAcceleratorCount, Operator: v1.NodeSelectorOpDoesNotExist}, + Requirements: []corev1.NodeSelectorRequirement{ + {Key: corev1.LabelArchStable, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1beta1.ArchitectureArm64}}, + {Key: v1beta1.LabelInstanceGPUCount, Operator: 
corev1.NodeSelectorOpDoesNotExist}, + {Key: v1beta1.LabelInstanceAcceleratorCount, Operator: corev1.NodeSelectorOpDoesNotExist}, }, }, } @@ -130,8 +231,8 @@ func EC2NodeClass(overrides ...v1beta1.EC2NodeClass) *v1beta1.EC2NodeClass { func EC2NodeClassFieldIndexer(ctx context.Context) func(cache.Cache) error { return func(c cache.Cache) error { - return c.IndexField(ctx, &corev1beta1.NodeClaim{}, "spec.nodeClassRef.name", func(obj client.Object) []string { - nc := obj.(*corev1beta1.NodeClaim) + return c.IndexField(ctx, &karpv1.NodeClaim{}, "spec.nodeClassRef.name", func(obj client.Object) []string { + nc := obj.(*karpv1.NodeClaim) if nc.Spec.NodeClassRef == nil { return []string{""} } diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 2dc741e87e3d..8f062a433930 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -15,6 +15,7 @@ limitations under the License. package utils import ( + "encoding/json" "fmt" "regexp" "strings" @@ -22,6 +23,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/samber/lo" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" + karpv1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" ) var ( @@ -66,3 +71,43 @@ func PrettySlice[T any](s []T, maxItems int) string { } return sb.String() } + +// GetKubletConfigurationWithNodePool uses the most recent version of the kubelet configuration. +// Fields are resolved in the following priority order: +// 1.) the kubelet annotation on the v1 NodePool (present when the user configured kubelet settings through a v1beta1 NodePool at some point) +// 2.) the kubelet configuration on the v1 EC2NodeClass (the user configured kubelet settings directly on the v1 EC2NodeClass) +func GetKubletConfigurationWithNodePool(nodePool *karpv1.NodePool, nodeClass *v1.EC2NodeClass) (*v1.KubeletConfiguration, error) { + if annotation, ok := nodePool.Annotations[karpv1.KubeletCompatabilityAnnotationKey]; ok { + return parseKubeletConfiguration(annotation) + } + return nodeClass.Spec.Kubelet, nil +} + +func GetKubeletConfigurationWithNodeClaim(nodeClaim *karpv1.NodeClaim, nodeClass *v1.EC2NodeClass) (*v1.KubeletConfiguration, error) { + if annotation, ok := nodeClaim.Annotations[karpv1.KubeletCompatabilityAnnotationKey]; ok { + return parseKubeletConfiguration(annotation) + } + return nodeClass.Spec.Kubelet, nil +} + +func parseKubeletConfiguration(annotation string) (*v1.KubeletConfiguration, error) { + kubelet := &karpv1beta1.KubeletConfiguration{} + err := json.Unmarshal([]byte(annotation), kubelet) + if err != nil { + return nil, fmt.Errorf("parsing kubelet config from %s annotation, %w", karpv1.KubeletCompatabilityAnnotationKey, err) + } + return &v1.KubeletConfiguration{ + ClusterDNS: kubelet.ClusterDNS, + MaxPods: kubelet.MaxPods, + PodsPerCore: kubelet.PodsPerCore, + SystemReserved: kubelet.SystemReserved, + KubeReserved: kubelet.KubeReserved, + EvictionSoft: kubelet.EvictionSoft, + EvictionHard: kubelet.EvictionHard, + EvictionSoftGracePeriod: kubelet.EvictionSoftGracePeriod, + EvictionMaxPodGracePeriod: kubelet.EvictionMaxPodGracePeriod, + ImageGCHighThresholdPercent: kubelet.ImageGCHighThresholdPercent, + ImageGCLowThresholdPercent: kubelet.ImageGCLowThresholdPercent, + CPUCFSQuota: kubelet.CPUCFSQuota, + }, nil +} diff --git a/test/pkg/debug/events.go b/test/pkg/debug/events.go index 0c15d48ee194..79d8648830e5 100644 --- a/test/pkg/debug/events.go +++ b/test/pkg/debug/events.go @@ -22,7 +22,7 @@ import ( "github.com/samber/lo" "go.uber.org/multierr" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "sigs.k8s.io/controller-runtime/pkg/client" @@ -49,13 +49,13 @@ func (c *EventClient) DumpEvents(ctx context.Context) error { } func (c *EventClient) dumpPodEvents(ctx context.Context) error { - el := &v1.EventList{} + el := &corev1.EventList{} if err := c.kubeClient.List(ctx, el, &client.ListOptions{ FieldSelector: fields.SelectorFromSet(map[string]string{"involvedObject.kind": "Pod"}), }); err != nil { return err } - events := lo.Filter(filterTestEvents(el.Items, c.start), func(e v1.Event, _ int) bool { + events := lo.Filter(filterTestEvents(el.Items, c.start), func(e corev1.Event, _ int) bool { return e.InvolvedObject.Namespace != "kube-system" }) for k, v := range coallateEvents(events) { @@ -65,7 +65,7 @@ func (c *EventClient) dumpPodEvents(ctx context.Context) error { } func (c *EventClient) dumpNodeEvents(ctx context.Context) error { - el := &v1.EventList{} + el := &corev1.EventList{} if err := c.kubeClient.List(ctx, el, &client.ListOptions{ FieldSelector: fields.SelectorFromSet(map[string]string{"involvedObject.kind": "Node"}), }); err != nil { @@ -77,8 +77,8 @@ func (c *EventClient) dumpNodeEvents(ctx context.Context) error { return nil } -func filterTestEvents(events []v1.Event, startTime time.Time) []v1.Event { - return lo.Filter(events, func(e v1.Event, _ int) bool { +func filterTestEvents(events []corev1.Event, startTime time.Time) []corev1.Event { + return lo.Filter(events, func(e corev1.Event, _ int) bool { if !e.EventTime.IsZero() { if e.EventTime.BeforeTime(&metav1.Time{Time: startTime}) { return false @@ -90,13 +90,13 @@ func filterTestEvents(events []v1.Event, startTime time.Time) []v1.Event { }) } -func coallateEvents(events []v1.Event) map[v1.ObjectReference]*v1.EventList { - eventMap := map[v1.ObjectReference]*v1.EventList{} +func coallateEvents(events []corev1.Event) map[corev1.ObjectReference]*corev1.EventList { + eventMap := map[corev1.ObjectReference]*corev1.EventList{} for i := range events { elem := events[i] - objectKey := v1.ObjectReference{Kind: elem.InvolvedObject.Kind, Namespace: elem.InvolvedObject.Namespace, Name: elem.InvolvedObject.Name} + objectKey := corev1.ObjectReference{Kind: elem.InvolvedObject.Kind, Namespace: elem.InvolvedObject.Namespace, Name: elem.InvolvedObject.Name} if _, ok := eventMap[objectKey]; !ok { - eventMap[objectKey] = &v1.EventList{} + eventMap[objectKey] = &corev1.EventList{} } eventMap[objectKey].Items = append(eventMap[objectKey].Items, elem) } @@ -105,7 +105,7 @@ func coallateEvents(events []v1.Event) map[v1.ObjectReference]*v1.EventList { // Partially copied from // https://github.com/kubernetes/kubernetes/blob/04ee339c7a4d36b4037ce3635993e2a9e395ebf3/staging/src/k8s.io/kubectl/pkg/describe/describe.go#L4232 -func getEventInformation(o v1.ObjectReference, el *v1.EventList) string { +func getEventInformation(o corev1.ObjectReference, el *corev1.EventList) string { sb := strings.Builder{} sb.WriteString(fmt.Sprintf("------- %s/%s%s EVENTS -------\n", strings.ToLower(o.Kind), lo.Ternary(o.Namespace != "", o.Namespace+"/", ""), o.Name)) diff --git a/test/pkg/debug/node.go b/test/pkg/debug/node.go index 1e030f42db10..3549d743945e 100644 --- a/test/pkg/debug/node.go +++ b/test/pkg/debug/node.go @@ -19,7 +19,7 @@ import ( "fmt" "time" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -29,7 +29,7 @@ import ( 
"sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" nodeutils "sigs.k8s.io/karpenter/pkg/utils/node" ) @@ -45,7 +45,7 @@ func NewNodeController(kubeClient client.Client) *NodeController { } func (c *NodeController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - n := &v1.Node{} + n := &corev1.Node{} if err := c.kubeClient.Get(ctx, req.NamespacedName, n); err != nil { if errors.IsNotFound(err) { fmt.Printf("[DELETED %s] NODE %s\n", time.Now().Format(time.RFC3339), req.NamespacedName.String()) @@ -56,25 +56,25 @@ func (c *NodeController) Reconcile(ctx context.Context, req reconcile.Request) ( return reconcile.Result{}, nil } -func (c *NodeController) GetInfo(ctx context.Context, n *v1.Node) string { +func (c *NodeController) GetInfo(ctx context.Context, n *corev1.Node) string { pods, _ := nodeutils.GetPods(ctx, c.kubeClient, n) - return fmt.Sprintf("ready=%s schedulable=%t initialized=%s pods=%d taints=%v", nodeutils.GetCondition(n, v1.NodeReady).Status, !n.Spec.Unschedulable, n.Labels[v1beta1.NodeInitializedLabelKey], len(pods), n.Spec.Taints) + return fmt.Sprintf("ready=%s schedulable=%t initialized=%s pods=%d taints=%v", nodeutils.GetCondition(n, corev1.NodeReady).Status, !n.Spec.Unschedulable, n.Labels[karpv1.NodeInitializedLabelKey], len(pods), n.Spec.Taints) } func (c *NodeController) Register(ctx context.Context, m manager.Manager) error { return controllerruntime.NewControllerManagedBy(m). Named("node"). - For(&v1.Node{}). + For(&corev1.Node{}). WithEventFilter(predicate.And( predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { - oldNode := e.ObjectOld.(*v1.Node) - newNode := e.ObjectNew.(*v1.Node) + oldNode := e.ObjectOld.(*corev1.Node) + newNode := e.ObjectNew.(*corev1.Node) return c.GetInfo(ctx, oldNode) != c.GetInfo(ctx, newNode) }, }, predicate.NewPredicateFuncs(func(o client.Object) bool { - return o.GetLabels()[v1beta1.NodePoolLabelKey] != "" + return o.GetLabels()[karpv1.NodePoolLabelKey] != "" }), )). WithOptions(controller.Options{MaxConcurrentReconciles: 10}). 
diff --git a/test/pkg/debug/nodeclaim.go b/test/pkg/debug/nodeclaim.go index 2e7ec0ca948f..ccbf67db933e 100644 --- a/test/pkg/debug/nodeclaim.go +++ b/test/pkg/debug/nodeclaim.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" ) type NodeClaimController struct { @@ -42,7 +42,7 @@ func NewNodeClaimController(kubeClient client.Client) *NodeClaimController { } func (c *NodeClaimController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - nc := &corev1beta1.NodeClaim{} + nc := &karpv1.NodeClaim{} if err := c.kubeClient.Get(ctx, req.NamespacedName, nc); err != nil { if errors.IsNotFound(err) { fmt.Printf("[DELETED %s] NODECLAIM %s\n", time.Now().Format(time.RFC3339), req.NamespacedName.String()) @@ -53,23 +53,23 @@ func (c *NodeClaimController) Reconcile(ctx context.Context, req reconcile.Reque return reconcile.Result{}, nil } -func (c *NodeClaimController) GetInfo(nc *corev1beta1.NodeClaim) string { +func (c *NodeClaimController) GetInfo(nc *karpv1.NodeClaim) string { return fmt.Sprintf("ready=%t launched=%t registered=%t initialized=%t", nc.StatusConditions().Root().IsTrue(), - nc.StatusConditions().Get(corev1beta1.ConditionTypeLaunched).IsTrue(), - nc.StatusConditions().Get(corev1beta1.ConditionTypeRegistered).IsTrue(), - nc.StatusConditions().Get(corev1beta1.ConditionTypeInitialized).IsTrue(), + nc.StatusConditions().Get(karpv1.ConditionTypeLaunched).IsTrue(), + nc.StatusConditions().Get(karpv1.ConditionTypeRegistered).IsTrue(), + nc.StatusConditions().Get(karpv1.ConditionTypeInitialized).IsTrue(), ) } func (c *NodeClaimController) Register(_ context.Context, m manager.Manager) error { return controllerruntime.NewControllerManagedBy(m). Named("nodeclaim"). - For(&corev1beta1.NodeClaim{}). + For(&karpv1.NodeClaim{}). WithEventFilter(predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { - oldNodeClaim := e.ObjectOld.(*corev1beta1.NodeClaim) - newNodeClaim := e.ObjectNew.(*corev1beta1.NodeClaim) + oldNodeClaim := e.ObjectOld.(*karpv1.NodeClaim) + newNodeClaim := e.ObjectNew.(*karpv1.NodeClaim) return c.GetInfo(oldNodeClaim) != c.GetInfo(newNodeClaim) }, }). 
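Note: the node, nodeclaim, and pod debug controllers touched by this PR all follow one pattern: compute a one-line `GetInfo` summary and gate reconciles behind an update predicate so a line is printed only when the summary changes. A generic sketch of that shared pattern, with illustrative names (the real controllers each hardcode their type rather than using a type parameter):

```go
package example

import (
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// summaryChanged reconciles only when the one-line summary of an object
// differs between old and new, which keeps the debug output free of
// no-op status churn.
func summaryChanged[T client.Object](getInfo func(T) string) predicate.Funcs {
	return predicate.Funcs{
		UpdateFunc: func(e event.UpdateEvent) bool {
			oldObj, okOld := e.ObjectOld.(T)
			newObj, okNew := e.ObjectNew.(T)
			if !okOld || !okNew {
				return false
			}
			return getInfo(oldObj) != getInfo(newObj)
		},
	}
}
```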
diff --git a/test/pkg/debug/pod.go b/test/pkg/debug/pod.go index cd51b6bf9b84..8545b6908f2f 100644 --- a/test/pkg/debug/pod.go +++ b/test/pkg/debug/pod.go @@ -21,7 +21,7 @@ import ( "time" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -45,7 +45,7 @@ func NewPodController(kubeClient client.Client) *PodController { } func (c *PodController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - p := &v1.Pod{} + p := &corev1.Pod{} if err := c.kubeClient.Get(ctx, req.NamespacedName, p); err != nil { if errors.IsNotFound(err) { fmt.Printf("[DELETED %s] POD %s\n", time.Now().Format(time.RFC3339), req.NamespacedName.String()) @@ -56,7 +56,7 @@ func (c *PodController) Reconcile(ctx context.Context, req reconcile.Request) (r return reconcile.Result{}, nil } -func (c *PodController) GetInfo(p *v1.Pod) string { +func (c *PodController) GetInfo(p *corev1.Pod) string { var containerInfo strings.Builder for _, c := range p.Status.ContainerStatuses { if containerInfo.Len() > 0 { @@ -71,12 +71,12 @@ func (c *PodController) GetInfo(p *v1.Pod) string { func (c *PodController) Register(_ context.Context, m manager.Manager) error { return controllerruntime.NewControllerManagedBy(m). Named("pod"). - For(&v1.Pod{}). + For(&corev1.Pod{}). WithEventFilter(predicate.And( predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { - oldPod := e.ObjectOld.(*v1.Pod) - newPod := e.ObjectNew.(*v1.Pod) + oldPod := e.ObjectOld.(*corev1.Pod) + newPod := e.ObjectNew.(*corev1.Pod) return c.GetInfo(oldPod) != c.GetInfo(newPod) }, }, diff --git a/test/pkg/environment/aws/environment.go b/test/pkg/environment/aws/environment.go index e882762994fc..64d307bfb58e 100644 --- a/test/pkg/environment/aws/environment.go +++ b/test/pkg/environment/aws/environment.go @@ -40,16 +40,16 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/utils/env" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/sqs" "github.com/aws/karpenter-provider-aws/pkg/test" "github.com/aws/karpenter-provider-aws/test/pkg/environment/common" ) func init() { - corev1beta1.NormalizedLabels = lo.Assign(corev1beta1.NormalizedLabels, map[string]string{"topology.ebs.csi.aws.com/zone": corev1.LabelTopologyZone}) + karpv1.NormalizedLabels = lo.Assign(karpv1.NormalizedLabels, map[string]string{"topology.ebs.csi.aws.com/zone": corev1.LabelTopologyZone}) } var WindowsDefaultImage = "mcr.microsoft.com/oss/kubernetes/pause:3.9" @@ -141,18 +141,18 @@ func GetTimeStreamAPI(session *session.Session) timestreamwriteiface.TimestreamW return &NoOpTimeStreamAPI{} } -func (env *Environment) DefaultEC2NodeClass() *v1beta1.EC2NodeClass { +func (env *Environment) DefaultEC2NodeClass() *v1.EC2NodeClass { nodeClass := test.EC2NodeClass() - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2023 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2023 nodeClass.Spec.Tags = map[string]string{ "testing/cluster": env.ClusterName, } - nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{ + nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"karpenter.sh/discovery": env.ClusterName}, }, } - nodeClass.Spec.SubnetSelectorTerms 
= []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { Tags: map[string]string{"karpenter.sh/discovery": env.ClusterName}, }, diff --git a/test/pkg/environment/aws/expectations.go b/test/pkg/environment/aws/expectations.go index f87261716841..b40a4c4ae545 100644 --- a/test/pkg/environment/aws/expectations.go +++ b/test/pkg/environment/aws/expectations.go @@ -36,7 +36,7 @@ import ( coretest "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" awserrors "github.com/aws/karpenter-provider-aws/pkg/errors" . "github.com/onsi/ginkgo/v2" @@ -137,7 +137,7 @@ func (env *Environment) EventuallyExpectInstanceProfileExists(profileName string // GetInstanceProfileName gets the string for the profile name based on the cluster name, region and the NodeClass name. // The length of this string can never exceed the maximum instance profile name limit of 128 characters. -func (env *Environment) GetInstanceProfileName(nodeClass *v1beta1.EC2NodeClass) string { +func (env *Environment) GetInstanceProfileName(nodeClass *v1.EC2NodeClass) string { return fmt.Sprintf("%s_%d", env.ClusterName, lo.Must(hashstructure.Hash(fmt.Sprintf("%s%s", env.Region, nodeClass.Name), hashstructure.FormatV2, nil))) } diff --git a/test/pkg/environment/aws/setup.go b/test/pkg/environment/aws/setup.go index 99743b6cb7eb..92deae58ad25 100644 --- a/test/pkg/environment/aws/setup.go +++ b/test/pkg/environment/aws/setup.go @@ -15,17 +15,17 @@ limitations under the License. package aws import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" ) -var persistedSettings []v1.EnvVar +var persistedSettings []corev1.EnvVar var ( CleanableObjects = []client.Object{ - &v1beta1.EC2NodeClass{}, + &v1.EC2NodeClass{}, } ) diff --git a/test/pkg/environment/common/environment.go b/test/pkg/environment/common/environment.go index af2fbc9e0533..c7bbf4ad60e5 100644 --- a/test/pkg/environment/common/environment.go +++ b/test/pkg/environment/common/environment.go @@ -26,7 +26,7 @@ import ( "github.com/awslabs/operatorpkg/object" "github.com/onsi/gomega" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" @@ -39,11 +39,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/operator" coretest "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" ) type ContextKey string @@ -101,22 +101,22 @@ func NewConfig() *rest.Config { func NewClient(ctx context.Context, config *rest.Config) client.Client { cache := lo.Must(cache.New(config, cache.Options{Scheme: scheme.Scheme})) - lo.Must0(cache.IndexField(ctx, &v1.Pod{}, "spec.nodeName", func(o client.Object) []string { - pod := o.(*v1.Pod) + lo.Must0(cache.IndexField(ctx, &corev1.Pod{}, "spec.nodeName", func(o client.Object) []string { + pod := o.(*corev1.Pod) return []string{pod.Spec.NodeName} })) - lo.Must0(cache.IndexField(ctx, &v1.Event{}, "involvedObject.kind", func(o client.Object) []string { - evt := o.(*v1.Event) + 
lo.Must0(cache.IndexField(ctx, &corev1.Event{}, "involvedObject.kind", func(o client.Object) []string { + evt := o.(*corev1.Event) return []string{evt.InvolvedObject.Kind} })) - lo.Must0(cache.IndexField(ctx, &v1.Node{}, "spec.unschedulable", func(o client.Object) []string { - node := o.(*v1.Node) + lo.Must0(cache.IndexField(ctx, &corev1.Node{}, "spec.unschedulable", func(o client.Object) []string { + node := o.(*corev1.Node) return []string{strconv.FormatBool(node.Spec.Unschedulable)} })) - lo.Must0(cache.IndexField(ctx, &v1.Node{}, "spec.taints[*].karpenter.sh/disruption", func(o client.Object) []string { - node := o.(*v1.Node) - t, _ := lo.Find(node.Spec.Taints, func(t v1.Taint) bool { - return t.Key == corev1beta1.DisruptionTaintKey + lo.Must0(cache.IndexField(ctx, &corev1.Node{}, "spec.taints[*].karpenter.sh/disruption", func(o client.Object) []string { + node := o.(*corev1.Node) + t, _ := lo.Find(node.Spec.Taints, func(t corev1.Taint) bool { + return t.Key == karpv1.DisruptionTaintKey }) return []string{t.Value} })) @@ -132,56 +132,56 @@ func NewClient(ctx context.Context, config *rest.Config) client.Client { return c } -func (env *Environment) DefaultNodePool(nodeClass *v1beta1.EC2NodeClass) *corev1beta1.NodePool { +func (env *Environment) DefaultNodePool(nodeClass *v1.EC2NodeClass) *karpv1.NodePool { nodePool := coretest.NodePool() - nodePool.Spec.Template.Spec.NodeClassRef = &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + nodePool.Spec.Template.Spec.NodeClassRef = &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, } - nodePool.Spec.Template.Spec.Requirements = []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodePool.Spec.Template.Spec.Requirements = []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelOSStable, - Operator: v1.NodeSelectorOpIn, - Values: []string{string(v1.Linux)}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelOSStable, + Operator: corev1.NodeSelectorOpIn, + Values: []string{string(corev1.Linux)}, }, }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeOnDemand}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeOnDemand}, }, }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceCategory, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceCategory, + Operator: corev1.NodeSelectorOpIn, Values: []string{"c", "m", "r"}, }, }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceGeneration, - Operator: v1.NodeSelectorOpGt, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceGeneration, + Operator: corev1.NodeSelectorOpGt, Values: []string{"2"}, }, }, // Filter out a1 instance types, which are incompatible with AL2023 AMIs { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceFamily, - Operator: v1.NodeSelectorOpNotIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceFamily, + Operator: corev1.NodeSelectorOpNotIn, Values: 
[]string{"a1"}, }, }, } - nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{} + nodePool.Spec.Disruption.ConsolidateAfter = &karpv1.NillableDuration{} nodePool.Spec.Disruption.ExpireAfter.Duration = nil - nodePool.Spec.Limits = corev1beta1.Limits(v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("1000"), - v1.ResourceMemory: resource.MustParse("1000Gi"), + nodePool.Spec.Limits = karpv1.Limits(corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1000"), + corev1.ResourceMemory: resource.MustParse("1000Gi"), }) return nodePool } diff --git a/test/pkg/environment/common/expectations.go b/test/pkg/environment/common/expectations.go index 136047a35d3f..48fd37325bc1 100644 --- a/test/pkg/environment/common/expectations.go +++ b/test/pkg/environment/common/expectations.go @@ -28,7 +28,7 @@ import ( "github.com/samber/lo" appsv1 "k8s.io/api/apps/v1" coordinationv1 "k8s.io/api/coordination/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,7 +40,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/log" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" pscheduling "sigs.k8s.io/karpenter/pkg/controllers/provisioning/scheduling" "sigs.k8s.io/karpenter/pkg/scheduling" "sigs.k8s.io/karpenter/pkg/test" @@ -112,18 +112,18 @@ func (env *Environment) ExpectCreatedOrUpdated(objects ...client.Object) { } } -func (env *Environment) ExpectSettings() (res []v1.EnvVar) { +func (env *Environment) ExpectSettings() (res []corev1.EnvVar) { GinkgoHelper() d := &appsv1.Deployment{} Expect(env.Client.Get(env.Context, types.NamespacedName{Namespace: "kube-system", Name: "karpenter"}, d)).To(Succeed()) Expect(d.Spec.Template.Spec.Containers).To(HaveLen(1)) - return lo.Map(d.Spec.Template.Spec.Containers[0].Env, func(v v1.EnvVar, _ int) v1.EnvVar { + return lo.Map(d.Spec.Template.Spec.Containers[0].Env, func(v corev1.EnvVar, _ int) corev1.EnvVar { return *v.DeepCopy() }) } -func (env *Environment) ExpectSettingsReplaced(vars ...v1.EnvVar) { +func (env *Environment) ExpectSettingsReplaced(vars ...corev1.EnvVar) { GinkgoHelper() d := &appsv1.Deployment{} @@ -140,7 +140,7 @@ func (env *Environment) ExpectSettingsReplaced(vars ...v1.EnvVar) { } } -func (env *Environment) ExpectSettingsOverridden(vars ...v1.EnvVar) { +func (env *Environment) ExpectSettingsOverridden(vars ...corev1.EnvVar) { GinkgoHelper() d := &appsv1.Deployment{} @@ -149,7 +149,7 @@ func (env *Environment) ExpectSettingsOverridden(vars ...v1.EnvVar) { stored := d.DeepCopy() for _, v := range vars { - if _, i, ok := lo.FindIndexOf(d.Spec.Template.Spec.Containers[0].Env, func(e v1.EnvVar) bool { + if _, i, ok := lo.FindIndexOf(d.Spec.Template.Spec.Containers[0].Env, func(e corev1.EnvVar) bool { return e.Name == v.Name }); ok { d.Spec.Template.Spec.Containers[0].Env[i] = v @@ -164,17 +164,17 @@ func (env *Environment) ExpectSettingsOverridden(vars ...v1.EnvVar) { } } -func (env *Environment) ExpectSettingsRemoved(vars ...v1.EnvVar) { +func (env *Environment) ExpectSettingsRemoved(vars ...corev1.EnvVar) { GinkgoHelper() - varNames := sets.New[string](lo.Map(vars, func(v v1.EnvVar, _ int) string { return v.Name })...) + varNames := sets.New(lo.Map(vars, func(v corev1.EnvVar, _ int) string { return v.Name })...) 
d := &appsv1.Deployment{} Expect(env.Client.Get(env.Context, types.NamespacedName{Namespace: "kube-system", Name: "karpenter"}, d)).To(Succeed()) Expect(d.Spec.Template.Spec.Containers).To(HaveLen(1)) stored := d.DeepCopy() - d.Spec.Template.Spec.Containers[0].Env = lo.Reject(d.Spec.Template.Spec.Containers[0].Env, func(v v1.EnvVar, _ int) bool { + d.Spec.Template.Spec.Containers[0].Env = lo.Reject(d.Spec.Template.Spec.Containers[0].Env, func(v corev1.EnvVar, _ int) bool { return varNames.Has(v.Name) }) if !equality.Semantic.DeepEqual(d, stored) { @@ -184,16 +184,16 @@ func (env *Environment) ExpectSettingsRemoved(vars ...v1.EnvVar) { } } -func (env *Environment) ExpectConfigMapExists(key types.NamespacedName) *v1.ConfigMap { +func (env *Environment) ExpectConfigMapExists(key types.NamespacedName) *corev1.ConfigMap { GinkgoHelper() - cm := &v1.ConfigMap{} + cm := &corev1.ConfigMap{} Expect(env.Client.Get(env, key, cm)).To(Succeed()) return cm } func (env *Environment) ExpectConfigMapDataReplaced(key types.NamespacedName, data ...map[string]string) (changed bool) { GinkgoHelper() - cm := &v1.ConfigMap{ + cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: key.Name, Namespace: key.Namespace, @@ -216,7 +216,7 @@ func (env *Environment) ExpectConfigMapDataReplaced(key types.NamespacedName, da func (env *Environment) ExpectConfigMapDataOverridden(key types.NamespacedName, data ...map[string]string) (changed bool) { GinkgoHelper() - cm := &v1.ConfigMap{ + cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: key.Name, Namespace: key.Namespace, @@ -264,7 +264,7 @@ func (env *Environment) ExpectExists(obj client.Object) client.Object { return obj } -func (env *Environment) EventuallyExpectBound(pods ...*v1.Pod) { +func (env *Environment) EventuallyExpectBound(pods ...*corev1.Pod) { GinkgoHelper() Eventually(func(g Gomega) { for _, pod := range pods { @@ -274,19 +274,19 @@ func (env *Environment) EventuallyExpectBound(pods ...*v1.Pod) { }).Should(Succeed()) } -func (env *Environment) EventuallyExpectHealthy(pods ...*v1.Pod) { +func (env *Environment) EventuallyExpectHealthy(pods ...*corev1.Pod) { GinkgoHelper() env.EventuallyExpectHealthyWithTimeout(-1, pods...) 
} -func (env *Environment) EventuallyExpectHealthyWithTimeout(timeout time.Duration, pods ...*v1.Pod) { +func (env *Environment) EventuallyExpectHealthyWithTimeout(timeout time.Duration, pods ...*corev1.Pod) { GinkgoHelper() Eventually(func(g Gomega) { for _, pod := range pods { g.Expect(env.Client.Get(env, client.ObjectKeyFromObject(pod), pod)).To(Succeed()) g.Expect(pod.Status.Conditions).To(ContainElement(And( - HaveField("Type", Equal(v1.PodReady)), - HaveField("Status", Equal(v1.ConditionTrue)), + HaveField("Type", Equal(corev1.PodReady)), + HaveField("Status", Equal(corev1.ConditionTrue)), ))) } }).WithTimeout(timeout).Should(Succeed()) @@ -307,7 +307,7 @@ func (env *Environment) ExpectKarpenterLeaseOwnerChanged() { pods := env.ExpectKarpenterPods() Eventually(func(g Gomega) { name := env.ExpectActiveKarpenterPodName() - g.Expect(lo.ContainsBy(pods, func(p *v1.Pod) bool { + g.Expect(lo.ContainsBy(pods, func(p *corev1.Pod) bool { return p.Name == name })).To(BeTrue()) }).Should(Succeed()) @@ -328,29 +328,29 @@ func (env *Environment) EventuallyExpectRollout(name, namespace string) { By("waiting for the newly generated deployment to rollout") Eventually(func(g Gomega) { - podList := &v1.PodList{} + podList := &corev1.PodList{} g.Expect(env.Client.List(env.Context, podList, client.InNamespace(namespace))).To(Succeed()) - pods := lo.Filter(podList.Items, func(p v1.Pod, _ int) bool { + pods := lo.Filter(podList.Items, func(p corev1.Pod, _ int) bool { return p.Annotations["kubectl.kubernetes.io/restartedAt"] == restartedAtAnnotation["kubectl.kubernetes.io/restartedAt"] }) g.Expect(len(pods)).To(BeNumerically("==", lo.FromPtr(deploy.Spec.Replicas))) for _, pod := range pods { g.Expect(pod.Status.Conditions).To(ContainElement(And( - HaveField("Type", Equal(v1.PodReady)), - HaveField("Status", Equal(v1.ConditionTrue)), + HaveField("Type", Equal(corev1.PodReady)), + HaveField("Status", Equal(corev1.ConditionTrue)), ))) - g.Expect(pod.Status.Phase).To(Equal(v1.PodRunning)) + g.Expect(pod.Status.Phase).To(Equal(corev1.PodRunning)) } }).Should(Succeed()) } -func (env *Environment) ExpectKarpenterPods() []*v1.Pod { +func (env *Environment) ExpectKarpenterPods() []*corev1.Pod { GinkgoHelper() - podList := &v1.PodList{} + podList := &corev1.PodList{} Expect(env.Client.List(env.Context, podList, client.MatchingLabels{ "app.kubernetes.io/instance": "karpenter", })).To(Succeed()) - return lo.Map(podList.Items, func(p v1.Pod, _ int) *v1.Pod { return &p }) + return lo.Map(podList.Items, func(p corev1.Pod, _ int) *corev1.Pod { return &p }) } func (env *Environment) ExpectActiveKarpenterPodName() string { @@ -365,11 +365,11 @@ func (env *Environment) ExpectActiveKarpenterPodName() string { return holderArr[0] } -func (env *Environment) ExpectActiveKarpenterPod() *v1.Pod { +func (env *Environment) ExpectActiveKarpenterPod() *corev1.Pod { GinkgoHelper() podName := env.ExpectActiveKarpenterPodName() - pod := &v1.Pod{} + pod := &corev1.Pod{} Expect(env.Client.Get(env.Context, types.NamespacedName{Name: podName, Namespace: "kube-system"}, pod)).To(Succeed()) return pod } @@ -381,12 +381,12 @@ func (env *Environment) EventuallyExpectPendingPodCount(selector labels.Selector }).Should(Succeed()) } -func (env *Environment) EventuallyExpectBoundPodCount(selector labels.Selector, numPods int) []*v1.Pod { +func (env *Environment) EventuallyExpectBoundPodCount(selector labels.Selector, numPods int) []*corev1.Pod { GinkgoHelper() - var res []*v1.Pod + var res []*corev1.Pod Eventually(func(g Gomega) { - res = []*v1.Pod{} 
- podList := &v1.PodList{} + res = []*corev1.Pod{} + podList := &corev1.PodList{} g.Expect(env.Client.List(env.Context, podList, client.MatchingLabelsSelector{Selector: selector})).To(Succeed()) for i := range podList.Items { if podList.Items[i].Spec.NodeName != "" { @@ -398,15 +398,15 @@ func (env *Environment) EventuallyExpectBoundPodCount(selector labels.Selector, return res } -func (env *Environment) EventuallyExpectHealthyPodCount(selector labels.Selector, numPods int) []*v1.Pod { +func (env *Environment) EventuallyExpectHealthyPodCount(selector labels.Selector, numPods int) []*corev1.Pod { By(fmt.Sprintf("waiting for %d pods matching selector %s to be ready", numPods, selector.String())) GinkgoHelper() return env.EventuallyExpectHealthyPodCountWithTimeout(-1, selector, numPods) } -func (env *Environment) EventuallyExpectHealthyPodCountWithTimeout(timeout time.Duration, selector labels.Selector, numPods int) []*v1.Pod { +func (env *Environment) EventuallyExpectHealthyPodCountWithTimeout(timeout time.Duration, selector labels.Selector, numPods int) []*corev1.Pod { GinkgoHelper() - var pods []*v1.Pod + var pods []*corev1.Pod Eventually(func(g Gomega) { pods = env.Monitor.RunningPods(selector) g.Expect(pods).To(HaveLen(numPods)) @@ -414,10 +414,10 @@ func (env *Environment) EventuallyExpectHealthyPodCountWithTimeout(timeout time. return pods } -func (env *Environment) ExpectPodsMatchingSelector(selector labels.Selector) []*v1.Pod { +func (env *Environment) ExpectPodsMatchingSelector(selector labels.Selector) []*corev1.Pod { GinkgoHelper() - podList := &v1.PodList{} + podList := &corev1.PodList{} Expect(env.Client.List(env.Context, podList, client.MatchingLabelsSelector{Selector: selector})).To(Succeed()) return lo.ToSlicePtr(podList.Items) } @@ -457,7 +457,7 @@ func (env *Environment) EventuallyExpectNotFoundAssertion(objects ...client.Obje }) } -func (env *Environment) ExpectCreatedNodeCount(comparator string, count int) []*v1.Node { +func (env *Environment) ExpectCreatedNodeCount(comparator string, count int) []*corev1.Node { GinkgoHelper() createdNodes := env.Monitor.CreatedNodes() Expect(len(createdNodes)).To(BeNumerically(comparator, count), @@ -468,7 +468,7 @@ func (env *Environment) ExpectCreatedNodeCount(comparator string, count int) []* func (env *Environment) ExpectNodeCount(comparator string, count int) { GinkgoHelper() - nodeList := &v1.NodeList{} + nodeList := &corev1.NodeList{} Expect(env.Client.List(env, nodeList, client.HasLabels{test.DiscoveryLabel})).To(Succeed()) Expect(len(nodeList.Items)).To(BeNumerically(comparator, count)) } @@ -476,27 +476,27 @@ func (env *Environment) ExpectNodeCount(comparator string, count int) { func (env *Environment) ExpectNodeClaimCount(comparator string, count int) { GinkgoHelper() - nodeClaimList := &corev1beta1.NodeClaimList{} + nodeClaimList := &karpv1.NodeClaimList{} Expect(env.Client.List(env, nodeClaimList, client.HasLabels{test.DiscoveryLabel})).To(Succeed()) Expect(len(nodeClaimList.Items)).To(BeNumerically(comparator, count)) } -func NodeClaimNames(nodeClaims []*corev1beta1.NodeClaim) []string { - return lo.Map(nodeClaims, func(n *corev1beta1.NodeClaim, index int) string { +func NodeClaimNames(nodeClaims []*karpv1.NodeClaim) []string { + return lo.Map(nodeClaims, func(n *karpv1.NodeClaim, index int) string { return n.Name }) } -func NodeNames(nodes []*v1.Node) []string { - return lo.Map(nodes, func(n *v1.Node, index int) string { +func NodeNames(nodes []*corev1.Node) []string { + return lo.Map(nodes, func(n *corev1.Node, index 
int) string { return n.Name }) } -func (env *Environment) ConsistentlyExpectNodeCount(comparator string, count int, duration time.Duration) []*v1.Node { +func (env *Environment) ConsistentlyExpectNodeCount(comparator string, count int, duration time.Duration) []*corev1.Node { GinkgoHelper() By(fmt.Sprintf("expecting nodes to be %s to %d for %s", comparator, count, duration)) - nodeList := &v1.NodeList{} + nodeList := &corev1.NodeList{} Consistently(func(g Gomega) { g.Expect(env.Client.List(env, nodeList, client.HasLabels{test.DiscoveryLabel})).To(Succeed()) g.Expect(len(nodeList.Items)).To(BeNumerically(comparator, count), @@ -505,28 +505,28 @@ func (env *Environment) ConsistentlyExpectNodeCount(comparator string, count int return lo.ToSlicePtr(nodeList.Items) } -func (env *Environment) ConsistentlyExpectNoDisruptions(nodeCount int, duration time.Duration) (taintedNodes []*v1.Node) { +func (env *Environment) ConsistentlyExpectNoDisruptions(nodeCount int, duration time.Duration) (taintedNodes []*corev1.Node) { GinkgoHelper() return env.ConsistentlyExpectDisruptionsWithNodeCount(0, nodeCount, duration) } // ConsistentlyExpectDisruptionsWithNodeCount will continually ensure that there are exactly disruptingNodes with totalNodes (including replacements and existing nodes) -func (env *Environment) ConsistentlyExpectDisruptionsWithNodeCount(disruptingNodes, totalNodes int, duration time.Duration) (taintedNodes []*v1.Node) { +func (env *Environment) ConsistentlyExpectDisruptionsWithNodeCount(disruptingNodes, totalNodes int, duration time.Duration) (taintedNodes []*corev1.Node) { GinkgoHelper() - nodes := []v1.Node{} + nodes := []corev1.Node{} Consistently(func(g Gomega) { // Ensure we don't change our NodeClaims - nodeClaimList := &corev1beta1.NodeClaimList{} + nodeClaimList := &karpv1.NodeClaimList{} g.Expect(env.Client.List(env, nodeClaimList, client.HasLabels{test.DiscoveryLabel})).To(Succeed()) g.Expect(nodeClaimList.Items).To(HaveLen(totalNodes)) - nodeList := &v1.NodeList{} + nodeList := &corev1.NodeList{} g.Expect(env.Client.List(env, nodeList, client.HasLabels{test.DiscoveryLabel})).To(Succeed()) g.Expect(nodeList.Items).To(HaveLen(totalNodes)) - nodes = lo.Filter(nodeList.Items, func(n v1.Node, _ int) bool { - _, ok := lo.Find(n.Spec.Taints, func(t v1.Taint) bool { - return corev1beta1.IsDisruptingTaint(t) + nodes = lo.Filter(nodeList.Items, func(n corev1.Node, _ int) bool { + _, ok := lo.Find(n.Spec.Taints, func(t corev1.Taint) bool { + return karpv1.IsDisruptingTaint(t) }) return ok }) @@ -535,10 +535,10 @@ func (env *Environment) ConsistentlyExpectDisruptionsWithNodeCount(disruptingNod return lo.ToSlicePtr(nodes) } -func (env *Environment) EventuallyExpectTaintedNodeCount(comparator string, count int) []*v1.Node { +func (env *Environment) EventuallyExpectTaintedNodeCount(comparator string, count int) []*corev1.Node { GinkgoHelper() By(fmt.Sprintf("waiting for tainted nodes to be %s to %d", comparator, count)) - nodeList := &v1.NodeList{} + nodeList := &corev1.NodeList{} Eventually(func(g Gomega) { g.Expect(env.Client.List(env, nodeList, client.MatchingFields{"spec.taints[*].karpenter.sh/disruption": "disrupting"})).To(Succeed()) g.Expect(len(nodeList.Items)).To(BeNumerically(comparator, count), @@ -547,21 +547,21 @@ func (env *Environment) EventuallyExpectTaintedNodeCount(comparator string, coun return lo.ToSlicePtr(nodeList.Items) } -func (env *Environment) EventuallyExpectNodesUntaintedWithTimeout(timeout time.Duration, nodes ...*v1.Node) { +func (env *Environment) 
EventuallyExpectNodesUntaintedWithTimeout(timeout time.Duration, nodes ...*corev1.Node) { GinkgoHelper() By(fmt.Sprintf("waiting for %d nodes to be untainted", len(nodes))) - nodeList := &v1.NodeList{} + nodeList := &corev1.NodeList{} Eventually(func(g Gomega) { g.Expect(env.Client.List(env, nodeList, client.MatchingFields{"spec.taints[*].karpenter.sh/disruption": "disrupting"})).To(Succeed()) - taintedNodeNames := lo.Map(nodeList.Items, func(n v1.Node, _ int) string { return n.Name }) - g.Expect(taintedNodeNames).ToNot(ContainElements(lo.Map(nodes, func(n *v1.Node, _ int) interface{} { return n.Name })...)) + taintedNodeNames := lo.Map(nodeList.Items, func(n corev1.Node, _ int) string { return n.Name }) + g.Expect(taintedNodeNames).ToNot(ContainElements(lo.Map(nodes, func(n *corev1.Node, _ int) interface{} { return n.Name })...)) }).WithTimeout(timeout).Should(Succeed()) } -func (env *Environment) EventuallyExpectNodeClaimCount(comparator string, count int) []*corev1beta1.NodeClaim { +func (env *Environment) EventuallyExpectNodeClaimCount(comparator string, count int) []*karpv1.NodeClaim { GinkgoHelper() By(fmt.Sprintf("waiting for nodes to be %s to %d", comparator, count)) - nodeClaimList := &corev1beta1.NodeClaimList{} + nodeClaimList := &karpv1.NodeClaimList{} Eventually(func(g Gomega) { g.Expect(env.Client.List(env, nodeClaimList, client.HasLabels{test.DiscoveryLabel})).To(Succeed()) g.Expect(len(nodeClaimList.Items)).To(BeNumerically(comparator, count), @@ -570,10 +570,10 @@ func (env *Environment) EventuallyExpectNodeClaimCount(comparator string, count return lo.ToSlicePtr(nodeClaimList.Items) } -func (env *Environment) EventuallyExpectNodeCount(comparator string, count int) []*v1.Node { +func (env *Environment) EventuallyExpectNodeCount(comparator string, count int) []*corev1.Node { GinkgoHelper() By(fmt.Sprintf("waiting for nodes to be %s to %d", comparator, count)) - nodeList := &v1.NodeList{} + nodeList := &corev1.NodeList{} Eventually(func(g Gomega) { g.Expect(env.Client.List(env, nodeList, client.HasLabels{test.DiscoveryLabel})).To(Succeed()) g.Expect(len(nodeList.Items)).To(BeNumerically(comparator, count), @@ -582,10 +582,10 @@ func (env *Environment) EventuallyExpectNodeCount(comparator string, count int) return lo.ToSlicePtr(nodeList.Items) } -func (env *Environment) EventuallyExpectNodeCountWithSelector(comparator string, count int, selector labels.Selector) []*v1.Node { +func (env *Environment) EventuallyExpectNodeCountWithSelector(comparator string, count int, selector labels.Selector) []*corev1.Node { GinkgoHelper() By(fmt.Sprintf("waiting for nodes with selector %v to be %s to %d", selector, comparator, count)) - nodeList := &v1.NodeList{} + nodeList := &corev1.NodeList{} Eventually(func(g Gomega) { g.Expect(env.Client.List(env, nodeList, client.HasLabels{test.DiscoveryLabel}, client.MatchingLabelsSelector{Selector: selector})).To(Succeed()) g.Expect(len(nodeList.Items)).To(BeNumerically(comparator, count), @@ -594,10 +594,10 @@ func (env *Environment) EventuallyExpectNodeCountWithSelector(comparator string, return lo.ToSlicePtr(nodeList.Items) } -func (env *Environment) EventuallyExpectCreatedNodeCount(comparator string, count int) []*v1.Node { +func (env *Environment) EventuallyExpectCreatedNodeCount(comparator string, count int) []*corev1.Node { GinkgoHelper() By(fmt.Sprintf("waiting for created nodes to be %s to %d", comparator, count)) - var createdNodes []*v1.Node + var createdNodes []*corev1.Node Eventually(func(g Gomega) { createdNodes = 
env.Monitor.CreatedNodes() g.Expect(len(createdNodes)).To(BeNumerically(comparator, count), @@ -606,10 +606,10 @@ func (env *Environment) EventuallyExpectCreatedNodeCount(comparator string, coun return createdNodes } -func (env *Environment) EventuallyExpectDeletedNodeCount(comparator string, count int) []*v1.Node { +func (env *Environment) EventuallyExpectDeletedNodeCount(comparator string, count int) []*corev1.Node { GinkgoHelper() By(fmt.Sprintf("waiting for deleted nodes to be %s to %d", comparator, count)) - var deletedNodes []*v1.Node + var deletedNodes []*corev1.Node Eventually(func(g Gomega) { deletedNodes = env.Monitor.DeletedNodes() g.Expect(len(deletedNodes)).To(BeNumerically(comparator, count), @@ -618,13 +618,13 @@ func (env *Environment) EventuallyExpectDeletedNodeCount(comparator string, coun return deletedNodes } -func (env *Environment) EventuallyExpectDeletedNodeCountWithSelector(comparator string, count int, selector labels.Selector) []*v1.Node { +func (env *Environment) EventuallyExpectDeletedNodeCountWithSelector(comparator string, count int, selector labels.Selector) []*corev1.Node { GinkgoHelper() By(fmt.Sprintf("waiting for deleted nodes with selector %v to be %s to %d", selector, comparator, count)) - var deletedNodes []*v1.Node + var deletedNodes []*corev1.Node Eventually(func(g Gomega) { deletedNodes = env.Monitor.DeletedNodes() - deletedNodes = lo.Filter(deletedNodes, func(n *v1.Node, _ int) bool { + deletedNodes = lo.Filter(deletedNodes, func(n *corev1.Node, _ int) bool { return selector.Matches(labels.Set(n.Labels)) }) g.Expect(len(deletedNodes)).To(BeNumerically(comparator, count), @@ -633,79 +633,79 @@ func (env *Environment) EventuallyExpectDeletedNodeCountWithSelector(comparator return deletedNodes } -func (env *Environment) EventuallyExpectInitializedNodeCount(comparator string, count int) []*v1.Node { +func (env *Environment) EventuallyExpectInitializedNodeCount(comparator string, count int) []*corev1.Node { GinkgoHelper() By(fmt.Sprintf("waiting for initialized nodes to be %s to %d", comparator, count)) - var nodes []*v1.Node + var nodes []*corev1.Node Eventually(func(g Gomega) { nodes = env.Monitor.CreatedNodes() - nodes = lo.Filter(nodes, func(n *v1.Node, _ int) bool { - return n.Labels[corev1beta1.NodeInitializedLabelKey] == "true" + nodes = lo.Filter(nodes, func(n *corev1.Node, _ int) bool { + return n.Labels[karpv1.NodeInitializedLabelKey] == "true" }) g.Expect(len(nodes)).To(BeNumerically(comparator, count)) }).Should(Succeed()) return nodes } -func (env *Environment) EventuallyExpectCreatedNodeClaimCount(comparator string, count int) []*corev1beta1.NodeClaim { +func (env *Environment) EventuallyExpectCreatedNodeClaimCount(comparator string, count int) []*karpv1.NodeClaim { GinkgoHelper() By(fmt.Sprintf("waiting for created nodeclaims to be %s to %d", comparator, count)) - nodeClaimList := &corev1beta1.NodeClaimList{} + nodeClaimList := &karpv1.NodeClaimList{} Eventually(func(g Gomega) { g.Expect(env.Client.List(env.Context, nodeClaimList)).To(Succeed()) g.Expect(len(nodeClaimList.Items)).To(BeNumerically(comparator, count)) }).Should(Succeed()) - return lo.Map(nodeClaimList.Items, func(nc corev1beta1.NodeClaim, _ int) *corev1beta1.NodeClaim { + return lo.Map(nodeClaimList.Items, func(nc karpv1.NodeClaim, _ int) *karpv1.NodeClaim { return &nc }) } -func (env *Environment) EventuallyExpectNodeClaimsReady(nodeClaims ...*corev1beta1.NodeClaim) { +func (env *Environment) EventuallyExpectNodeClaimsReady(nodeClaims ...*karpv1.NodeClaim) { GinkgoHelper() 
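// Editor's aside (not part of the patch): the disruption helpers above filter
// nodes by Karpenter's disruption taint. A minimal sketch of that pattern under
// the aliases this diff standardizes on (corev1 = k8s.io/api/core/v1,
// karpv1 = sigs.k8s.io/karpenter/pkg/apis/v1); the function name is hypothetical.
package sketch

import (
	"github.com/samber/lo"
	corev1 "k8s.io/api/core/v1"
	karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
)

// disruptingOnly keeps only the nodes that currently carry a disruption taint.
func disruptingOnly(nodes []corev1.Node) []corev1.Node {
	return lo.Filter(nodes, func(n corev1.Node, _ int) bool {
		_, ok := lo.Find(n.Spec.Taints, func(t corev1.Taint) bool {
			return karpv1.IsDisruptingTaint(t)
		})
		return ok
	})
}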
Eventually(func(g Gomega) { for _, nc := range nodeClaims { - temp := &corev1beta1.NodeClaim{} + temp := &karpv1.NodeClaim{} g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(nc), temp)).Should(Succeed()) g.Expect(temp.StatusConditions().Root().IsTrue()).To(BeTrue()) } }).Should(Succeed()) } -func (env *Environment) EventuallyExpectDrifted(nodeClaims ...*corev1beta1.NodeClaim) { +func (env *Environment) EventuallyExpectDrifted(nodeClaims ...*karpv1.NodeClaim) { GinkgoHelper() Eventually(func(g Gomega) { for _, nc := range nodeClaims { g.Expect(env.Client.Get(env, client.ObjectKeyFromObject(nc), nc)).To(Succeed()) - g.Expect(nc.StatusConditions().Get(corev1beta1.ConditionTypeDrifted).IsTrue()).To(BeTrue()) + g.Expect(nc.StatusConditions().Get(karpv1.ConditionTypeDrifted).IsTrue()).To(BeTrue()) } }).Should(Succeed()) } -func (env *Environment) ConsistentlyExpectNodeClaimsNotDrifted(duration time.Duration, nodeClaims ...*corev1beta1.NodeClaim) { +func (env *Environment) ConsistentlyExpectNodeClaimsNotDrifted(duration time.Duration, nodeClaims ...*karpv1.NodeClaim) { GinkgoHelper() - nodeClaimNames := lo.Map(nodeClaims, func(nc *corev1beta1.NodeClaim, _ int) string { return nc.Name }) + nodeClaimNames := lo.Map(nodeClaims, func(nc *karpv1.NodeClaim, _ int) string { return nc.Name }) By(fmt.Sprintf("consistently expect nodeclaims %s not to be drifted for %s", nodeClaimNames, duration)) Consistently(func(g Gomega) { for _, nc := range nodeClaims { g.Expect(env.Client.Get(env, client.ObjectKeyFromObject(nc), nc)).To(Succeed()) - g.Expect(nc.StatusConditions().Get(corev1beta1.ConditionTypeDrifted)).To(BeNil()) + g.Expect(nc.StatusConditions().Get(karpv1.ConditionTypeDrifted)).To(BeNil()) } }, duration).Should(Succeed()) } -func (env *Environment) EventuallyExpectEmpty(nodeClaims ...*corev1beta1.NodeClaim) { +func (env *Environment) EventuallyExpectEmpty(nodeClaims ...*karpv1.NodeClaim) { GinkgoHelper() Eventually(func(g Gomega) { for _, nc := range nodeClaims { g.Expect(env.Client.Get(env, client.ObjectKeyFromObject(nc), nc)).To(Succeed()) - g.Expect(nc.StatusConditions().Get(corev1beta1.ConditionTypeEmpty).IsTrue()).To(BeTrue()) + g.Expect(nc.StatusConditions().Get(karpv1.ConditionTypeEmpty).IsTrue()).To(BeTrue()) } }).Should(Succeed()) } -func (env *Environment) GetNode(nodeName string) v1.Node { +func (env *Environment) GetNode(nodeName string) corev1.Node { GinkgoHelper() - var node v1.Node + var node corev1.Node Expect(env.Client.Get(env.Context, types.NamespacedName{Name: nodeName}, &node)).To(Succeed()) return node } @@ -723,7 +723,7 @@ var ( lastLogged = metav1.Now() ) -func (env *Environment) printControllerLogs(options *v1.PodLogOptions) { +func (env *Environment) printControllerLogs(options *corev1.PodLogOptions) { fmt.Println("------- START CONTROLLER LOGS -------") defer fmt.Println("------- END CONTROLLER LOGS -------") @@ -752,14 +752,14 @@ func (env *Environment) printControllerLogs(options *v1.PodLogOptions) { } } -func (env *Environment) EventuallyExpectMinUtilization(resource v1.ResourceName, comparator string, value float64) { +func (env *Environment) EventuallyExpectMinUtilization(resource corev1.ResourceName, comparator string, value float64) { GinkgoHelper() Eventually(func(g Gomega) { g.Expect(env.Monitor.MinUtilization(resource)).To(BeNumerically(comparator, value)) }).Should(Succeed()) } -func (env *Environment) EventuallyExpectAvgUtilization(resource v1.ResourceName, comparator string, value float64) { +func (env *Environment) 
EventuallyExpectAvgUtilization(resource corev1.ResourceName, comparator string, value float64) { GinkgoHelper() Eventually(func(g Gomega) { g.Expect(env.Monitor.AvgUtilization(resource)).To(BeNumerically(comparator, value)) @@ -782,12 +782,12 @@ func (env *Environment) ExpectDaemonSetEnvironmentVariableUpdated(obj client.Obj continue } // If the env var already exists, update its value. Otherwise, create a new var. - if _, i, ok := lo.FindIndexOf(c.Env, func(e v1.EnvVar) bool { + if _, i, ok := lo.FindIndexOf(c.Env, func(e corev1.EnvVar) bool { return e.Name == name }); ok { c.Env[i].Value = value } else { - c.Env = append(c.Env, v1.EnvVar{Name: name, Value: value}) + c.Env = append(c.Env, corev1.EnvVar{Name: name, Value: value}) } } Expect(env.Client.Patch(env.Context, ds, patch)).To(Succeed()) @@ -796,7 +796,7 @@ func (env *Environment) ExpectDaemonSetEnvironmentVariableUpdated(obj client.Obj // ForcePodsToSpread ensures that currently scheduled pods get spread evenly across all passed nodes by deleting pods off of existing // nodes and waiting them to reschedule. This is useful for scenarios where you want to force the nodes be underutilized // but you want to keep a consistent count of nodes rather than leaving around empty ones. -func (env *Environment) ForcePodsToSpread(nodes ...*v1.Node) { +func (env *Environment) ForcePodsToSpread(nodes ...*corev1.Node) { GinkgoHelper() // Get the total count of pods across @@ -809,8 +809,8 @@ func (env *Environment) ForcePodsToSpread(nodes ...*v1.Node) { By(fmt.Sprintf("forcing %d pods to spread across %d nodes", podCount, len(nodes))) start := time.Now() for { - var nodePods []*v1.Pod - node, found := lo.Find(nodes, func(n *v1.Node) bool { + var nodePods []*corev1.Pod + node, found := lo.Find(nodes, func(n *corev1.Node) bool { nodePods = env.ExpectActivePodsForNode(n.Name) return len(nodePods) > maxPodsPerNode }) @@ -843,12 +843,12 @@ func (env *Environment) ForcePodsToSpread(nodes ...*v1.Node) { } } -func (env *Environment) ExpectActivePodsForNode(nodeName string) []*v1.Pod { +func (env *Environment) ExpectActivePodsForNode(nodeName string) []*corev1.Pod { GinkgoHelper() - podList := &v1.PodList{} + podList := &corev1.PodList{} Expect(env.Client.List(env, podList, client.MatchingFields{"spec.nodeName": nodeName}, client.HasLabels{test.DiscoveryLabel})).To(Succeed()) - return lo.Filter(lo.ToSlicePtr(podList.Items), func(p *v1.Pod, _ int) bool { + return lo.Filter(lo.ToSlicePtr(podList.Items), func(p *corev1.Pod, _ int) bool { return p.DeletionTimestamp.IsZero() }) } @@ -867,7 +867,7 @@ func (env *Environment) ExpectCABundle() string { return base64.StdEncoding.EncodeToString(transportConfig.TLS.CAData) } -func (env *Environment) GetDaemonSetCount(np *corev1beta1.NodePool) int { +func (env *Environment) GetDaemonSetCount(np *karpv1.NodePool) int { GinkgoHelper() // Performs the same logic as the scheduler to get the number of daemonset @@ -876,7 +876,7 @@ func (env *Environment) GetDaemonSetCount(np *corev1beta1.NodePool) int { Expect(env.Client.List(env.Context, daemonSetList)).To(Succeed()) return lo.CountBy(daemonSetList.Items, func(d appsv1.DaemonSet) bool { - p := &v1.Pod{Spec: d.Spec.Template.Spec} + p := &corev1.Pod{Spec: d.Spec.Template.Spec} nodeClaimTemplate := pscheduling.NewNodeClaimTemplate(np) if err := scheduling.Taints(nodeClaimTemplate.Spec.Taints).Tolerates(p); err != nil { return false @@ -888,7 +888,7 @@ func (env *Environment) GetDaemonSetCount(np *corev1beta1.NodePool) int { }) } -func (env *Environment) 
GetDaemonSetOverhead(np *corev1beta1.NodePool) v1.ResourceList { +func (env *Environment) GetDaemonSetOverhead(np *karpv1.NodePool) corev1.ResourceList { GinkgoHelper() // Performs the same logic as the scheduler to get the number of daemonset @@ -896,8 +896,8 @@ func (env *Environment) GetDaemonSetOverhead(np *corev1beta1.NodePool) v1.Resour daemonSetList := &appsv1.DaemonSetList{} Expect(env.Client.List(env.Context, daemonSetList)).To(Succeed()) - return coreresources.RequestsForPods(lo.FilterMap(daemonSetList.Items, func(ds appsv1.DaemonSet, _ int) (*v1.Pod, bool) { - p := &v1.Pod{Spec: ds.Spec.Template.Spec} + return coreresources.RequestsForPods(lo.FilterMap(daemonSetList.Items, func(ds appsv1.DaemonSet, _ int) (*corev1.Pod, bool) { + p := &corev1.Pod{Spec: ds.Spec.Template.Spec} nodeClaimTemplate := pscheduling.NewNodeClaimTemplate(np) if err := scheduling.Taints(nodeClaimTemplate.Spec.Taints).Tolerates(p); err != nil { return nil, false diff --git a/test/pkg/environment/common/monitor.go b/test/pkg/environment/common/monitor.go index 3bfd2e64e42a..bf43ce71599b 100644 --- a/test/pkg/environment/common/monitor.go +++ b/test/pkg/environment/common/monitor.go @@ -20,7 +20,7 @@ import ( "math" "sync" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" @@ -28,7 +28,7 @@ import ( "github.com/samber/lo" - "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/utils/resources" ) @@ -39,21 +39,21 @@ type Monitor struct { mu sync.RWMutex - nodesAtReset map[string]*v1.Node + nodesAtReset map[string]*corev1.Node } type state struct { - pods v1.PodList - nodes map[string]*v1.Node // node name -> node - nodePods map[string][]*v1.Pod // node name -> pods bound to the node - nodeRequests map[string]v1.ResourceList // node name -> sum of pod resource requests + pods corev1.PodList + nodes map[string]*corev1.Node // node name -> node + nodePods map[string][]*corev1.Pod // node name -> pods bound to the node + nodeRequests map[string]corev1.ResourceList // node name -> sum of pod resource requests } func NewMonitor(ctx context.Context, kubeClient client.Client) *Monitor { m := &Monitor{ ctx: ctx, kubeClient: kubeClient, - nodesAtReset: map[string]*v1.Node{}, + nodesAtReset: map[string]*corev1.Node{}, } m.Reset() return m @@ -105,36 +105,36 @@ func (m *Monitor) CreatedNodeCount() int { } // NodesAtReset returns a slice of nodes that the monitor saw at the last reset -func (m *Monitor) NodesAtReset() []*v1.Node { +func (m *Monitor) NodesAtReset() []*corev1.Node { m.mu.RLock() defer m.mu.RUnlock() return deepCopySlice(lo.Values(m.nodesAtReset)) } // Nodes returns all the nodes on the cluster -func (m *Monitor) Nodes() []*v1.Node { +func (m *Monitor) Nodes() []*corev1.Node { st := m.poll() return lo.Values(st.nodes) } // CreatedNodes returns the nodes that have been created since the last reset (essentially Nodes - NodesAtReset) -func (m *Monitor) CreatedNodes() []*v1.Node { - resetNodeNames := sets.NewString(lo.Map(m.NodesAtReset(), func(n *v1.Node, _ int) string { return n.Name })...) - return lo.Filter(m.Nodes(), func(n *v1.Node, _ int) bool { return !resetNodeNames.Has(n.Name) }) +func (m *Monitor) CreatedNodes() []*corev1.Node { + resetNodeNames := sets.NewString(lo.Map(m.NodesAtReset(), func(n *corev1.Node, _ int) string { return n.Name })...) 
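// Editor's aside (not part of the patch): Monitor.CreatedNodes and
// Monitor.DeletedNodes, shown around here, are the same name-based set
// difference applied in opposite directions. A sketch of that core step;
// subtractByName is a hypothetical name.
package sketch

import (
	"github.com/samber/lo"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

// subtractByName returns the nodes of a whose names do not appear in b.
func subtractByName(a, b []*corev1.Node) []*corev1.Node {
	names := sets.NewString(lo.Map(b, func(n *corev1.Node, _ int) string { return n.Name })...)
	return lo.Filter(a, func(n *corev1.Node, _ int) bool { return !names.Has(n.Name) })
}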
+ return lo.Filter(m.Nodes(), func(n *corev1.Node, _ int) bool { return !resetNodeNames.Has(n.Name) }) } // DeletedNodes returns the nodes that have been deleted since the last reset (essentially NodesAtReset - Nodes) -func (m *Monitor) DeletedNodes() []*v1.Node { - currentNodeNames := sets.NewString(lo.Map(m.Nodes(), func(n *v1.Node, _ int) string { return n.Name })...) - return lo.Filter(m.NodesAtReset(), func(n *v1.Node, _ int) bool { return !currentNodeNames.Has(n.Name) }) +func (m *Monitor) DeletedNodes() []*corev1.Node { + currentNodeNames := sets.NewString(lo.Map(m.Nodes(), func(n *corev1.Node, _ int) string { return n.Name })...) + return lo.Filter(m.NodesAtReset(), func(n *corev1.Node, _ int) bool { return !currentNodeNames.Has(n.Name) }) } // PendingPods returns the number of pending pods matching the given selector -func (m *Monitor) PendingPods(selector labels.Selector) []*v1.Pod { - var pods []*v1.Pod +func (m *Monitor) PendingPods(selector labels.Selector) []*corev1.Pod { + var pods []*corev1.Pod for _, pod := range m.poll().pods.Items { pod := pod - if pod.Status.Phase != v1.PodPending { + if pod.Status.Phase != corev1.PodPending { continue } if selector.Matches(labels.Set(pod.Labels)) { @@ -149,11 +149,11 @@ func (m *Monitor) PendingPodsCount(selector labels.Selector) int { } // RunningPods returns the number of running pods matching the given selector -func (m *Monitor) RunningPods(selector labels.Selector) []*v1.Pod { - var pods []*v1.Pod +func (m *Monitor) RunningPods(selector labels.Selector) []*corev1.Pod { + var pods []*corev1.Pod for _, pod := range m.poll().pods.Items { pod := pod - if pod.Status.Phase != v1.PodRunning { + if pod.Status.Phase != corev1.PodRunning { continue } if selector.Matches(labels.Set(pod.Labels)) { @@ -168,19 +168,19 @@ func (m *Monitor) RunningPodsCount(selector labels.Selector) int { } func (m *Monitor) poll() state { - var nodes v1.NodeList + var nodes corev1.NodeList if err := m.kubeClient.List(m.ctx, &nodes); err != nil { log.FromContext(m.ctx).Error(err, "failed listing nodes") } - var pods v1.PodList + var pods corev1.PodList if err := m.kubeClient.List(m.ctx, &pods); err != nil { log.FromContext(m.ctx).Error(err, "failing listing pods") } st := state{ - nodes: map[string]*v1.Node{}, + nodes: map[string]*corev1.Node{}, pods: pods, - nodePods: map[string][]*v1.Pod{}, - nodeRequests: map[string]v1.ResourceList{}, + nodePods: map[string][]*corev1.Pod{}, + nodeRequests: map[string]corev1.ResourceList{}, } for i := range nodes.Items { st.nodes[nodes.Items[i].Name] = &nodes.Items[i] @@ -200,7 +200,7 @@ func (m *Monitor) poll() state { return st } -func (m *Monitor) AvgUtilization(resource v1.ResourceName) float64 { +func (m *Monitor) AvgUtilization(resource corev1.ResourceName) float64 { utilization := m.nodeUtilization(resource) sum := 0.0 for _, v := range utilization { @@ -209,7 +209,7 @@ func (m *Monitor) AvgUtilization(resource v1.ResourceName) float64 { return sum / float64(len(utilization)) } -func (m *Monitor) MinUtilization(resource v1.ResourceName) float64 { +func (m *Monitor) MinUtilization(resource corev1.ResourceName) float64 { min := math.MaxFloat64 for _, v := range m.nodeUtilization(resource) { min = math.Min(v, min) @@ -217,13 +217,13 @@ func (m *Monitor) MinUtilization(resource v1.ResourceName) float64 { return min } -func (m *Monitor) nodeUtilization(resource v1.ResourceName) []float64 { +func (m *Monitor) nodeUtilization(resource corev1.ResourceName) []float64 { st := m.poll() var utilization []float64 for nodeName, 
requests := range st.nodeRequests { allocatable := st.nodes[nodeName].Status.Allocatable[resource] // skip any nodes we didn't launch - if st.nodes[nodeName].Labels[v1beta1.NodePoolLabelKey] == "" { + if st.nodes[nodeName].Labels[karpv1.NodePoolLabelKey] == "" { continue } if allocatable.IsZero() { diff --git a/test/pkg/environment/common/setup.go b/test/pkg/environment/common/setup.go index 2c029a57c7c5..acc4a2610dbf 100644 --- a/test/pkg/environment/common/setup.go +++ b/test/pkg/environment/common/setup.go @@ -21,7 +21,7 @@ import ( "github.com/samber/lo" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" schedulingv1 "k8s.io/api/scheduling/v1" storagev1 "k8s.io/api/storage/v1" @@ -31,11 +31,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/test" "sigs.k8s.io/karpenter/pkg/utils/pod" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/test/pkg/debug" . "github.com/onsi/ginkgo/v2" @@ -46,19 +46,19 @@ const TestingFinalizer = "testing/finalizer" var ( CleanableObjects = []client.Object{ - &v1.Pod{}, + &corev1.Pod{}, &appsv1.Deployment{}, &appsv1.DaemonSet{}, &policyv1.PodDisruptionBudget{}, - &v1.PersistentVolumeClaim{}, - &v1.PersistentVolume{}, + &corev1.PersistentVolumeClaim{}, + &corev1.PersistentVolume{}, &storagev1.StorageClass{}, - &corev1beta1.NodePool{}, - &v1.LimitRange{}, + &karpv1.NodePool{}, + &corev1.LimitRange{}, &schedulingv1.PriorityClass{}, - &v1.Node{}, - &corev1beta1.NodeClaim{}, - &v1beta1.EC2NodeClass{}, + &corev1.Node{}, + &karpv1.NodeClaim{}, + &v1.EC2NodeClass{}, } ) @@ -74,14 +74,14 @@ func (env *Environment) BeforeEach() { } func (env *Environment) ExpectCleanCluster() { - var nodes v1.NodeList + var nodes corev1.NodeList Expect(env.Client.List(env.Context, &nodes)).To(Succeed()) for _, node := range nodes.Items { if len(node.Spec.Taints) == 0 && !node.Spec.Unschedulable { Fail(fmt.Sprintf("expected system pool node %s to be tainted", node.Name)) } } - var pods v1.PodList + var pods corev1.PodList Expect(env.Client.List(env.Context, &pods)).To(Succeed()) for i := range pods.Items { Expect(pod.IsProvisionable(&pods.Items[i])).To(BeFalse(), @@ -89,7 +89,7 @@ func (env *Environment) ExpectCleanCluster() { Expect(pods.Items[i].Namespace).ToNot(Equal("default"), fmt.Sprintf("expected no pods in the `default` namespace, found %s/%s", pods.Items[i].Namespace, pods.Items[i].Name)) } - for _, obj := range []client.Object{&corev1beta1.NodePool{}, &v1beta1.EC2NodeClass{}} { + for _, obj := range []client.Object{&karpv1.NodePool{}, &v1.EC2NodeClass{}} { metaList := &metav1.PartialObjectMetadataList{} gvk := lo.Must(apiutil.GVKForObject(obj, env.Client.Scheme())) metaList.SetGroupVersionKind(gvk) @@ -106,7 +106,7 @@ func (env *Environment) Cleanup() { func (env *Environment) AfterEach() { debug.AfterEach(env.Context) - env.printControllerLogs(&v1.PodLogOptions{Container: "controller"}) + env.printControllerLogs(&corev1.PodLogOptions{Container: "controller"}) } func (env *Environment) CleanupObjects(cleanableObjects ...client.Object) { diff --git a/test/suites/ami/suite_test.go b/test/suites/ami/suite_test.go index 35dc80071996..85e2519819e5 100644 --- a/test/suites/ami/suite_test.go +++ b/test/suites/ami/suite_test.go @@ 
-24,9 +24,9 @@ import ( awssdk "github.com/aws/aws-sdk-go/aws" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -35,7 +35,7 @@ import ( "github.com/awslabs/operatorpkg/status" . "github.com/awslabs/operatorpkg/test/expectations" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -47,8 +47,8 @@ import ( ) var env *environmentaws.Environment -var nodeClass *v1beta1.EC2NodeClass -var nodePool *corev1beta1.NodePool +var nodeClass *v1.EC2NodeClass +var nodePool *karpv1.NodePool func TestAMI(t *testing.T) { RegisterFailHandler(Fail) @@ -77,7 +77,7 @@ var _ = Describe("AMI", func() { It("should use the AMI defined by the AMI Selector Terms", func() { pod := coretest.Pod() - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { ID: customAMI, }, @@ -91,7 +91,7 @@ var _ = Describe("AMI", func() { It("should use the most recent AMI when discovering multiple", func() { // choose an old static image that will definitely have an older creation date oldCustomAMI := env.GetAMIBySSMPath(fmt.Sprintf("/aws/service/eks/optimized-ami/%[1]s/amazon-linux-2023/x86_64/standard/amazon-eks-node-al2023-x86_64-standard-%[1]s-v20240514/image_id", env.K8sVersion())) - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { ID: customAMI, }, @@ -113,7 +113,7 @@ var _ = Describe("AMI", func() { }) Expect(err).To(BeNil()) Expect(output.Images).To(HaveLen(1)) - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { Name: *output.Images[0].Name, Owner: "fakeOwnerValue", @@ -132,7 +132,7 @@ var _ = Describe("AMI", func() { Expect(err).To(BeNil()) Expect(output.Images).To(HaveLen(1)) - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { Name: *output.Images[0].Name, }, @@ -146,7 +146,7 @@ var _ = Describe("AMI", func() { env.ExpectInstance(pod.Spec.NodeName).To(HaveField("ImageId", HaveValue(Equal(customAMI)))) }) It("should support ami selector ids", func() { - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { ID: customAMI, }, @@ -163,34 +163,34 @@ var _ = Describe("AMI", func() { Context("AMIFamily", func() { It("should provision a node using the AL2 family", func() { pod := coretest.Pod() - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2 env.ExpectCreated(nodeClass, nodePool, pod) env.EventuallyExpectHealthy(pod) env.ExpectCreatedNodeCount("==", 1) }) It("should provision a node using the AL2023 family", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2023 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2023 pod := coretest.Pod() env.ExpectCreated(nodeClass, nodePool, pod) env.EventuallyExpectHealthy(pod) env.ExpectCreatedNodeCount("==", 1) }) It("should provision a node using the Bottlerocket family", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket + nodeClass.Spec.AMIFamily = &v1.AMIFamilyBottlerocket pod := coretest.Pod() env.ExpectCreated(nodeClass, nodePool, pod) 
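// Editor's aside (not part of the patch): after this rename, provider-side
// types come from pkg/apis/v1 (aliased v1) while core Karpenter types use
// karpv1, so the AMI pinning in the tests above now reads as sketched below.
// selectByID is an illustrative helper, not test code.
package sketch

import v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1"

// selectByID pins a node class to exactly one AMI, as the selector-term tests do.
func selectByID(nodeClass *v1.EC2NodeClass, amiID string) {
	nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{ID: amiID}}
}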
env.EventuallyExpectHealthy(pod) env.ExpectCreatedNodeCount("==", 1) }) It("should provision a node using the Ubuntu family", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyUbuntu + nodeClass.Spec.AMIFamily = &v1.AMIFamilyUbuntu // TODO (jmdeal@): remove once 22.04 AMIs are supported if env.K8sMinorVersion() >= 29 { nodeClass.Spec.AMISelectorTerms = lo.Map([]string{ "/aws/service/canonical/ubuntu/eks/20.04/1.28/stable/current/amd64/hvm/ebs-gp2/ami-id", "/aws/service/canonical/ubuntu/eks/20.04/1.28/stable/current/arm64/hvm/ebs-gp2/ami-id", - }, func(ssmPath string, _ int) v1beta1.AMISelectorTerm { - return v1beta1.AMISelectorTerm{ID: env.GetAMIBySSMPath(ssmPath)} + }, func(ssmPath string, _ int) v1.AMISelectorTerm { + return v1.AMISelectorTerm{ID: env.GetAMIBySSMPath(ssmPath)} }) } pod := coretest.Pod() @@ -199,9 +199,9 @@ var _ = Describe("AMI", func() { env.ExpectCreatedNodeCount("==", 1) }) It("should support Custom AMIFamily with AMI Selectors", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyCustom + nodeClass.Spec.AMIFamily = &v1.AMIFamilyCustom al2023AMI := env.GetAMIBySSMPath(fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2023/x86_64/standard/recommended/image_id", env.K8sVersion())) - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { ID: al2023AMI, }, @@ -219,7 +219,7 @@ var _ = Describe("AMI", func() { env.ExpectInstance(pod.Spec.NodeName).To(HaveField("ImageId", HaveValue(Equal(al2023AMI)))) }) It("should have the EC2NodeClass status for AMIs using wildcard", func() { - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { Name: "*", }, @@ -229,7 +229,7 @@ var _ = Describe("AMI", func() { Expect(len(nc.Status.AMIs)).To(BeNumerically("<", 10)) }) It("should have the EC2NodeClass status for AMIs using tags", func() { - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { ID: customAMI, }, @@ -238,17 +238,17 @@ var _ = Describe("AMI", func() { nc := EventuallyExpectAMIsToExist(nodeClass) Expect(len(nc.Status.AMIs)).To(BeNumerically("==", 1)) Expect(nc.Status.AMIs[0].ID).To(Equal(customAMI)) - ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: v1beta1.ConditionTypeAMIsReady, Status: metav1.ConditionTrue}) + ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: v1.ConditionTypeAMIsReady, Status: metav1.ConditionTrue}) ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: status.ConditionReady, Status: metav1.ConditionTrue}) }) It("should have ec2nodeClass status as not ready since AMI was not resolved", func() { - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { ID: "ami-123", }, } env.ExpectCreated(nodeClass) - ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: v1beta1.ConditionTypeAMIsReady, Status: metav1.ConditionFalse, Message: "AMISelector did not match any AMIs"}) + ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: v1.ConditionTypeAMIsReady, Status: metav1.ConditionFalse, Message: "AMISelector did not match any AMIs"}) ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: status.ConditionReady, Status: metav1.ConditionFalse, Message: "AMIsReady=False"}) }) }) 
@@ -257,17 +257,17 @@ var _ = Describe("AMI", func() { It("should merge UserData contents for AL2 AMIFamily", func() { content, err := os.ReadFile("testdata/al2_userdata_input.sh") Expect(err).ToNot(HaveOccurred()) - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2 nodeClass.Spec.UserData = awssdk.String(string(content)) - nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Key: "example.com", Value: "value", Effect: "NoExecute"}} - nodePool.Spec.Template.Spec.StartupTaints = []v1.Taint{{Key: "example.com", Value: "value", Effect: "NoSchedule"}} - pod := coretest.Pod(coretest.PodOptions{Tolerations: []v1.Toleration{{Key: "example.com", Operator: v1.TolerationOpExists}}}) + nodePool.Spec.Template.Spec.Taints = []corev1.Taint{{Key: "example.com", Value: "value", Effect: "NoExecute"}} + nodePool.Spec.Template.Spec.StartupTaints = []corev1.Taint{{Key: "example.com", Value: "value", Effect: "NoSchedule"}} + pod := coretest.Pod(coretest.PodOptions{Tolerations: []corev1.Toleration{{Key: "example.com", Operator: corev1.TolerationOpExists}}}) env.ExpectCreated(pod, nodeClass, nodePool) env.EventuallyExpectHealthy(pod) Expect(env.GetNode(pod.Spec.NodeName).Spec.Taints).To(ContainElements( - v1.Taint{Key: "example.com", Value: "value", Effect: "NoExecute"}, - v1.Taint{Key: "example.com", Value: "value", Effect: "NoSchedule"}, + corev1.Taint{Key: "example.com", Value: "value", Effect: "NoExecute"}, + corev1.Taint{Key: "example.com", Value: "value", Effect: "NoSchedule"}, )) actualUserData, err := base64.StdEncoding.DecodeString(*getInstanceAttribute(pod.Spec.NodeName, "userData").UserData.Value) Expect(err).ToNot(HaveOccurred()) @@ -278,17 +278,17 @@ var _ = Describe("AMI", func() { It("should merge non-MIME UserData contents for AL2 AMIFamily", func() { content, err := os.ReadFile("testdata/al2_no_mime_userdata_input.sh") Expect(err).ToNot(HaveOccurred()) - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2 nodeClass.Spec.UserData = awssdk.String(string(content)) - nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Key: "example.com", Value: "value", Effect: "NoExecute"}} - nodePool.Spec.Template.Spec.StartupTaints = []v1.Taint{{Key: "example.com", Value: "value", Effect: "NoSchedule"}} - pod := coretest.Pod(coretest.PodOptions{Tolerations: []v1.Toleration{{Key: "example.com", Operator: v1.TolerationOpExists}}}) + nodePool.Spec.Template.Spec.Taints = []corev1.Taint{{Key: "example.com", Value: "value", Effect: "NoExecute"}} + nodePool.Spec.Template.Spec.StartupTaints = []corev1.Taint{{Key: "example.com", Value: "value", Effect: "NoSchedule"}} + pod := coretest.Pod(coretest.PodOptions{Tolerations: []corev1.Toleration{{Key: "example.com", Operator: corev1.TolerationOpExists}}}) env.ExpectCreated(pod, nodeClass, nodePool) env.EventuallyExpectHealthy(pod) Expect(env.GetNode(pod.Spec.NodeName).Spec.Taints).To(ContainElements( - v1.Taint{Key: "example.com", Value: "value", Effect: "NoExecute"}, - v1.Taint{Key: "example.com", Value: "value", Effect: "NoSchedule"}, + corev1.Taint{Key: "example.com", Value: "value", Effect: "NoExecute"}, + corev1.Taint{Key: "example.com", Value: "value", Effect: "NoSchedule"}, )) actualUserData, err := base64.StdEncoding.DecodeString(*getInstanceAttribute(pod.Spec.NodeName, "userData").UserData.Value) Expect(err).ToNot(HaveOccurred()) @@ -299,17 +299,17 @@ var _ = Describe("AMI", func() { It("should merge UserData contents for Bottlerocket AMIFamily", func() { content, err := 
os.ReadFile("testdata/br_userdata_input.sh") Expect(err).ToNot(HaveOccurred()) - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket + nodeClass.Spec.AMIFamily = &v1.AMIFamilyBottlerocket nodeClass.Spec.UserData = awssdk.String(string(content)) - nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Key: "example.com", Value: "value", Effect: "NoExecute"}} - nodePool.Spec.Template.Spec.StartupTaints = []v1.Taint{{Key: "example.com", Value: "value", Effect: "NoSchedule"}} - pod := coretest.Pod(coretest.PodOptions{Tolerations: []v1.Toleration{{Key: "example.com", Operator: v1.TolerationOpExists}}}) + nodePool.Spec.Template.Spec.Taints = []corev1.Taint{{Key: "example.com", Value: "value", Effect: "NoExecute"}} + nodePool.Spec.Template.Spec.StartupTaints = []corev1.Taint{{Key: "example.com", Value: "value", Effect: "NoSchedule"}} + pod := coretest.Pod(coretest.PodOptions{Tolerations: []corev1.Toleration{{Key: "example.com", Operator: corev1.TolerationOpExists}}}) env.ExpectCreated(pod, nodeClass, nodePool) env.EventuallyExpectHealthy(pod) Expect(env.GetNode(pod.Spec.NodeName).Spec.Taints).To(ContainElements( - v1.Taint{Key: "example.com", Value: "value", Effect: "NoExecute"}, - v1.Taint{Key: "example.com", Value: "value", Effect: "NoSchedule"}, + corev1.Taint{Key: "example.com", Value: "value", Effect: "NoExecute"}, + corev1.Taint{Key: "example.com", Value: "value", Effect: "NoSchedule"}, )) actualUserData, err := base64.StdEncoding.DecodeString(*getInstanceAttribute(pod.Spec.NodeName, "userData").UserData.Value) Expect(err).ToNot(HaveOccurred()) @@ -328,34 +328,34 @@ var _ = Describe("AMI", func() { content, err := os.ReadFile("testdata/windows_userdata_input.ps1") Expect(err).ToNot(HaveOccurred()) - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyWindows2022 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyWindows2022 nodeClass.Spec.UserData = awssdk.String(string(content)) - nodePool.Spec.Template.Spec.Taints = []v1.Taint{{Key: "example.com", Value: "value", Effect: "NoExecute"}} - nodePool.Spec.Template.Spec.StartupTaints = []v1.Taint{{Key: "example.com", Value: "value", Effect: "NoSchedule"}} + nodePool.Spec.Template.Spec.Taints = []corev1.Taint{{Key: "example.com", Value: "value", Effect: "NoExecute"}} + nodePool.Spec.Template.Spec.StartupTaints = []corev1.Taint{{Key: "example.com", Value: "value", Effect: "NoSchedule"}} nodePool = coretest.ReplaceRequirements(nodePool, - corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelOSStable, - Operator: v1.NodeSelectorOpIn, - Values: []string{string(v1.Windows)}, + karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelOSStable, + Operator: corev1.NodeSelectorOpIn, + Values: []string{string(corev1.Windows)}, }, }, ) pod := coretest.Pod(coretest.PodOptions{ Image: environmentaws.WindowsDefaultImage, NodeSelector: map[string]string{ - v1.LabelOSStable: string(v1.Windows), - v1.LabelWindowsBuild: "10.0.20348", + corev1.LabelOSStable: string(corev1.Windows), + corev1.LabelWindowsBuild: "10.0.20348", }, - Tolerations: []v1.Toleration{{Key: "example.com", Operator: v1.TolerationOpExists}}, + Tolerations: []corev1.Toleration{{Key: "example.com", Operator: corev1.TolerationOpExists}}, }) env.ExpectCreated(pod, nodeClass, nodePool) env.EventuallyExpectHealthyWithTimeout(time.Minute*15, pod) // Wait 15 minutes because Windows nodes/containers take longer to spin up 
Expect(env.GetNode(pod.Spec.NodeName).Spec.Taints).To(ContainElements( - v1.Taint{Key: "example.com", Value: "value", Effect: "NoExecute"}, - v1.Taint{Key: "example.com", Value: "value", Effect: "NoSchedule"}, + corev1.Taint{Key: "example.com", Value: "value", Effect: "NoExecute"}, + corev1.Taint{Key: "example.com", Value: "value", Effect: "NoSchedule"}, )) actualUserData, err := base64.StdEncoding.DecodeString(*getInstanceAttribute(pod.Spec.NodeName, "userData").UserData.Value) Expect(err).ToNot(HaveOccurred()) @@ -367,7 +367,7 @@ var _ = Describe("AMI", func() { //nolint:unparam func getInstanceAttribute(nodeName string, attribute string) *ec2.DescribeInstanceAttributeOutput { - var node v1.Node + var node corev1.Node Expect(env.Client.Get(env.Context, types.NamespacedName{Name: nodeName}, &node)).To(Succeed()) providerIDSplit := strings.Split(node.Spec.ProviderID, "/") instanceID := providerIDSplit[len(providerIDSplit)-1] @@ -379,8 +379,8 @@ func getInstanceAttribute(nodeName string, attribute string) *ec2.DescribeInstan return instanceAttribute } -func EventuallyExpectAMIsToExist(nodeClass *v1beta1.EC2NodeClass) *v1beta1.EC2NodeClass { - nc := &v1beta1.EC2NodeClass{} +func EventuallyExpectAMIsToExist(nodeClass *v1.EC2NodeClass) *v1.EC2NodeClass { + nc := &v1.EC2NodeClass{} Eventually(func(g Gomega) { g.Expect(env.Client.Get(env, client.ObjectKeyFromObject(nodeClass), nc)).To(Succeed()) g.Expect(nc.Status.AMIs).ToNot(BeNil()) diff --git a/test/suites/chaos/suite_test.go b/test/suites/chaos/suite_test.go index 9011876d9a36..aa35422b2c76 100644 --- a/test/suites/chaos/suite_test.go +++ b/test/suites/chaos/suite_test.go @@ -22,7 +22,7 @@ import ( "time" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/informers" "k8s.io/client-go/rest" @@ -33,11 +33,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" coretest "sigs.k8s.io/karpenter/pkg/test" nodeutils "sigs.k8s.io/karpenter/pkg/utils/node" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/test/pkg/debug" "github.com/aws/karpenter-provider-aws/test/pkg/environment/aws" @@ -46,8 +46,8 @@ import ( ) var env *aws.Environment -var nodeClass *v1beta1.EC2NodeClass -var nodePool *corev1beta1.NodePool +var nodeClass *v1.EC2NodeClass +var nodePool *karpv1.NodePool func TestChaos(t *testing.T) { RegisterFailHandler(Fail) @@ -74,14 +74,14 @@ var _ = Describe("Chaos", func() { ctx, cancel := context.WithCancel(env.Context) defer cancel() - nodePool = coretest.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeSpot}, + nodePool = coretest.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeSpot}, }, }) - nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenUnderutilized + nodePool.Spec.Disruption.ConsolidationPolicy = karpv1.ConsolidationPolicyWhenUnderutilized 
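// Editor's aside (not part of the patch): the requirement rewrite just above
// shows the canonical shape after this migration, karpv1's wrapper around a
// plain corev1.NodeSelectorRequirement. Sketched standalone for reference.
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
)

// spotOnly restricts a NodePool to spot capacity, as the chaos test does.
var spotOnly = karpv1.NodeSelectorRequirementWithMinValues{
	NodeSelectorRequirement: corev1.NodeSelectorRequirement{
		Key:      karpv1.CapacityTypeLabelKey,
		Operator: corev1.NodeSelectorOpIn,
		Values:   []string{karpv1.CapacityTypeSpot},
	},
}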
nodePool.Spec.Disruption.ConsolidateAfter = nil numPods := 1 @@ -103,7 +103,7 @@ var _ = Describe("Chaos", func() { // Expect that we never get over a high number of nodes Consistently(func(g Gomega) { - list := &v1.NodeList{} + list := &corev1.NodeList{} g.Expect(env.Client.List(env.Context, list, client.HasLabels{coretest.DiscoveryLabel})).To(Succeed()) g.Expect(len(list.Items)).To(BeNumerically("<", 35)) }, time.Minute*5).Should(Succeed()) @@ -112,8 +112,8 @@ var _ = Describe("Chaos", func() { ctx, cancel := context.WithCancel(env.Context) defer cancel() - nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenEmpty - nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{Duration: lo.ToPtr(30 * time.Second)} + nodePool.Spec.Disruption.ConsolidationPolicy = karpv1.ConsolidationPolicyWhenEmpty + nodePool.Spec.Disruption.ConsolidateAfter = &karpv1.NillableDuration{Duration: lo.ToPtr(30 * time.Second)} numPods := 1 dep := coretest.Deployment(coretest.DeploymentOptions{ Replicas: int32(numPods), @@ -133,7 +133,7 @@ var _ = Describe("Chaos", func() { // Expect that we never get over a high number of nodes Consistently(func(g Gomega) { - list := &v1.NodeList{} + list := &corev1.NodeList{} g.Expect(env.Client.List(env.Context, list, client.HasLabels{coretest.DiscoveryLabel})).To(Succeed()) g.Expect(len(list.Items)).To(BeNumerically("<", 35)) }, time.Minute*5).Should(Succeed()) @@ -146,15 +146,15 @@ type taintAdder struct { } func (t *taintAdder) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - node := &v1.Node{} + node := &corev1.Node{} if err := t.kubeClient.Get(ctx, req.NamespacedName, node); err != nil { return reconcile.Result{}, client.IgnoreNotFound(err) } mergeFrom := client.StrategicMergeFrom(node.DeepCopy()) - taint := v1.Taint{ + taint := corev1.Taint{ Key: "test", Value: "true", - Effect: v1.TaintEffectNoExecute, + Effect: corev1.TaintEffectNoExecute, } if !lo.Contains(node.Spec.Taints, taint) { node.Spec.Taints = append(node.Spec.Taints, taint) @@ -167,9 +167,9 @@ func (t *taintAdder) Reconcile(ctx context.Context, req reconcile.Request) (reco func (t *taintAdder) Builder(mgr manager.Manager) *controllerruntime.Builder { return controllerruntime.NewControllerManagedBy(mgr). - For(&v1.Node{}). + For(&corev1.Node{}). 
WithEventFilter(predicate.NewPredicateFuncs(func(obj client.Object) bool { - node := obj.(*v1.Node) + node := obj.(*corev1.Node) if _, ok := node.Labels[coretest.DiscoveryLabel]; !ok { return false } @@ -197,7 +197,7 @@ func startNodeCountMonitor(ctx context.Context, kubeClient client.Client) { deletedNodes := atomic.Int64{} factory := informers.NewSharedInformerFactoryWithOptions(env.KubeClient, time.Second*30, - informers.WithTweakListOptions(func(l *metav1.ListOptions) { l.LabelSelector = corev1beta1.NodePoolLabelKey })) + informers.WithTweakListOptions(func(l *metav1.ListOptions) { l.LabelSelector = karpv1.NodePoolLabelKey })) nodeInformer := factory.Core().V1().Nodes().Informer() _ = lo.Must(nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(_ interface{}) { @@ -210,10 +210,10 @@ func startNodeCountMonitor(ctx context.Context, kubeClient client.Client) { factory.Start(ctx.Done()) go func() { for { - list := &v1.NodeList{} + list := &corev1.NodeList{} if err := kubeClient.List(ctx, list, client.HasLabels{coretest.DiscoveryLabel}); err == nil { - readyCount := lo.CountBy(list.Items, func(n v1.Node) bool { - return nodeutils.GetCondition(&n, v1.NodeReady).Status == v1.ConditionTrue + readyCount := lo.CountBy(list.Items, func(n corev1.Node) bool { + return nodeutils.GetCondition(&n, corev1.NodeReady).Status == corev1.ConditionTrue }) fmt.Printf("[NODE COUNT] CURRENT: %d | READY: %d | CREATED: %d | DELETED: %d\n", len(list.Items), readyCount, createdNodes.Load(), deletedNodes.Load()) } diff --git a/test/suites/consolidation/suite_test.go b/test/suites/consolidation/suite_test.go index b9d37a70c94d..3c31031ee170 100644 --- a/test/suites/consolidation/suite_test.go +++ b/test/suites/consolidation/suite_test.go @@ -24,16 +24,16 @@ import ( "github.com/awslabs/operatorpkg/object" "github.com/samber/lo" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "sigs.k8s.io/controller-runtime/pkg/client" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/test/pkg/debug" environmentaws "github.com/aws/karpenter-provider-aws/test/pkg/environment/aws" @@ -56,7 +56,7 @@ func TestConsolidation(t *testing.T) { RunSpecs(t, "Consolidation") } -var nodeClass *v1beta1.EC2NodeClass +var nodeClass *v1.EC2NodeClass var _ = BeforeEach(func() { nodeClass = env.DefaultEC2NodeClass() @@ -67,7 +67,7 @@ var _ = AfterEach(func() { env.AfterEach() }) var _ = Describe("Consolidation", func() { Context("Budgets", func() { - var nodePool *corev1beta1.NodePool + var nodePool *karpv1.NodePool var dep *appsv1.Deployment var selector labels.Selector var numPods int32 @@ -82,27 +82,27 @@ var _ = Describe("Consolidation", func() { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "regular-app"}, }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, }, }, }) selector = labels.SelectorFromSet(dep.Spec.Selector.MatchLabels) }) It("should respect budgets for empty delete consolidation", func() { - 
nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{ + nodePool.Spec.Disruption.Budgets = []karpv1.Budget{ { Nodes: "40%", }, } // Hostname anti-affinity to require one pod on each node - dep.Spec.Template.Spec.Affinity = &v1.Affinity{ - PodAntiAffinity: &v1.PodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + dep.Spec.Template.Spec.Affinity = &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ { LabelSelector: dep.Spec.Selector, - TopologyKey: v1.LabelHostname, + TopologyKey: corev1.LabelHostname, }, }, }, @@ -152,18 +152,18 @@ var _ = Describe("Consolidation", func() { }) It("should respect budgets for non-empty delete consolidation", func() { // This test will hold consolidation until we are ready to execute it - nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{} + nodePool.Spec.Disruption.ConsolidateAfter = &karpv1.NillableDuration{} nodePool = test.ReplaceRequirements(nodePool, - corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1beta1.LabelInstanceSize, - Operator: v1.NodeSelectorOpIn, + karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: v1.LabelInstanceSize, + Operator: corev1.NodeSelectorOpIn, Values: []string{"2xlarge"}, }, }, ) // We're expecting to create 3 nodes, so we'll expect to see at most 2 nodes deleting at one time. - nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{{ + nodePool.Spec.Disruption.Budgets = []karpv1.Budget{{ Nodes: "50%", }} numPods = 9 @@ -174,9 +174,9 @@ var _ = Describe("Consolidation", func() { Labels: map[string]string{"app": "large-app"}, }, // Each 2xlarge has 8 cpu, so each node should fit no more than 3 pods. - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("2100m"), + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2100m"), }, }, }, @@ -221,27 +221,27 @@ var _ = Describe("Consolidation", func() { It("should respect budgets for non-empty replace consolidation", func() { appLabels := map[string]string{"app": "large-app"} // This test will hold consolidation until we are ready to execute it - nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{} + nodePool.Spec.Disruption.ConsolidateAfter = &karpv1.NillableDuration{} nodePool = test.ReplaceRequirements(nodePool, - corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceSize, - Operator: v1.NodeSelectorOpIn, + karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceSize, + Operator: corev1.NodeSelectorOpIn, Values: []string{"xlarge", "2xlarge"}, }, }, // Add an Exists operator so that we can select on a fake partition later - corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ + karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ Key: "test-partition", - Operator: v1.NodeSelectorOpExists, + Operator: corev1.NodeSelectorOpExists, }, }, ) nodePool.Labels = appLabels // We're expecting to create 5 nodes, so we'll expect to see at most 3 nodes deleting at one time. 
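// Editor's aside (not part of the patch): the budget tests around here exercise
// three shapes, sketched together below with the tests' values; the cron
// expression is a stand-in, since the blocking-window test derives its schedule
// from the current time.
package sketch

import (
	"time"

	"github.com/samber/lo"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
)

var budgets = []karpv1.Budget{
	{Nodes: "50%"}, // percentage cap: at most half the nodes disrupt at once
	{Nodes: "3"},   // absolute cap on concurrently disrupting nodes
	{ // fully blocking, but only inside a 30-minute daily window
		Nodes:    "0",
		Schedule: lo.ToPtr("45 8 * * *"),
		Duration: &metav1.Duration{Duration: 30 * time.Minute},
	},
}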
- nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{{ + nodePool.Spec.Disruption.Budgets = []karpv1.Budget{{ Nodes: "3", }} @@ -253,9 +253,9 @@ var _ = Describe("Consolidation", func() { }, // Each 2xlarge has 8 cpu, so each node should fit no more than 1 pod since each node will have. // an equivalently sized daemonset - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("3"), + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), }, }, }, @@ -278,9 +278,9 @@ var _ = Describe("Consolidation", func() { NodeSelector: map[string]string{"test-partition": fmt.Sprintf("%d", i)}, // Each 2xlarge has 8 cpu, so each node should fit no more than 1 pod since each node will have. // an equivalently sized daemonset - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("3"), + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), }, }, }, @@ -325,24 +325,24 @@ var _ = Describe("Consolidation", func() { // Eventually expect all the nodes to be rolled and completely removed // Since this completes the disruption operation, this also ensures that we aren't leaking nodes into subsequent // tests since nodeclaims that are actively replacing but haven't brought-up nodes yet can register nodes later - env.EventuallyExpectNotFound(lo.Map(originalNodes, func(n *v1.Node, _ int) client.Object { return n })...) - env.EventuallyExpectNotFound(lo.Map(originalNodeClaims, func(n *corev1beta1.NodeClaim, _ int) client.Object { return n })...) + env.EventuallyExpectNotFound(lo.Map(originalNodes, func(n *corev1.Node, _ int) client.Object { return n })...) + env.EventuallyExpectNotFound(lo.Map(originalNodeClaims, func(n *karpv1.NodeClaim, _ int) client.Object { return n })...) 
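// Editor's aside (not part of the patch): the budget tests on either side force
// one pod per node via required hostname anti-affinity. A sketch of that
// construction after the rename; onePodPerNode is a hypothetical helper.
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// onePodPerNode returns an affinity that forbids two matching pods on one host.
func onePodPerNode(selector *metav1.LabelSelector) *corev1.Affinity {
	return &corev1.Affinity{
		PodAntiAffinity: &corev1.PodAntiAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{
				LabelSelector: selector,
				TopologyKey:   corev1.LabelHostname,
			}},
		},
	}
}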
env.ExpectNodeClaimCount("==", 5) env.ExpectNodeCount("==", 5) }) It("should not allow consolidation if the budget is fully blocking", func() { // We're going to define a budget that doesn't allow any consolidation to happen - nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{{ + nodePool.Spec.Disruption.Budgets = []karpv1.Budget{{ Nodes: "0", }} // Hostname anti-affinity to require one pod on each node - dep.Spec.Template.Spec.Affinity = &v1.Affinity{ - PodAntiAffinity: &v1.PodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + dep.Spec.Template.Spec.Affinity = &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ { LabelSelector: dep.Spec.Selector, - TopologyKey: v1.LabelHostname, + TopologyKey: corev1.LabelHostname, }, }, }, @@ -366,19 +366,19 @@ var _ = Describe("Consolidation", func() { // the current time and extends 15 minutes past the current time // Times need to be in UTC since the karpenter containers were built in UTC time windowStart := time.Now().Add(-time.Minute * 15).UTC() - nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{{ + nodePool.Spec.Disruption.Budgets = []karpv1.Budget{{ Nodes: "0", Schedule: lo.ToPtr(fmt.Sprintf("%d %d * * *", windowStart.Minute(), windowStart.Hour())), Duration: &metav1.Duration{Duration: time.Minute * 30}, }} // Hostname anti-affinity to require one pod on each node - dep.Spec.Template.Spec.Affinity = &v1.Affinity{ - PodAntiAffinity: &v1.PodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + dep.Spec.Template.Spec.Affinity = &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ { LabelSelector: dep.Spec.Selector, - TopologyKey: v1.LabelHostname, + TopologyKey: corev1.LabelHostname, }, }, }, @@ -399,34 +399,34 @@ var _ = Describe("Consolidation", func() { }) DescribeTable("should consolidate nodes (delete)", Label(debug.NoWatch), Label(debug.NoEvents), func(spotToSpot bool) { - nodePool := test.NodePool(corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Disruption: corev1beta1.Disruption{ - ConsolidationPolicy: corev1beta1.ConsolidationPolicyWhenUnderutilized, + nodePool := test.NodePool(karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Disruption: karpv1.Disruption{ + ConsolidationPolicy: karpv1.ConsolidationPolicyWhenUnderutilized, // Disable Consolidation until we're ready - ConsolidateAfter: &corev1beta1.NillableDuration{}, + ConsolidateAfter: &karpv1.NillableDuration{}, }, - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: lo.Ternary(spotToSpot, []string{corev1beta1.CapacityTypeSpot}, []string{corev1beta1.CapacityTypeOnDemand}), + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: lo.Ternary(spotToSpot, []string{karpv1.CapacityTypeSpot}, []string{karpv1.CapacityTypeOnDemand}), }, }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceSize, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: 
corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceSize, + Operator: corev1.NodeSelectorOpIn, Values: []string{"medium", "large", "xlarge"}, }, }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceFamily, - Operator: v1.NodeSelectorOpNotIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceFamily, + Operator: corev1.NodeSelectorOpNotIn, // remove some cheap burstable and the odd c1 instance types so we have // more control over what gets provisioned // TODO: jmdeal@ remove a1 from exclusion list once Karpenter implicitly filters a1 instances for AL2023 AMI family (incompatible) @@ -434,10 +434,10 @@ var _ = Describe("Consolidation", func() { }, }, }, - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }, @@ -451,8 +451,8 @@ var _ = Describe("Consolidation", func() { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "large-app"}, }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}, }, }, }) @@ -466,14 +466,14 @@ var _ = Describe("Consolidation", func() { // reduce the number of pods by 60% dep.Spec.Replicas = aws.Int32(40) env.ExpectUpdated(dep) - env.EventuallyExpectAvgUtilization(v1.ResourceCPU, "<", 0.5) + env.EventuallyExpectAvgUtilization(corev1.ResourceCPU, "<", 0.5) // Enable consolidation as WhenUnderutilized doesn't allow a consolidateAfter value nodePool.Spec.Disruption.ConsolidateAfter = nil env.ExpectUpdated(nodePool) // With consolidation enabled, we now must delete nodes - env.EventuallyExpectAvgUtilization(v1.ResourceCPU, ">", 0.6) + env.EventuallyExpectAvgUtilization(corev1.ResourceCPU, ">", 0.6) env.ExpectDeleted(dep) }, @@ -482,34 +482,34 @@ var _ = Describe("Consolidation", func() { ) DescribeTable("should consolidate nodes (replace)", func(spotToSpot bool) { - nodePool := test.NodePool(corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Disruption: corev1beta1.Disruption{ - ConsolidationPolicy: corev1beta1.ConsolidationPolicyWhenUnderutilized, + nodePool := test.NodePool(karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Disruption: karpv1.Disruption{ + ConsolidationPolicy: karpv1.ConsolidationPolicyWhenUnderutilized, // Disable Consolidation until we're ready - ConsolidateAfter: &corev1beta1.NillableDuration{}, + ConsolidateAfter: &karpv1.NillableDuration{}, }, - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: lo.Ternary(spotToSpot, []string{corev1beta1.CapacityTypeSpot}, []string{corev1beta1.CapacityTypeOnDemand}), + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: lo.Ternary(spotToSpot, []string{karpv1.CapacityTypeSpot}, []string{karpv1.CapacityTypeOnDemand}), }, }, { - 
NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceSize, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceSize, + Operator: corev1.NodeSelectorOpIn, Values: []string{"large", "2xlarge"}, }, }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceFamily, - Operator: v1.NodeSelectorOpNotIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceFamily, + Operator: corev1.NodeSelectorOpNotIn, // remove some cheap burstable and the odd c1 / a1 instance types so we have // more control over what gets provisioned Values: []string{"t2", "t3", "c1", "t3a", "t4g", "a1"}, @@ -517,17 +517,17 @@ var _ = Describe("Consolidation", func() { }, // Specify Linux in the NodePool to filter out Windows only DS when discovering DS overhead { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelOSStable, - Operator: v1.NodeSelectorOpIn, - Values: []string{string(v1.Linux)}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelOSStable, + Operator: corev1.NodeSelectorOpIn, + Values: []string{string(corev1.Linux)}, }, }, }, - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }, @@ -541,11 +541,11 @@ var _ = Describe("Consolidation", func() { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "large-app"}, }, - TopologySpreadConstraints: []v1.TopologySpreadConstraint{ + TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ { MaxSkew: 1, - TopologyKey: v1.LabelHostname, - WhenUnsatisfiable: v1.DoNotSchedule, + TopologyKey: corev1.LabelHostname, + WhenUnsatisfiable: corev1.DoNotSchedule, LabelSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "app": "large-app", @@ -553,8 +553,8 @@ var _ = Describe("Consolidation", func() { }, }, }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("4")}, }, }, }) @@ -564,11 +564,11 @@ var _ = Describe("Consolidation", func() { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "small-app"}, }, - TopologySpreadConstraints: []v1.TopologySpreadConstraint{ + TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ { MaxSkew: 1, - TopologyKey: v1.LabelHostname, - WhenUnsatisfiable: v1.DoNotSchedule, + TopologyKey: corev1.LabelHostname, + WhenUnsatisfiable: corev1.DoNotSchedule, LabelSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "app": "small-app", @@ -576,9 +576,9 @@ var _ = Describe("Consolidation", func() { }, }, }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: func() resource.Quantity { + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: func() resource.Quantity { dsOverhead := env.GetDaemonSetOverhead(nodePool) base := lo.ToPtr(resource.MustParse("1800m")) base.Sub(*dsOverhead.Cpu()) @@ -601,25 +601,25 @@ var _ = Describe("Consolidation", func() { // scaling down the large deployment leaves only small pods on each node largeDep.Spec.Replicas = aws.Int32(0) 
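// A rough sketch of the packing math behind the assertions below (approximate; assumes allocatable CPU roughly tracks the instance's vCPU count): each small pod requests ~1800m minus DaemonSet overhead, so one pod fills a .large (2 vCPU) snugly but leaves a .2xlarge (8 vCPU) mostly idle. Scaling the large deployment to zero therefore drops average utilization below 0.5, and replacement consolidation should swap each 2xlarge for a large, raising utilization above 0.8.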
env.ExpectUpdated(largeDep) - env.EventuallyExpectAvgUtilization(v1.ResourceCPU, "<", 0.5) + env.EventuallyExpectAvgUtilization(corev1.ResourceCPU, "<", 0.5) nodePool.Spec.Disruption.ConsolidateAfter = nil env.ExpectUpdated(nodePool) // With consolidation enabled, we now must replace each node in turn to consolidate due to the anti-affinity // rules on the smaller deployment. The 2xl nodes should go to a large - env.EventuallyExpectAvgUtilization(v1.ResourceCPU, ">", 0.8) + env.EventuallyExpectAvgUtilization(corev1.ResourceCPU, ">", 0.8) - var nodes v1.NodeList + var nodes corev1.NodeList Expect(env.Client.List(env.Context, &nodes)).To(Succeed()) numLargeNodes := 0 numOtherNodes := 0 for _, n := range nodes.Items { // only count the nodes created by the provisioner - if n.Labels[corev1beta1.NodePoolLabelKey] != nodePool.Name { + if n.Labels[karpv1.NodePoolLabelKey] != nodePool.Name { continue } - if strings.HasSuffix(n.Labels[v1.LabelInstanceTypeStable], ".large") { + if strings.HasSuffix(n.Labels[corev1.LabelInstanceTypeStable], ".large") { numLargeNodes++ } else { numOtherNodes++ @@ -637,44 +637,44 @@ var _ = Describe("Consolidation", func() { Entry("if the nodes are spot nodes", true), ) It("should consolidate on-demand nodes to spot (replace)", func() { - nodePool := test.NodePool(corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Disruption: corev1beta1.Disruption{ - ConsolidationPolicy: corev1beta1.ConsolidationPolicyWhenUnderutilized, + nodePool := test.NodePool(karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Disruption: karpv1.Disruption{ + ConsolidationPolicy: karpv1.ConsolidationPolicyWhenUnderutilized, // Disable Consolidation until we're ready - ConsolidateAfter: &corev1beta1.NillableDuration{}, + ConsolidateAfter: &karpv1.NillableDuration{}, }, - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeOnDemand}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeOnDemand}, }, }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceSize, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceSize, + Operator: corev1.NodeSelectorOpIn, Values: []string{"large"}, }, }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceFamily, - Operator: v1.NodeSelectorOpNotIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceFamily, + Operator: corev1.NodeSelectorOpNotIn, // remove some cheap burstable and the odd c1 / a1 instance types so we have // more control over what gets provisioned Values: []string{"t2", "t3", "c1", "t3a", "t4g", "a1"}, }, }, }, - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }, @@ -688,11 +688,11 @@ var _ = Describe("Consolidation", func()
{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "small-app"}, }, - TopologySpreadConstraints: []v1.TopologySpreadConstraint{ + TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ { MaxSkew: 1, - TopologyKey: v1.LabelHostname, - WhenUnsatisfiable: v1.DoNotSchedule, + TopologyKey: corev1.LabelHostname, + WhenUnsatisfiable: corev1.DoNotSchedule, LabelSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "app": "small-app", @@ -700,8 +700,8 @@ var _ = Describe("Consolidation", func() { }, }, }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: func() resource.Quantity { + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: func() resource.Quantity { dsOverhead := env.GetDaemonSetOverhead(nodePool) base := lo.ToPtr(resource.MustParse("1800m")) base.Sub(*dsOverhead.Cpu()) @@ -724,16 +724,16 @@ var _ = Describe("Consolidation", func() { // instance than on-demand nodePool.Spec.Disruption.ConsolidateAfter = nil test.ReplaceRequirements(nodePool, - corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpExists, + karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpExists, }, }, - corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceSize, - Operator: v1.NodeSelectorOpIn, + karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceSize, + Operator: corev1.NodeSelectorOpIn, Values: []string{"large"}, }, }, @@ -743,16 +743,16 @@ var _ = Describe("Consolidation", func() { // Eventually expect the on-demand nodes to be consolidated into // spot nodes after some time Eventually(func(g Gomega) { - var nodes v1.NodeList + var nodes corev1.NodeList Expect(env.Client.List(env.Context, &nodes)).To(Succeed()) - var spotNodes []*v1.Node - var otherNodes []*v1.Node + var spotNodes []*corev1.Node + var otherNodes []*corev1.Node for i, n := range nodes.Items { // only count the nodes created by the nodePool - if n.Labels[corev1beta1.NodePoolLabelKey] != nodePool.Name { + if n.Labels[karpv1.NodePoolLabelKey] != nodePool.Name { continue } - if n.Labels[corev1beta1.CapacityTypeLabelKey] == corev1beta1.CapacityTypeSpot { + if n.Labels[karpv1.CapacityTypeLabelKey] == karpv1.CapacityTypeSpot { spotNodes = append(spotNodes, &nodes.Items[i]) } else { otherNodes = append(otherNodes, &nodes.Items[i]) diff --git a/test/suites/drift/suite_test.go b/test/suites/drift/suite_test.go index 453f54021ffa..5590fd20b064 100644 --- a/test/suites/drift/suite_test.go +++ b/test/suites/drift/suite_test.go @@ -23,7 +23,7 @@ import ( "github.com/awslabs/operatorpkg/object" "github.com/samber/lo" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -36,10 +36,10 @@ import ( awssdk "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/eks" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" coretest "sigs.k8s.io/karpenter/pkg/test" - 
"github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/test" "github.com/aws/karpenter-provider-aws/test/pkg/environment/aws" "github.com/aws/karpenter-provider-aws/test/pkg/environment/common" @@ -50,8 +50,8 @@ import ( var env *aws.Environment var amdAMI string -var nodeClass *v1beta1.EC2NodeClass -var nodePool *corev1beta1.NodePool +var nodeClass *v1.EC2NodeClass +var nodePool *karpv1.NodePool func TestDrift(t *testing.T) { RegisterFailHandler(Fail) @@ -88,7 +88,7 @@ var _ = Describe("Drift", func() { "app": "my-app", }, Annotations: map[string]string{ - corev1beta1.DoNotDisruptAnnotationKey: "true", + karpv1.DoNotDisruptAnnotationKey: "true", }, }, TerminationGracePeriodSeconds: lo.ToPtr[int64](0), @@ -99,16 +99,16 @@ var _ = Describe("Drift", func() { Context("Budgets", func() { It("should respect budgets for empty drift", func() { nodePool = coretest.ReplaceRequirements(nodePool, - corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceSize, - Operator: v1.NodeSelectorOpIn, + karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceSize, + Operator: corev1.NodeSelectorOpIn, Values: []string{"2xlarge"}, }, }, ) // We're expecting to create 3 nodes, so we'll expect to see 2 nodes deleting at one time. - nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{{ + nodePool.Spec.Disruption.Budgets = []karpv1.Budget{{ Nodes: "50%", }} var numPods int32 = 6 @@ -117,14 +117,14 @@ var _ = Describe("Drift", func() { PodOptions: coretest.PodOptions{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - corev1beta1.DoNotDisruptAnnotationKey: "true", + karpv1.DoNotDisruptAnnotationKey: "true", }, Labels: map[string]string{"app": "large-app"}, }, // Each 2xlarge has 8 cpu, so each node should fit 2 pods. - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("3"), + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), }, }, }, @@ -176,16 +176,16 @@ var _ = Describe("Drift", func() { }) It("should respect budgets for non-empty delete drift", func() { nodePool = coretest.ReplaceRequirements(nodePool, - corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceSize, - Operator: v1.NodeSelectorOpIn, + karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceSize, + Operator: corev1.NodeSelectorOpIn, Values: []string{"2xlarge"}, }, }, ) // We're expecting to create 3 nodes, so we'll expect to see at most 2 nodes deleting at one time. - nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{{ + nodePool.Spec.Disruption.Budgets = []karpv1.Budget{{ Nodes: "50%", }} var numPods int32 = 9 @@ -194,14 +194,14 @@ var _ = Describe("Drift", func() { PodOptions: coretest.PodOptions{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - corev1beta1.DoNotDisruptAnnotationKey: "true", + karpv1.DoNotDisruptAnnotationKey: "true", }, Labels: map[string]string{"app": "large-app"}, }, // Each 2xlarge has 8 cpu, so each node should fit no more than 3 pods. 
- ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("2100m"), + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2100m"), }, }, }, @@ -242,7 +242,7 @@ var _ = Describe("Drift", func() { pods := env.EventuallyExpectHealthyPodCount(selector, 3) // Remove the do-not-disrupt annotation so that the nodes are now disruptable for _, pod := range pods { - delete(pod.Annotations, corev1beta1.DoNotDisruptAnnotationKey) + delete(pod.Annotations, karpv1.DoNotDisruptAnnotationKey) env.ExpectUpdated(pod) } @@ -262,7 +262,7 @@ var _ = Describe("Drift", func() { appLabels := map[string]string{"app": "large-app"} nodePool.Labels = appLabels // We're expecting to create 5 nodes, so we'll expect to see at most 3 nodes deleting at one time. - nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{{ + nodePool.Spec.Disruption.Budgets = []karpv1.Budget{{ Nodes: "3", }} @@ -275,8 +275,8 @@ var _ = Describe("Drift", func() { ObjectMeta: metav1.ObjectMeta{ Labels: appLabels, }, - PodAntiRequirements: []v1.PodAffinityTerm{{ - TopologyKey: v1.LabelHostname, + PodAntiRequirements: []corev1.PodAffinityTerm{{ + TopologyKey: corev1.LabelHostname, LabelSelector: &metav1.LabelSelector{ MatchLabels: appLabels, }, @@ -317,14 +317,14 @@ var _ = Describe("Drift", func() { // Eventually expect all the nodes to be rolled and completely removed // Since this completes the disruption operation, this also ensures that we aren't leaking nodes into subsequent // tests since nodeclaims that are actively replacing but haven't brought-up nodes yet can register nodes later - env.EventuallyExpectNotFound(lo.Map(originalNodes, func(n *v1.Node, _ int) client.Object { return n })...) - env.EventuallyExpectNotFound(lo.Map(originalNodeClaims, func(n *corev1beta1.NodeClaim, _ int) client.Object { return n })...) + env.EventuallyExpectNotFound(lo.Map(originalNodes, func(n *corev1.Node, _ int) client.Object { return n })...) + env.EventuallyExpectNotFound(lo.Map(originalNodeClaims, func(n *karpv1.NodeClaim, _ int) client.Object { return n })...) 
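// Note: the "3" budget above only bounds how many of the 5 nodes may be disrupted concurrently; it does not shrink the fleet, so once the roll completes the counts below should settle back at the original capacity.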
env.ExpectNodeClaimCount("==", 5) env.ExpectNodeCount("==", 5) }) It("should not allow drift if the budget is fully blocking", func() { // We're going to define a budget that doesn't allow any drift to happen - nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{{ + nodePool.Spec.Disruption.Budgets = []karpv1.Budget{{ Nodes: "0", }} @@ -349,7 +349,7 @@ var _ = Describe("Drift", func() { // the current time and extends 15 minutes past the current time // Times need to be in UTC since the karpenter containers were built in UTC time windowStart := time.Now().Add(-time.Minute * 15).UTC() - nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{{ + nodePool.Spec.Disruption.Budgets = []karpv1.Budget{{ Nodes: "0", Schedule: lo.ToPtr(fmt.Sprintf("%d %d * * *", windowStart.Minute(), windowStart.Hour())), Duration: &metav1.Duration{Duration: time.Minute * 30}, @@ -377,8 +377,8 @@ var _ = Describe("Drift", func() { "/aws/service/eks/optimized-ami/1.23/amazon-linux-2023/x86_64/standard/amazon-eks-node-al2023-x86_64-standard-1.23-v20240307/image_id", fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2023/x86_64/standard/recommended/image_id", env.K8sVersionWithOffset(1)), )) - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2023 - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{ID: oldCustomAMI}} + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2023 + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{ID: oldCustomAMI}} env.ExpectCreated(dep, nodeClass, nodePool) pod := env.EventuallyExpectHealthyPodCount(selector, numPods)[0] @@ -386,20 +386,20 @@ var _ = Describe("Drift", func() { nodeClaim := env.EventuallyExpectCreatedNodeClaimCount("==", 1)[0] node := env.EventuallyExpectNodeCount("==", 1)[0] - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{ID: amdAMI}} + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{ID: amdAMI}} env.ExpectCreatedOrUpdated(nodeClass) env.EventuallyExpectDrifted(nodeClaim) - delete(pod.Annotations, corev1beta1.DoNotDisruptAnnotationKey) + delete(pod.Annotations, karpv1.DoNotDisruptAnnotationKey) env.ExpectUpdated(pod) env.EventuallyExpectNotFound(pod, nodeClaim, node) env.EventuallyExpectHealthyPodCount(selector, numPods) }) It("should return drifted if the AMI no longer matches the existing NodeClaims instance type", func() { armAMI := env.GetAMIBySSMPath(fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2023/arm64/standard/recommended/image_id", env.K8sVersion())) - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2023 - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{ID: armAMI}} + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2023 + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{ID: armAMI}} env.ExpectCreated(dep, nodeClass, nodePool) pod := env.EventuallyExpectHealthyPodCount(selector, numPods)[0] @@ -407,38 +407,38 @@ var _ = Describe("Drift", func() { nodeClaim := env.EventuallyExpectCreatedNodeClaimCount("==", 1)[0] node := env.EventuallyExpectNodeCount("==", 1)[0] - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{ID: amdAMI}} + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{ID: amdAMI}} env.ExpectCreatedOrUpdated(nodeClass) env.EventuallyExpectDrifted(nodeClaim) - delete(pod.Annotations, corev1beta1.DoNotDisruptAnnotationKey) + delete(pod.Annotations, karpv1.DoNotDisruptAnnotationKey) env.ExpectUpdated(pod) env.EventuallyExpectNotFound(pod, nodeClaim, node) env.EventuallyExpectHealthyPodCount(selector, numPods) }) It("should not disrupt nodes that have drifted without the 
featureGate enabled", func() { - env.ExpectSettingsOverridden(v1.EnvVar{Name: "FEATURE_GATES", Value: "Drift=false"}) + env.ExpectSettingsOverridden(corev1.EnvVar{Name: "FEATURE_GATES", Value: "Drift=false"}) // Choose an old static image (AL2023 AMIs don't exist for 1.22) oldCustomAMI := env.GetAMIBySSMPath(lo.Ternary(env.K8sMinorVersion() == 23, "/aws/service/eks/optimized-ami/1.23/amazon-linux-2023/x86_64/standard/amazon-eks-node-al2023-x86_64-standard-1.23-v20240307/image_id", fmt.Sprintf("/aws/service/eks/optimized-ami/%s/amazon-linux-2023/x86_64/standard/recommended/image_id", env.K8sVersionWithOffset(1)), )) - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2023 - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{ID: oldCustomAMI}} + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2023 + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{ID: oldCustomAMI}} env.ExpectCreated(dep, nodeClass, nodePool) env.EventuallyExpectHealthyPodCount(selector, numPods) env.ExpectCreatedNodeCount("==", 1) node := env.Monitor.CreatedNodes()[0] - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{ID: amdAMI}} + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{ID: amdAMI}} env.ExpectUpdated(nodeClass) // We should consistently get the same node existing for a minute Consistently(func(g Gomega) { - g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(node), &v1.Node{})).To(Succeed()) + g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(node), &corev1.Node{})).To(Succeed()) }).WithTimeout(time.Minute).Should(Succeed()) }) It("should disrupt nodes that have drifted due to securitygroup", func() { @@ -491,9 +491,9 @@ var _ = Describe("Drift", func() { } return "", false }) - sgTerms := []v1beta1.SecurityGroupSelectorTerm{{ID: awssdk.StringValue(testSecurityGroup.GroupId)}} + sgTerms := []v1.SecurityGroupSelectorTerm{{ID: awssdk.StringValue(testSecurityGroup.GroupId)}} for _, id := range awsIDs { - sgTerms = append(sgTerms, v1beta1.SecurityGroupSelectorTerm{ID: id}) + sgTerms = append(sgTerms, v1.SecurityGroupSelectorTerm{ID: id}) } nodeClass.Spec.SecurityGroupSelectorTerms = sgTerms @@ -502,7 +502,7 @@ var _ = Describe("Drift", func() { nodeClaim := env.EventuallyExpectCreatedNodeClaimCount("==", 1)[0] node := env.ExpectCreatedNodeCount("==", 1)[0] - sgTerms = lo.Reject(sgTerms, func(t v1beta1.SecurityGroupSelectorTerm, _ int) bool { + sgTerms = lo.Reject(sgTerms, func(t v1.SecurityGroupSelectorTerm, _ int) bool { return t.ID == awssdk.StringValue(testSecurityGroup.GroupId) }) nodeClass.Spec.SecurityGroupSelectorTerms = sgTerms @@ -510,7 +510,7 @@ var _ = Describe("Drift", func() { env.EventuallyExpectDrifted(nodeClaim) - delete(pod.Annotations, corev1beta1.DoNotDisruptAnnotationKey) + delete(pod.Annotations, karpv1.DoNotDisruptAnnotationKey) env.ExpectUpdated(pod) env.EventuallyExpectNotFound(pod, nodeClaim, node) env.EventuallyExpectHealthyPodCount(selector, numPods) @@ -519,33 +519,33 @@ var _ = Describe("Drift", func() { subnets := env.GetSubnetInfo(map[string]string{"karpenter.sh/discovery": env.ClusterName}) Expect(len(subnets)).To(BeNumerically(">", 1)) - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{{ID: subnets[0].ID}} + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{{ID: subnets[0].ID}} env.ExpectCreated(dep, nodeClass, nodePool) pod := env.EventuallyExpectHealthyPodCount(selector, numPods)[0] nodeClaim := env.EventuallyExpectCreatedNodeClaimCount("==", 1)[0] node := env.ExpectCreatedNodeCount("==", 1)[0] - 
nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{{ID: subnets[1].ID}} + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{{ID: subnets[1].ID}} env.ExpectCreatedOrUpdated(nodeClass) env.EventuallyExpectDrifted(nodeClaim) - delete(pod.Annotations, corev1beta1.DoNotDisruptAnnotationKey) + delete(pod.Annotations, karpv1.DoNotDisruptAnnotationKey) env.ExpectUpdated(pod) env.EventuallyExpectNotFound(pod, node) env.EventuallyExpectHealthyPodCount(selector, numPods) }) - DescribeTable("NodePool Drift", func(nodeClaimTemplate corev1beta1.NodeClaimTemplate) { + DescribeTable("NodePool Drift", func(nodeClaimTemplate karpv1.NodeClaimTemplate) { updatedNodePool := coretest.NodePool( - corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, // keep the same instance type requirements to prevent considering instance types that require swap Requirements: nodePool.Spec.Template.Spec.Requirements, @@ -553,8 +553,8 @@ var _ = Describe("Drift", func() { }, }, }, - corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ + karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ Template: nodeClaimTemplate, }, }, @@ -570,7 +570,7 @@ var _ = Describe("Drift", func() { env.EventuallyExpectDrifted(nodeClaim) - delete(pod.Annotations, corev1beta1.DoNotDisruptAnnotationKey) + delete(pod.Annotations, karpv1.DoNotDisruptAnnotationKey) env.ExpectUpdated(pod) // Nodes will need to have the start-up taint removed before the node can be considered as initialized @@ -585,54 +585,46 @@ var _ = Describe("Drift", func() { Eventually(func(g Gomega) { g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(nodeTwo), nodeTwo)).To(Succeed()) stored := nodeTwo.DeepCopy() - nodeTwo.Spec.Taints = lo.Reject(nodeTwo.Spec.Taints, func(t v1.Taint, _ int) bool { return t.Key == "example.com/another-taint-2" }) + nodeTwo.Spec.Taints = lo.Reject(nodeTwo.Spec.Taints, func(t corev1.Taint, _ int) bool { return t.Key == "example.com/another-taint-2" }) g.Expect(env.Client.Patch(env.Context, nodeTwo, client.StrategicMergeFrom(stored))).To(Succeed()) }).Should(Succeed()) } env.EventuallyExpectNotFound(pod, node) env.EventuallyExpectHealthyPodCount(selector, numPods) }, - Entry("Annotations", corev1beta1.NodeClaimTemplate{ - ObjectMeta: corev1beta1.ObjectMeta{ + Entry("Annotations", karpv1.NodeClaimTemplate{ + ObjectMeta: karpv1.ObjectMeta{ Annotations: map[string]string{"keyAnnotationTest": "valueAnnotationTest"}, }, }), - Entry("Labels", corev1beta1.NodeClaimTemplate{ - ObjectMeta: corev1beta1.ObjectMeta{ + Entry("Labels", karpv1.NodeClaimTemplate{ + ObjectMeta: karpv1.ObjectMeta{ Labels: map[string]string{"keyLabelTest": "valueLabelTest"}, }, }), - Entry("Taints", corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - Taints: []v1.Taint{{Key: "example.com/another-taint-2", Effect: v1.TaintEffectPreferNoSchedule}}, + Entry("Taints", karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + Taints: []corev1.Taint{{Key: "example.com/another-taint-2", Effect: corev1.TaintEffectPreferNoSchedule}}, }, }), - 
Entry("KubeletConfiguration", corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - Kubelet: &corev1beta1.KubeletConfiguration{ - EvictionSoft: map[string]string{"memory.available": "5%"}, - EvictionSoftGracePeriod: map[string]metav1.Duration{"memory.available": {Duration: time.Minute}}, - }, - }, - }), - Entry("Start-up Taints", corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - StartupTaints: []v1.Taint{{Key: "example.com/another-taint-2", Effect: v1.TaintEffectPreferNoSchedule}}, + Entry("Start-up Taints", karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + StartupTaints: []corev1.Taint{{Key: "example.com/another-taint-2", Effect: corev1.TaintEffectPreferNoSchedule}}, }, }), - Entry("NodeRequirements", corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ + Entry("NodeRequirements", karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ // since this will overwrite the default requirements, add instance category and family selectors back into requirements - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ - {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: corev1beta1.CapacityTypeLabelKey, Operator: v1.NodeSelectorOpIn, Values: []string{corev1beta1.CapacityTypeSpot}}}, - {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1beta1.LabelInstanceCategory, Operator: v1.NodeSelectorOpIn, Values: []string{"c", "m", "r"}}}, - {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1beta1.LabelInstanceFamily, Operator: v1.NodeSelectorOpNotIn, Values: []string{"a1"}}}, + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ + {NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: karpv1.CapacityTypeLabelKey, Operator: corev1.NodeSelectorOpIn, Values: []string{karpv1.CapacityTypeSpot}}}, + {NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: v1.LabelInstanceCategory, Operator: corev1.NodeSelectorOpIn, Values: []string{"c", "m", "r"}}}, + {NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: v1.LabelInstanceFamily, Operator: corev1.NodeSelectorOpNotIn, Values: []string{"a1"}}}, }, }, }), ) - DescribeTable("EC2NodeClass", func(nodeClassSpec v1beta1.EC2NodeClassSpec) { - updatedNodeClass := test.EC2NodeClass(v1beta1.EC2NodeClass{Spec: *nodeClass.Spec.DeepCopy()}, v1beta1.EC2NodeClass{Spec: nodeClassSpec}) + DescribeTable("EC2NodeClass", func(nodeClassSpec v1.EC2NodeClassSpec) { + updatedNodeClass := test.EC2NodeClass(v1.EC2NodeClass{Spec: *nodeClass.Spec.DeepCopy()}, v1.EC2NodeClass{Spec: nodeClassSpec}) updatedNodeClass.ObjectMeta = nodeClass.ObjectMeta env.ExpectCreated(dep, nodeClass, nodePool) @@ -644,25 +636,31 @@ var _ = Describe("Drift", func() { env.EventuallyExpectDrifted(nodeClaim) - delete(pod.Annotations, corev1beta1.DoNotDisruptAnnotationKey) + delete(pod.Annotations, karpv1.DoNotDisruptAnnotationKey) env.ExpectUpdated(pod) env.EventuallyExpectNotFound(pod, node) env.EventuallyExpectHealthyPodCount(selector, numPods) }, - Entry("UserData", v1beta1.EC2NodeClassSpec{UserData: awssdk.String("#!/bin/bash\necho \"Hello, AL2023\"")}), - Entry("Tags", v1beta1.EC2NodeClassSpec{Tags: map[string]string{"keyTag-test-3": "valueTag-test-3"}}), - Entry("MetadataOptions", v1beta1.EC2NodeClassSpec{MetadataOptions: &v1beta1.MetadataOptions{HTTPTokens: awssdk.String("required"), HTTPPutResponseHopLimit: awssdk.Int64(10)}}), - Entry("BlockDeviceMappings", v1beta1.EC2NodeClassSpec{BlockDeviceMappings: []*v1beta1.BlockDeviceMapping{ + Entry("UserData", v1.EC2NodeClassSpec{UserData: 
awssdk.String("#!/bin/bash\necho \"Hello, AL2023\"")}), + Entry("Tags", v1.EC2NodeClassSpec{Tags: map[string]string{"keyTag-test-3": "valueTag-test-3"}}), + Entry("MetadataOptions", v1.EC2NodeClassSpec{MetadataOptions: &v1.MetadataOptions{HTTPTokens: awssdk.String("required"), HTTPPutResponseHopLimit: awssdk.Int64(10)}}), + Entry("BlockDeviceMappings", v1.EC2NodeClassSpec{BlockDeviceMappings: []*v1.BlockDeviceMapping{ { DeviceName: awssdk.String("/dev/xvda"), - EBS: &v1beta1.BlockDevice{ + EBS: &v1.BlockDevice{ VolumeSize: resources.Quantity("20Gi"), VolumeType: awssdk.String("gp3"), Encrypted: awssdk.Bool(true), }, }}}), - Entry("DetailedMonitoring", v1beta1.EC2NodeClassSpec{DetailedMonitoring: awssdk.Bool(true)}), - Entry("AMIFamily", v1beta1.EC2NodeClassSpec{AMIFamily: awssdk.String(v1beta1.AMIFamilyBottlerocket)}), + Entry("DetailedMonitoring", v1.EC2NodeClassSpec{DetailedMonitoring: awssdk.Bool(true)}), + Entry("AMIFamily", v1.EC2NodeClassSpec{AMIFamily: awssdk.String(v1.AMIFamilyBottlerocket)}), + Entry("KubeletConfiguration", v1.EC2NodeClassSpec{ + Kubelet: &v1.KubeletConfiguration{ + EvictionSoft: map[string]string{"memory.available": "5%"}, + EvictionSoftGracePeriod: map[string]metav1.Duration{"memory.available": {Duration: time.Minute}}, + }, + }), ) It("should drift the EC2NodeClass on InstanceProfile", func() { // Create a separate test case for this one since we can't use the default NodeClass that's created due to it having @@ -690,16 +688,16 @@ var _ = Describe("Drift", func() { env.EventuallyExpectDrifted(nodeClaim) - delete(pod.Annotations, corev1beta1.DoNotDisruptAnnotationKey) + delete(pod.Annotations, karpv1.DoNotDisruptAnnotationKey) env.ExpectUpdated(pod) env.EventuallyExpectNotFound(pod, node) env.EventuallyExpectHealthyPodCount(selector, numPods) }) It("should drift the EC2NodeClass on BlockDeviceMappings volume size update", func() { - nodeClass.Spec.BlockDeviceMappings = []*v1beta1.BlockDeviceMapping{ + nodeClass.Spec.BlockDeviceMappings = []*v1.BlockDeviceMapping{ { DeviceName: awssdk.String("/dev/xvda"), - EBS: &v1beta1.BlockDevice{ + EBS: &v1.BlockDevice{ VolumeSize: resources.Quantity("20Gi"), VolumeType: awssdk.String("gp3"), Encrypted: awssdk.Bool(true), @@ -717,11 +715,11 @@ var _ = Describe("Drift", func() { By("validating the drifted status condition has propagated") Eventually(func(g Gomega) { g.Expect(env.Client.Get(env, client.ObjectKeyFromObject(nodeClaim), nodeClaim)).To(Succeed()) - g.Expect(nodeClaim.StatusConditions().Get(corev1beta1.ConditionTypeDrifted)).ToNot(BeNil()) - g.Expect(nodeClaim.StatusConditions().Get(corev1beta1.ConditionTypeDrifted).IsTrue()).To(BeTrue()) + g.Expect(nodeClaim.StatusConditions().Get(karpv1.ConditionTypeDrifted)).ToNot(BeNil()) + g.Expect(nodeClaim.StatusConditions().Get(karpv1.ConditionTypeDrifted).IsTrue()).To(BeTrue()) }).Should(Succeed()) - delete(pod.Annotations, corev1beta1.DoNotDisruptAnnotationKey) + delete(pod.Annotations, karpv1.DoNotDisruptAnnotationKey) env.ExpectUpdated(pod) env.EventuallyExpectNotFound(pod, node) env.EventuallyExpectHealthyPodCount(selector, numPods) @@ -730,23 +728,23 @@ var _ = Describe("Drift", func() { env.ExpectCreated(dep, nodeClass, nodePool) env.EventuallyExpectHealthyPodCount(selector, numPods) nodeClaim := env.EventuallyExpectCreatedNodeClaimCount("==", 1)[0] - nodePool = env.ExpectExists(nodePool).(*corev1beta1.NodePool) + nodePool = env.ExpectExists(nodePool).(*karpv1.NodePool) expectedHash := nodePool.Hash() - By(fmt.Sprintf("expect nodepool %s and nodeclaim %s to contain %s 
and %s annotations", nodePool.Name, nodeClaim.Name, corev1beta1.NodePoolHashAnnotationKey, corev1beta1.NodePoolHashVersionAnnotationKey)) + By(fmt.Sprintf("expect nodepool %s and nodeclaim %s to contain %s and %s annotations", nodePool.Name, nodeClaim.Name, karpv1.NodePoolHashAnnotationKey, karpv1.NodePoolHashVersionAnnotationKey)) Eventually(func(g Gomega) { g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(nodePool), nodePool)).To(Succeed()) g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(nodeClaim), nodeClaim)).To(Succeed()) - g.Expect(nodePool.Annotations).To(HaveKeyWithValue(corev1beta1.NodePoolHashAnnotationKey, expectedHash)) - g.Expect(nodePool.Annotations).To(HaveKeyWithValue(corev1beta1.NodePoolHashVersionAnnotationKey, corev1beta1.NodePoolHashVersion)) - g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(corev1beta1.NodePoolHashAnnotationKey, expectedHash)) - g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(corev1beta1.NodePoolHashVersionAnnotationKey, corev1beta1.NodePoolHashVersion)) + g.Expect(nodePool.Annotations).To(HaveKeyWithValue(karpv1.NodePoolHashAnnotationKey, expectedHash)) + g.Expect(nodePool.Annotations).To(HaveKeyWithValue(karpv1.NodePoolHashVersionAnnotationKey, karpv1.NodePoolHashVersion)) + g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(karpv1.NodePoolHashAnnotationKey, expectedHash)) + g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(karpv1.NodePoolHashVersionAnnotationKey, karpv1.NodePoolHashVersion)) }).WithTimeout(30 * time.Second).Should(Succeed()) nodePool.Annotations = lo.Assign(nodePool.Annotations, map[string]string{ - corev1beta1.NodePoolHashAnnotationKey: "test-hash-1", - corev1beta1.NodePoolHashVersionAnnotationKey: "test-hash-version-1", + karpv1.NodePoolHashAnnotationKey: "test-hash-1", + karpv1.NodePoolHashVersionAnnotationKey: "test-hash-version-1", }) // Updating `nodePool.Spec.Template.Annotations` would normally trigger drift on all nodeclaims owned by the // nodepool. 
However, the nodepool-hash-version does not match the controller hash version, so we will see that @@ -755,8 +753,8 @@ var _ = Describe("Drift", func() { "test-key": "test-value", }) nodeClaim.Annotations = lo.Assign(nodePool.Annotations, map[string]string{ - corev1beta1.NodePoolHashAnnotationKey: "test-hash-2", - corev1beta1.NodePoolHashVersionAnnotationKey: "test-hash-version-2", + karpv1.NodePoolHashAnnotationKey: "test-hash-2", + karpv1.NodePoolHashVersionAnnotationKey: "test-hash-version-2", }) // The nodeclaim will need to be updated first, as the hash controller will only be triggered on changes to the nodepool @@ -768,33 +766,33 @@ var _ = Describe("Drift", func() { g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(nodePool), nodePool)).To(Succeed()) g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(nodeClaim), nodeClaim)).To(Succeed()) - g.Expect(nodePool.Annotations).To(HaveKeyWithValue(corev1beta1.NodePoolHashAnnotationKey, expectedHash)) - g.Expect(nodePool.Annotations).To(HaveKeyWithValue(corev1beta1.NodePoolHashVersionAnnotationKey, corev1beta1.NodePoolHashVersion)) - g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(corev1beta1.NodePoolHashAnnotationKey, expectedHash)) - g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(corev1beta1.NodePoolHashVersionAnnotationKey, corev1beta1.NodePoolHashVersion)) + g.Expect(nodePool.Annotations).To(HaveKeyWithValue(karpv1.NodePoolHashAnnotationKey, expectedHash)) + g.Expect(nodePool.Annotations).To(HaveKeyWithValue(karpv1.NodePoolHashVersionAnnotationKey, karpv1.NodePoolHashVersion)) + g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(karpv1.NodePoolHashAnnotationKey, expectedHash)) + g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(karpv1.NodePoolHashVersionAnnotationKey, karpv1.NodePoolHashVersion)) }) }) It("should update the ec2nodeclass-hash annotation on the ec2nodeclass and nodeclaim when the ec2nodeclass's ec2nodeclass-hash-version annotation does not match the controller hash version", func() { env.ExpectCreated(dep, nodeClass, nodePool) env.EventuallyExpectHealthyPodCount(selector, numPods) nodeClaim := env.EventuallyExpectCreatedNodeClaimCount("==", 1)[0] - nodeClass = env.ExpectExists(nodeClass).(*v1beta1.EC2NodeClass) + nodeClass = env.ExpectExists(nodeClass).(*v1.EC2NodeClass) expectedHash := nodeClass.Hash() - By(fmt.Sprintf("expect nodeclass %s and nodeclaim %s to contain %s and %s annotations", nodeClass.Name, nodeClaim.Name, v1beta1.AnnotationEC2NodeClassHash, v1beta1.AnnotationEC2NodeClassHashVersion)) + By(fmt.Sprintf("expect nodeclass %s and nodeclaim %s to contain %s and %s annotations", nodeClass.Name, nodeClaim.Name, v1.AnnotationEC2NodeClassHash, v1.AnnotationEC2NodeClassHashVersion)) Eventually(func(g Gomega) { g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(nodeClass), nodeClass)).To(Succeed()) g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(nodeClaim), nodeClaim)).To(Succeed()) - g.Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHash, expectedHash)) - g.Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHashVersion, v1beta1.EC2NodeClassHashVersion)) - g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHash, expectedHash)) - g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHashVersion, v1beta1.EC2NodeClassHashVersion)) + g.Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHash, expectedHash)) + 
g.Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHashVersion, v1.EC2NodeClassHashVersion)) + g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHash, expectedHash)) + g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHashVersion, v1.EC2NodeClassHashVersion)) }).WithTimeout(30 * time.Second).Should(Succeed()) nodeClass.Annotations = lo.Assign(nodeClass.Annotations, map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "test-hash-1", - v1beta1.AnnotationEC2NodeClassHashVersion: "test-hash-version-1", + v1.AnnotationEC2NodeClassHash: "test-hash-1", + v1.AnnotationEC2NodeClassHashVersion: "test-hash-version-1", }) // Updating `nodeClass.Spec.Tags` would normally trigger drift on all nodeclaims using the // nodeclass. However, the ec2nodeclass-hash-version does not match the controller hash version, so we will see that @@ -803,8 +801,8 @@ var _ = Describe("Drift", func() { "test-key": "test-value", }) nodeClaim.Annotations = lo.Assign(nodePool.Annotations, map[string]string{ - v1beta1.AnnotationEC2NodeClassHash: "test-hash-2", - v1beta1.AnnotationEC2NodeClassHashVersion: "test-hash-version-2", + v1.AnnotationEC2NodeClassHash: "test-hash-2", + v1.AnnotationEC2NodeClassHashVersion: "test-hash-version-2", }) // The nodeclaim will need to be updated first, as the hash controller will only be triggered on changes to the nodeclass @@ -816,10 +814,10 @@ var _ = Describe("Drift", func() { g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(nodeClass), nodeClass)).To(Succeed()) g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(nodeClaim), nodeClaim)).To(Succeed()) - g.Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHash, expectedHash)) - g.Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHashVersion, v1beta1.EC2NodeClassHashVersion)) - g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHash, expectedHash)) - g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationEC2NodeClassHashVersion, v1beta1.EC2NodeClassHashVersion)) + g.Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHash, expectedHash)) + g.Expect(nodeClass.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHashVersion, v1.EC2NodeClassHashVersion)) + g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHash, expectedHash)) + g.Expect(nodeClaim.Annotations).To(HaveKeyWithValue(v1.AnnotationEC2NodeClassHashVersion, v1.EC2NodeClassHashVersion)) }).WithTimeout(30 * time.Second).Should(Succeed()) env.ConsistentlyExpectNodeClaimsNotDrifted(time.Minute, nodeClaim) }) @@ -831,8 +829,8 @@ var _ = Describe("Drift", func() { Replicas: 2, PodOptions: coretest.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "inflate"}}, - PodAntiRequirements: []v1.PodAffinityTerm{{ - TopologyKey: v1.LabelHostname, + PodAntiRequirements: []corev1.PodAffinityTerm{{ + TopologyKey: corev1.LabelHostname, LabelSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": "inflate"}, }}, @@ -845,7 +843,7 @@ var _ = Describe("Drift", func() { env.EventuallyExpectCreatedNodeCount("==", int(numPods)) // Drift the nodeClaim with bad configuration that will not register a NodeClaim - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{ID: env.GetAMIBySSMPath("/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-ebs")}} + nodeClass.Spec.AMISelectorTerms = 
[]v1.AMISelectorTerm{{ID: env.GetAMIBySSMPath("/aws/service/ami-amazon-linux-latest/amzn2-ami-hvm-x86_64-ebs")}} env.ExpectCreatedOrUpdated(nodeClass) env.EventuallyExpectDrifted(startingNodeClaimState...) @@ -861,10 +859,10 @@ var _ = Describe("Drift", func() { // Assert this over several minutes to ensure a subsequent disruption controller pass doesn't // successfully schedule the evicted pods to the in-flight nodeclaim and disrupt the original node Consistently(func(g Gomega) { - nodeClaims := &corev1beta1.NodeClaimList{} + nodeClaims := &karpv1.NodeClaimList{} g.Expect(env.Client.List(env, nodeClaims, client.HasLabels{coretest.DiscoveryLabel})).To(Succeed()) - startingNodeClaimUIDs := sets.New(lo.Map(startingNodeClaimState, func(nc *corev1beta1.NodeClaim, _ int) types.UID { return nc.UID })...) - nodeClaimUIDs := sets.New(lo.Map(nodeClaims.Items, func(nc corev1beta1.NodeClaim, _ int) types.UID { return nc.UID })...) + startingNodeClaimUIDs := sets.New(lo.Map(startingNodeClaimState, func(nc *karpv1.NodeClaim, _ int) types.UID { return nc.UID })...) + nodeClaimUIDs := sets.New(lo.Map(nodeClaims.Items, func(nc karpv1.NodeClaim, _ int) types.UID { return nc.UID })...) g.Expect(nodeClaimUIDs.IsSuperset(startingNodeClaimUIDs)).To(BeTrue()) }, "2m").Should(Succeed()) }) @@ -875,8 +873,8 @@ var _ = Describe("Drift", func() { Replicas: 2, PodOptions: coretest.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "inflate"}}, - PodAntiRequirements: []v1.PodAffinityTerm{{ - TopologyKey: v1.LabelHostname, + PodAntiRequirements: []corev1.PodAffinityTerm{{ + TopologyKey: corev1.LabelHostname, LabelSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": "inflate"}, }}, @@ -889,7 +887,7 @@ var _ = Describe("Drift", func() { env.EventuallyExpectCreatedNodeCount("==", int(numPods)) // Drift the nodeClaim with bad configuration that never initializes - nodePool.Spec.Template.Spec.StartupTaints = []v1.Taint{{Key: "example.com/taint", Effect: v1.TaintEffectPreferNoSchedule}} + nodePool.Spec.Template.Spec.StartupTaints = []corev1.Taint{{Key: "example.com/taint", Effect: corev1.TaintEffectPreferNoSchedule}} env.ExpectCreatedOrUpdated(nodePool) env.EventuallyExpectDrifted(startingNodeClaimState...) @@ -902,11 +900,11 @@ var _ = Describe("Drift", func() { env.EventuallyExpectNodesUntaintedWithTimeout(11*time.Minute, taintedNodes...) // Expect that the new nodeClaim/node is kept around after the un-cordon - nodeList := &v1.NodeList{} + nodeList := &corev1.NodeList{} Expect(env.Client.List(env, nodeList, client.HasLabels{coretest.DiscoveryLabel})).To(Succeed()) Expect(nodeList.Items).To(HaveLen(int(numPods) + 1)) - nodeClaimList := &corev1beta1.NodeClaimList{} + nodeClaimList := &karpv1.NodeClaimList{} Expect(env.Client.List(env, nodeClaimList, client.HasLabels{coretest.DiscoveryLabel})).To(Succeed()) Expect(nodeClaimList.Items).To(HaveLen(int(numPods) + 1)) @@ -914,10 +912,10 @@ var _ = Describe("Drift", func() { // Assert this over several minutes to ensure a subsequent disruption controller pass doesn't // successfully schedule the evicted pods to the in-flight nodeclaim and disrupt the original node Consistently(func(g Gomega) { - nodeClaims := &corev1beta1.NodeClaimList{} + nodeClaims := &karpv1.NodeClaimList{} g.Expect(env.Client.List(env, nodeClaims, client.HasLabels{coretest.DiscoveryLabel})).To(Succeed()) - startingNodeClaimUIDs := sets.New(lo.Map(startingNodeClaimState, func(m *corev1beta1.NodeClaim, _ int) types.UID { return m.UID })...) 
- nodeClaimUIDs := sets.New(lo.Map(nodeClaims.Items, func(m corev1beta1.NodeClaim, _ int) types.UID { return m.UID })...) + startingNodeClaimUIDs := sets.New(lo.Map(startingNodeClaimState, func(m *karpv1.NodeClaim, _ int) types.UID { return m.UID })...) + nodeClaimUIDs := sets.New(lo.Map(nodeClaims.Items, func(m karpv1.NodeClaim, _ int) types.UID { return m.UID })...) g.Expect(nodeClaimUIDs.IsSuperset(startingNodeClaimUIDs)).To(BeTrue()) }, "2m").Should(Succeed()) }) @@ -929,15 +927,15 @@ var _ = Describe("Drift", func() { Replicas: 2, PodOptions: coretest.PodOptions{ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "inflate"}}, - PodAntiRequirements: []v1.PodAffinityTerm{{ - TopologyKey: v1.LabelHostname, + PodAntiRequirements: []corev1.PodAffinityTerm{{ + TopologyKey: corev1.LabelHostname, LabelSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": "inflate"}, }}, }, - ReadinessProbe: &v1.Probe{ - ProbeHandler: v1.ProbeHandler{ - HTTPGet: &v1.HTTPGetAction{ + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ Port: intstr.FromInt32(80), }, }, diff --git a/test/suites/expiration/suite_test.go b/test/suites/expiration/suite_test.go index 13095367a30a..9035a275767f 100644 --- a/test/suites/expiration/suite_test.go +++ b/test/suites/expiration/suite_test.go @@ -20,15 +20,15 @@ import ( "github.com/samber/lo" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/test/pkg/environment/aws" coretest "sigs.k8s.io/karpenter/pkg/test" @@ -38,8 +38,8 @@ import ( ) var env *aws.Environment -var nodeClass *v1beta1.EC2NodeClass -var nodePool *corev1beta1.NodePool +var nodeClass *v1.EC2NodeClass +var nodePool *karpv1.NodePool func TestExpiration(t *testing.T) { RegisterFailHandler(Fail) @@ -76,7 +76,7 @@ var _ = Describe("Expiration", func() { "app": "my-app", }, Annotations: map[string]string{ - corev1beta1.DoNotDisruptAnnotationKey: "true", + karpv1.DoNotDisruptAnnotationKey: "true", }, }, TerminationGracePeriodSeconds: lo.ToPtr[int64](0), @@ -93,14 +93,14 @@ var _ = Describe("Expiration", func() { env.Monitor.Reset() // Reset the monitor so that we can expect a single node to be spun up after expiration // Set the expireAfter value to get the node deleted - nodePool.Spec.Disruption.ExpireAfter = corev1beta1.NillableDuration{Duration: lo.ToPtr(time.Second * 15)} + nodePool.Spec.Disruption.ExpireAfter = karpv1.NillableDuration{Duration: lo.ToPtr(time.Second * 15)} env.ExpectUpdated(nodePool) // Eventually the node will be tainted, which means it's actively being disrupted Eventually(func(g Gomega) { g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(node), node)).Should(Succeed()) - _, ok := lo.Find(node.Spec.Taints, func(t v1.Taint) bool { - return corev1beta1.IsDisruptingTaint(t) + _, ok := lo.Find(node.Spec.Taints, func(t corev1.Taint) bool { + return karpv1.IsDisruptingTaint(t) }) g.Expect(ok).To(BeTrue()) }).Should(Succeed()) @@ -134,7 +134,7 @@ var _ = Describe("Expiration", func() { PodOptions: coretest.PodOptions{ ObjectMeta: metav1.ObjectMeta{ Annotations: 
map[string]string{ - corev1beta1.DoNotDisruptAnnotationKey: "true", + karpv1.DoNotDisruptAnnotationKey: "true", }, Labels: map[string]string{"app": "large-app"}, }, @@ -155,8 +155,8 @@ var _ = Describe("Expiration", func() { // Eventually the node will be tainted, which means it's actively being disrupted Eventually(func(g Gomega) { g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(node), node)).Should(Succeed()) - _, ok := lo.Find(node.Spec.Taints, func(t v1.Taint) bool { - return corev1beta1.IsDisruptingTaint(t) + _, ok := lo.Find(node.Spec.Taints, func(t corev1.Taint) bool { + return karpv1.IsDisruptingTaint(t) }) g.Expect(ok).To(BeTrue()) }).Should(Succeed()) diff --git a/test/suites/integration/aws_metadata_test.go b/test/suites/integration/aws_metadata_test.go index a13d5ad30261..d48600206bc7 100644 --- a/test/suites/integration/aws_metadata_test.go +++ b/test/suites/integration/aws_metadata_test.go @@ -19,7 +19,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" coretest "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -27,7 +27,7 @@ import ( var _ = Describe("MetadataOptions", func() { It("should use specified metadata options", func() { - nodeClass.Spec.MetadataOptions = &v1beta1.MetadataOptions{ + nodeClass.Spec.MetadataOptions = &v1.MetadataOptions{ HTTPEndpoint: aws.String("enabled"), HTTPProtocolIPv6: aws.String("enabled"), HTTPPutResponseHopLimit: aws.Int64(1), diff --git a/test/suites/integration/block_device_mappings_test.go b/test/suites/integration/block_device_mappings_test.go index 56e0643eeaa0..0006b1fa9431 100644 --- a/test/suites/integration/block_device_mappings_test.go +++ b/test/suites/integration/block_device_mappings_test.go @@ -19,7 +19,7 @@ import ( "sigs.k8s.io/karpenter/pkg/test" "sigs.k8s.io/karpenter/pkg/utils/resources" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -27,10 +27,10 @@ import ( var _ = Describe("BlockDeviceMappings", func() { It("should use specified block device mappings", func() { - nodeClass.Spec.BlockDeviceMappings = []*v1beta1.BlockDeviceMapping{ + nodeClass.Spec.BlockDeviceMappings = []*v1.BlockDeviceMapping{ { DeviceName: aws.String("/dev/xvda"), - EBS: &v1beta1.BlockDevice{ + EBS: &v1.BlockDevice{ VolumeSize: resources.Quantity("20Gi"), VolumeType: aws.String("io2"), IOPS: aws.Int64(1000), diff --git a/test/suites/integration/cni_test.go b/test/suites/integration/cni_test.go index 97ce0999001a..41d65f04dae3 100644 --- a/test/suites/integration/cni_test.go +++ b/test/suites/integration/cni_test.go @@ -25,7 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/test" ) @@ -42,7 +42,7 @@ var _ = Describe("CNITests", func() { Expect(allocatablePods).To(Equal(eniLimitedPodsFor(node.Labels["node.kubernetes.io/instance-type"]))) }) It("should set max pods to 110 if maxPods is set in kubelet", func() { - nodePool.Spec.Template.Spec.Kubelet = &v1beta1.KubeletConfiguration{MaxPods: lo.ToPtr[int32](110)} + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{MaxPods: lo.ToPtr[int32](110)} pod := test.Pod() env.ExpectCreated(pod, nodeClass, nodePool) env.EventuallyExpectHealthy(pod) diff --git a/test/suites/integration/daemonset_test.go b/test/suites/integration/daemonset_test.go index 98c7a0013d06..afa9f27a6c19 100644 --- a/test/suites/integration/daemonset_test.go +++ b/test/suites/integration/daemonset_test.go @@ -16,7 +16,7 @@ package integration_test import ( appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" schedulingv1 "k8s.io/api/scheduling/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,18 +26,18 @@ import ( . 
"github.com/onsi/gomega" "sigs.k8s.io/controller-runtime/pkg/client" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/test" ) var _ = Describe("DaemonSet", func() { - var limitrange *v1.LimitRange + var limitrange *corev1.LimitRange var priorityclass *schedulingv1.PriorityClass var daemonset *appsv1.DaemonSet var dep *appsv1.Deployment BeforeEach(func() { - nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenUnderutilized + nodePool.Spec.Disruption.ConsolidationPolicy = karpv1.ConsolidationPolicyWhenUnderutilized nodePool.Spec.Disruption.ConsolidateAfter = nil priorityclass = &schedulingv1.PriorityClass{ ObjectMeta: metav1.ObjectMeta{ @@ -47,7 +47,7 @@ var _ = Describe("DaemonSet", func() { GlobalDefault: false, Description: "This priority class should be used for daemonsets.", } - limitrange = &v1.LimitRange{ + limitrange = &corev1.LimitRange{ ObjectMeta: metav1.ObjectMeta{ Name: "limitrange", Namespace: "default", @@ -55,7 +55,7 @@ var _ = Describe("DaemonSet", func() { } daemonset = test.DaemonSet(test.DaemonSetOptions{ PodOptions: test.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{Limits: v1.ResourceList{v1.ResourceMemory: resource.MustParse("1Gi")}}, + ResourceRequirements: corev1.ResourceRequirements{Limits: corev1.ResourceList{corev1.ResourceMemory: resource.MustParse("1Gi")}}, PriorityClassName: "high-priority-daemonsets", }, }) @@ -66,19 +66,19 @@ var _ = Describe("DaemonSet", func() { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "large-app"}, }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("4")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceMemory: resource.MustParse("4")}, }, }, }) }) It("should account for LimitRange Default on daemonSet pods for resources", func() { - limitrange.Spec.Limits = []v1.LimitRangeItem{ + limitrange.Spec.Limits = []corev1.LimitRangeItem{ { - Type: v1.LimitTypeContainer, - Default: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("2"), - v1.ResourceMemory: resource.MustParse("1Gi"), + Type: corev1.LimitTypeContainer, + Default: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("1Gi"), }, }, } @@ -89,7 +89,7 @@ var _ = Describe("DaemonSet", func() { // Eventually expect a single node to exist and both the deployment pod and the daemonset pod to schedule to it Eventually(func(g Gomega) { - nodeList := &v1.NodeList{} + nodeList := &corev1.NodeList{} g.Expect(env.Client.List(env, nodeList, client.HasLabels{"testing/cluster"})).To(Succeed()) g.Expect(nodeList.Items).To(HaveLen(1)) @@ -104,12 +104,12 @@ var _ = Describe("DaemonSet", func() { }).Should(Succeed()) }) It("should account for LimitRange DefaultRequest on daemonSet pods for resources", func() { - limitrange.Spec.Limits = []v1.LimitRangeItem{ + limitrange.Spec.Limits = []corev1.LimitRangeItem{ { - Type: v1.LimitTypeContainer, - DefaultRequest: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("2"), - v1.ResourceMemory: resource.MustParse("1Gi"), + Type: corev1.LimitTypeContainer, + DefaultRequest: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("1Gi"), }, }, } @@ -120,7 +120,7 @@ var _ = Describe("DaemonSet", func() { // Eventually expect a single node to exist and both the deployment pod and the 
daemonset pod to schedule to it Eventually(func(g Gomega) { - nodeList := &v1.NodeList{} + nodeList := &corev1.NodeList{} g.Expect(env.Client.List(env, nodeList, client.HasLabels{"testing/cluster"})).To(Succeed()) g.Expect(nodeList.Items).To(HaveLen(1)) diff --git a/test/suites/integration/emptiness_test.go b/test/suites/integration/emptiness_test.go index fc63b3b401b1..c480e82d1265 100644 --- a/test/suites/integration/emptiness_test.go +++ b/test/suites/integration/emptiness_test.go @@ -28,7 +28,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/test" ) @@ -37,8 +37,8 @@ var _ = Describe("Emptiness", func() { var selector labels.Selector var numPods int BeforeEach(func() { - nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenEmpty - nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{Duration: lo.ToPtr(time.Duration(0))} + nodePool.Spec.Disruption.ConsolidationPolicy = karpv1.ConsolidationPolicyWhenEmpty + nodePool.Spec.Disruption.ConsolidateAfter = &karpv1.NillableDuration{Duration: lo.ToPtr(time.Duration(0))} numPods = 1 dep = test.Deployment(test.DeploymentOptions{ @@ -54,7 +54,7 @@ var _ = Describe("Emptiness", func() { Context("Budgets", func() { It("should not allow emptiness if the budget is fully blocking", func() { // We're going to define a budget that doesn't allow any emptiness disruption to happen - nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{{ + nodePool.Spec.Disruption.Budgets = []karpv1.Budget{{ Nodes: "0", }} @@ -76,7 +76,7 @@ var _ = Describe("Emptiness", func() { // the current time and extends 15 minutes past the current time // Times need to be in UTC since the karpenter containers were built in UTC time windowStart := time.Now().Add(-time.Minute * 15).UTC() - nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{{ + nodePool.Spec.Disruption.Budgets = []karpv1.Budget{{ Nodes: "0", Schedule: lo.ToPtr(fmt.Sprintf("%d %d * * *", windowStart.Minute(), windowStart.Hour())), Duration: &metav1.Duration{Duration: time.Minute * 30}, @@ -96,7 +96,7 @@ var _ = Describe("Emptiness", func() { }) }) It("should terminate an empty node", func() { - nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{Duration: lo.ToPtr(time.Hour * 300)} + nodePool.Spec.Disruption.ConsolidateAfter = &karpv1.NillableDuration{Duration: lo.ToPtr(time.Hour * 300)} const numPods = 1 deployment := test.Deployment(test.DeploymentOptions{Replicas: numPods}) @@ -115,7 +115,7 @@ var _ = Describe("Emptiness", func() { env.EventuallyExpectEmpty(nodeClaim) By("waiting for the nodeclaim to deprovision when past its ConsolidateAfter timeout of 0") - nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{Duration: lo.ToPtr(time.Duration(0))} + nodePool.Spec.Disruption.ConsolidateAfter = &karpv1.NillableDuration{Duration: lo.ToPtr(time.Duration(0))} env.ExpectUpdated(nodePool) env.EventuallyExpectNotFound(nodeClaim, node) diff --git a/test/suites/integration/extended_resources_test.go b/test/suites/integration/extended_resources_test.go index 6fa4e52a46c5..fcc36a081a79 100644 --- a/test/suites/integration/extended_resources_test.go +++ b/test/suites/integration/extended_resources_test.go @@ -21,19 +21,19 @@ import ( "github.com/samber/lo" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "sigs.k8s.io/karpenter/pkg/test" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" ) var _ = Describe("Extended Resources", func() { @@ -45,7 +45,7 @@ var _ = Describe("Extended Resources", func() { It("should provision nodes for a deployment that requests nvidia.com/gpu", func() { ExpectNvidiaDevicePluginCreated() // TODO: jmdeal@ remove AL2 pin once AL2023 accelerated AMIs are available - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2 numPods := 1 dep := test.Deployment(test.DeploymentOptions{ Replicas: int32(numPods), @@ -53,21 +53,21 @@ var _ = Describe("Extended Resources", func() { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "large-app"}, }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{ + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ "nvidia.com/gpu": resource.MustParse("1"), }, - Limits: v1.ResourceList{ + Limits: corev1.ResourceList{ "nvidia.com/gpu": resource.MustParse("1"), }, }, }, }) selector := labels.SelectorFromSet(dep.Spec.Selector.MatchLabels) - test.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceCategory, - Operator: v1.NodeSelectorOpExists, + test.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceCategory, + Operator: corev1.NodeSelectorOpExists, }, }) env.ExpectCreated(nodeClass, nodePool, dep) @@ -77,7 +77,7 @@ var _ = Describe("Extended Resources", func() { }) It("should provision nodes for a deployment that requests nvidia.com/gpu (Bottlerocket)", func() { // For Bottlerocket, we are testing that resources are initialized without needing a device plugin - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket + nodeClass.Spec.AMIFamily = &v1.AMIFamilyBottlerocket numPods := 1 dep := test.Deployment(test.DeploymentOptions{ Replicas: int32(numPods), @@ -85,21 +85,21 @@ var _ = Describe("Extended Resources", func() { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "large-app"}, }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{ + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ "nvidia.com/gpu": resource.MustParse("1"), }, - Limits: v1.ResourceList{ + Limits: corev1.ResourceList{ "nvidia.com/gpu": resource.MustParse("1"), }, }, }, }) selector := labels.SelectorFromSet(dep.Spec.Selector.MatchLabels) - test.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceCategory, - Operator: v1.NodeSelectorOpExists, + test.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceCategory, + Operator: corev1.NodeSelectorOpExists, }}) env.ExpectCreated(nodeClass, nodePool, dep) env.EventuallyExpectHealthyPodCount(selector, numPods) @@ -118,11 +118,11 @@ var _ = Describe("Extended Resources", func() { ObjectMeta: metav1.ObjectMeta{ Labels: 
map[string]string{"app": "large-app"}, }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{ + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ "vpc.amazonaws.com/pod-eni": resource.MustParse("1"), }, - Limits: v1.ResourceList{ + Limits: corev1.ResourceList{ "vpc.amazonaws.com/pod-eni": resource.MustParse("1"), }, }, @@ -144,8 +144,8 @@ var _ = Describe("Extended Resources", func() { // We use a Custom AMI so that we can reboot after we start the kubelet service rawContent, err := os.ReadFile("testdata/amd_driver_input.sh") Expect(err).ToNot(HaveOccurred()) - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyCustom - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMIFamily = &v1.AMIFamilyCustom + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { ID: customAMI, }, @@ -160,11 +160,11 @@ var _ = Describe("Extended Resources", func() { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "large-app"}, }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{ + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ "amd.com/gpu": resource.MustParse("1"), }, - Limits: v1.ResourceList{ + Limits: corev1.ResourceList{ "amd.com/gpu": resource.MustParse("1"), }, }, @@ -184,7 +184,7 @@ var _ = Describe("Extended Resources", func() { Skip("skipping test on an exotic instance type") ExpectHabanaDevicePluginCreated() - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { ID: "ami-0fae925f94979981f", }, @@ -196,11 +196,11 @@ var _ = Describe("Extended Resources", func() { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "large-app"}, }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{ + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ "habana.ai/gaudi": resource.MustParse("1"), }, - Limits: v1.ResourceList{ + Limits: corev1.ResourceList{ "habana.ai/gaudi": resource.MustParse("1"), }, }, @@ -219,16 +219,16 @@ var _ = Describe("Extended Resources", func() { nodePool.Spec.Template.Labels = map[string]string{ "aws.amazon.com/efa": "true", } - nodePool.Spec.Template.Spec.Taints = []v1.Taint{ + nodePool.Spec.Template.Spec.Taints = []corev1.Taint{ { Key: "aws.amazon.com/efa", - Effect: v1.TaintEffectNoSchedule, + Effect: corev1.TaintEffectNoSchedule, }, } // Only select private subnets since instances with multiple network instances at launch won't get a public IP. 
nodeClass.Spec.SubnetSelectorTerms[0].Tags["Name"] = "*Private*" // TODO: jmdeal@ remove AL2 pin once AL2023 accelerated AMIs are available - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyAL2 + nodeClass.Spec.AMIFamily = &v1.AMIFamilyAL2 numPods := 1 dep := test.Deployment(test.DeploymentOptions{ @@ -237,17 +237,17 @@ var _ = Describe("Extended Resources", func() { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "efa-app"}, }, - Tolerations: []v1.Toleration{ + Tolerations: []corev1.Toleration{ { Key: "aws.amazon.com/efa", - Operator: v1.TolerationOpExists, + Operator: corev1.TolerationOpExists, }, }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{ + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ "vpc.amazonaws.com/efa": resource.MustParse("2"), }, - Limits: v1.ResourceList{ + Limits: corev1.ResourceList{ "vpc.amazonaws.com/efa": resource.MustParse("2"), }, }, @@ -277,38 +277,38 @@ func ExpectNvidiaDevicePluginCreated() { UpdateStrategy: appsv1.DaemonSetUpdateStrategy{ Type: appsv1.RollingUpdateDaemonSetStrategyType, }, - Template: v1.PodTemplateSpec{ + Template: corev1.PodTemplateSpec{ ObjectMeta: test.ObjectMeta(metav1.ObjectMeta{ Labels: map[string]string{ "name": "nvidia-device-plugin-ds", }, }), - Spec: v1.PodSpec{ - Tolerations: []v1.Toleration{ + Spec: corev1.PodSpec{ + Tolerations: []corev1.Toleration{ { Key: "nvidia.com/gpu", - Operator: v1.TolerationOpExists, - Effect: v1.TaintEffectNoSchedule, + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, }, }, PriorityClassName: "system-node-critical", - Containers: []v1.Container{ + Containers: []corev1.Container{ { Name: "nvidia-device-plugin-ctr", Image: "nvcr.io/nvidia/k8s-device-plugin:v0.12.3", - Env: []v1.EnvVar{ + Env: []corev1.EnvVar{ { Name: "FAIL_ON_INIT_ERROR", Value: "false", }, }, - SecurityContext: &v1.SecurityContext{ + SecurityContext: &corev1.SecurityContext{ AllowPrivilegeEscalation: lo.ToPtr(false), - Capabilities: &v1.Capabilities{ - Drop: []v1.Capability{"ALL"}, + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, }, }, - VolumeMounts: []v1.VolumeMount{ + VolumeMounts: []corev1.VolumeMount{ { Name: "device-plugin", MountPath: "/var/lib/kubelet/device-plugins", @@ -316,11 +316,11 @@ func ExpectNvidiaDevicePluginCreated() { }, }, }, - Volumes: []v1.Volume{ + Volumes: []corev1.Volume{ { Name: "device-plugin", - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ Path: "/var/lib/kubelet/device-plugins", }, }, @@ -345,32 +345,32 @@ func ExpectAMDDevicePluginCreated() { "name": "amdgpu-dp-ds", }, }, - Template: v1.PodTemplateSpec{ + Template: corev1.PodTemplateSpec{ ObjectMeta: test.ObjectMeta(metav1.ObjectMeta{ Labels: map[string]string{ "name": "amdgpu-dp-ds", }, }), - Spec: v1.PodSpec{ + Spec: corev1.PodSpec{ PriorityClassName: "system-node-critical", - Tolerations: []v1.Toleration{ + Tolerations: []corev1.Toleration{ { Key: "amd.com/gpu", - Operator: v1.TolerationOpExists, - Effect: v1.TaintEffectNoSchedule, + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, }, }, - Containers: []v1.Container{ + Containers: []corev1.Container{ { Name: "amdgpu-dp-cntr", Image: "rocm/k8s-device-plugin", - SecurityContext: &v1.SecurityContext{ + SecurityContext: &corev1.SecurityContext{ AllowPrivilegeEscalation: lo.ToPtr(false), - Capabilities: &v1.Capabilities{ - Drop: 
[]v1.Capability{"ALL"}, + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, }, }, - VolumeMounts: []v1.VolumeMount{ + VolumeMounts: []corev1.VolumeMount{ { Name: "dp", MountPath: "/var/lib/kubelet/device-plugins", @@ -382,19 +382,19 @@ func ExpectAMDDevicePluginCreated() { }, }, }, - Volumes: []v1.Volume{ + Volumes: []corev1.Volume{ { Name: "dp", - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ Path: "/var/lib/kubelet/device-plugins", }, }, }, { Name: "sys", - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ Path: "/sys", }, }, @@ -408,7 +408,7 @@ func ExpectAMDDevicePluginCreated() { func ExpectHabanaDevicePluginCreated() { GinkgoHelper() - env.ExpectCreated(&v1.Namespace{ + env.ExpectCreated(&corev1.Namespace{ ObjectMeta: test.ObjectMeta(metav1.ObjectMeta{ Name: "habana-system", }), @@ -427,7 +427,7 @@ func ExpectHabanaDevicePluginCreated() { UpdateStrategy: appsv1.DaemonSetUpdateStrategy{ Type: appsv1.RollingUpdateDaemonSetStrategyType, }, - Template: v1.PodTemplateSpec{ + Template: corev1.PodTemplateSpec{ ObjectMeta: test.ObjectMeta(metav1.ObjectMeta{ Annotations: map[string]string{ "scheduler.alpha.kubernetes.io/critical-pod": "", @@ -436,23 +436,23 @@ func ExpectHabanaDevicePluginCreated() { "name": "habanalabs-device-plugin-ds", }, }), - Spec: v1.PodSpec{ - Tolerations: []v1.Toleration{ + Spec: corev1.PodSpec{ + Tolerations: []corev1.Toleration{ { Key: "habana.ai/gaudi", - Operator: v1.TolerationOpExists, - Effect: v1.TaintEffectNoSchedule, + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, }, }, PriorityClassName: "system-node-critical", - Containers: []v1.Container{ + Containers: []corev1.Container{ { Name: "habanalabs-device-plugin-ctr", Image: "vault.habana.ai/docker-k8s-device-plugin/docker-k8s-device-plugin:latest", - SecurityContext: &v1.SecurityContext{ + SecurityContext: &corev1.SecurityContext{ Privileged: lo.ToPtr(true), }, - VolumeMounts: []v1.VolumeMount{ + VolumeMounts: []corev1.VolumeMount{ { Name: "device-plugin", MountPath: "/var/lib/kubelet/device-plugins", @@ -460,11 +460,11 @@ func ExpectHabanaDevicePluginCreated() { }, }, }, - Volumes: []v1.Volume{ + Volumes: []corev1.Volume{ { Name: "device-plugin", - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ Path: "/var/lib/kubelet/device-plugins", }, }, @@ -492,7 +492,7 @@ func ExpectEFADevicePluginCreated() { UpdateStrategy: appsv1.DaemonSetUpdateStrategy{ Type: appsv1.RollingUpdateDaemonSetStrategyType, }, - Template: v1.PodTemplateSpec{ + Template: corev1.PodTemplateSpec{ ObjectMeta: test.ObjectMeta(metav1.ObjectMeta{ Annotations: map[string]string{ "scheduler.alpha.kubernetes.io/critical-pod": "", @@ -501,35 +501,35 @@ func ExpectEFADevicePluginCreated() { "name": "aws-efa-k8s-device-plugin", }, }), - Spec: v1.PodSpec{ + Spec: corev1.PodSpec{ NodeSelector: map[string]string{ "aws.amazon.com/efa": "true", }, - Tolerations: []v1.Toleration{ + Tolerations: []corev1.Toleration{ { Key: "CriticalAddonsOnly", - Operator: v1.TolerationOpExists, + Operator: corev1.TolerationOpExists, }, { Key: "aws.amazon.com/efa", - Operator: v1.TolerationOpExists, - Effect: v1.TaintEffectNoSchedule, + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, }, }, 
PriorityClassName: "system-node-critical", HostNetwork: true, - Containers: []v1.Container{ + Containers: []corev1.Container{ { Name: "aws-efea-k8s-device-plugin", Image: "602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/aws-efa-k8s-device-plugin:v0.3.3", - SecurityContext: &v1.SecurityContext{ + SecurityContext: &corev1.SecurityContext{ AllowPrivilegeEscalation: lo.ToPtr(false), - Capabilities: &v1.Capabilities{ - Drop: []v1.Capability{"ALL"}, + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, }, RunAsNonRoot: lo.ToPtr(false), }, - VolumeMounts: []v1.VolumeMount{ + VolumeMounts: []corev1.VolumeMount{ { Name: "device-plugin", MountPath: "/var/lib/kubelet/device-plugins", @@ -537,11 +537,11 @@ func ExpectEFADevicePluginCreated() { }, }, }, - Volumes: []v1.Volume{ + Volumes: []corev1.Volume{ { Name: "device-plugin", - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ Path: "/var/lib/kubelet/device-plugins", }, }, diff --git a/test/suites/integration/hash_test.go b/test/suites/integration/hash_test.go index fc12376986be..7583546d1756 100644 --- a/test/suites/integration/hash_test.go +++ b/test/suites/integration/hash_test.go @@ -17,9 +17,9 @@ package integration_test import ( "sigs.k8s.io/controller-runtime/pkg/client" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -30,11 +30,11 @@ var _ = Describe("CRD Hash", func() { env.ExpectCreated(nodeClass, nodePool) Eventually(func(g Gomega) { - np := &corev1beta1.NodePool{} + np := &karpv1.NodePool{} err := env.Client.Get(env, client.ObjectKeyFromObject(nodePool), np) g.Expect(err).ToNot(HaveOccurred()) - hash, found := np.Annotations[corev1beta1.NodePoolHashAnnotationKey] + hash, found := np.Annotations[karpv1.NodePoolHashAnnotationKey] g.Expect(found).To(BeTrue()) g.Expect(hash).To(Equal(np.Hash())) }) @@ -43,11 +43,11 @@ var _ = Describe("CRD Hash", func() { env.ExpectCreated(nodeClass) Eventually(func(g Gomega) { - nc := &v1beta1.EC2NodeClass{} + nc := &v1.EC2NodeClass{} err := env.Client.Get(env, client.ObjectKeyFromObject(nodeClass), nc) g.Expect(err).ToNot(HaveOccurred()) - hash, found := nc.Annotations[v1beta1.AnnotationEC2NodeClassHash] + hash, found := nc.Annotations[v1.AnnotationEC2NodeClassHash] g.Expect(found).To(BeTrue()) g.Expect(hash).To(Equal(nc.Hash())) }) diff --git a/test/suites/integration/instance_profile_test.go b/test/suites/integration/instance_profile_test.go index e395a3dfb54d..80bfc957018c 100644 --- a/test/suites/integration/instance_profile_test.go +++ b/test/suites/integration/instance_profile_test.go @@ -18,7 +18,7 @@ import ( "fmt" "time" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/awslabs/operatorpkg/status" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -88,13 +88,13 @@ var _ = Describe("InstanceProfile Generation", func() { instance := env.GetInstance(node.Name) Expect(instance.IamInstanceProfile).ToNot(BeNil()) Expect(lo.FromPtr(instance.IamInstanceProfile.Arn)).To(ContainSubstring(nodeClass.Status.InstanceProfile)) - ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: v1beta1.ConditionTypeInstanceProfileReady, Status: metav1.ConditionTrue}) + 
ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: v1.ConditionTypeInstanceProfileReady, Status: metav1.ConditionTrue}) ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: status.ConditionReady, Status: metav1.ConditionTrue}) }) It("should have the EC2NodeClass status as not ready since Instance Profile was not resolved", func() { nodeClass.Spec.Role = fmt.Sprintf("KarpenterNodeRole-%s", "invalidRole") env.ExpectCreated(nodeClass) - ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: v1beta1.ConditionTypeInstanceProfileReady, Status: metav1.ConditionUnknown}) + ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: v1.ConditionTypeInstanceProfileReady, Status: metav1.ConditionUnknown}) ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: status.ConditionReady, Status: metav1.ConditionUnknown}) }) }) diff --git a/test/suites/integration/kubelet_config_test.go b/test/suites/integration/kubelet_config_test.go index 4599b8e214e1..142c59d55d87 100644 --- a/test/suites/integration/kubelet_config_test.go +++ b/test/suites/integration/kubelet_config_test.go @@ -18,12 +18,12 @@ import ( "math" "time" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "github.com/samber/lo" @@ -31,7 +31,7 @@ import ( "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" . "github.com/onsi/ginkgo/v2" ) @@ -40,18 +40,18 @@ var _ = Describe("KubeletConfiguration Overrides", func() { Context("All kubelet configuration set", func() { BeforeEach(func() { // MaxPods needs to account for the daemonsets that will run on the nodes - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ MaxPods: lo.ToPtr(int32(110)), PodsPerCore: lo.ToPtr(int32(10)), SystemReserved: map[string]string{ - string(v1.ResourceCPU): "200m", - string(v1.ResourceMemory): "200Mi", - string(v1.ResourceEphemeralStorage): "1Gi", + string(corev1.ResourceCPU): "200m", + string(corev1.ResourceMemory): "200Mi", + string(corev1.ResourceEphemeralStorage): "1Gi", }, KubeReserved: map[string]string{ - string(v1.ResourceCPU): "200m", - string(v1.ResourceMemory): "200Mi", - string(v1.ResourceEphemeralStorage): "1Gi", + string(corev1.ResourceCPU): "200m", + string(corev1.ResourceMemory): "200Mi", + string(corev1.ResourceEphemeralStorage): "1Gi", }, EvictionHard: map[string]string{ "memory.available": "5%", @@ -87,28 +87,28 @@ var _ = Describe("KubeletConfiguration Overrides", func() { func(amiFamily *string) { nodeClass.Spec.AMIFamily = amiFamily // TODO (jmdeal@): remove once 22.04 AMIs are supported - if *amiFamily == v1beta1.AMIFamilyUbuntu && env.K8sMinorVersion() >= 29 { + if *amiFamily == v1.AMIFamilyUbuntu && env.K8sMinorVersion() >= 29 { nodeClass.Spec.AMISelectorTerms = lo.Map([]string{ "/aws/service/canonical/ubuntu/eks/20.04/1.28/stable/current/amd64/hvm/ebs-gp2/ami-id", "/aws/service/canonical/ubuntu/eks/20.04/1.28/stable/current/arm64/hvm/ebs-gp2/ami-id", - }, func(ssmPath string, _ int) v1beta1.AMISelectorTerm { - return v1beta1.AMISelectorTerm{ID: env.GetAMIBySSMPath(ssmPath)} + }, 
func(ssmPath string, _ int) v1.AMISelectorTerm { + return v1.AMISelectorTerm{ID: env.GetAMIBySSMPath(ssmPath)} }) } pod := test.Pod(test.PodOptions{ NodeSelector: map[string]string{ - v1.LabelOSStable: string(v1.Linux), - v1.LabelArchStable: "amd64", + corev1.LabelOSStable: string(corev1.Linux), + corev1.LabelArchStable: "amd64", }, }) env.ExpectCreated(nodeClass, nodePool, pod) env.EventuallyExpectHealthy(pod) env.ExpectCreatedNodeCount("==", 1) }, - Entry("when the AMIFamily is AL2", &v1beta1.AMIFamilyAL2), - Entry("when the AMIFamily is AL2023", &v1beta1.AMIFamilyAL2023), - Entry("when the AMIFamily is Ubuntu", &v1beta1.AMIFamilyUbuntu), - Entry("when the AMIFamily is Bottlerocket", &v1beta1.AMIFamilyBottlerocket), + Entry("when the AMIFamily is AL2", &v1.AMIFamilyAL2), + Entry("when the AMIFamily is AL2023", &v1.AMIFamilyAL2023), + Entry("when the AMIFamily is Ubuntu", &v1.AMIFamilyUbuntu), + Entry("when the AMIFamily is Bottlerocket", &v1.AMIFamilyBottlerocket), ) DescribeTable("Windows AMIFamilies", func(amiFamily *string) { @@ -122,19 +122,19 @@ var _ = Describe("KubeletConfiguration Overrides", func() { // requirements, not off of the instance type options so scheduling can fail if nodepool aren't // properly scoped test.ReplaceRequirements(nodePool, - corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelOSStable, - Operator: v1.NodeSelectorOpIn, - Values: []string{string(v1.Windows)}, + karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelOSStable, + Operator: corev1.NodeSelectorOpIn, + Values: []string{string(corev1.Windows)}, }, }, ) pod := test.Pod(test.PodOptions{ Image: aws.WindowsDefaultImage, NodeSelector: map[string]string{ - v1.LabelOSStable: string(v1.Windows), - v1.LabelArchStable: "amd64", + corev1.LabelOSStable: string(corev1.Windows), + corev1.LabelArchStable: "amd64", }, }) env.ExpectCreated(nodeClass, nodePool, pod) @@ -146,14 +146,14 @@ var _ = Describe("KubeletConfiguration Overrides", func() { // If the instance type is not supported by the controller resource `vpc.amazonaws.com/PrivateIPv4Address` will not register. 
// Issue: https://github.com/aws/karpenter-provider-aws/issues/4472 // See: https://github.com/aws/amazon-vpc-resource-controller-k8s/blob/master/pkg/aws/vpc/limits.go - Entry("when the AMIFamily is Windows2019", &v1beta1.AMIFamilyWindows2019), - Entry("when the AMIFamily is Windows2022", &v1beta1.AMIFamilyWindows2022), + Entry("when the AMIFamily is Windows2019", &v1.AMIFamilyWindows2019), + Entry("when the AMIFamily is Windows2022", &v1.AMIFamilyWindows2022), ) }) It("should schedule pods onto separate nodes when maxPods is set", func() { // Get the DS pod count and use it to calculate the DS pod overhead dsCount := env.GetDaemonSetCount(nodePool) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ MaxPods: lo.ToPtr(1 + int32(dsCount)), } @@ -164,8 +164,8 @@ var _ = Describe("KubeletConfiguration Overrides", func() { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "large-app"}, }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, }, }, }) @@ -180,10 +180,10 @@ var _ = Describe("KubeletConfiguration Overrides", func() { // PodsPerCore needs to account for the daemonsets that will run on the nodes // This will have 4 pods available on each node (2 taken by daemonset pods) test.ReplaceRequirements(nodePool, - corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceCPU, - Operator: v1.NodeSelectorOpIn, + karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceCPU, + Operator: corev1.NodeSelectorOpIn, Values: []string{"2"}, }, }, @@ -195,8 +195,8 @@ var _ = Describe("KubeletConfiguration Overrides", func() { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "large-app"}, }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, }, }, }) @@ -211,7 +211,7 @@ var _ = Describe("KubeletConfiguration Overrides", func() { // Since we restrict node to two cores, we will allow 6 pods. Both nodes will have // 4 DS pods and 2 test pods. 
dsCount := env.GetDaemonSetCount(nodePool) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ PodsPerCore: lo.ToPtr(int32(math.Ceil(float64(2+dsCount) / 2))), } @@ -221,20 +221,20 @@ var _ = Describe("KubeletConfiguration Overrides", func() { env.EventuallyExpectUniqueNodeNames(selector, 2) }) It("should ignore podsPerCore value when Bottlerocket is used", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket + nodeClass.Spec.AMIFamily = &v1.AMIFamilyBottlerocket // All pods should schedule to a single node since we are ignoring podsPerCore value // This would normally schedule to 3 nodes if not using Bottlerocket test.ReplaceRequirements(nodePool, - corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceCPU, - Operator: v1.NodeSelectorOpIn, + karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceCPU, + Operator: corev1.NodeSelectorOpIn, Values: []string{"2"}, }, }, ) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{PodsPerCore: lo.ToPtr(int32(1))} + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{PodsPerCore: lo.ToPtr(int32(1))} numPods := 6 dep := test.Deployment(test.DeploymentOptions{ Replicas: int32(numPods), @@ -242,8 +242,8 @@ var _ = Describe("KubeletConfiguration Overrides", func() { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"app": "large-app"}, }, - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}, + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, }, }, }) diff --git a/test/suites/integration/launch_template_test.go b/test/suites/integration/launch_template_test.go index 41f602105ad7..c9fd58dbc84d 100644 --- a/test/suites/integration/launch_template_test.go +++ b/test/suites/integration/launch_template_test.go @@ -21,7 +21,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" coretest "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -38,7 +38,7 @@ var _ = Describe("Launch Template Deletion", func() { Eventually(func(g Gomega) { output, _ := env.EC2API.DescribeLaunchTemplatesWithContext(env.Context, &ec2.DescribeLaunchTemplatesInput{ Filters: []*ec2.Filter{ - {Name: aws.String(fmt.Sprintf("tag:%s", v1beta1.LabelNodeClass)), Values: []*string{aws.String(nodeClass.Name)}}, + {Name: aws.String(fmt.Sprintf("tag:%s", v1.LabelNodeClass)), Values: []*string{aws.String(nodeClass.Name)}}, }, }) g.Expect(output.LaunchTemplates).To(HaveLen(0)) diff --git a/test/suites/integration/lease_garbagecollection_test.go b/test/suites/integration/lease_garbagecollection_test.go index d22774c2d55d..bcd1c42c0c05 100644 --- a/test/suites/integration/lease_garbagecollection_test.go +++ b/test/suites/integration/lease_garbagecollection_test.go @@ -18,7 +18,7 @@ import ( "time" coordinationsv1 "k8s.io/api/coordination/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/karpenter/pkg/test" @@ -29,8 +29,8 @@ var _ = Describe("Lease Garbage Collection", func() { var badLease *coordinationsv1.Lease BeforeEach(func() { badLease = &coordinationsv1.Lease{ - ObjectMeta: v1.ObjectMeta{ - CreationTimestamp: v1.Time{Time: time.Now().Add(-time.Hour * 2)}, + ObjectMeta: metav1.ObjectMeta{ + CreationTimestamp: metav1.Time{Time: time.Now().Add(-time.Hour * 2)}, Name: "new-lease", Namespace: "kube-node-lease", Labels: map[string]string{test.DiscoveryLabel: "unspecified"}, diff --git a/test/suites/integration/network_interface_test.go b/test/suites/integration/network_interface_test.go index 8a5976fed031..443913cc95d6 100644 --- a/test/suites/integration/network_interface_test.go +++ b/test/suites/integration/network_interface_test.go @@ -20,14 +20,14 @@ import ( "github.com/samber/lo" "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" ) var _ = Describe("NetworkInterfaces", func() { DescribeTable( "should correctly configure public IP assignment on instances", func(associatePublicIPAddress *bool) { - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{{ Tags: map[string]string{ "Name": "*Private*", "karpenter.sh/discovery": env.ClusterName, diff --git a/test/suites/integration/security_group_test.go b/test/suites/integration/security_group_test.go index d981fc6ad156..c4e0476495b1 100644 --- a/test/suites/integration/security_group_test.go +++ b/test/suites/integration/security_group_test.go @@ -26,7 +26,7 @@ import ( "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/test/pkg/environment/aws" . 
"github.com/awslabs/operatorpkg/test/expectations" @@ -38,8 +38,8 @@ var _ = Describe("SecurityGroups", func() { It("should use the security-group-id selector", func() { securityGroups := env.GetSecurityGroups(map[string]string{"karpenter.sh/discovery": env.ClusterName}) Expect(len(securityGroups)).To(BeNumerically(">", 1)) - nodeClass.Spec.SecurityGroupSelectorTerms = lo.Map(securityGroups, func(sg aws.SecurityGroup, _ int) v1beta1.SecurityGroupSelectorTerm { - return v1beta1.SecurityGroupSelectorTerm{ + nodeClass.Spec.SecurityGroupSelectorTerms = lo.Map(securityGroups, func(sg aws.SecurityGroup, _ int) v1.SecurityGroupSelectorTerm { + return v1.SecurityGroupSelectorTerm{ ID: lo.FromPtr(sg.GroupId), } }) @@ -58,7 +58,7 @@ var _ = Describe("SecurityGroups", func() { first := securityGroups[0] last := securityGroups[len(securityGroups)-1] - nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{ + nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"Name": lo.FromPtr(lo.FindOrElse(first.Tags, &ec2.Tag{}, func(tag *ec2.Tag) bool { return lo.FromPtr(tag.Key) == "Name" }).Value)}, }, @@ -78,23 +78,23 @@ var _ = Describe("SecurityGroups", func() { It("should update the EC2NodeClass status security groups", func() { env.ExpectCreated(nodeClass) EventuallyExpectSecurityGroups(env, nodeClass) - ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: v1beta1.ConditionTypeSecurityGroupsReady, Status: metav1.ConditionTrue}) + ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: v1.ConditionTypeSecurityGroupsReady, Status: metav1.ConditionTrue}) ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: status.ConditionReady, Status: metav1.ConditionTrue}) }) It("should have the NodeClass status as not ready since security groups were not resolved", func() { - nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{ + nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"karpenter.sh/discovery": "invalidName"}, }, } env.ExpectCreated(nodeClass) - ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: v1beta1.ConditionTypeSecurityGroupsReady, Status: metav1.ConditionFalse, Message: "SecurityGroupSelector did not match any SecurityGroups"}) + ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: v1.ConditionTypeSecurityGroupsReady, Status: metav1.ConditionFalse, Message: "SecurityGroupSelector did not match any SecurityGroups"}) ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: status.ConditionReady, Status: metav1.ConditionFalse, Message: "SecurityGroupsReady=False"}) }) }) -func EventuallyExpectSecurityGroups(env *aws.Environment, nodeClass *v1beta1.EC2NodeClass) { +func EventuallyExpectSecurityGroups(env *aws.Environment, nodeClass *v1.EC2NodeClass) { securityGroups := env.GetSecurityGroups(map[string]string{"karpenter.sh/discovery": env.ClusterName}) Expect(securityGroups).ToNot(HaveLen(0)) @@ -102,9 +102,9 @@ func EventuallyExpectSecurityGroups(env *aws.Environment, nodeClass *v1beta1.EC2 return lo.FromPtr(s.GroupId) })...) 
Eventually(func(g Gomega) { - temp := &v1beta1.EC2NodeClass{} + temp := &v1.EC2NodeClass{} g.Expect(env.Client.Get(env, client.ObjectKeyFromObject(nodeClass), temp)).To(Succeed()) - g.Expect(sets.New(lo.Map(temp.Status.SecurityGroups, func(s v1beta1.SecurityGroup, _ int) string { + g.Expect(sets.New(lo.Map(temp.Status.SecurityGroups, func(s v1.SecurityGroup, _ int) string { return s.ID })...).Equal(ids)) }).WithTimeout(10 * time.Second).Should(Succeed()) diff --git a/test/suites/integration/subnet_test.go b/test/suites/integration/subnet_test.go index 821a94ebdb29..d9a233390d9d 100644 --- a/test/suites/integration/subnet_test.go +++ b/test/suites/integration/subnet_test.go @@ -21,16 +21,16 @@ import ( "github.com/awslabs/operatorpkg/status" "github.com/onsi/gomega/types" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/karpenter/pkg/test" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/test/pkg/environment/aws" . "github.com/awslabs/operatorpkg/test/expectations" @@ -45,7 +45,7 @@ var _ = Describe("Subnets", func() { shuffledAZs := lo.Shuffle(lo.Keys(subnets)) firstSubnet := subnets[shuffledAZs[0]][0] - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { ID: firstSubnet, }, @@ -83,7 +83,7 @@ var _ = Describe("Subnets", func() { firstSubnet := subnets[0] lastSubnet := subnets[len(subnets)-1] - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { Tags: map[string]string{"Name": firstSubnet.Name}, }, @@ -105,9 +105,9 @@ var _ = Describe("Subnets", func() { Expect(len(subnets)).ToNot(Equal(0)) shuffledAZs := lo.Shuffle(lo.Keys(subnets)) - test.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelZoneFailureDomainStable, + test.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelZoneFailureDomainStable, Operator: "In", Values: []string{shuffledAZs[0]}, }}) @@ -125,17 +125,17 @@ var _ = Describe("Subnets", func() { It("should have the NodeClass status for subnets", func() { env.ExpectCreated(nodeClass) EventuallyExpectSubnets(env, nodeClass) - ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: v1beta1.ConditionTypeSubnetsReady, Status: metav1.ConditionTrue}) + ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: v1.ConditionTypeSubnetsReady, Status: metav1.ConditionTrue}) ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: status.ConditionReady, Status: metav1.ConditionTrue}) }) It("should have the NodeClass status as not ready since subnets were not resolved", func() { - nodeClass.Spec.SubnetSelectorTerms = []v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { Tags: map[string]string{"karpenter.sh/discovery": "invalidName"}, }, } env.ExpectCreated(nodeClass) - ExpectStatusConditions(env, env.Client, 1*time.Minute, 
nodeClass, status.Condition{Type: v1beta1.ConditionTypeSubnetsReady, Status: metav1.ConditionFalse, Message: "SubnetSelector did not match any Subnets"}) + ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: v1.ConditionTypeSubnetsReady, Status: metav1.ConditionFalse, Message: "SubnetSelector did not match any Subnets"}) ExpectStatusConditions(env, env.Client, 1*time.Minute, nodeClass, status.Condition{Type: status.ConditionReady, Status: metav1.ConditionFalse, Message: "SubnetsReady=False"}) }) }) @@ -186,15 +186,15 @@ type SubnetInfo struct { ID string } -func EventuallyExpectSubnets(env *aws.Environment, nodeClass *v1beta1.EC2NodeClass) { +func EventuallyExpectSubnets(env *aws.Environment, nodeClass *v1.EC2NodeClass) { subnets := env.GetSubnets(map[string]string{"karpenter.sh/discovery": env.ClusterName}) Expect(subnets).ToNot(HaveLen(0)) ids := sets.New(lo.Flatten(lo.Values(subnets))...) Eventually(func(g Gomega) { - temp := &v1beta1.EC2NodeClass{} + temp := &v1.EC2NodeClass{} g.Expect(env.Client.Get(env, client.ObjectKeyFromObject(nodeClass), temp)).To(Succeed()) - g.Expect(sets.New(lo.Map(temp.Status.Subnets, func(s v1beta1.Subnet, _ int) string { + g.Expect(sets.New(lo.Map(temp.Status.Subnets, func(s v1.Subnet, _ int) string { return s.ID })...).Equal(ids)) }).WithTimeout(10 * time.Second).Should(Succeed()) diff --git a/test/suites/integration/suite_test.go b/test/suites/integration/suite_test.go index 9a2f70e8cd54..0330c3c929b9 100644 --- a/test/suites/integration/suite_test.go +++ b/test/suites/integration/suite_test.go @@ -17,9 +17,9 @@ package integration_test import ( "testing" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/test/pkg/environment/aws" . 
"github.com/onsi/ginkgo/v2" @@ -27,8 +27,8 @@ import ( ) var env *aws.Environment -var nodeClass *v1beta1.EC2NodeClass -var nodePool *corev1beta1.NodePool +var nodeClass *v1.EC2NodeClass +var nodePool *karpv1.NodePool func TestIntegration(t *testing.T) { RegisterFailHandler(Fail) diff --git a/test/suites/integration/tags_test.go b/test/suites/integration/tags_test.go index c2e28fe2fd51..ef44297db7d3 100644 --- a/test/suites/integration/tags_test.go +++ b/test/suites/integration/tags_test.go @@ -22,13 +22,13 @@ import ( "github.com/awslabs/operatorpkg/object" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" coretest "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/providers/instance" "github.com/aws/karpenter-provider-aws/pkg/test" @@ -67,11 +67,11 @@ var _ = Describe("Tags", func() { } }) It("should tag spot instance requests when creating resources", func() { - coretest.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeSpot}, + coretest.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeSpot}, }}) nodeClass.Spec.Tags = map[string]string{"TestTag": "TestVal"} pod := coretest.Pod() @@ -96,9 +96,9 @@ var _ = Describe("Tags", func() { nodeName := client.ObjectKeyFromObject(node) Eventually(func(g Gomega) { - node = &v1.Node{} + node = &corev1.Node{} g.Expect(env.Client.Get(env.Context, nodeName, node)).To(Succeed()) - g.Expect(node.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationInstanceTagged, "true")) + g.Expect(node.Annotations).To(HaveKeyWithValue(v1.AnnotationInstanceTagged, "true")) }, time.Minute) nodeInstance := instance.NewInstance(lo.ToPtr(env.GetInstance(node.Name))) @@ -107,21 +107,21 @@ var _ = Describe("Tags", func() { }) It("shouldn't overwrite custom Name tags", func() { - nodeClass = test.EC2NodeClass(*nodeClass, v1beta1.EC2NodeClass{Spec: v1beta1.EC2NodeClassSpec{ + nodeClass = test.EC2NodeClass(*nodeClass, v1.EC2NodeClass{Spec: v1.EC2NodeClassSpec{ Tags: map[string]string{"Name": "custom-name", "testing/cluster": env.ClusterName}, }}) if env.PrivateCluster { nodeClass.Spec.Role = "" nodeClass.Spec.InstanceProfile = lo.ToPtr(fmt.Sprintf("KarpenterNodeInstanceProfile-%s", env.ClusterName)) } - nodePool = coretest.NodePool(*nodePool, corev1beta1.NodePool{ - Spec: corev1beta1.NodePoolSpec{ - Template: corev1beta1.NodeClaimTemplate{ - Spec: corev1beta1.NodeClaimSpec{ - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + nodePool = coretest.NodePool(*nodePool, karpv1.NodePool{ + Spec: karpv1.NodePoolSpec{ + Template: karpv1.NodeClaimTemplate{ + Spec: karpv1.NodeClaimSpec{ + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }, @@ -135,9 +135,9 @@ var _ = 
Describe("Tags", func() { nodeName := client.ObjectKeyFromObject(node) Eventually(func(g Gomega) { - node = &v1.Node{} + node = &corev1.Node{} g.Expect(env.Client.Get(env.Context, nodeName, node)).To(Succeed()) - g.Expect(node.Annotations).To(HaveKeyWithValue(v1beta1.AnnotationInstanceTagged, "true")) + g.Expect(node.Annotations).To(HaveKeyWithValue(v1.AnnotationInstanceTagged, "true")) }, time.Minute) nodeInstance := instance.NewInstance(lo.ToPtr(env.GetInstance(node.Name))) diff --git a/test/suites/integration/utilization_test.go b/test/suites/integration/utilization_test.go index 798093f46c1c..0d3c3154da03 100644 --- a/test/suites/integration/utilization_test.go +++ b/test/suites/integration/utilization_test.go @@ -17,17 +17,17 @@ package integration_test import ( "time" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" "sigs.k8s.io/karpenter/pkg/test" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "github.com/samber/lo" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/test/pkg/debug" . "github.com/onsi/ginkgo/v2" @@ -36,26 +36,26 @@ import ( var _ = Describe("Utilization", Label(debug.NoWatch), Label(debug.NoEvents), func() { It("should provision one pod per node", func() { test.ReplaceRequirements(nodePool, - corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: []string{"t3.small"}, }, }, - corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceCategory, - Operator: v1.NodeSelectorOpExists, + karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceCategory, + Operator: corev1.NodeSelectorOpExists, }, }, ) deployment := test.Deployment(test.DeploymentOptions{ Replicas: 100, PodOptions: test.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: func() resource.Quantity { + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: func() resource.Quantity { dsOverhead := env.GetDaemonSetOverhead(nodePool) base := lo.ToPtr(resource.MustParse("1800m")) base.Sub(*dsOverhead.Cpu()) diff --git a/test/suites/integration/validation_test.go b/test/suites/integration/validation_test.go index d116e07af33a..d5c5d498cf74 100644 --- a/test/suites/integration/validation_test.go +++ b/test/suites/integration/validation_test.go @@ -19,12 +19,12 @@ import ( "time" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" coretest "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -34,7 +34,7 @@ var _ = Describe("Validation", func() { Context("NodePool", func() { It("should error when a restricted label is used in labels (karpenter.sh/nodepool)", func() { nodePool.Spec.Template.Labels = map[string]string{ - corev1beta1.NodePoolLabelKey: "my-custom-nodepool", + karpv1.NodePoolLabelKey: "my-custom-nodepool", } Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed()) }) @@ -46,93 +46,76 @@ var _ = Describe("Validation", func() { }) It("should allow a restricted label exception to be used in labels (node-restriction.kubernetes.io/custom-label)", func() { nodePool.Spec.Template.Labels = map[string]string{ - v1.LabelNamespaceNodeRestriction + "/custom-label": "custom-value", + corev1.LabelNamespaceNodeRestriction + "/custom-label": "custom-value", } Expect(env.Client.Create(env.Context, nodePool)).To(Succeed()) }) It("should allow a restricted label exception to be used in labels ([*].node-restriction.kubernetes.io/custom-label)", func() { nodePool.Spec.Template.Labels = map[string]string{ - "subdomain" + v1.LabelNamespaceNodeRestriction + "/custom-label": "custom-value", + "subdomain" + corev1.LabelNamespaceNodeRestriction + "/custom-label": "custom-value", } Expect(env.Client.Create(env.Context, nodePool)).To(Succeed()) }) It("should error when a requirement references a restricted label (karpenter.sh/nodepool)", func() { - nodePool = coretest.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.NodePoolLabelKey, - Operator: v1.NodeSelectorOpIn, + nodePool = coretest.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.NodePoolLabelKey, + Operator: corev1.NodeSelectorOpIn, Values: []string{"default"}, }}) Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed()) }) It("should error when a requirement uses In but has no values", func() { - nodePool = coretest.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + nodePool = coretest.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: []string{}, }}) Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed()) }) It("should error when a requirement uses an unknown operator", func() { - nodePool = coretest.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, + nodePool = coretest.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, Operator: "within", - Values: []string{corev1beta1.CapacityTypeSpot}, + Values: []string{karpv1.CapacityTypeSpot}, }}) Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed()) }) It("should error when Gt is used with multiple integer values", func() { - nodePool = coretest.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceMemory, - Operator: v1.NodeSelectorOpGt, + nodePool = 
coretest.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceMemory, + Operator: corev1.NodeSelectorOpGt, Values: []string{"1000000", "2000000"}, }}) Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed()) }) It("should error when Lt is used with multiple integer values", func() { - nodePool = coretest.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceMemory, - Operator: v1.NodeSelectorOpLt, + nodePool = coretest.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceMemory, + Operator: corev1.NodeSelectorOpLt, Values: []string{"1000000", "2000000"}, }}) Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed()) }) It("should error when ttlSecondAfterEmpty is negative", func() { - nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenEmpty - nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{Duration: lo.ToPtr(-time.Second)} + nodePool.Spec.Disruption.ConsolidationPolicy = karpv1.ConsolidationPolicyWhenEmpty + nodePool.Spec.Disruption.ConsolidateAfter = &karpv1.NillableDuration{Duration: lo.ToPtr(-time.Second)} Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed()) }) It("should error when ConsolidationPolicy=WhenUnderutilized is used with consolidateAfter", func() { - nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenUnderutilized - nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{Duration: lo.ToPtr(time.Minute)} - Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed()) - }) - It("should error if imageGCHighThresholdPercent is less than imageGCLowThresholdPercent", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ - ImageGCHighThresholdPercent: lo.ToPtr(int32(10)), - ImageGCLowThresholdPercent: lo.ToPtr(int32(60)), - } - Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed()) - }) - It("should error if imageGCHighThresholdPercent or imageGCLowThresholdPercent is negative", func() { - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ - ImageGCHighThresholdPercent: lo.ToPtr(int32(-10)), - } - Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed()) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ - ImageGCLowThresholdPercent: lo.ToPtr(int32(-10)), - } + nodePool.Spec.Disruption.ConsolidationPolicy = karpv1.ConsolidationPolicyWhenUnderutilized + nodePool.Spec.Disruption.ConsolidateAfter = &karpv1.NillableDuration{Duration: lo.ToPtr(time.Minute)} Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed()) }) It("should error when minValues for a requirement key is negative", func() { - nodePool = coretest.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + nodePool = coretest.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: []string{"insance-type-1", "insance-type-2"}, }, MinValues: lo.ToPtr(-1)}, @@ -140,10 
+123,10 @@ var _ = Describe("Validation", func() { Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed()) }) It("should error when minValues for a requirement key is zero", func() { - nodePool = coretest.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + nodePool = coretest.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: []string{"insance-type-1", "insance-type-2"}, }, MinValues: lo.ToPtr(0)}, @@ -151,10 +134,10 @@ var _ = Describe("Validation", func() { Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed()) }) It("should error when minValues for a requirement key is more than 50", func() { - nodePool = coretest.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + nodePool = coretest.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: []string{"insance-type-1", "insance-type-2"}, }, MinValues: lo.ToPtr(51)}, @@ -162,10 +145,10 @@ var _ = Describe("Validation", func() { Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed()) }) It("should error when minValues for a requirement key is greater than the values specified within In operator", func() { - nodePool = coretest.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + nodePool = coretest.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: []string{"insance-type-1", "insance-type-2"}, }, MinValues: lo.ToPtr(3)}, @@ -175,11 +158,11 @@ var _ = Describe("Validation", func() { }) Context("EC2NodeClass", func() { It("should error when amiSelectorTerms are not defined for amiFamily Custom", func() { - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyCustom + nodeClass.Spec.AMIFamily = &v1.AMIFamilyCustom Expect(env.Client.Create(env.Context, nodeClass)).ToNot(Succeed()) }) It("should fail for poorly formatted AMI ids", func() { - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { ID: "must-start-with-ami", }, @@ -207,7 +190,7 @@ var _ = Describe("Validation", func() { Expect(env.Client.Create(env.Context, nodeClass)).ToNot(Succeed()) }) It("should fail when securityGroupSelectorTerms has id and other filters", func() { - nodeClass.Spec.SecurityGroupSelectorTerms = []v1beta1.SecurityGroupSelectorTerm{ + nodeClass.Spec.SecurityGroupSelectorTerms = []v1.SecurityGroupSelectorTerm{ { Tags: map[string]string{"karpenter.sh/discovery": env.ClusterName}, ID: "sg-12345", @@ -216,7 +199,7 @@ var _ = Describe("Validation", func() { Expect(env.Client.Create(env.Context, nodeClass)).ToNot(Succeed()) }) It("should fail when subnetSelectorTerms has id and other filters", func() { - nodeClass.Spec.SubnetSelectorTerms = 
[]v1beta1.SubnetSelectorTerm{ + nodeClass.Spec.SubnetSelectorTerms = []v1.SubnetSelectorTerm{ { Tags: map[string]string{"karpenter.sh/discovery": env.ClusterName}, ID: "subnet-12345", @@ -225,7 +208,7 @@ var _ = Describe("Validation", func() { Expect(env.Client.Create(env.Context, nodeClass)).ToNot(Succeed()) }) It("should fail when amiSelectorTerms has id and other filters", func() { - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{ + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{ { Tags: map[string]string{"karpenter.sh/discovery": env.ClusterName}, ID: "ami-12345", @@ -267,5 +250,22 @@ var _ = Describe("Validation", func() { nodeClass.Spec.InstanceProfile = lo.ToPtr("test-instance-profile") Expect(env.Client.Update(env.Context, nodeClass)).ToNot(Succeed()) }) + It("should error if imageGCHighThresholdPercent is less than imageGCLowThresholdPercent", func() { + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ + ImageGCHighThresholdPercent: lo.ToPtr(int32(10)), + ImageGCLowThresholdPercent: lo.ToPtr(int32(60)), + } + Expect(env.Client.Create(env.Context, nodeClass)).ToNot(Succeed()) + }) + It("should error if imageGCHighThresholdPercent or imageGCLowThresholdPercent is negative", func() { + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ + ImageGCHighThresholdPercent: lo.ToPtr(int32(-10)), + } + Expect(env.Client.Create(env.Context, nodeClass)).ToNot(Succeed()) + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ + ImageGCLowThresholdPercent: lo.ToPtr(int32(-10)), + } + Expect(env.Client.Create(env.Context, nodeClass)).ToNot(Succeed()) + }) }) }) diff --git a/test/suites/interruption/suite_test.go b/test/suites/interruption/suite_test.go index 3b83e69e9257..5c2d557d0ccc 100644 --- a/test/suites/interruption/suite_test.go +++ b/test/suites/interruption/suite_test.go @@ -20,15 +20,15 @@ import ( "time" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/uuid" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" coretest "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/controllers/interruption/messages" "github.com/aws/karpenter-provider-aws/pkg/controllers/interruption/messages/scheduledchange" "github.com/aws/karpenter-provider-aws/pkg/operator/options" @@ -41,8 +41,8 @@ import ( ) var env *aws.Environment -var nodeClass *v1beta1.EC2NodeClass -var nodePool *corev1beta1.NodePool +var nodeClass *v1.EC2NodeClass +var nodePool *karpv1.NodePool func TestInterruption(t *testing.T) { RegisterFailHandler(Fail) @@ -69,11 +69,11 @@ var _ = AfterEach(func() { env.AfterEach() }) var _ = Describe("Interruption", func() { It("should terminate the spot instance and spin up a new node on spot interruption warning", func() { By("Creating a single healthy node with a healthy deployment") - nodePool = coretest.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeSpot}, + nodePool = coretest.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + 
Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeSpot}, }}) numPods := 1 dep := coretest.Deployment(coretest.DeploymentOptions{ diff --git a/test/suites/ipv6/suite_test.go b/test/suites/ipv6/suite_test.go index f1b2f33ea104..980bfc8e1b73 100644 --- a/test/suites/ipv6/suite_test.go +++ b/test/suites/ipv6/suite_test.go @@ -19,12 +19,12 @@ import ( "testing" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" coretest "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/test/pkg/environment/aws" . "github.com/onsi/ginkgo/v2" @@ -32,8 +32,8 @@ import ( ) var env *aws.Environment -var nodeClass *v1beta1.EC2NodeClass -var nodePool *corev1beta1.NodePool +var nodeClass *v1.EC2NodeClass +var nodePool *karpv1.NodePool func TestIPv6(t *testing.T) { RegisterFailHandler(Fail) @@ -51,16 +51,16 @@ var _ = BeforeEach(func() { nodeClass = env.DefaultEC2NodeClass() nodePool = env.DefaultNodePool(nodeClass) nodePool = coretest.ReplaceRequirements(nodePool, - corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceCategory, - Operator: v1.NodeSelectorOpExists, + karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceCategory, + Operator: corev1.NodeSelectorOpExists, }, }, - corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpIn, + karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpIn, Values: []string{"t3a.small"}, }, }, @@ -76,21 +76,21 @@ var _ = Describe("IPv6", func() { env.EventuallyExpectHealthy(pod) env.ExpectCreatedNodeCount("==", 1) node := env.GetNode(pod.Spec.NodeName) - internalIPv6Addrs := lo.Filter(node.Status.Addresses, func(addr v1.NodeAddress, _ int) bool { - return addr.Type == v1.NodeInternalIP && net.ParseIP(addr.Address).To4() == nil + internalIPv6Addrs := lo.Filter(node.Status.Addresses, func(addr corev1.NodeAddress, _ int) bool { + return addr.Type == corev1.NodeInternalIP && net.ParseIP(addr.Address).To4() == nil }) Expect(internalIPv6Addrs).To(HaveLen(1)) }) It("should provision an IPv6 node by discovering kubeletConfig kube-dns IP", func() { clusterDNSAddr := env.ExpectIPv6ClusterDNS() - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ClusterDNS: []string{clusterDNSAddr}} + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ClusterDNS: []string{clusterDNSAddr}} pod := coretest.Pod() env.ExpectCreated(pod, nodeClass, nodePool) env.EventuallyExpectHealthy(pod) env.ExpectCreatedNodeCount("==", 1) node := env.GetNode(pod.Spec.NodeName) - internalIPv6Addrs := lo.Filter(node.Status.Addresses, func(addr v1.NodeAddress, _ int) bool { - return addr.Type == v1.NodeInternalIP && net.ParseIP(addr.Address).To4() == nil + internalIPv6Addrs := lo.Filter(node.Status.Addresses, func(addr corev1.NodeAddress, _ int) bool { + return addr.Type == corev1.NodeInternalIP && net.ParseIP(addr.Address).To4() == nil }) Expect(internalIPv6Addrs).To(HaveLen(1)) }) diff --git a/test/suites/localzone/suite_test.go 
b/test/suites/localzone/suite_test.go index 8c3ecc034b6c..e181465fe438 100644 --- a/test/suites/localzone/suite_test.go +++ b/test/suites/localzone/suite_test.go @@ -18,15 +18,15 @@ import ( "testing" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/test/pkg/environment/aws" . "github.com/onsi/ginkgo/v2" @@ -34,8 +34,8 @@ import ( ) var env *aws.Environment -var nodeClass *v1beta1.EC2NodeClass -var nodePool *corev1beta1.NodePool +var nodeClass *v1.EC2NodeClass +var nodePool *karpv1.NodePool func TestLocalZone(t *testing.T) { RegisterFailHandler(Fail) @@ -53,9 +53,9 @@ var _ = BeforeEach(func() { nodeClass = env.DefaultEC2NodeClass() // The majority of local zones do not support GP3. Feature support in local zones can be tracked here: // https://aws.amazon.com/about-aws/global-infrastructure/localzones/features/ - nodeClass.Spec.BlockDeviceMappings = append(nodeClass.Spec.BlockDeviceMappings, &v1beta1.BlockDeviceMapping{ + nodeClass.Spec.BlockDeviceMappings = append(nodeClass.Spec.BlockDeviceMappings, &v1.BlockDeviceMapping{ DeviceName: lo.ToPtr("/dev/xvda"), - EBS: &v1beta1.BlockDevice{ + EBS: &v1.BlockDevice{ VolumeSize: func() *resource.Quantity { quantity, err := resource.ParseQuantity("80Gi") Expect(err).To(BeNil()) @@ -66,10 +66,10 @@ var _ = BeforeEach(func() { }, }) nodePool = env.DefaultNodePool(nodeClass) - nodePool.Spec.Template.Spec.Requirements = append(nodePool.Spec.Template.Spec.Requirements, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelTopologyZone, - Operator: v1.NodeSelectorOpIn, + nodePool.Spec.Template.Spec.Requirements = append(nodePool.Spec.Template.Spec.Requirements, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelTopologyZone, + Operator: corev1.NodeSelectorOpIn, Values: lo.FilterMap(env.GetSubnetInfo(map[string]string{"karpenter.sh/discovery": env.ClusterName}), func(info aws.SubnetInfo, _ int) (string, bool) { return info.Zone, info.ZoneType == "local-zone" }), @@ -90,10 +90,10 @@ var _ = Describe("LocalZone", func() { ObjectMeta: metav1.ObjectMeta{ Labels: depLabels, }, - TopologySpreadConstraints: []v1.TopologySpreadConstraint{{ - TopologyKey: v1.LabelHostname, + TopologySpreadConstraints: []corev1.TopologySpreadConstraint{{ + TopologyKey: corev1.LabelHostname, MaxSkew: 1, - WhenUnsatisfiable: v1.DoNotSchedule, + WhenUnsatisfiable: corev1.DoNotSchedule, LabelSelector: &metav1.LabelSelector{ MatchLabels: depLabels, }, diff --git a/test/suites/nodeclaim/garbage_collection_test.go b/test/suites/nodeclaim/garbage_collection_test.go index 78f7cca88f64..3931947062c5 100644 --- a/test/suites/nodeclaim/garbage_collection_test.go +++ b/test/suites/nodeclaim/garbage_collection_test.go @@ -23,12 +23,12 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" coretest 
"sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" awserrors "github.com/aws/karpenter-provider-aws/pkg/errors" "github.com/aws/karpenter-provider-aws/pkg/utils" environmentaws "github.com/aws/karpenter-provider-aws/test/pkg/environment/aws" @@ -82,11 +82,11 @@ var _ = Describe("GarbageCollection", func() { Value: aws.String("owned"), }, { - Key: aws.String(corev1beta1.NodePoolLabelKey), + Key: aws.String(karpv1.NodePoolLabelKey), Value: aws.String(nodePool.Name), }, { - Key: aws.String(v1beta1.LabelNodeClass), + Key: aws.String(v1.LabelNodeClass), Value: aws.String(nodeClass.Name), }, }, @@ -130,7 +130,7 @@ var _ = Describe("GarbageCollection", func() { Resources: []*string{out.Instances[0].InstanceId}, Tags: []*ec2.Tag{ { - Key: aws.String(corev1beta1.ManagedByAnnotationKey), + Key: aws.String(karpv1.ManagedByAnnotationKey), Value: aws.String(env.ClusterName), }, }, @@ -145,7 +145,7 @@ var _ = Describe("GarbageCollection", func() { }) It("should succeed to garbage collect an Instance that was deleted without the cluster's knowledge", func() { // Disable the interruption queue for the garbage collection coretest - env.ExpectSettingsOverridden(v1.EnvVar{Name: "INTERRUPTION_QUEUE", Value: ""}) + env.ExpectSettingsOverridden(corev1.EnvVar{Name: "INTERRUPTION_QUEUE", Value: ""}) pod := coretest.Pod() env.ExpectCreated(nodeClass, nodePool, pod) diff --git a/test/suites/nodeclaim/nodeclaim_test.go b/test/suites/nodeclaim/nodeclaim_test.go index c8a1c6b6ec97..4fb499886025 100644 --- a/test/suites/nodeclaim/nodeclaim_test.go +++ b/test/suites/nodeclaim/nodeclaim_test.go @@ -26,11 +26,11 @@ import ( "github.com/awslabs/operatorpkg/object" "github.com/samber/lo" - v1 "k8s.io/api/core/v1" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + corev1 "k8s.io/api/core/v1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -38,50 +38,50 @@ import ( var _ = Describe("StandaloneNodeClaim", func() { It("should create a standard NodeClaim within the 'c' instance family", func() { - nodeClaim := test.NodeClaim(corev1beta1.NodeClaim{ - Spec: corev1beta1.NodeClaimSpec{ - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodeClaim := test.NodeClaim(karpv1.NodeClaim{ + Spec: karpv1.NodeClaimSpec{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceCategory, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceCategory, + Operator: corev1.NodeSelectorOpIn, Values: []string{"c"}, }, }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeOnDemand}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeOnDemand}, }, }, }, - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }) env.ExpectCreated(nodeClass, nodeClaim) node := env.EventuallyExpectInitializedNodeCount("==", 1)[0] nodeClaim = env.EventuallyExpectCreatedNodeClaimCount("==", 1)[0] - Expect(node.Labels).To(HaveKeyWithValue(v1beta1.LabelInstanceCategory, "c")) + Expect(node.Labels).To(HaveKeyWithValue(v1.LabelInstanceCategory, "c")) env.EventuallyExpectNodeClaimsReady(nodeClaim) }) It("should create a standard NodeClaim based on resource requests", func() { - nodeClaim := test.NodeClaim(corev1beta1.NodeClaim{ - Spec: corev1beta1.NodeClaimSpec{ - Resources: corev1beta1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("3"), - v1.ResourceMemory: resource.MustParse("64Gi"), + nodeClaim := test.NodeClaim(karpv1.NodeClaim{ + Spec: karpv1.NodeClaimSpec{ + Resources: karpv1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("64Gi"), }, }, - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }) @@ -92,7 +92,7 @@ var _ = Describe("StandaloneNodeClaim", func() { env.EventuallyExpectNodeClaimsReady(nodeClaim) }) It("should create a NodeClaim propagating all the NodeClaim spec details", func() { - nodeClaim := test.NodeClaim(corev1beta1.NodeClaim{ + nodeClaim := test.NodeClaim(karpv1.NodeClaim{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ "custom-annotation": "custom-value", @@ -101,70 +101,29 @@ var _ = Describe("StandaloneNodeClaim", func() { "custom-label": "custom-value", }, }, - Spec: corev1beta1.NodeClaimSpec{ - Taints: []v1.Taint{ + Spec: karpv1.NodeClaimSpec{ + Taints: []corev1.Taint{ { Key: "custom-taint", - Effect: v1.TaintEffectNoSchedule, + Effect: corev1.TaintEffectNoSchedule, Value: "custom-value", }, { Key: "other-custom-taint", - Effect: 
v1.TaintEffectNoExecute, + Effect: corev1.TaintEffectNoExecute, Value: "other-custom-value", }, }, - Resources: corev1beta1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("3"), - v1.ResourceMemory: resource.MustParse("16Gi"), + Resources: karpv1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("16Gi"), }, }, - Kubelet: &corev1beta1.KubeletConfiguration{ - MaxPods: lo.ToPtr[int32](110), - PodsPerCore: lo.ToPtr[int32](10), - SystemReserved: map[string]string{ - string(v1.ResourceCPU): "200m", - string(v1.ResourceMemory): "200Mi", - string(v1.ResourceEphemeralStorage): "1Gi", - }, - KubeReserved: map[string]string{ - string(v1.ResourceCPU): "200m", - string(v1.ResourceMemory): "200Mi", - string(v1.ResourceEphemeralStorage): "1Gi", - }, - EvictionHard: map[string]string{ - "memory.available": "5%", - "nodefs.available": "5%", - "nodefs.inodesFree": "5%", - "imagefs.available": "5%", - "imagefs.inodesFree": "5%", - "pid.available": "3%", - }, - EvictionSoft: map[string]string{ - "memory.available": "10%", - "nodefs.available": "10%", - "nodefs.inodesFree": "10%", - "imagefs.available": "10%", - "imagefs.inodesFree": "10%", - "pid.available": "6%", - }, - EvictionSoftGracePeriod: map[string]metav1.Duration{ - "memory.available": {Duration: time.Minute * 2}, - "nodefs.available": {Duration: time.Minute * 2}, - "nodefs.inodesFree": {Duration: time.Minute * 2}, - "imagefs.available": {Duration: time.Minute * 2}, - "imagefs.inodesFree": {Duration: time.Minute * 2}, - "pid.available": {Duration: time.Minute * 2}, - }, - EvictionMaxPodGracePeriod: lo.ToPtr[int32](120), - ImageGCHighThresholdPercent: lo.ToPtr[int32](50), - ImageGCLowThresholdPercent: lo.ToPtr[int32](10), - }, - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }) @@ -173,14 +132,14 @@ var _ = Describe("StandaloneNodeClaim", func() { Expect(node.Annotations).To(HaveKeyWithValue("custom-annotation", "custom-value")) Expect(node.Labels).To(HaveKeyWithValue("custom-label", "custom-value")) Expect(node.Spec.Taints).To(ContainElements( - v1.Taint{ + corev1.Taint{ Key: "custom-taint", - Effect: v1.TaintEffectNoSchedule, + Effect: corev1.TaintEffectNoSchedule, Value: "custom-value", }, - v1.Taint{ + corev1.Taint{ Key: "other-custom-taint", - Effect: v1.TaintEffectNoExecute, + Effect: corev1.TaintEffectNoExecute, Value: "other-custom-value", }, )) @@ -197,28 +156,28 @@ var _ = Describe("StandaloneNodeClaim", func() { env.EventuallyExpectNodeClaimsReady(nodeClaim) }) It("should remove the cloudProvider NodeClaim when the cluster NodeClaim is deleted", func() { - nodeClaim := test.NodeClaim(corev1beta1.NodeClaim{ - Spec: corev1beta1.NodeClaimSpec{ - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodeClaim := test.NodeClaim(karpv1.NodeClaim{ + Spec: karpv1.NodeClaimSpec{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceCategory, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceCategory, + Operator: corev1.NodeSelectorOpIn, Values: []string{"c"}, }, }, { - 
NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeOnDemand}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeOnDemand}, }, }, }, - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }) @@ -238,28 +197,28 @@ var _ = Describe("StandaloneNodeClaim", func() { }, time.Second*10).Should(Succeed()) }) It("should delete a NodeClaim from the node termination finalizer", func() { - nodeClaim := test.NodeClaim(corev1beta1.NodeClaim{ - Spec: corev1beta1.NodeClaimSpec{ - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodeClaim := test.NodeClaim(karpv1.NodeClaim{ + Spec: karpv1.NodeClaimSpec{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceCategory, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceCategory, + Operator: corev1.NodeSelectorOpIn, Values: []string{"c"}, }, }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeOnDemand}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeOnDemand}, }, }, }, - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }) @@ -285,40 +244,40 @@ var _ = Describe("StandaloneNodeClaim", func() { Expect(err).ToNot(HaveOccurred()) // Create userData that adds custom labels through the --node-labels - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyCustom - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{ID: customAMI}} + nodeClass.Spec.AMIFamily = &v1.AMIFamilyCustom + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{ID: customAMI}} nodeClass.Spec.UserData = lo.ToPtr(fmt.Sprintf(string(rawContent), env.ClusterName, env.ClusterEndpoint, env.ExpectCABundle())) - nodeClaim := test.NodeClaim(corev1beta1.NodeClaim{ - Spec: corev1beta1.NodeClaimSpec{ - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodeClaim := test.NodeClaim(karpv1.NodeClaim{ + Spec: karpv1.NodeClaimSpec{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceCategory, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceCategory, + Operator: corev1.NodeSelectorOpIn, Values: []string{"c"}, }, }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelArchStable, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelArchStable, + Operator: corev1.NodeSelectorOpIn, Values: 
[]string{"amd64"}, }, }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeOnDemand}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeOnDemand}, }, }, }, - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }) @@ -337,41 +296,41 @@ var _ = Describe("StandaloneNodeClaim", func() { Expect(err).ToNot(HaveOccurred()) // Create userData that adds custom labels through the --node-labels - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyCustom - nodeClass.Spec.AMISelectorTerms = []v1beta1.AMISelectorTerm{{ID: customAMI}} + nodeClass.Spec.AMIFamily = &v1.AMIFamilyCustom + nodeClass.Spec.AMISelectorTerms = []v1.AMISelectorTerm{{ID: customAMI}} // Giving bad clusterName and clusterEndpoint to the userData nodeClass.Spec.UserData = lo.ToPtr(fmt.Sprintf(string(rawContent), "badName", "badEndpoint", env.ExpectCABundle())) - nodeClaim := test.NodeClaim(corev1beta1.NodeClaim{ - Spec: corev1beta1.NodeClaimSpec{ - Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{ + nodeClaim := test.NodeClaim(karpv1.NodeClaim{ + Spec: karpv1.NodeClaimSpec{ + Requirements: []karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceCategory, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceCategory, + Operator: corev1.NodeSelectorOpIn, Values: []string{"c"}, }, }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelArchStable, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelArchStable, + Operator: corev1.NodeSelectorOpIn, Values: []string{"amd64"}, }, }, { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: corev1beta1.CapacityTypeLabelKey, - Operator: v1.NodeSelectorOpIn, - Values: []string{corev1beta1.CapacityTypeOnDemand}, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: karpv1.CapacityTypeLabelKey, + Operator: corev1.NodeSelectorOpIn, + Values: []string{karpv1.CapacityTypeOnDemand}, }, }, }, - NodeClassRef: &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: nodeClass.Name, + NodeClassRef: &karpv1.NodeClassReference{ + Group: object.GVK(nodeClass).Group, + Kind: object.GVK(nodeClass).Kind, + Name: nodeClass.Name, }, }, }) @@ -381,11 +340,11 @@ var _ = Describe("StandaloneNodeClaim", func() { // Expect that the nodeClaim eventually launches and has false Registration/Initialization Eventually(func(g Gomega) { - temp := &corev1beta1.NodeClaim{} + temp := &karpv1.NodeClaim{} g.Expect(env.Client.Get(env.Context, client.ObjectKeyFromObject(nodeClaim), temp)).To(Succeed()) - g.Expect(temp.StatusConditions().Get(corev1beta1.ConditionTypeLaunched).IsTrue()).To(BeTrue()) - g.Expect(temp.StatusConditions().Get(corev1beta1.ConditionTypeRegistered).IsFalse()).To(BeTrue()) - g.Expect(temp.StatusConditions().Get(corev1beta1.ConditionTypeInitialized).IsFalse()).To(BeTrue()) + 
g.Expect(temp.StatusConditions().Get(karpv1.ConditionTypeLaunched).IsTrue()).To(BeTrue()) + g.Expect(temp.StatusConditions().Get(karpv1.ConditionTypeRegistered).IsFalse()).To(BeTrue()) + g.Expect(temp.StatusConditions().Get(karpv1.ConditionTypeInitialized).IsFalse()).To(BeTrue()) }).Should(Succeed()) // Expect that the nodeClaim is eventually de-provisioned due to the registration timeout diff --git a/test/suites/nodeclaim/suite_test.go b/test/suites/nodeclaim/suite_test.go index d5b49bcbed83..530b8faae9a1 100644 --- a/test/suites/nodeclaim/suite_test.go +++ b/test/suites/nodeclaim/suite_test.go @@ -17,9 +17,9 @@ package nodeclaim_test import ( "testing" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/test/pkg/environment/aws" . "github.com/onsi/ginkgo/v2" @@ -27,8 +27,8 @@ import ( ) var env *aws.Environment -var nodeClass *v1beta1.EC2NodeClass -var nodePool *corev1beta1.NodePool +var nodeClass *v1.EC2NodeClass +var nodePool *karpv1.NodePool func TestNodeClaim(t *testing.T) { RegisterFailHandler(Fail) diff --git a/test/suites/scale/deprovisioning_test.go b/test/suites/scale/deprovisioning_test.go index 438c0568ed9c..33c711d2dccc 100644 --- a/test/suites/scale/deprovisioning_test.go +++ b/test/suites/scale/deprovisioning_test.go @@ -24,16 +24,16 @@ import ( "github.com/awslabs/operatorpkg/object" "github.com/samber/lo" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/uuid" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/pkg/controllers/interruption/messages" "github.com/aws/karpenter-provider-aws/pkg/controllers/interruption/messages/scheduledchange" awstest "github.com/aws/karpenter-provider-aws/pkg/test" @@ -67,51 +67,49 @@ const ( // disableProvisioningLimits represents limits that can be applied to a nodePool if you want a nodePool // that can deprovision nodes but cannot provision nodes -var disableProvisioningLimits = corev1beta1.Limits{ - v1.ResourceCPU: resource.MustParse("0"), - v1.ResourceMemory: resource.MustParse("0Gi"), +var disableProvisioningLimits = karpv1.Limits{ + corev1.ResourceCPU: resource.MustParse("0"), + corev1.ResourceMemory: resource.MustParse("0Gi"), } var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), func() { - var nodePool *corev1beta1.NodePool - var nodeClass *v1beta1.EC2NodeClass + var nodePool *karpv1.NodePool + var nodeClass *v1.EC2NodeClass var deployment *appsv1.Deployment var deploymentOptions test.DeploymentOptions var selector labels.Selector var dsCount int BeforeEach(func() { - env.ExpectSettingsOverridden(v1.EnvVar{Name: "FEATURE_GATES", Value: "Drift=True"}) + env.ExpectSettingsOverridden(corev1.EnvVar{Name: "FEATURE_GATES", Value: "Drift=True"}) nodeClass = env.DefaultEC2NodeClass() nodePool = env.DefaultNodePool(nodeClass) nodePool.Spec.Limits = nil - test.ReplaceRequirements(nodePool, []corev1beta1.NodeSelectorRequirementWithMinValues{ + test.ReplaceRequirements(nodePool, 
[]karpv1.NodeSelectorRequirementWithMinValues{ { - NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: v1beta1.LabelInstanceHypervisor, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: v1.LabelInstanceHypervisor, + Operator: corev1.NodeSelectorOpIn, Values: []string{"nitro"}, }, }, // Ensure that all pods can fit onto the provisioned nodes including all daemonsets { - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceSize, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceSize, + Operator: corev1.NodeSelectorOpIn, Values: []string{"large"}, }, }, }...) - nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{ - { - Nodes: "70%", - }, - } + nodePool.Spec.Disruption.Budgets = []karpv1.Budget{{ + Nodes: "70%", + }} deploymentOptions = test.DeploymentOptions{ PodOptions: test.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("10m"), - v1.ResourceMemory: resource.MustParse("50Mi"), + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, TerminationGracePeriodSeconds: lo.ToPtr[int64](0), @@ -150,12 +148,12 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), deploymentOptions.PodOptions.Labels = map[string]string{ deprovisioningTypeKey: v, } - deploymentOptions.PodOptions.Tolerations = []v1.Toleration{ + deploymentOptions.PodOptions.Tolerations = []corev1.Toleration{ { Key: deprovisioningTypeKey, - Operator: v1.TolerationOpEqual, + Operator: corev1.TolerationOpEqual, Value: v, - Effect: v1.TaintEffectNoSchedule, + Effect: corev1.TaintEffectNoSchedule, }, } deploymentOptions.Replicas = int32(replicas) @@ -163,24 +161,21 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), deploymentMap[v] = d } - nodePoolMap := map[string]*corev1beta1.NodePool{} + nodePoolMap := map[string]*karpv1.NodePool{} // Generate all the nodePools for multi-deprovisioning for _, v := range disruptionMethods { np := test.NodePool() np.Spec = *nodePool.Spec.DeepCopy() - np.Spec.Template.Spec.Taints = []v1.Taint{ + np.Spec.Template.Spec.Taints = []corev1.Taint{ { Key: deprovisioningTypeKey, Value: v, - Effect: v1.TaintEffectNoSchedule, + Effect: corev1.TaintEffectNoSchedule, }, } np.Spec.Template.Labels = map[string]string{ deprovisioningTypeKey: v, } - np.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ - MaxPods: lo.ToPtr[int32](int32(maxPodDensity)), - } nodePoolMap[v] = test.NodePool(*np) } @@ -198,14 +193,17 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), } wg.Wait() + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ + MaxPods: lo.ToPtr[int32](int32(maxPodDensity)), + } // Create a separate nodeClass for drift so that we can change the nodeClass later without it affecting // the other nodePools driftNodeClass := awstest.EC2NodeClass() driftNodeClass.Spec = *nodeClass.Spec.DeepCopy() - nodePoolMap[driftValue].Spec.Template.Spec.NodeClassRef = &corev1beta1.NodeClassReference{ - APIVersion: object.GVK(nodeClass).GroupVersion().String(), - Kind: object.GVK(nodeClass).Kind, - Name: driftNodeClass.Name, + nodePoolMap[driftValue].Spec.Template.Spec.NodeClassRef = &karpv1.NodeClassReference{ + Group: object.GVK(driftNodeClass).Group, + Kind: 
object.GVK(driftNodeClass).Kind, + Name: driftNodeClass.Name, } env.MeasureProvisioningDurationFor(func() { By("kicking off provisioning by applying the nodePool and nodeClass") @@ -253,12 +251,12 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), // Enable consolidation, emptiness, and expiration nodePoolMap[consolidationValue].Spec.Disruption.ConsolidateAfter = nil - nodePoolMap[emptinessValue].Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenEmpty + nodePoolMap[emptinessValue].Spec.Disruption.ConsolidationPolicy = karpv1.ConsolidationPolicyWhenEmpty nodePoolMap[emptinessValue].Spec.Disruption.ConsolidateAfter.Duration = lo.ToPtr(time.Duration(0)) nodePoolMap[expirationValue].Spec.Disruption.ExpireAfter.Duration = lo.ToPtr(time.Duration(0)) nodePoolMap[expirationValue].Spec.Limits = disableProvisioningLimits // Update the drift NodeClass to start drift on Nodes assigned to this NodeClass - driftNodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket + driftNodeClass.Spec.AMIFamily = &v1.AMIFamilyBottlerocket // Create test assertions to ensure during the multiple deprovisioner scale-downs type testAssertions struct { @@ -283,7 +281,7 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), deletedCount: nodeCountPerNodePool, nodeCount: nodeCountPerNodePool, nodeCountSelector: labels.SelectorFromSet(map[string]string{ - corev1beta1.NodePoolLabelKey: nodePoolMap[noExpirationValue].Name, + karpv1.NodePoolLabelKey: nodePoolMap[noExpirationValue].Name, }), createdCount: nodeCountPerNodePool, }, @@ -302,11 +300,9 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), env.MeasureDeprovisioningDurationFor(func() { By("enabling deprovisioning across nodePools") for _, p := range nodePoolMap { - p.Spec.Disruption.Budgets = []corev1beta1.Budget{ - { - Nodes: "70%", - }, - } + p.Spec.Disruption.Budgets = []karpv1.Budget{{ + Nodes: "70%", + }} env.ExpectCreatedOrUpdated(p) } env.ExpectUpdated(driftNodeClass) @@ -322,14 +318,14 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), // Provide a default selector based on the original nodePool name if one isn't specified selector = assertions.deletedNodeCountSelector if selector == nil { - selector = labels.SelectorFromSet(map[string]string{corev1beta1.NodePoolLabelKey: nodePoolMap[d].Name}) + selector = labels.SelectorFromSet(map[string]string{karpv1.NodePoolLabelKey: nodePoolMap[d].Name}) } env.EventuallyExpectDeletedNodeCountWithSelector("==", assertions.deletedCount, selector) // Provide a default selector based on the original nodePool name if one isn't specified selector = assertions.nodeCountSelector if selector == nil { - selector = labels.SelectorFromSet(map[string]string{corev1beta1.NodePoolLabelKey: nodePoolMap[d].Name}) + selector = labels.SelectorFromSet(map[string]string{karpv1.NodePoolLabelKey: nodePoolMap[d].Name}) } env.EventuallyExpectNodeCountWithSelector("==", assertions.nodeCount, selector) env.EventuallyExpectHealthyPodCount(labels.SelectorFromSet(deploymentMap[d].Spec.Selector.MatchLabels), int(lo.FromPtr(deploymentMap[d].Spec.Replicas))) @@ -354,7 +350,7 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), replicas := replicasPerNode * expectedNodeCount deployment.Spec.Replicas = lo.ToPtr[int32](int32(replicas)) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ MaxPods: 
lo.ToPtr[int32](int32(maxPodDensity)), } @@ -386,7 +382,7 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), env.MeasureDeprovisioningDurationFor(func() { By("kicking off deprovisioning by setting the consolidation enabled value on the nodePool") - nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenUnderutilized + nodePool.Spec.Disruption.ConsolidationPolicy = karpv1.ConsolidationPolicyWhenUnderutilized nodePool.Spec.Disruption.ConsolidateAfter = nil env.ExpectUpdated(nodePool) @@ -407,7 +403,7 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), replicas := replicasPerNode * expectedNodeCount deployment.Spec.Replicas = lo.ToPtr[int32](int32(replicas)) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ MaxPods: lo.ToPtr[int32](int32(maxPodDensity)), } @@ -440,7 +436,7 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), env.MeasureDeprovisioningDurationFor(func() { By("kicking off deprovisioning by setting the consolidation enabled value on the nodePool") - nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenUnderutilized + nodePool.Spec.Disruption.ConsolidationPolicy = karpv1.ConsolidationPolicyWhenUnderutilized nodePool.Spec.Disruption.ConsolidateAfter = nil env.ExpectUpdated(nodePool) @@ -461,21 +457,21 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), replicas := replicasPerNode * expectedNodeCount // Add in an instance type size requirement that's larger than the smallest that fits the pods. - test.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceSize, - Operator: v1.NodeSelectorOpIn, + test.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceSize, + Operator: corev1.NodeSelectorOpIn, Values: []string{"2xlarge"}, }}) deployment.Spec.Replicas = lo.ToPtr[int32](int32(replicas)) // Hostname anti-affinity to require one pod on each node - deployment.Spec.Template.Spec.Affinity = &v1.Affinity{ - PodAntiAffinity: &v1.PodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + deployment.Spec.Template.Spec.Affinity = &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ { LabelSelector: deployment.Spec.Selector, - TopologyKey: v1.LabelHostname, + TopologyKey: corev1.LabelHostname, }, }, }, @@ -507,10 +503,10 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), By("kicking off deprovisioning by setting the consolidation enabled value on the nodePool") // The nodePool defaults to a larger instance type than we need so enabling consolidation and making // the requirements wide-open should cause deletes and increase our utilization on the cluster - nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenUnderutilized + nodePool.Spec.Disruption.ConsolidationPolicy = karpv1.ConsolidationPolicyWhenUnderutilized nodePool.Spec.Disruption.ConsolidateAfter = nil - nodePool.Spec.Template.Spec.Requirements = lo.Reject(nodePool.Spec.Template.Spec.Requirements, func(r corev1beta1.NodeSelectorRequirementWithMinValues, _ int) bool { - return r.Key == 
v1beta1.LabelInstanceSize + nodePool.Spec.Template.Spec.Requirements = lo.Reject(nodePool.Spec.Template.Spec.Requirements, func(r karpv1.NodeSelectorRequirementWithMinValues, _ int) bool { + return r.Key == v1.LabelInstanceSize }) env.ExpectUpdated(nodePool) @@ -534,7 +530,7 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), replicas := replicasPerNode * expectedNodeCount deployment.Spec.Replicas = lo.ToPtr[int32](int32(replicas)) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ MaxPods: lo.ToPtr[int32](int32(maxPodDensity)), } @@ -567,7 +563,7 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), env.MeasureDeprovisioningDurationFor(func() { By("kicking off deprovisioning emptiness by setting the ttlSecondsAfterEmpty value on the nodePool") - nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenEmpty + nodePool.Spec.Disruption.ConsolidationPolicy = karpv1.ConsolidationPolicyWhenEmpty nodePool.Spec.Disruption.ConsolidateAfter.Duration = lo.ToPtr(time.Duration(0)) env.ExpectCreatedOrUpdated(nodePool) @@ -590,7 +586,7 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), replicas := replicasPerNode * expectedNodeCount deployment.Spec.Replicas = lo.ToPtr[int32](int32(replicas)) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ MaxPods: lo.ToPtr[int32](int32(maxPodDensity)), } @@ -626,13 +622,10 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), noExpireNodePool := test.NodePool(*nodePool.DeepCopy()) // Disable Expiration - noExpireNodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{} + noExpireNodePool.Spec.Disruption.ConsolidateAfter = &karpv1.NillableDuration{} noExpireNodePool.Spec.Disruption.ExpireAfter.Duration = nil noExpireNodePool.ObjectMeta = metav1.ObjectMeta{Name: test.RandomName()} - noExpireNodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ - MaxPods: lo.ToPtr[int32](int32(maxPodDensity)), - } noExpireNodePool.Spec.Limits = nil env.ExpectCreatedOrUpdated(nodePool, noExpireNodePool) @@ -657,7 +650,7 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), replicas := replicasPerNode * expectedNodeCount deployment.Spec.Replicas = lo.ToPtr[int32](int32(replicas)) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ MaxPods: lo.ToPtr[int32](int32(maxPodDensity)), } @@ -685,7 +678,7 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), env.MeasureDeprovisioningDurationFor(func() { By("kicking off deprovisioning drift by changing the nodeClass AMIFamily") - nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket + nodeClass.Spec.AMIFamily = &v1.AMIFamilyBottlerocket env.ExpectCreatedOrUpdated(nodeClass) env.EventuallyExpectDeletedNodeCount("==", expectedNodeCount) @@ -708,7 +701,7 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents), replicas := replicasPerNode * expectedNodeCount deployment.Spec.Replicas = lo.ToPtr[int32](int32(replicas)) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ MaxPods: lo.ToPtr[int32](int32(maxPodDensity)), } @@ -716,7 +709,7 @@ var _ = Describe("Deprovisioning", 
Label(debug.NoWatch), Label(debug.NoEvents), env.ExpectCreated(deployment) env.EventuallyExpectPendingPodCount(selector, replicas) - var nodes []*v1.Node + var nodes []*corev1.Node env.MeasureProvisioningDurationFor(func() { By("kicking off provisioning by applying the nodePool and nodeClass") env.ExpectCreated(nodePool, nodeClass) diff --git a/test/suites/scale/provisioning_test.go b/test/suites/scale/provisioning_test.go index cffd85133962..d66957235e86 100644 --- a/test/suites/scale/provisioning_test.go +++ b/test/suites/scale/provisioning_test.go @@ -21,14 +21,14 @@ import ( "github.com/samber/lo" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" - corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1" + karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/test" - "github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1" + v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1" "github.com/aws/karpenter-provider-aws/test/pkg/debug" "github.com/aws/karpenter-provider-aws/test/pkg/environment/aws" @@ -38,8 +38,8 @@ import ( const testGroup = "provisioning" var _ = Describe("Provisioning", Label(debug.NoWatch), Label(debug.NoEvents), func() { - var nodePool *corev1beta1.NodePool - var nodeClass *v1beta1.EC2NodeClass + var nodePool *karpv1.NodePool + var nodeClass *v1.EC2NodeClass var deployment *appsv1.Deployment var selector labels.Selector var dsCount int @@ -48,23 +48,22 @@ var _ = Describe("Provisioning", Label(debug.NoWatch), Label(debug.NoEvents), fu nodeClass = env.DefaultEC2NodeClass() nodePool = env.DefaultNodePool(nodeClass) nodePool.Spec.Limits = nil - nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{ - { - Nodes: "70%", - }, - } - test.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceHypervisor, - Operator: v1.NodeSelectorOpIn, + nodePool.Spec.Disruption.Budgets = []karpv1.Budget{{ + Nodes: "70%", + }} + test.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceHypervisor, + Operator: corev1.NodeSelectorOpIn, Values: []string{"nitro"}, - }}) + }, + }) deployment = test.Deployment(test.DeploymentOptions{ PodOptions: test.PodOptions{ - ResourceRequirements: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("10m"), - v1.ResourceMemory: resource.MustParse("50Mi"), + ResourceRequirements: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, TerminationGracePeriodSeconds: lo.ToPtr[int64](0), @@ -90,12 +89,12 @@ var _ = Describe("Provisioning", Label(debug.NoWatch), Label(debug.NoEvents), fu deployment.Spec.Replicas = lo.ToPtr[int32](int32(replicas)) // Hostname anti-affinity to require one pod on each node - deployment.Spec.Template.Spec.Affinity = &v1.Affinity{ - PodAntiAffinity: &v1.PodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + deployment.Spec.Template.Spec.Affinity = &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ { LabelSelector: deployment.Spec.Selector, - TopologyKey: v1.LabelHostname, + TopologyKey: corev1.LabelHostname, }, }, }, @@ -137,22 +136,22 @@ 
var _ = Describe("Provisioning", Label(debug.NoWatch), Label(debug.NoEvents), fu deployment.Spec.Replicas = lo.ToPtr[int32](int32(replicas)) // Hostname anti-affinity to require one pod on each node - deployment.Spec.Template.Spec.Affinity = &v1.Affinity{ - PodAntiAffinity: &v1.PodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + deployment.Spec.Template.Spec.Affinity = &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ { LabelSelector: deployment.Spec.Selector, - TopologyKey: v1.LabelHostname, + TopologyKey: corev1.LabelHostname, }, }, }, } - test.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ + test.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ // minValues is restricted to 30 to have enough instance types to be sent to launch API and not make this test flaky. - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1.LabelInstanceTypeStable, - Operator: v1.NodeSelectorOpExists, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: corev1.LabelInstanceTypeStable, + Operator: corev1.NodeSelectorOpExists, }, MinValues: lo.ToPtr(30), }) @@ -183,14 +182,14 @@ var _ = Describe("Provisioning", Label(debug.NoWatch), Label(debug.NoEvents), fu expectedNodeCount := 60 replicas := replicasPerNode * expectedNodeCount deployment.Spec.Replicas = lo.ToPtr[int32](int32(replicas)) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ MaxPods: lo.ToPtr[int32](int32(maxPodDensity)), } - test.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{ + test.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{ // With Prefix Delegation enabled, .large instances can have 434 pods. - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceSize, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceSize, + Operator: corev1.NodeSelectorOpIn, Values: []string{"large"}, }, }, @@ -222,23 +221,23 @@ var _ = Describe("Provisioning", Label(debug.NoWatch), Label(debug.NoEvents), fu expectedNodeCount := 60 replicas := replicasPerNode * expectedNodeCount deployment.Spec.Replicas = lo.ToPtr[int32](int32(replicas)) - nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{ + nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{ MaxPods: lo.ToPtr[int32](int32(maxPodDensity)), } test.ReplaceRequirements(nodePool, - corev1beta1.NodeSelectorRequirementWithMinValues{ + karpv1.NodeSelectorRequirementWithMinValues{ // With Prefix Delegation enabled, .large instances can have 434 pods. - NodeSelectorRequirement: v1.NodeSelectorRequirement{ - Key: v1beta1.LabelInstanceSize, - Operator: v1.NodeSelectorOpIn, + NodeSelectorRequirement: corev1.NodeSelectorRequirement{ + Key: v1.LabelInstanceSize, + Operator: corev1.NodeSelectorOpIn, Values: []string{"large"}, }, }, - corev1beta1.NodeSelectorRequirementWithMinValues{ + karpv1.NodeSelectorRequirementWithMinValues{ // minValues is restricted to 30 to have enough instance types to be sent to launch API and not make this test flaky. 
-				NodeSelectorRequirement: v1.NodeSelectorRequirement{
-					Key:      v1.LabelInstanceTypeStable,
-					Operator: v1.NodeSelectorOpExists,
+				NodeSelectorRequirement: corev1.NodeSelectorRequirement{
+					Key:      corev1.LabelInstanceTypeStable,
+					Operator: corev1.NodeSelectorOpExists,
 				},
 				MinValues: lo.ToPtr(30),
 			},
diff --git a/test/suites/scheduling/suite_test.go b/test/suites/scheduling/suite_test.go
index d38f0f78b146..3801f6113768 100644
--- a/test/suites/scheduling/suite_test.go
+++ b/test/suites/scheduling/suite_test.go
@@ -21,16 +21,16 @@ import (
 	"github.com/awslabs/operatorpkg/object"
 	"github.com/samber/lo"
-	v1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
-	corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1"
+	karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
 	"sigs.k8s.io/karpenter/pkg/test"
 
-	"github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1"
+	v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1"
 	"github.com/aws/karpenter-provider-aws/test/pkg/debug"
 	environmentaws "github.com/aws/karpenter-provider-aws/test/pkg/environment/aws"
@@ -39,8 +39,8 @@ import (
 )
 
 var env *environmentaws.Environment
-var nodeClass *v1beta1.EC2NodeClass
-var nodePool *corev1beta1.NodePool
+var nodeClass *v1.EC2NodeClass
+var nodePool *karpv1.NodePool
 
 func TestScheduling(t *testing.T) {
 	RegisterFailHandler(Fail)
@@ -66,16 +66,16 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 	BeforeEach(func() {
 		// Make the NodePool requirements fully flexible, so we can match well-known label keys
 		nodePool = test.ReplaceRequirements(nodePool,
-			corev1beta1.NodeSelectorRequirementWithMinValues{
-				NodeSelectorRequirement: v1.NodeSelectorRequirement{
-					Key:      v1beta1.LabelInstanceCategory,
-					Operator: v1.NodeSelectorOpExists,
+			karpv1.NodeSelectorRequirementWithMinValues{
+				NodeSelectorRequirement: corev1.NodeSelectorRequirement{
+					Key:      v1.LabelInstanceCategory,
+					Operator: corev1.NodeSelectorOpExists,
 				},
 			},
-			corev1beta1.NodeSelectorRequirementWithMinValues{
-				NodeSelectorRequirement: v1.NodeSelectorRequirement{
-					Key:      v1beta1.LabelInstanceGeneration,
-					Operator: v1.NodeSelectorOpExists,
+			karpv1.NodeSelectorRequirementWithMinValues{
+				NodeSelectorRequirement: corev1.NodeSelectorRequirement{
+					Key:      v1.LabelInstanceGeneration,
+					Operator: corev1.NodeSelectorOpExists,
 				},
 			},
 		)
@@ -85,42 +85,42 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 	})
 	AfterAll(func() {
 		// Ensure that we're exercising all well known labels
-		Expect(lo.Keys(selectors)).To(ContainElements(append(corev1beta1.WellKnownLabels.UnsortedList(), lo.Keys(corev1beta1.NormalizedLabels)...)))
+		Expect(lo.Keys(selectors)).To(ContainElements(append(karpv1.WellKnownLabels.UnsortedList(), lo.Keys(karpv1.NormalizedLabels)...)))
 	})
 	It("should apply annotations to the node", func() {
 		nodePool.Spec.Template.Annotations = map[string]string{
-			"foo":                                 "bar",
-			corev1beta1.DoNotDisruptAnnotationKey: "true",
+			"foo":                            "bar",
+			karpv1.DoNotDisruptAnnotationKey: "true",
 		}
 		pod := test.Pod()
 		env.ExpectCreated(nodeClass, nodePool, pod)
 		env.EventuallyExpectHealthy(pod)
 		env.ExpectCreatedNodeCount("==", 1)
-		Expect(env.GetNode(pod.Spec.NodeName).Annotations).To(And(HaveKeyWithValue("foo", "bar"), HaveKeyWithValue(corev1beta1.DoNotDisruptAnnotationKey, "true")))
+		Expect(env.GetNode(pod.Spec.NodeName).Annotations).To(And(HaveKeyWithValue("foo", "bar"), HaveKeyWithValue(karpv1.DoNotDisruptAnnotationKey, "true")))
 	})
 	Context("Labels", func() {
 		It("should support well-known labels for instance type selection", func() {
 			nodeSelector := map[string]string{
 				// Well Known
-				corev1beta1.NodePoolLabelKey: nodePool.Name,
-				v1.LabelInstanceTypeStable:   "c5.large",
+				karpv1.NodePoolLabelKey:        nodePool.Name,
+				corev1.LabelInstanceTypeStable: "c5.large",
 				// Well Known to AWS
-				v1beta1.LabelInstanceHypervisor:       "nitro",
-				v1beta1.LabelInstanceCategory:         "c",
-				v1beta1.LabelInstanceGeneration:       "5",
-				v1beta1.LabelInstanceFamily:           "c5",
-				v1beta1.LabelInstanceSize:             "large",
-				v1beta1.LabelInstanceCPU:              "2",
-				v1beta1.LabelInstanceCPUManufacturer:  "intel",
-				v1beta1.LabelInstanceMemory:           "4096",
-				v1beta1.LabelInstanceEBSBandwidth:     "4750",
-				v1beta1.LabelInstanceNetworkBandwidth: "750",
+				v1.LabelInstanceHypervisor:       "nitro",
+				v1.LabelInstanceCategory:         "c",
+				v1.LabelInstanceGeneration:       "5",
+				v1.LabelInstanceFamily:           "c5",
+				v1.LabelInstanceSize:             "large",
+				v1.LabelInstanceCPU:              "2",
+				v1.LabelInstanceCPUManufacturer:  "intel",
+				v1.LabelInstanceMemory:           "4096",
+				v1.LabelInstanceEBSBandwidth:     "4750",
+				v1.LabelInstanceNetworkBandwidth: "750",
 			}
 			selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
-			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
-				return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) corev1.NodeSelectorRequirement {
+				return corev1.NodeSelectorRequirement{Key: key, Operator: corev1.NodeSelectorOpIn, Values: []string{value}}
 			})
 			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
 				NodeSelector:     nodeSelector,
@@ -132,12 +132,12 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 			env.ExpectCreatedNodeCount("==", 1)
 		})
 		It("should support well-known labels for zone id selection", func() {
-			selectors.Insert(v1beta1.LabelTopologyZoneID) // Add node selector keys to selectors used in testing to ensure we test all labels
+			selectors.Insert(v1.LabelTopologyZoneID) // Add node selector keys to selectors used in testing to ensure we test all labels
 			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
-				NodeRequirements: []v1.NodeSelectorRequirement{
+				NodeRequirements: []corev1.NodeSelectorRequirement{
 					{
-						Key:      v1beta1.LabelTopologyZoneID,
-						Operator: v1.NodeSelectorOpIn,
+						Key:      v1.LabelTopologyZoneID,
+						Operator: corev1.NodeSelectorOpIn,
 						Values:   []string{env.GetSubnetInfo(map[string]string{"karpenter.sh/discovery": env.ClusterName})[0].ZoneInfo.ZoneID},
 					},
 				},
@@ -147,19 +147,19 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 			env.ExpectCreatedNodeCount("==", 1)
 		})
 		It("should support well-known labels for local NVME storage", func() {
-			selectors.Insert(v1beta1.LabelInstanceLocalNVME) // Add node selector keys to selectors used in testing to ensure we test all labels
+			selectors.Insert(v1.LabelInstanceLocalNVME) // Add node selector keys to selectors used in testing to ensure we test all labels
 			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
-				NodePreferences: []v1.NodeSelectorRequirement{
+				NodePreferences: []corev1.NodeSelectorRequirement{
 					{
-						Key:      v1beta1.LabelInstanceLocalNVME,
-						Operator: v1.NodeSelectorOpGt,
+						Key:      v1.LabelInstanceLocalNVME,
+						Operator: corev1.NodeSelectorOpGt,
 						Values:   []string{"0"},
 					},
 				},
-				NodeRequirements: []v1.NodeSelectorRequirement{
+				NodeRequirements: []corev1.NodeSelectorRequirement{
 					{
-						Key:      v1beta1.LabelInstanceLocalNVME,
-						Operator: v1.NodeSelectorOpGt,
+						Key:      v1.LabelInstanceLocalNVME,
+						Operator: corev1.NodeSelectorOpGt,
 						Values:   []string{"0"},
 					},
 				},
@@ -169,19 +169,19 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 			env.ExpectCreatedNodeCount("==", 1)
 		})
 		It("should support well-known labels for encryption in transit", func() {
-			selectors.Insert(v1beta1.LabelInstanceEncryptionInTransitSupported) // Add node selector keys to selectors used in testing to ensure we test all labels
+			selectors.Insert(v1.LabelInstanceEncryptionInTransitSupported) // Add node selector keys to selectors used in testing to ensure we test all labels
 			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
-				NodePreferences: []v1.NodeSelectorRequirement{
+				NodePreferences: []corev1.NodeSelectorRequirement{
 					{
-						Key:      v1beta1.LabelInstanceEncryptionInTransitSupported,
-						Operator: v1.NodeSelectorOpIn,
+						Key:      v1.LabelInstanceEncryptionInTransitSupported,
+						Operator: corev1.NodeSelectorOpIn,
 						Values:   []string{"true"},
 					},
 				},
-				NodeRequirements: []v1.NodeSelectorRequirement{
+				NodeRequirements: []corev1.NodeSelectorRequirement{
 					{
-						Key:      v1beta1.LabelInstanceEncryptionInTransitSupported,
-						Operator: v1.NodeSelectorOpIn,
+						Key:      v1.LabelInstanceEncryptionInTransitSupported,
+						Operator: corev1.NodeSelectorOpIn,
 						Values:   []string{"true"},
 					},
 				},
@@ -193,17 +193,17 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 		It("should support well-known deprecated labels", func() {
 			nodeSelector := map[string]string{
 				// Deprecated Labels
-				v1.LabelFailureDomainBetaRegion: env.Region,
-				v1.LabelFailureDomainBetaZone:   fmt.Sprintf("%sa", env.Region),
-				"topology.ebs.csi.aws.com/zone": fmt.Sprintf("%sa", env.Region),
+				corev1.LabelFailureDomainBetaRegion: env.Region,
+				corev1.LabelFailureDomainBetaZone:   fmt.Sprintf("%sa", env.Region),
+				"topology.ebs.csi.aws.com/zone":     fmt.Sprintf("%sa", env.Region),
 				"beta.kubernetes.io/arch": "amd64",
 				"beta.kubernetes.io/os":   "linux",
-				v1.LabelInstanceType: "c5.large",
+				corev1.LabelInstanceType: "c5.large",
 			}
 			selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
-			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
-				return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) corev1.NodeSelectorRequirement {
+				return corev1.NodeSelectorRequirement{Key: key, Operator: corev1.NodeSelectorOpIn, Values: []string{value}}
 			})
 			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
 				NodeSelector:     nodeSelector,
@@ -217,16 +217,16 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 		It("should support well-known labels for topology and architecture", func() {
 			nodeSelector := map[string]string{
 				// Well Known
-				corev1beta1.NodePoolLabelKey:     nodePool.Name,
-				v1.LabelTopologyRegion:           env.Region,
-				v1.LabelTopologyZone:             fmt.Sprintf("%sa", env.Region),
-				v1.LabelOSStable:                 "linux",
-				v1.LabelArchStable:               "amd64",
-				corev1beta1.CapacityTypeLabelKey: corev1beta1.CapacityTypeOnDemand,
+				karpv1.NodePoolLabelKey:     nodePool.Name,
+				corev1.LabelTopologyRegion:  env.Region,
+				corev1.LabelTopologyZone:    fmt.Sprintf("%sa", env.Region),
+				corev1.LabelOSStable:        "linux",
+				corev1.LabelArchStable:      "amd64",
+				karpv1.CapacityTypeLabelKey: karpv1.CapacityTypeOnDemand,
 			}
 			selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
-			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
-				return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) corev1.NodeSelectorRequirement {
+				return corev1.NodeSelectorRequirement{Key: key, Operator: corev1.NodeSelectorOpIn, Values: []string{value}}
 			})
 			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
 				NodeSelector:     nodeSelector,
@@ -239,14 +239,14 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 		})
 		It("should support well-known labels for a gpu (nvidia)", func() {
 			nodeSelector := map[string]string{
-				v1beta1.LabelInstanceGPUName:         "t4",
-				v1beta1.LabelInstanceGPUMemory:       "16384",
-				v1beta1.LabelInstanceGPUManufacturer: "nvidia",
-				v1beta1.LabelInstanceGPUCount:        "1",
+				v1.LabelInstanceGPUName:         "t4",
+				v1.LabelInstanceGPUMemory:       "16384",
+				v1.LabelInstanceGPUManufacturer: "nvidia",
+				v1.LabelInstanceGPUCount:        "1",
 			}
 			selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
-			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
-				return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) corev1.NodeSelectorRequirement {
+				return corev1.NodeSelectorRequirement{Key: key, Operator: corev1.NodeSelectorOpIn, Values: []string{value}}
 			})
 			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
 				NodeSelector:     nodeSelector,
@@ -259,13 +259,13 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 		})
 		It("should support well-known labels for an accelerator (inferentia)", func() {
 			nodeSelector := map[string]string{
-				v1beta1.LabelInstanceAcceleratorName:         "inferentia",
-				v1beta1.LabelInstanceAcceleratorManufacturer: "aws",
-				v1beta1.LabelInstanceAcceleratorCount:        "1",
+				v1.LabelInstanceAcceleratorName:         "inferentia",
+				v1.LabelInstanceAcceleratorManufacturer: "aws",
+				v1.LabelInstanceAcceleratorCount:        "1",
 			}
 			selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
-			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
-				return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) corev1.NodeSelectorRequirement {
+				return corev1.NodeSelectorRequirement{Key: key, Operator: corev1.NodeSelectorOpIn, Values: []string{value}}
 			})
 			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
 				NodeSelector:     nodeSelector,
@@ -289,12 +289,12 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 			nodeSelector := map[string]string{
 				// Well Known
-				v1.LabelWindowsBuild: v1beta1.Windows2022Build,
-				v1.LabelOSStable:     string(v1.Windows), // Specify the OS to enable vpc-resource-controller to inject the PrivateIPv4Address resource
+				corev1.LabelWindowsBuild: v1.Windows2022Build,
+				corev1.LabelOSStable:     string(corev1.Windows), // Specify the OS to enable vpc-resource-controller to inject the PrivateIPv4Address resource
 			}
 			selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
-			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
-				return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) corev1.NodeSelectorRequirement {
+				return corev1.NodeSelectorRequirement{Key: key, Operator: corev1.NodeSelectorOpIn, Values: []string{value}}
 			})
 			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
 				NodeSelector:     nodeSelector,
@@ -302,13 +302,13 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 				NodeRequirements: requirements,
 				Image:            environmentaws.WindowsDefaultImage,
 			}})
-			nodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyWindows2022
+			nodeClass.Spec.AMIFamily = &v1.AMIFamilyWindows2022
 			test.ReplaceRequirements(nodePool,
-				corev1beta1.NodeSelectorRequirementWithMinValues{
-					NodeSelectorRequirement: v1.NodeSelectorRequirement{
-						Key:      v1.LabelOSStable,
-						Operator: v1.NodeSelectorOpIn,
-						Values:   []string{string(v1.Windows)},
+				karpv1.NodeSelectorRequirementWithMinValues{
+					NodeSelectorRequirement: corev1.NodeSelectorRequirement{
+						Key:      corev1.LabelOSStable,
+						Operator: corev1.NodeSelectorOpIn,
+						Values:   []string{string(corev1.Windows)},
 					},
 				},
 			)
@@ -319,9 +319,9 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 		DescribeTable("should support restricted label domain exceptions", func(domain string) {
 			// Assign labels to the nodepool so that it has known values
 			test.ReplaceRequirements(nodePool,
-				corev1beta1.NodeSelectorRequirementWithMinValues{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: domain + "/team", Operator: v1.NodeSelectorOpExists}},
-				corev1beta1.NodeSelectorRequirementWithMinValues{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: domain + "/custom-label", Operator: v1.NodeSelectorOpExists}},
-				corev1beta1.NodeSelectorRequirementWithMinValues{NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: "subdomain." + domain + "/custom-label", Operator: v1.NodeSelectorOpExists}},
+				karpv1.NodeSelectorRequirementWithMinValues{NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: domain + "/team", Operator: corev1.NodeSelectorOpExists}},
+				karpv1.NodeSelectorRequirementWithMinValues{NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: domain + "/custom-label", Operator: corev1.NodeSelectorOpExists}},
+				karpv1.NodeSelectorRequirementWithMinValues{NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: "subdomain." + domain + "/custom-label", Operator: corev1.NodeSelectorOpExists}},
 			)
 			nodeSelector := map[string]string{
 				domain + "/team": "team-1",
@@ -329,8 +329,8 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 				"subdomain." + domain + "/custom-label": "custom-value",
 			}
 			selectors.Insert(lo.Keys(nodeSelector)...) // Add node selector keys to selectors used in testing to ensure we test all labels
-			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) v1.NodeSelectorRequirement {
-				return v1.NodeSelectorRequirement{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}}
+			requirements := lo.MapToSlice(nodeSelector, func(key string, value string) corev1.NodeSelectorRequirement {
+				return corev1.NodeSelectorRequirement{Key: key, Operator: corev1.NodeSelectorOpIn, Values: []string{value}}
 			})
 			deployment := test.Deployment(test.DeploymentOptions{Replicas: 1, PodOptions: test.PodOptions{
 				NodeSelector:     nodeSelector,
@@ -374,10 +374,10 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: podLabels,
 				},
-				PodRequirements: []v1.PodAffinityTerm{
+				PodRequirements: []corev1.PodAffinityTerm{
 					{
 						LabelSelector: &metav1.LabelSelector{MatchLabels: podLabels},
-						TopologyKey:   v1.LabelHostname,
+						TopologyKey:   corev1.LabelHostname,
 					},
 				},
 			},
@@ -396,11 +396,11 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: podLabels,
 				},
-				TopologySpreadConstraints: []v1.TopologySpreadConstraint{
+				TopologySpreadConstraints: []corev1.TopologySpreadConstraint{
 					{
 						MaxSkew:           1,
-						TopologyKey:       v1.LabelTopologyZone,
-						WhenUnsatisfiable: v1.DoNotSchedule,
+						TopologyKey:       corev1.LabelTopologyZone,
+						WhenUnsatisfiable: corev1.DoNotSchedule,
 						LabelSelector:     &metav1.LabelSelector{MatchLabels: podLabels},
 						MinDomains:        lo.ToPtr(int32(3)),
 					},
@@ -416,28 +416,28 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 			env.EventuallyExpectNodeCount("==", 3)
 		})
 		It("should provision a node using a NodePool with higher priority", func() {
-			nodePoolLowPri := test.NodePool(corev1beta1.NodePool{
-				Spec: corev1beta1.NodePoolSpec{
+			nodePoolLowPri := test.NodePool(karpv1.NodePool{
+				Spec: karpv1.NodePoolSpec{
 					Weight: lo.ToPtr(int32(10)),
-					Template: corev1beta1.NodeClaimTemplate{
-						Spec: corev1beta1.NodeClaimSpec{
-							NodeClassRef: &corev1beta1.NodeClassReference{
-								APIVersion: object.GVK(nodeClass).GroupVersion().String(),
-								Kind:       object.GVK(nodeClass).Kind,
-								Name:       nodeClass.Name,
+					Template: karpv1.NodeClaimTemplate{
+						Spec: karpv1.NodeClaimSpec{
+							NodeClassRef: &karpv1.NodeClassReference{
+								Group: object.GVK(nodeClass).Group,
+								Kind:  object.GVK(nodeClass).Kind,
+								Name:  nodeClass.Name,
 							},
-							Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{
+							Requirements: []karpv1.NodeSelectorRequirementWithMinValues{
 								{
-									NodeSelectorRequirement: v1.NodeSelectorRequirement{
-										Key:      v1.LabelOSStable,
-										Operator: v1.NodeSelectorOpIn,
-										Values:   []string{string(v1.Linux)},
+									NodeSelectorRequirement: corev1.NodeSelectorRequirement{
+										Key:      corev1.LabelOSStable,
+										Operator: corev1.NodeSelectorOpIn,
+										Values:   []string{string(corev1.Linux)},
 									},
 								},
 								{
-									NodeSelectorRequirement: v1.NodeSelectorRequirement{
-										Key:      v1.LabelInstanceTypeStable,
-										Operator: v1.NodeSelectorOpIn,
+									NodeSelectorRequirement: corev1.NodeSelectorRequirement{
+										Key:      corev1.LabelInstanceTypeStable,
+										Operator: corev1.NodeSelectorOpIn,
 										Values:   []string{"t3.nano"},
 									},
 								},
@@ -446,28 +446,28 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 					},
 				},
 			})
-			nodePoolHighPri := test.NodePool(corev1beta1.NodePool{
-				Spec: corev1beta1.NodePoolSpec{
+			nodePoolHighPri := test.NodePool(karpv1.NodePool{
+				Spec: karpv1.NodePoolSpec{
 					Weight: lo.ToPtr(int32(100)),
-					Template: corev1beta1.NodeClaimTemplate{
-						Spec: corev1beta1.NodeClaimSpec{
-							NodeClassRef: &corev1beta1.NodeClassReference{
-								APIVersion: object.GVK(nodeClass).GroupVersion().String(),
-								Kind:       object.GVK(nodeClass).Kind,
-								Name:       nodeClass.Name,
+					Template: karpv1.NodeClaimTemplate{
+						Spec: karpv1.NodeClaimSpec{
+							NodeClassRef: &karpv1.NodeClassReference{
+								Group: object.GVK(nodeClass).Group,
+								Kind:  object.GVK(nodeClass).Kind,
+								Name:  nodeClass.Name,
 							},
-							Requirements: []corev1beta1.NodeSelectorRequirementWithMinValues{
+							Requirements: []karpv1.NodeSelectorRequirementWithMinValues{
 								{
-									NodeSelectorRequirement: v1.NodeSelectorRequirement{
-										Key:      v1.LabelOSStable,
-										Operator: v1.NodeSelectorOpIn,
-										Values:   []string{string(v1.Linux)},
+									NodeSelectorRequirement: corev1.NodeSelectorRequirement{
+										Key:      corev1.LabelOSStable,
+										Operator: corev1.NodeSelectorOpIn,
+										Values:   []string{string(corev1.Linux)},
 									},
 								},
 								{
-									NodeSelectorRequirement: v1.NodeSelectorRequirement{
-										Key:      v1.LabelInstanceTypeStable,
-										Operator: v1.NodeSelectorOpIn,
+									NodeSelectorRequirement: corev1.NodeSelectorRequirement{
+										Key:      corev1.LabelInstanceTypeStable,
+										Operator: corev1.NodeSelectorOpIn,
 										Values:   []string{"c5.large"},
 									},
 								},
@@ -481,12 +481,12 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 			env.EventuallyExpectHealthy(pod)
 			env.ExpectCreatedNodeCount("==", 1)
 			Expect(lo.FromPtr(env.GetInstance(pod.Spec.NodeName).InstanceType)).To(Equal("c5.large"))
-			Expect(env.GetNode(pod.Spec.NodeName).Labels[corev1beta1.NodePoolLabelKey]).To(Equal(nodePoolHighPri.Name))
+			Expect(env.GetNode(pod.Spec.NodeName).Labels[karpv1.NodePoolLabelKey]).To(Equal(nodePoolHighPri.Name))
 		})
 		DescribeTable(
 			"should provision a right-sized node when a pod has InitContainers (cpu)",
-			func(expectedNodeCPU string, containerRequirements v1.ResourceRequirements, initContainers ...v1.Container) {
+			func(expectedNodeCPU string, containerRequirements corev1.ResourceRequirements, initContainers ...corev1.Container) {
 				if env.K8sMinorVersion() < 29 {
 					Skip("native sidecar containers are only enabled on EKS 1.29+")
 				}
@@ -498,15 +498,15 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 					ObjectMeta: metav1.ObjectMeta{
 						Labels: labels,
 					},
-					PodRequirements: []v1.PodAffinityTerm{{
+					PodRequirements: []corev1.PodAffinityTerm{{
 						LabelSelector: &metav1.LabelSelector{
 							MatchLabels: labels,
 						},
-						TopologyKey: v1.LabelHostname,
+						TopologyKey: corev1.LabelHostname,
 					}},
-					ResourceRequirements: v1.ResourceRequirements{
-						Requests: v1.ResourceList{
-							v1.ResourceCPU: func() resource.Quantity {
+					ResourceRequirements: corev1.ResourceRequirements{
+						Requests: corev1.ResourceList{
+							corev1.ResourceCPU: func() resource.Quantity {
 								dsOverhead := env.GetDaemonSetOverhead(nodePool)
 								base := lo.ToPtr(resource.MustParse("3"))
 								base.Sub(*dsOverhead.Cpu())
@@ -516,16 +516,16 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 					},
 				})
-				test.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{
-					NodeSelectorRequirement: v1.NodeSelectorRequirement{
-						Key:      v1beta1.LabelInstanceCPU,
-						Operator: v1.NodeSelectorOpIn,
+				test.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{
+					NodeSelectorRequirement: corev1.NodeSelectorRequirement{
+						Key:      v1.LabelInstanceCPU,
+						Operator: corev1.NodeSelectorOpIn,
 						Values:   []string{"4", "8"},
 					},
-				}, corev1beta1.NodeSelectorRequirementWithMinValues{
-					NodeSelectorRequirement: v1.NodeSelectorRequirement{
-						Key:      v1beta1.LabelInstanceCategory,
-						Operator: v1.NodeSelectorOpNotIn,
+				}, karpv1.NodeSelectorRequirementWithMinValues{
+					NodeSelectorRequirement: corev1.NodeSelectorRequirement{
+						Key:      v1.LabelInstanceCategory,
+						Operator: corev1.NodeSelectorOpNotIn,
 						Values:   []string{"t"},
 					},
 				})
@@ -533,11 +533,11 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 					ObjectMeta: metav1.ObjectMeta{
 						Labels: labels,
 					},
-					PodRequirements: []v1.PodAffinityTerm{{
+					PodRequirements: []corev1.PodAffinityTerm{{
 						LabelSelector: &metav1.LabelSelector{
 							MatchLabels: labels,
 						},
-						TopologyKey: v1.LabelHostname,
+						TopologyKey: corev1.LabelHostname,
 					}},
 					InitContainers:       initContainers,
 					ResourceRequirements: containerRequirements,
@@ -545,45 +545,45 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 				env.ExpectCreated(nodePool, nodeClass, dsBufferPod, pod)
 				env.EventuallyExpectHealthy(pod)
 				node := env.ExpectCreatedNodeCount("==", 1)[0]
-				Expect(node.ObjectMeta.GetLabels()[v1beta1.LabelInstanceCPU]).To(Equal(expectedNodeCPU))
+				Expect(node.ObjectMeta.GetLabels()[v1.LabelInstanceCPU]).To(Equal(expectedNodeCPU))
 			},
-			Entry("sidecar requirements + later init requirements do exceed container requirements", "8", v1.ResourceRequirements{
-				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("400m")},
-			}, ephemeralInitContainer(v1.ResourceRequirements{
-				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("300m")},
-			}), v1.Container{
-				RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
-				Resources: v1.ResourceRequirements{
-					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("350m")},
+			Entry("sidecar requirements + later init requirements do exceed container requirements", "8", corev1.ResourceRequirements{
+				Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("400m")},
+			}, ephemeralInitContainer(corev1.ResourceRequirements{
+				Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("300m")},
+			}), corev1.Container{
+				RestartPolicy: lo.ToPtr(corev1.ContainerRestartPolicyAlways),
+				Resources: corev1.ResourceRequirements{
+					Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("350m")},
 				},
-			}, ephemeralInitContainer(v1.ResourceRequirements{
-				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
+			}, ephemeralInitContainer(corev1.ResourceRequirements{
+				Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")},
 			})),
-			Entry("sidecar requirements + later init requirements do not exceed container requirements", "4", v1.ResourceRequirements{
-				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("400m")},
-			}, ephemeralInitContainer(v1.ResourceRequirements{
-				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("300m")},
-			}), v1.Container{
-				RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
-				Resources: v1.ResourceRequirements{
-					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("350m")},
+			Entry("sidecar requirements + later init requirements do not exceed container requirements", "4", corev1.ResourceRequirements{
+				Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("400m")},
+			}, ephemeralInitContainer(corev1.ResourceRequirements{
+				Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("300m")},
+			}), corev1.Container{
+				RestartPolicy: lo.ToPtr(corev1.ContainerRestartPolicyAlways),
+				Resources: corev1.ResourceRequirements{
+					Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("350m")},
 				},
-			}, ephemeralInitContainer(v1.ResourceRequirements{
-				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("300m")},
+			}, ephemeralInitContainer(corev1.ResourceRequirements{
+				Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("300m")},
 			})),
-			Entry("init container requirements exceed all later requests", "8", v1.ResourceRequirements{
-				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("400m")},
-			}, v1.Container{
-				RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
-				Resources: v1.ResourceRequirements{
-					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
+			Entry("init container requirements exceed all later requests", "8", corev1.ResourceRequirements{
+				Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("400m")},
+			}, corev1.Container{
+				RestartPolicy: lo.ToPtr(corev1.ContainerRestartPolicyAlways),
+				Resources: corev1.ResourceRequirements{
+					Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")},
 				},
-			}, ephemeralInitContainer(v1.ResourceRequirements{
-				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1500m")},
-			}), v1.Container{
-				RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
-				Resources: v1.ResourceRequirements{
-					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
+			}, ephemeralInitContainer(corev1.ResourceRequirements{
+				Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1500m")},
+			}), corev1.Container{
+				RestartPolicy: lo.ToPtr(corev1.ContainerRestartPolicyAlways),
+				Resources: corev1.ResourceRequirements{
+					Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")},
 				},
 			}),
 		)
@@ -591,30 +591,30 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 			if env.K8sMinorVersion() < 29 {
 				Skip("native sidecar containers are only enabled on EKS 1.29+")
 			}
-			test.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{
-				NodeSelectorRequirement: v1.NodeSelectorRequirement{
-					Key:      v1beta1.LabelInstanceCategory,
-					Operator: v1.NodeSelectorOpNotIn,
+			test.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{
+				NodeSelectorRequirement: corev1.NodeSelectorRequirement{
+					Key:      v1.LabelInstanceCategory,
+					Operator: corev1.NodeSelectorOpNotIn,
 					Values:   []string{"t"},
 				},
 			})
 			pod := test.Pod(test.PodOptions{
-				InitContainers: []v1.Container{
+				InitContainers: []corev1.Container{
 					{
-						RestartPolicy: lo.ToPtr(v1.ContainerRestartPolicyAlways),
-						Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
-							v1.ResourceCPU:    resource.MustParse("100m"),
-							v1.ResourceMemory: resource.MustParse("128Mi"),
+						RestartPolicy: lo.ToPtr(corev1.ContainerRestartPolicyAlways),
+						Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{
+							corev1.ResourceCPU:    resource.MustParse("100m"),
+							corev1.ResourceMemory: resource.MustParse("128Mi"),
 						}},
 					},
-					ephemeralInitContainer(v1.ResourceRequirements{Requests: v1.ResourceList{
-						v1.ResourceCPU:    resource.MustParse("50m"),
-						v1.ResourceMemory: resource.MustParse("4Gi"),
+					ephemeralInitContainer(corev1.ResourceRequirements{Requests: corev1.ResourceList{
+						corev1.ResourceCPU:    resource.MustParse("50m"),
+						corev1.ResourceMemory: resource.MustParse("4Gi"),
 					}}),
 				},
-				ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{
-					v1.ResourceCPU:    resource.MustParse("100m"),
-					v1.ResourceMemory: resource.MustParse("128Mi"),
+				ResourceRequirements: corev1.ResourceRequirements{Requests: corev1.ResourceList{
+					corev1.ResourceCPU:    resource.MustParse("100m"),
+					corev1.ResourceMemory: resource.MustParse("128Mi"),
 				}},
 			})
 			env.ExpectCreated(nodePool, nodeClass, pod)
@@ -630,23 +630,23 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 			// Create a pod with 'overlapping' zone and zone-id requirements. With two options for each label, but only one pair of zone-zoneID that maps to the
 			// same AZ, we will always expect the pod to be scheduled to that AZ. In this case, this is the mapping at zone[1].
 			pod := test.Pod(test.PodOptions{
-				NodeRequirements: []v1.NodeSelectorRequirement{
+				NodeRequirements: []corev1.NodeSelectorRequirement{
 					{
-						Key:      v1.LabelTopologyZone,
-						Operator: v1.NodeSelectorOpIn,
+						Key:      corev1.LabelTopologyZone,
+						Operator: corev1.NodeSelectorOpIn,
 						Values:   lo.Map(subnetInfo[0:2], func(info environmentaws.SubnetInfo, _ int) string { return info.Zone }),
 					},
 					{
-						Key:      v1beta1.LabelTopologyZoneID,
-						Operator: v1.NodeSelectorOpIn,
+						Key:      v1.LabelTopologyZoneID,
+						Operator: corev1.NodeSelectorOpIn,
 						Values:   lo.Map(subnetInfo[1:3], func(info environmentaws.SubnetInfo, _ int) string { return info.ZoneID }),
 					},
 				},
 			})
 			env.ExpectCreated(nodePool, nodeClass, pod)
 			node := env.EventuallyExpectInitializedNodeCount("==", 1)[0]
-			Expect(node.Labels[v1.LabelTopologyZone]).To(Equal(subnetInfo[1].Zone))
-			Expect(node.Labels[v1beta1.LabelTopologyZoneID]).To(Equal(subnetInfo[1].ZoneID))
+			Expect(node.Labels[corev1.LabelTopologyZone]).To(Equal(subnetInfo[1].Zone))
+			Expect(node.Labels[v1.LabelTopologyZoneID]).To(Equal(subnetInfo[1].ZoneID))
 		})
 		It("should provision nodes for pods with zone-id requirements in the correct zone", func() {
 			// Each pod specifies a requirement on this expected zone, where the value is the matching zone for the
@@ -655,27 +655,27 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 			// succeed even if Karpenter doesn't add the label and/or incorrectly generated offerings on k8s 1.30 and
 			// above. This is an unlikely scenario, and adding this check is a defense in depth measure.
 			const expectedZoneLabel = "expected-zone-label"
-			test.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{
-				NodeSelectorRequirement: v1.NodeSelectorRequirement{
+			test.ReplaceRequirements(nodePool, karpv1.NodeSelectorRequirementWithMinValues{
+				NodeSelectorRequirement: corev1.NodeSelectorRequirement{
 					Key:      expectedZoneLabel,
-					Operator: v1.NodeSelectorOpExists,
+					Operator: corev1.NodeSelectorOpExists,
 				},
 			})
 			subnetInfo := lo.UniqBy(env.GetSubnetInfo(map[string]string{"karpenter.sh/discovery": env.ClusterName}), func(s environmentaws.SubnetInfo) string {
 				return s.Zone
 			})
-			pods := lo.Map(subnetInfo, func(info environmentaws.SubnetInfo, _ int) *v1.Pod {
+			pods := lo.Map(subnetInfo, func(info environmentaws.SubnetInfo, _ int) *corev1.Pod {
 				return test.Pod(test.PodOptions{
-					NodeRequirements: []v1.NodeSelectorRequirement{
+					NodeRequirements: []corev1.NodeSelectorRequirement{
 						{
 							Key:      expectedZoneLabel,
-							Operator: v1.NodeSelectorOpIn,
+							Operator: corev1.NodeSelectorOpIn,
 							Values:   []string{info.Zone},
 						},
 						{
-							Key:      v1beta1.LabelTopologyZoneID,
-							Operator: v1.NodeSelectorOpIn,
+							Key:      v1.LabelTopologyZoneID,
+							Operator: corev1.NodeSelectorOpIn,
 							Values:   []string{info.ZoneID},
 						},
 					},
@@ -690,19 +690,19 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
 			for _, node := range nodes {
 				expectedZone, ok := node.Labels[expectedZoneLabel]
 				Expect(ok).To(BeTrue())
-				Expect(node.Labels[v1.LabelTopologyZone]).To(Equal(expectedZone))
+				Expect(node.Labels[corev1.LabelTopologyZone]).To(Equal(expectedZone))
 				zoneInfo, ok := lo.Find(subnetInfo, func(info environmentaws.SubnetInfo) bool {
 					return info.Zone == expectedZone
 				})
 				Expect(ok).To(BeTrue())
-				Expect(node.Labels[v1beta1.LabelTopologyZoneID]).To(Equal(zoneInfo.ZoneID))
+				Expect(node.Labels[v1.LabelTopologyZoneID]).To(Equal(zoneInfo.ZoneID))
 			}
 		})
 	})
 })
 
-func ephemeralInitContainer(requirements v1.ResourceRequirements) v1.Container {
-	return v1.Container{
+func ephemeralInitContainer(requirements corev1.ResourceRequirements) corev1.Container {
+	return corev1.Container{
 		Image:   environmentaws.EphemeralInitContainerImage,
 		Command: []string{"/bin/sh"},
 		Args:    []string{"-c", "sleep 5"},
diff --git a/test/suites/storage/suite_test.go b/test/suites/storage/suite_test.go
index d67331bc92a3..ccdb15a2d96d 100644
--- a/test/suites/storage/suite_test.go
+++ b/test/suites/storage/suite_test.go
@@ -20,16 +20,16 @@ import (
 	"testing"
 
 	awssdk "github.com/aws/aws-sdk-go/aws"
-	corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1"
+	karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
 
-	"github.com/aws/karpenter-provider-aws/pkg/apis/v1beta1"
+	v1 "github.com/aws/karpenter-provider-aws/pkg/apis/v1"
 	environmentaws "github.com/aws/karpenter-provider-aws/test/pkg/environment/aws"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 
 	appsv1 "k8s.io/api/apps/v1"
-	v1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -43,8 +43,8 @@ import (
 )
 
 var env *environmentaws.Environment
-var nodeClass *v1beta1.EC2NodeClass
-var nodePool *corev1beta1.NodePool
+var nodeClass *v1.EC2NodeClass
+var nodePool *karpv1.NodePool
 
 func TestStorage(t *testing.T) {
 	RegisterFailHandler(Fail)
@@ -176,8 +176,8 @@ var _ = Describe("Persistent Volumes", func() {
 		subnets := env.GetSubnets(map[string]string{"karpenter.sh/discovery": env.ClusterName})
 		shuffledAZs := lo.Shuffle(lo.Keys(subnets))
-		storageClass.AllowedTopologies = []v1.TopologySelectorTerm{{
-			MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{{
+		storageClass.AllowedTopologies = []corev1.TopologySelectorTerm{{
+			MatchLabelExpressions: []corev1.TopologySelectorLabelRequirement{{
 				Key:    "topology.ebs.csi.aws.com/zone",
 				Values: []string{shuffledAZs[0]},
 			}},
@@ -201,12 +201,12 @@ var _ = Describe("Persistent Volumes", func() {
 		})
 		count := 2
-		pvcs := lo.Times(count, func(_ int) *v1.PersistentVolumeClaim {
+		pvcs := lo.Times(count, func(_ int) *corev1.PersistentVolumeClaim {
 			return test.PersistentVolumeClaim(test.PersistentVolumeClaimOptions{
 				StorageClassName: &storageClass.Name,
 			})
 		})
-		pods := lo.Map(pvcs, func(pvc *v1.PersistentVolumeClaim, _ int) *v1.Pod {
+		pods := lo.Map(pvcs, func(pvc *corev1.PersistentVolumeClaim, _ int) *corev1.Pod {
 			return test.Pod(test.PodOptions{
 				PersistentVolumeClaims: []string{pvc.Name},
 			})
@@ -238,12 +238,12 @@ var _ = Describe("Persistent Volumes", func() {
 
 var _ = Describe("Ephemeral Storage", func() {
 	It("should run a pod with instance-store ephemeral storage that exceeds EBS root block device mappings", func() {
-		nodeClass.Spec.InstanceStorePolicy = lo.ToPtr(v1beta1.InstanceStorePolicyRAID0)
+		nodeClass.Spec.InstanceStorePolicy = lo.ToPtr(v1.InstanceStorePolicyRAID0)
 		pod := test.Pod(test.PodOptions{
-			ResourceRequirements: v1.ResourceRequirements{
-				Requests: v1.ResourceList{
-					v1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
+			ResourceRequirements: corev1.ResourceRequirements{
+				Requests: corev1.ResourceList{
+					corev1.ResourceEphemeralStorage: resource.MustParse("100Gi"),
 				},
 			},
 		})