From 49d5e8a4a9b0910d2005dc0650abcc1a2111a135 Mon Sep 17 00:00:00 2001
From: Amanuel Engeda
Date: Mon, 12 Aug 2024 10:16:06 -0700
Subject: [PATCH] Update nilable duration

Pull in sigs.k8s.io/karpenter@63bfb721100e, which makes
Disruption.ConsolidateAfter a *NillableDuration and parses
NillableDuration values from strings such as "30s" and "Never".
Migrate the test suites from struct-literal construction to
MustParseNillableDuration.
---
 go.mod                                     |  2 +-
 go.sum                                     |  4 ++--
 test/pkg/environment/common/environment.go |  4 ++--
 test/suites/chaos/suite_test.go            |  4 ++--
 test/suites/consolidation/suite_test.go    | 10 +++++-----
 test/suites/expiration/suite_test.go       | 24 ++++++++++++------------
 test/suites/integration/emptiness_test.go  |  6 +++---
 test/suites/integration/validation_test.go |  5 ++---
 test/suites/scale/deprovisioning_test.go   | 13 ++++++-------
 9 files changed, 35 insertions(+), 37 deletions(-)

diff --git a/go.mod b/go.mod
index 04bf0e16eb53..c4be63c3b4c8 100644
--- a/go.mod
+++ b/go.mod
@@ -29,7 +29,7 @@ require (
 	k8s.io/utils v0.0.0-20240102154912-e7106e64919e
 	knative.dev/pkg v0.0.0-20231010144348-ca8c009405dd
 	sigs.k8s.io/controller-runtime v0.18.4
-	sigs.k8s.io/karpenter v0.36.3-0.20240812071216-7aa2a5c37de7
+	sigs.k8s.io/karpenter v0.36.3-0.20240812192326-63bfb721100e
 	sigs.k8s.io/yaml v1.4.0
 )

diff --git a/go.sum b/go.sum
index efe5352239d2..5843823f53b3 100644
--- a/go.sum
+++ b/go.sum
@@ -761,10 +761,10 @@ sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHv
 sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/karpenter v0.36.3-0.20240808011113-f3cd14f1ba19 h1:cvG3uvaF82AZV2g14GMjIhk2Z+Lp3TS/A4vIJ+b2IIw=
-sigs.k8s.io/karpenter v0.36.3-0.20240808011113-f3cd14f1ba19/go.mod h1:vZfbuD5UQJfNdaNwiUVgCUJ/yYOVjYbKosr8b971CAM=
 sigs.k8s.io/karpenter v0.36.3-0.20240812071216-7aa2a5c37de7 h1:XuaHMCSRIH6NxuXEnJ5XHjU5QdXSS7WNhQz3M74pf/c=
 sigs.k8s.io/karpenter v0.36.3-0.20240812071216-7aa2a5c37de7/go.mod h1:vZfbuD5UQJfNdaNwiUVgCUJ/yYOVjYbKosr8b971CAM=
+sigs.k8s.io/karpenter v0.36.3-0.20240812192326-63bfb721100e h1:rRUqXMzFCmhyTkJ0LINSOyGK7abS3RHfTp5ZgFbFWN8=
+sigs.k8s.io/karpenter v0.36.3-0.20240812192326-63bfb721100e/go.mod h1:vZfbuD5UQJfNdaNwiUVgCUJ/yYOVjYbKosr8b971CAM=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
diff --git a/test/pkg/environment/common/environment.go b/test/pkg/environment/common/environment.go
index 908a734904be..e032d0e599f6 100644
--- a/test/pkg/environment/common/environment.go
+++ b/test/pkg/environment/common/environment.go
@@ -180,8 +180,8 @@ func (env *Environment) DefaultNodePool(nodeClass *v1beta1.EC2NodeClass) *corev1
 			},
 		},
 	}
-	nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{}
-	nodePool.Spec.Disruption.ExpireAfter.Duration = nil
+	nodePool.Spec.Disruption.ConsolidateAfter = lo.ToPtr(corev1beta1.MustParseNillableDuration("Never"))
+	nodePool.Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("Never")
 	nodePool.Spec.Limits = corev1beta1.Limits(v1.ResourceList{
 		v1.ResourceCPU:    resource.MustParse("1000"),
 		v1.ResourceMemory: resource.MustParse("1000Gi"),
diff --git a/test/suites/chaos/suite_test.go b/test/suites/chaos/suite_test.go
index 9011876d9a36..038a9458f283 100644
--- a/test/suites/chaos/suite_test.go
+++ b/test/suites/chaos/suite_test.go
@@ -82,7 +82,7 @@ var _ = Describe("Chaos", func() {
 				},
 			})
 			nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenUnderutilized
-			nodePool.Spec.Disruption.ConsolidateAfter = nil
+			nodePool.Spec.Disruption.ConsolidateAfter = lo.ToPtr(corev1beta1.MustParseNillableDuration("Never"))

 			numPods := 1
 			dep := coretest.Deployment(coretest.DeploymentOptions{
@@ -113,7 +113,7 @@ var _ = Describe("Chaos", func() {
 			defer cancel()

 			nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenEmpty
-			nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{Duration: lo.ToPtr(30 * time.Second)}
+			nodePool.Spec.Disruption.ConsolidateAfter = lo.ToPtr(corev1beta1.MustParseNillableDuration("30s"))
 			numPods := 1
 			dep := coretest.Deployment(coretest.DeploymentOptions{
 				Replicas: int32(numPods),
diff --git a/test/suites/consolidation/suite_test.go b/test/suites/consolidation/suite_test.go
index 3ed5408c5aa0..ec25e3bdd196 100644
--- a/test/suites/consolidation/suite_test.go
+++ b/test/suites/consolidation/suite_test.go
@@ -151,7 +151,7 @@ var _ = Describe("Consolidation", func() {
 	})
 	It("should respect budgets for non-empty delete consolidation", func() {
 		// This test will hold consolidation until we are ready to execute it
-		nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{}
+		nodePool.Spec.Disruption.ConsolidateAfter = lo.ToPtr(corev1beta1.MustParseNillableDuration("Never"))

 		nodePool = test.ReplaceRequirements(nodePool,
 			corev1beta1.NodeSelectorRequirementWithMinValues{
@@ -220,7 +220,7 @@
 	It("should respect budgets for non-empty replace consolidation", func() {
 		appLabels := map[string]string{"app": "large-app"}
 		// This test will hold consolidation until we are ready to execute it
-		nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{}
+		nodePool.Spec.Disruption.ConsolidateAfter = lo.ToPtr(corev1beta1.MustParseNillableDuration("Never"))

 		nodePool = test.ReplaceRequirements(nodePool,
 			corev1beta1.NodeSelectorRequirementWithMinValues{
@@ -403,7 +403,7 @@
 			Disruption: corev1beta1.Disruption{
 				ConsolidationPolicy: corev1beta1.ConsolidationPolicyWhenUnderutilized,
 				// Disable Consolidation until we're ready
-				ConsolidateAfter: &corev1beta1.NillableDuration{},
+				ConsolidateAfter: lo.ToPtr(corev1beta1.MustParseNillableDuration("Never")),
 			},
 			Template: corev1beta1.NodeClaimTemplate{
 				Spec: corev1beta1.NodeClaimSpec{
@@ -482,7 +482,7 @@
 			Disruption: corev1beta1.Disruption{
 				ConsolidationPolicy: corev1beta1.ConsolidationPolicyWhenUnderutilized,
 				// Disable Consolidation until we're ready
-				ConsolidateAfter: &corev1beta1.NillableDuration{},
+				ConsolidateAfter: lo.ToPtr(corev1beta1.MustParseNillableDuration("Never")),
 			},
 			Template: corev1beta1.NodeClaimTemplate{
 				Spec: corev1beta1.NodeClaimSpec{
@@ -618,7 +618,7 @@
 			Disruption: corev1beta1.Disruption{
 				ConsolidationPolicy: corev1beta1.ConsolidationPolicyWhenUnderutilized,
 				// Disable Consolidation until we're ready
-				ConsolidateAfter: &corev1beta1.NillableDuration{},
+				ConsolidateAfter: lo.ToPtr(corev1beta1.MustParseNillableDuration("Never")),
 			},
 			Template: corev1beta1.NodeClaimTemplate{
 				Spec: corev1beta1.NodeClaimSpec{
diff --git a/test/suites/expiration/suite_test.go b/test/suites/expiration/suite_test.go
index 168381716de6..a8a8e5c36e82 100644
--- a/test/suites/expiration/suite_test.go
+++ b/test/suites/expiration/suite_test.go
@@ -63,7 +63,7 @@ var _ = BeforeEach(func() {
 	env.BeforeEach()
 	nodeClass = env.DefaultEC2NodeClass()
 	nodePool = env.DefaultNodePool(nodeClass)
-	nodePool.Spec.Disruption.ExpireAfter = corev1beta1.NillableDuration{Duration: lo.ToPtr(time.Second * 30)}
+	nodePool.Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("30s")
 })

 var _ = AfterEach(func() { env.Cleanup() })
@@ -108,7 +108,7 @@ var _ = Describe("Expiration", func() {
 			nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{{
 				Nodes: "100%",
 			}}
-			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.NillableDuration{}
+			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("Never")

 			// Create a deployment with one pod to create one node.
 			dep = coretest.Deployment(coretest.DeploymentOptions{
@@ -157,7 +157,7 @@ var _ = Describe("Expiration", func() {
 			env.ExpectUpdated(node)

 			By("enabling expiration")
-			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.NillableDuration{Duration: lo.ToPtr(time.Second * 30)}
+			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("30s")
 			env.ExpectUpdated(nodePool)

 			// Expect that both of the nodes are expired, but not being disrupted
@@ -191,7 +191,7 @@ var _ = Describe("Expiration", func() {
 			nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{{
 				Nodes: "50%",
 			}}
-			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.NillableDuration{}
+			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("Never")

 			numPods = 6
 			dep = coretest.Deployment(coretest.DeploymentOptions{
@@ -232,7 +232,7 @@ var _ = Describe("Expiration", func() {
 			env.ExpectDeleted(dep)

 			By("enabling expiration")
-			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.NillableDuration{Duration: lo.ToPtr(time.Second * 30)}
+			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("30s")
 			env.ExpectUpdated(nodePool)

 			env.EventuallyExpectExpired(nodeClaims...)
@@ -272,7 +272,7 @@ var _ = Describe("Expiration", func() {
 				Nodes: "50%",
 			}}
 			// disable expiration so that we can enable it later when we want.
-			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.NillableDuration{}
+			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("Never")
 			numPods = 9
 			dep = coretest.Deployment(coretest.DeploymentOptions{
 				Replicas: int32(numPods),
@@ -318,7 +318,7 @@ var _ = Describe("Expiration", func() {
 			By("expiring the nodes")
 			// expire the nodeclaims
-			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.NillableDuration{Duration: lo.ToPtr(time.Second * 30)}
+			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("30s")
 			env.ExpectUpdated(nodePool)

 			env.EventuallyExpectExpired(nodeClaims...)
@@ -408,7 +408,7 @@ var _ = Describe("Expiration", func() {
 			}

 			By("enabling expiration")
-			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.NillableDuration{Duration: lo.ToPtr(30 * time.Second)}
+			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("30s")
 			env.ExpectUpdated(nodePool)

 			// Ensure that we get two nodes tainted, and they have overlap during the expiration
@@ -420,7 +420,7 @@ var _ = Describe("Expiration", func() {
 			// Set the expireAfter to "Never" to make sure new node isn't deleted
 			// This is CRITICAL since it prevents nodes that are immediately spun up from immediately being expired and
 			// racing at the end of the E2E test, leaking node resources into subsequent tests
-			nodePool.Spec.Disruption.ExpireAfter.Duration = nil
+			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("Never")
 			env.ExpectUpdated(nodePool)

 			for _, node := range nodes {
@@ -497,7 +497,7 @@ var _ = Describe("Expiration", func() {
 			// Set the expireAfter to "Never" to make sure new node isn't deleted
 			// This is CRITICAL since it prevents nodes that are immediately spun up from immediately being expired and
 			// racing at the end of the E2E test, leaking node resources into subsequent tests
-			nodePool.Spec.Disruption.ExpireAfter.Duration = nil
+			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("Never")
 			env.ExpectUpdated(nodePool)

 			// After the deletion timestamp is set and all pods are drained
@@ -538,7 +538,7 @@ var _ = Describe("Expiration", func() {
 			env.Monitor.Reset() // Reset the monitor so that we can expect a single node to be spun up after expiration

 			// Set the expireAfter value to get the node deleted
-			nodePool.Spec.Disruption.ExpireAfter.Duration = lo.ToPtr(time.Minute)
+			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("1m")
 			env.ExpectUpdated(nodePool)

 			env.EventuallyExpectExpired(nodeClaim)
@@ -561,7 +561,7 @@ var _ = Describe("Expiration", func() {
 			// Set the expireAfter to "Never" to make sure new node isn't deleted
 			// This is CRITICAL since it prevents nodes that are immediately spun up from immediately being expired and
 			// racing at the end of the E2E test, leaking node resources into subsequent tests
-			nodePool.Spec.Disruption.ExpireAfter.Duration = nil
+			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("Never")
 			env.ExpectUpdated(nodePool)

 			// After the deletion timestamp is set and all pods are drained
diff --git a/test/suites/integration/emptiness_test.go b/test/suites/integration/emptiness_test.go
index a4aca9792b0d..599468aa429c 100644
--- a/test/suites/integration/emptiness_test.go
+++ b/test/suites/integration/emptiness_test.go
@@ -39,7 +39,7 @@ var _ = Describe("Emptiness", func() {
 	var numPods int
 	BeforeEach(func() {
 		nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenEmpty
-		nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{Duration: lo.ToPtr(time.Duration(0))}
+		nodePool.Spec.Disruption.ConsolidateAfter = lo.ToPtr(corev1beta1.MustParseNillableDuration("0s"))

 		numPods = 1
 		dep = test.Deployment(test.DeploymentOptions{
@@ -97,7 +97,7 @@ var _ = Describe("Emptiness", func() {
 		})
 	})
 	It("should terminate an empty node", func() {
-		nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{Duration: lo.ToPtr(time.Hour * 300)}
+		nodePool.Spec.Disruption.ConsolidateAfter = lo.ToPtr(corev1beta1.MustParseNillableDuration("300h"))

 		const numPods = 1
 		deployment := test.Deployment(test.DeploymentOptions{Replicas: numPods})
@@ -116,7 +116,7 @@ var _ = Describe("Emptiness", func() {
 		env.EventuallyExpectEmpty(nodeClaim)

 		By("waiting for the nodeclaim to deprovision when past its ConsolidateAfter timeout of 0")
-		nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{Duration: lo.ToPtr(time.Duration(0))}
+		nodePool.Spec.Disruption.ConsolidateAfter = lo.ToPtr(corev1beta1.MustParseNillableDuration("0s"))
 		env.ExpectUpdated(nodePool)

 		env.EventuallyExpectNotFound(nodeClaim, node)
diff --git a/test/suites/integration/validation_test.go b/test/suites/integration/validation_test.go
index 790a880ce29b..b20abc660b50 100644
--- a/test/suites/integration/validation_test.go
+++ b/test/suites/integration/validation_test.go
@@ -16,7 +16,6 @@ package integration_test

 import (
 	"fmt"
-	"time"

 	"github.com/samber/lo"
 	v1 "k8s.io/api/core/v1"
@@ -104,12 +103,12 @@ var _ = Describe("Validation", func() {
 	})
 	It("should error when ttlSecondAfterEmpty is negative", func() {
 		nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenEmpty
-		nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{Duration: lo.ToPtr(-time.Second)}
+		nodePool.Spec.Disruption.ConsolidateAfter = lo.ToPtr(corev1beta1.MustParseNillableDuration("-1s"))
 		Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed())
 	})
 	It("should error when ConsolidationPolicy=WhenUnderutilized is used with consolidateAfter", func() {
 		nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenUnderutilized
-		nodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{Duration: lo.ToPtr(time.Minute)}
+		nodePool.Spec.Disruption.ConsolidateAfter = lo.ToPtr(corev1beta1.MustParseNillableDuration("1m"))
 		Expect(env.Client.Create(env.Context, nodePool)).ToNot(Succeed())
 	})
 	It("should error if imageGCHighThresholdPercent is less than imageGCLowThresholdPercent", func() {
diff --git a/test/suites/scale/deprovisioning_test.go b/test/suites/scale/deprovisioning_test.go
index cb7086bbee08..23ed7a3bd271 100644
--- a/test/suites/scale/deprovisioning_test.go
+++ b/test/suites/scale/deprovisioning_test.go
@@ -28,7 +28,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/uuid"
-	"knative.dev/pkg/ptr"

 	corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1"
 	"sigs.k8s.io/karpenter/pkg/test"
@@ -236,8 +235,8 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
 		// Enable consolidation, emptiness, and expiration
 		nodePoolMap[consolidationValue].Spec.Disruption.ConsolidateAfter = nil
 		nodePoolMap[emptinessValue].Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenEmpty
-		nodePoolMap[emptinessValue].Spec.Disruption.ConsolidateAfter.Duration = ptr.Duration(0)
-		nodePoolMap[expirationValue].Spec.Disruption.ExpireAfter.Duration = ptr.Duration(0)
+		nodePoolMap[emptinessValue].Spec.Disruption.ConsolidateAfter = lo.ToPtr(corev1beta1.MustParseNillableDuration("0s"))
+		nodePoolMap[expirationValue].Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("0s")
 		nodePoolMap[expirationValue].Spec.Limits = disableProvisioningLimits
 		// Update the drift NodeClass to start drift on Nodes assigned to this NodeClass
 		driftNodeClass.Spec.AMIFamily = &v1beta1.AMIFamilyBottlerocket
@@ -545,7 +544,7 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
 			env.MeasureDeprovisioningDurationFor(func() {
 				By("kicking off deprovisioning emptiness by setting the ttlSecondsAfterEmpty value on the nodePool")
 				nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenEmpty
-				nodePool.Spec.Disruption.ConsolidateAfter.Duration = ptr.Duration(0)
+				nodePool.Spec.Disruption.ConsolidateAfter = lo.ToPtr(corev1beta1.MustParseNillableDuration("0s"))
 				env.ExpectCreatedOrUpdated(nodePool)

 				env.EventuallyExpectDeletedNodeCount("==", expectedNodeCount)
@@ -598,13 +597,13 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
 			// Change limits so that replacement nodes will use another nodePool.
 			nodePool.Spec.Limits = disableProvisioningLimits
 			// Enable Expiration
-			nodePool.Spec.Disruption.ExpireAfter.Duration = ptr.Duration(0)
+			nodePool.Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("0s")

 			noExpireNodePool := test.NodePool(*nodePool.DeepCopy())

 			// Disable Expiration
-			noExpireNodePool.Spec.Disruption.ConsolidateAfter = &corev1beta1.NillableDuration{}
-			noExpireNodePool.Spec.Disruption.ExpireAfter.Duration = nil
+			noExpireNodePool.Spec.Disruption.ConsolidateAfter = lo.ToPtr(corev1beta1.MustParseNillableDuration("Never"))
+			noExpireNodePool.Spec.Disruption.ExpireAfter = corev1beta1.MustParseNillableDuration("Never")

 			noExpireNodePool.ObjectMeta = metav1.ObjectMeta{Name: test.RandomName()}
 			noExpireNodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{