From 355af820c61d57432e355cb64db60884853319bc Mon Sep 17 00:00:00 2001
From: jigisha620
Date: Thu, 4 Jul 2024 15:36:04 -0700
Subject: [PATCH] Fix time out in scale tests

---
 test/suites/scale/deprovisioning_test.go | 45 ++++++++++++++++++++++++
 test/suites/scale/provisioning_test.go   |  5 +++
 2 files changed, 50 insertions(+)

diff --git a/test/suites/scale/deprovisioning_test.go b/test/suites/scale/deprovisioning_test.go
index c86f08d1c85f..65156543d9ef 100644
--- a/test/suites/scale/deprovisioning_test.go
+++ b/test/suites/scale/deprovisioning_test.go
@@ -297,6 +297,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
 			env.MeasureDeprovisioningDurationFor(func() {
 				By("enabling deprovisioning across nodePools")
 				for _, p := range nodePoolMap {
+					p.Spec.Disruption.Budgets = []corev1beta1.Budget{
+						{
+							Nodes: "70%",
+						},
+					}
 					env.ExpectCreatedOrUpdated(p)
 				}
 				env.ExpectUpdated(driftNodeClass)
@@ -378,6 +383,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
 				By("kicking off deprovisioning by setting the consolidation enabled value on the nodePool")
 				nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenUnderutilized
 				nodePool.Spec.Disruption.ConsolidateAfter = nil
+				nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
+					{
+						Nodes: "70%",
+					},
+				}
 				env.ExpectUpdated(nodePool)
 
 				env.EventuallyExpectDeletedNodeCount("==", expectedNodeCount)
@@ -431,6 +441,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
 			env.MeasureDeprovisioningDurationFor(func() {
 				By("kicking off deprovisioning by setting the consolidation enabled value on the nodePool")
 				nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenUnderutilized
+				nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
+					{
+						Nodes: "70%",
+					},
+				}
 				nodePool.Spec.Disruption.ConsolidateAfter = nil
 				env.ExpectUpdated(nodePool)
 
@@ -499,6 +514,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
 				// the requirements wide-open should cause deletes and increase our utilization on the cluster
 				nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenUnderutilized
 				nodePool.Spec.Disruption.ConsolidateAfter = nil
+				nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
+					{
+						Nodes: "70%",
+					},
+				}
 				nodePool.Spec.Template.Spec.Requirements = lo.Reject(nodePool.Spec.Template.Spec.Requirements, func(r corev1beta1.NodeSelectorRequirementWithMinValues, _ int) bool {
 					return r.Key == v1beta1.LabelInstanceSize
 				})
@@ -559,6 +579,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
 				By("kicking off deprovisioning emptiness by setting the ttlSecondsAfterEmpty value on the nodePool")
 				nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenEmpty
 				nodePool.Spec.Disruption.ConsolidateAfter.Duration = lo.ToPtr(time.Duration(0))
+				nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
+					{
+						Nodes: "70%",
+					},
+				}
 				env.ExpectCreatedOrUpdated(nodePool)
 
 				env.EventuallyExpectDeletedNodeCount("==", expectedNodeCount)
@@ -612,6 +637,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
 				nodePool.Spec.Limits = disableProvisioningLimits
 				// Enable Expiration
 				nodePool.Spec.Disruption.ExpireAfter.Duration = lo.ToPtr(time.Duration(0))
+				nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
+					{
+						Nodes: "70%",
+					},
+				}
 
 				noExpireNodePool := test.NodePool(*nodePool.DeepCopy())
 
@@ -624,6 +654,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
 					MaxPods: lo.ToPtr[int32](int32(maxPodDensity)),
 				}
 				noExpireNodePool.Spec.Limits = nil
+				noExpireNodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
+					{
+						Nodes: "70%",
+					},
+				}
 				env.ExpectCreatedOrUpdated(nodePool, noExpireNodePool)
 
 				env.EventuallyExpectDeletedNodeCount("==", expectedNodeCount)
@@ -650,6 +685,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
 			nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
 				MaxPods: lo.ToPtr[int32](int32(maxPodDensity)),
 			}
+			nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
+				{
+					Nodes: "70%",
+				},
+			}
 
 			By("waiting for the deployment to deploy all of its pods")
 			env.ExpectCreated(deployment)
@@ -701,6 +741,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
 			nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
 				MaxPods: lo.ToPtr[int32](int32(maxPodDensity)),
 			}
+			nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
+				{
+					Nodes: "70%",
+				},
+			}
 
 			By("waiting for the deployment to deploy all of its pods")
 			env.ExpectCreated(deployment)
diff --git a/test/suites/scale/provisioning_test.go b/test/suites/scale/provisioning_test.go
index 44fea2a8d659..cffd85133962 100644
--- a/test/suites/scale/provisioning_test.go
+++ b/test/suites/scale/provisioning_test.go
@@ -48,6 +48,11 @@ var _ = Describe("Provisioning", Label(debug.NoWatch), Label(debug.NoEvents), fu
 		nodeClass = env.DefaultEC2NodeClass()
 		nodePool = env.DefaultNodePool(nodeClass)
 		nodePool.Spec.Limits = nil
+		nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
+			{
+				Nodes: "70%",
+			},
+		}
 		test.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{
 			NodeSelectorRequirement: v1.NodeSelectorRequirement{
 				Key: v1beta1.LabelInstanceHypervisor,