Skip to content

Commit

Permalink
Fix timeout in scale tests
Browse files Browse the repository at this point in the history
  • Loading branch information
jigisha620 committed Jul 9, 2024
1 parent 6a01acc commit 355af82
Show file tree
Hide file tree
Showing 2 changed files with 50 additions and 0 deletions.
45 changes: 45 additions & 0 deletions test/suites/scale/deprovisioning_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -297,6 +297,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
env.MeasureDeprovisioningDurationFor(func() {
By("enabling deprovisioning across nodePools")
for _, p := range nodePoolMap {
p.Spec.Disruption.Budgets = []corev1beta1.Budget{
{
Nodes: "70%",
},
}
env.ExpectCreatedOrUpdated(p)
}
env.ExpectUpdated(driftNodeClass)
Expand Down Expand Up @@ -378,6 +383,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
By("kicking off deprovisioning by setting the consolidation enabled value on the nodePool")
nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenUnderutilized
nodePool.Spec.Disruption.ConsolidateAfter = nil
nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
{
Nodes: "70%",
},
}
env.ExpectUpdated(nodePool)

env.EventuallyExpectDeletedNodeCount("==", expectedNodeCount)
Expand Down Expand Up @@ -431,6 +441,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
env.MeasureDeprovisioningDurationFor(func() {
By("kicking off deprovisioning by setting the consolidation enabled value on the nodePool")
nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenUnderutilized
nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
{
Nodes: "70%",
},
}
nodePool.Spec.Disruption.ConsolidateAfter = nil
env.ExpectUpdated(nodePool)

Expand Down Expand Up @@ -499,6 +514,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
// the requirements wide-open should cause deletes and increase our utilization on the cluster
nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenUnderutilized
nodePool.Spec.Disruption.ConsolidateAfter = nil
nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
{
Nodes: "70%",
},
}
nodePool.Spec.Template.Spec.Requirements = lo.Reject(nodePool.Spec.Template.Spec.Requirements, func(r corev1beta1.NodeSelectorRequirementWithMinValues, _ int) bool {
return r.Key == v1beta1.LabelInstanceSize
})
Expand Down Expand Up @@ -559,6 +579,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
By("kicking off deprovisioning emptiness by setting the ttlSecondsAfterEmpty value on the nodePool")
nodePool.Spec.Disruption.ConsolidationPolicy = corev1beta1.ConsolidationPolicyWhenEmpty
nodePool.Spec.Disruption.ConsolidateAfter.Duration = lo.ToPtr(time.Duration(0))
nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
{
Nodes: "70%",
},
}
env.ExpectCreatedOrUpdated(nodePool)

env.EventuallyExpectDeletedNodeCount("==", expectedNodeCount)
Expand Down Expand Up @@ -612,6 +637,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
nodePool.Spec.Limits = disableProvisioningLimits
// Enable Expiration
nodePool.Spec.Disruption.ExpireAfter.Duration = lo.ToPtr(time.Duration(0))
nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
{
Nodes: "70%",
},
}

noExpireNodePool := test.NodePool(*nodePool.DeepCopy())

Expand All @@ -624,6 +654,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
MaxPods: lo.ToPtr[int32](int32(maxPodDensity)),
}
noExpireNodePool.Spec.Limits = nil
noExpireNodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
{
Nodes: "70%",
},
}
env.ExpectCreatedOrUpdated(nodePool, noExpireNodePool)

env.EventuallyExpectDeletedNodeCount("==", expectedNodeCount)
Expand All @@ -650,6 +685,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
MaxPods: lo.ToPtr[int32](int32(maxPodDensity)),
}
nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
{
Nodes: "70%",
},
}

By("waiting for the deployment to deploy all of its pods")
env.ExpectCreated(deployment)
Expand Down Expand Up @@ -701,6 +741,11 @@ var _ = Describe("Deprovisioning", Label(debug.NoWatch), Label(debug.NoEvents),
nodePool.Spec.Template.Spec.Kubelet = &corev1beta1.KubeletConfiguration{
MaxPods: lo.ToPtr[int32](int32(maxPodDensity)),
}
nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
{
Nodes: "70%",
},
}

By("waiting for the deployment to deploy all of its pods")
env.ExpectCreated(deployment)
Expand Down
5 changes: 5 additions & 0 deletions test/suites/scale/provisioning_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,11 @@ var _ = Describe("Provisioning", Label(debug.NoWatch), Label(debug.NoEvents), fu
nodeClass = env.DefaultEC2NodeClass()
nodePool = env.DefaultNodePool(nodeClass)
nodePool.Spec.Limits = nil
nodePool.Spec.Disruption.Budgets = []corev1beta1.Budget{
{
Nodes: "70%",
},
}
test.ReplaceRequirements(nodePool, corev1beta1.NodeSelectorRequirementWithMinValues{
NodeSelectorRequirement: v1.NodeSelectorRequirement{
Key: v1beta1.LabelInstanceHypervisor,
Expand Down

0 comments on commit 355af82

Please sign in to comment.