Skip to content

Commit

Permalink
PR comments
Browse files Browse the repository at this point in the history
  • Loading branch information
jonathan-innis committed Jan 26, 2024
1 parent c192b0e commit 2258029
Show file tree
Hide file tree
Showing 6 changed files with 225 additions and 166 deletions.
322 changes: 186 additions & 136 deletions pkg/apis/crds/karpenter.k8s.aws_ec2nodeclasses.yaml

Large diffs are not rendered by default.

21 changes: 14 additions & 7 deletions test/pkg/environment/common/expectations.go
Original file line number Diff line number Diff line change
Expand Up @@ -491,22 +491,22 @@ func NodeNames(nodes []*v1.Node) []string {
})
}

func (env *Environment) ConsistentlyExpectNodeCount(comparator string, count int, duration string) []*v1.Node {
func (env *Environment) ConsistentlyExpectNodeCount(comparator string, count int, duration time.Duration) []*v1.Node {
GinkgoHelper()
By(fmt.Sprintf("expecting nodes to be %s to %d for %s", comparator, count, duration))
nodeList := &v1.NodeList{}
Consistently(func(g Gomega) {
g.Expect(env.Client.List(env, nodeList, client.HasLabels{test.DiscoveryLabel})).To(Succeed())
g.Expect(len(nodeList.Items)).To(BeNumerically(comparator, count),
fmt.Sprintf("expected %d nodes, had %d (%v) for %s", count, len(nodeList.Items), NodeNames(lo.ToSlicePtr(nodeList.Items)), duration))
}, duration).Should(Succeed())
}, duration.String()).Should(Succeed())
return lo.ToSlicePtr(nodeList.Items)
}

// ConsistentlyExpectNoDisruptions ensures that the state of the cluster is not changed within a passed duration
// Specifically, we check if the cluster size in terms of nodes is the same as the passed-in size and we validate
// that no disrupting taints are added throughout the window
func (env *Environment) ConsistentlyExpectNoDisruptions(nodeCount int, duration string) {
func (env *Environment) ConsistentlyExpectNoDisruptions(nodeCount int, duration time.Duration) {
GinkgoHelper()
Consistently(func(g Gomega) {
// Ensure we don't change our NodeClaims
Expand All @@ -524,10 +524,10 @@ func (env *Environment) ConsistentlyExpectNoDisruptions(nodeCount int, duration
})
g.Expect(ok).To(BeFalse())
}
}, duration).Should(Succeed())
}, duration.String()).Should(Succeed())
}

func (env *Environment) ConsistentlyExpectTaintedNodeCount(comparator string, count int, duration string) []*v1.Node {
func (env *Environment) ConsistentlyExpectTaintedNodeCount(comparator string, count int, duration time.Duration) []*v1.Node {
GinkgoHelper()

By(fmt.Sprintf("checking for tainted nodes to be %s to %d for %s", comparator, count, duration))
Expand All @@ -536,7 +536,7 @@ func (env *Environment) ConsistentlyExpectTaintedNodeCount(comparator string, co
g.Expect(env.Client.List(env, nodeList, client.MatchingFields{"spec.taints[*].karpenter.sh/disruption": "disrupting"})).To(Succeed())
g.Expect(len(nodeList.Items)).To(BeNumerically(comparator, count),
fmt.Sprintf("expected %d tainted nodes, had %d (%v)", count, len(nodeList.Items), NodeNames(lo.ToSlicePtr(nodeList.Items))))
}, duration).Should(Succeed())
}, duration.String()).Should(Succeed())
return lo.ToSlicePtr(nodeList.Items)
}

Expand Down Expand Up @@ -798,6 +798,7 @@ func (env *Environment) ForcePodsToSpread(nodes ...*v1.Node) {
maxPodsPerNode := int(math.Ceil(float64(podCount) / float64(len(nodes))))

By(fmt.Sprintf("forcing %d pods to spread across %d nodes", podCount, len(nodes)))
start := time.Now()
for {
var nodePods []*v1.Pod
node, found := lo.Find(nodes, func(n *v1.Node) bool {
Expand All @@ -818,6 +819,13 @@ func (env *Environment) ForcePodsToSpread(nodes ...*v1.Node) {
Eventually(func(g Gomega) {
g.Expect(len(env.ExpectActivePodsForNode(node.Name))).To(Or(Equal(maxPodsPerNode), Equal(maxPodsPerNode-1)))
}).WithTimeout(5 * time.Second).Should(Succeed())

	// TODO: Consider moving this time check to an Eventually poll. This gets a little trickier with helper functions
	// since you need to make sure that your Expectation helper functions are scoped to your "g Gomega" scope
// so that you don't fail the first time you get a failure on your expectation
if time.Since(start) > time.Minute*15 {
Fail("forcing pods to spread failed due to a timeout")
}
}
for _, n := range nodes {
stored := n.DeepCopy()
Expand All @@ -831,7 +839,6 @@ func (env *Environment) ExpectActivePodsForNode(nodeName string) []*v1.Pod {
podList := &v1.PodList{}
Expect(env.Client.List(env, podList, client.MatchingFields{"spec.nodeName": nodeName}, client.HasLabels{test.DiscoveryLabel})).To(Succeed())

// Return the healthy pods
return lo.Filter(lo.ToSlicePtr(podList.Items), func(p *v1.Pod, _ int) bool {
return p.DeletionTimestamp.IsZero()
})
Expand Down
16 changes: 9 additions & 7 deletions test/suites/consolidation/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -125,8 +125,8 @@ var _ = Describe("Consolidation", func() {
env.ExpectUpdated(dep)

// Ensure that we get two nodes tainted, and they have overlap during the drift
nodes = env.EventuallyExpectTaintedNodeCount("==", 2)
env.ConsistentlyExpectTaintedNodeCount("==", 2, "5s")
env.EventuallyExpectTaintedNodeCount("==", 2)
nodes = env.ConsistentlyExpectTaintedNodeCount("==", 2, time.Second*5)

// Remove the finalizer from each node so that we can terminate
for _, node := range nodes {
Expand All @@ -138,7 +138,9 @@ var _ = Describe("Consolidation", func() {
env.EventuallyExpectNotFound(nodes[0], nodes[1])

// This check ensures that we are consolidating nodes at the same time
nodes = env.EventuallyExpectTaintedNodeCount("==", 2)
env.EventuallyExpectTaintedNodeCount("==", 2)
nodes = env.ConsistentlyExpectTaintedNodeCount("==", 2, time.Second*5)

for _, node := range nodes {
Expect(env.ExpectTestingFinalizerRemoved(node)).To(Succeed())
}
Expand Down Expand Up @@ -205,8 +207,8 @@ var _ = Describe("Consolidation", func() {
env.ExpectUpdated(nodePool)

// Ensure that we get two nodes tainted, and they have overlap during the drift
nodes = env.EventuallyExpectTaintedNodeCount("==", 2)
env.ConsistentlyExpectTaintedNodeCount("==", 2, "5s")
env.EventuallyExpectTaintedNodeCount("==", 2)
nodes = env.ConsistentlyExpectTaintedNodeCount("==", 2, time.Second*5)

for _, node := range nodes {
Expect(env.ExpectTestingFinalizerRemoved(node)).To(Succeed())
Expand Down Expand Up @@ -242,7 +244,7 @@ var _ = Describe("Consolidation", func() {
// Update the deployment to only contain 1 replica.
env.ExpectUpdated(dep)

env.ConsistentlyExpectNoDisruptions(5, "1m")
env.ConsistentlyExpectNoDisruptions(5, time.Minute)
})
It("should not allow consolidation if the budget is fully blocking during a scheduled time", func() {
// We're going to define a budget that doesn't allow any drift to happen
Expand Down Expand Up @@ -278,7 +280,7 @@ var _ = Describe("Consolidation", func() {
// Update the deployment to only contain 1 replica.
env.ExpectUpdated(dep)

env.ConsistentlyExpectNoDisruptions(5, "1m")
env.ConsistentlyExpectNoDisruptions(5, time.Minute)
})
})
DescribeTable("should consolidate nodes (delete)", Label(debug.NoWatch), Label(debug.NoEvents),
Expand Down
14 changes: 7 additions & 7 deletions test/suites/drift/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -158,8 +158,8 @@ var _ = Describe("Drift", func() {
env.EventuallyExpectDrifted(nodeClaims...)

// Ensure that we get two nodes tainted, and they have overlap during the drift
nodes = env.EventuallyExpectTaintedNodeCount("==", 2)
env.ConsistentlyExpectTaintedNodeCount("==", 2, "5s")
env.EventuallyExpectTaintedNodeCount("==", 2)
nodes = env.ConsistentlyExpectTaintedNodeCount("==", 2, time.Second*5)

// Remove the finalizer from each node so that we can terminate
for _, node := range nodes {
Expand Down Expand Up @@ -250,8 +250,8 @@ var _ = Describe("Drift", func() {
env.ExpectUpdated(nodes[0])

// Ensure that we get two nodes tainted, and they have overlap during the drift
nodes = env.EventuallyExpectTaintedNodeCount("==", 2)
env.ConsistentlyExpectTaintedNodeCount("==", 2, "5s")
env.EventuallyExpectTaintedNodeCount("==", 2)
nodes = env.ConsistentlyExpectTaintedNodeCount("==", 2, time.Second*5)

By("removing the finalizer from the nodes")
Expect(env.ExpectTestingFinalizerRemoved(nodes[0])).To(Succeed())
Expand Down Expand Up @@ -280,7 +280,7 @@ var _ = Describe("Drift", func() {
env.ExpectUpdated(nodePool)

env.EventuallyExpectDrifted(nodeClaim)
env.ConsistentlyExpectNoDisruptions(1, "1m")
env.ConsistentlyExpectNoDisruptions(1, time.Minute)
})
It("should not allow drift if the budget is fully blocking during a scheduled time", func() {
// We're going to define a budget that doesn't allow any drift to happen
Expand All @@ -307,7 +307,7 @@ var _ = Describe("Drift", func() {
env.ExpectUpdated(nodePool)

env.EventuallyExpectDrifted(nodeClaim)
env.ConsistentlyExpectNoDisruptions(1, "1m")
env.ConsistentlyExpectNoDisruptions(1, time.Minute)
})
})
It("should disrupt nodes that have drifted due to AMIs", func() {
Expand Down Expand Up @@ -782,7 +782,7 @@ var _ = Describe("Drift", func() {
env.ExpectUpdated(nodePool)

env.EventuallyExpectDrifted(nodeClaims...)
env.ConsistentlyExpectNoDisruptions(int(numPods), "1m")
env.ConsistentlyExpectNoDisruptions(int(numPods), time.Minute)
})
})
})
14 changes: 7 additions & 7 deletions test/suites/expiration/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -151,8 +151,8 @@ var _ = Describe("Expiration", func() {
env.EventuallyExpectExpired(nodeClaims...)

// Expect that two nodes are tainted.
nodes = env.EventuallyExpectTaintedNodeCount("==", 2)
env.ConsistentlyExpectTaintedNodeCount("==", 2, "5s")
env.EventuallyExpectTaintedNodeCount("==", 2)
nodes = env.ConsistentlyExpectTaintedNodeCount("==", 2, time.Second*5)

// Remove finalizers
for _, node := range nodes {
Expand Down Expand Up @@ -248,8 +248,8 @@ var _ = Describe("Expiration", func() {
env.ExpectUpdated(nodes[0])

// Ensure that we get two nodes tainted, and they have overlap during the expiration
nodes = env.EventuallyExpectTaintedNodeCount("==", 2)
env.ConsistentlyExpectTaintedNodeCount("==", 2, "5s")
env.EventuallyExpectTaintedNodeCount("==", 2)
nodes = env.ConsistentlyExpectTaintedNodeCount("==", 2, time.Second*5)

By("removing the finalizer from the nodes")
Expect(env.ExpectTestingFinalizerRemoved(nodes[0])).To(Succeed())
Expand All @@ -273,7 +273,7 @@ var _ = Describe("Expiration", func() {
env.EventuallyExpectHealthyPodCount(selector, numPods)

env.EventuallyExpectExpired(nodeClaim)
env.ConsistentlyExpectNoDisruptions(1, "1m")
env.ConsistentlyExpectNoDisruptions(1, time.Minute)
})
It("should not allow expiration if the budget is fully blocking during a scheduled time", func() {
// We're going to define a budget that doesn't allow any expirations to happen
Expand All @@ -295,7 +295,7 @@ var _ = Describe("Expiration", func() {
env.EventuallyExpectHealthyPodCount(selector, numPods)

env.EventuallyExpectExpired(nodeClaim)
env.ConsistentlyExpectNoDisruptions(1, "1m")
env.ConsistentlyExpectNoDisruptions(1, time.Minute)
})
})
It("should expire the node after the expiration is reached", func() {
Expand Down Expand Up @@ -560,7 +560,7 @@ var _ = Describe("Expiration", func() {
env.EventuallyExpectBoundPodCount(selector, int(numPods))

env.EventuallyExpectExpired(nodeClaims...)
env.ConsistentlyExpectNoDisruptions(int(numPods), "1m")
env.ConsistentlyExpectNoDisruptions(int(numPods), time.Minute)
})
})
})
4 changes: 2 additions & 2 deletions test/suites/integration/emptiness_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ var _ = Describe("Emptiness", func() {
env.ExpectDeleted(dep)

env.EventuallyExpectEmpty(nodeClaim)
env.ConsistentlyExpectNoDisruptions(1, "1m")
env.ConsistentlyExpectNoDisruptions(1, time.Minute)
})
It("should not allow emptiness if the budget is fully blocking during a scheduled time", func() {
// We're going to define a budget that doesn't allow any emptiness disruption to happen
Expand All @@ -93,7 +93,7 @@ var _ = Describe("Emptiness", func() {
env.ExpectDeleted(dep)

env.EventuallyExpectEmpty(nodeClaim)
env.ConsistentlyExpectNoDisruptions(1, "1m")
env.ConsistentlyExpectNoDisruptions(1, time.Minute)
})
})
It("should terminate an empty node", func() {
Expand Down

0 comments on commit 2258029

Please sign in to comment.