From f7abd625189c70de01d76a4da3d0b7142af73783 Mon Sep 17 00:00:00 2001
From: edibble21 <85638465+edibble21@users.noreply.github.com>
Date: Wed, 30 Oct 2024 16:09:25 -0700
Subject: [PATCH] Implemented UnschedulablePodsCount metric (#1698)

---
 pkg/controllers/provisioning/provisioner.go  |  3 ++
 .../provisioning/scheduling/metrics.go       | 13 ++++++-
 .../provisioning/scheduling/scheduler.go     |  4 +-
 .../provisioning/scheduling/suite_test.go    | 38 +++++++++++++++++++
 4 files changed, 56 insertions(+), 2 deletions(-)

diff --git a/pkg/controllers/provisioning/provisioner.go b/pkg/controllers/provisioning/provisioner.go
index 344cb8397..79eb8da63 100644
--- a/pkg/controllers/provisioning/provisioner.go
+++ b/pkg/controllers/provisioning/provisioner.go
@@ -349,6 +349,9 @@ func (p *Provisioner) Schedule(ctx context.Context) (scheduler.Results, error) {
 		return scheduler.Results{}, fmt.Errorf("creating scheduler, %w", err)
 	}
 	results := s.Solve(ctx, pods).TruncateInstanceTypes(scheduler.MaxInstanceTypes)
+	scheduler.UnschedulablePodsCount.With(
+		prometheus.Labels{scheduler.ControllerLabel: injection.GetControllerName(ctx)},
+	).Set(float64(len(results.PodErrors)))
 	if len(results.NewNodeClaims) > 0 {
 		log.FromContext(ctx).WithValues("Pods", pretty.Slice(lo.Map(pods, func(p *corev1.Pod, _ int) string { return klog.KRef(p.Namespace, p.Name).String() }), 5), "duration", time.Since(start)).Info("found provisionable pod(s)")
 	}
diff --git a/pkg/controllers/provisioning/scheduling/metrics.go b/pkg/controllers/provisioning/scheduling/metrics.go
index 1c3328761..9f6d9cfd1 100644
--- a/pkg/controllers/provisioning/scheduling/metrics.go
+++ b/pkg/controllers/provisioning/scheduling/metrics.go
@@ -24,7 +24,7 @@ import (
 )
 
 func init() {
-	crmetrics.Registry.MustRegister(SchedulingDurationSeconds, QueueDepth, IgnoredPodCount)
+	crmetrics.Registry.MustRegister(SchedulingDurationSeconds, QueueDepth, IgnoredPodCount, UnschedulablePodsCount)
 }
 
 const (
@@ -65,4 +65,15 @@ var (
 			Help:      "Number of pods ignored during scheduling by Karpenter",
 		},
 	)
+	UnschedulablePodsCount = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: metrics.Namespace,
+			Subsystem: schedulerSubsystem,
+			Name:      "unschedulable_pods_count",
+			Help:      "The number of unschedulable Pods.",
+		},
+		[]string{
+			ControllerLabel,
+		},
+	)
 )
diff --git a/pkg/controllers/provisioning/scheduling/scheduler.go b/pkg/controllers/provisioning/scheduling/scheduler.go
index dcc867190..65a65d607 100644
--- a/pkg/controllers/provisioning/scheduling/scheduler.go
+++ b/pkg/controllers/provisioning/scheduling/scheduler.go
@@ -207,7 +207,9 @@ func (s *Scheduler) Solve(ctx context.Context, pods []*corev1.Pod) Results {
 	// had 5xA pods and 5xB pods were they have a zonal topology spread, but A can only go in one zone and B in another.
 	// We need to schedule them alternating, A, B, A, B, .... and this solution also solves that as well.
 	errors := map[*corev1.Pod]error{}
-	QueueDepth.DeletePartialMatch(prometheus.Labels{ControllerLabel: injection.GetControllerName(ctx)}) // Reset the metric for the controller, so we don't keep old ids around
+	// Reset the metric for the controller, so we don't keep old ids around
+	UnschedulablePodsCount.DeletePartialMatch(prometheus.Labels{ControllerLabel: injection.GetControllerName(ctx)})
+	QueueDepth.DeletePartialMatch(prometheus.Labels{ControllerLabel: injection.GetControllerName(ctx)})
 	q := NewQueue(pods...)
 
 	startTime := s.clock.Now()
diff --git a/pkg/controllers/provisioning/scheduling/suite_test.go b/pkg/controllers/provisioning/scheduling/suite_test.go
index d4a7a88ff..bcb65dc96 100644
--- a/pkg/controllers/provisioning/scheduling/suite_test.go
+++ b/pkg/controllers/provisioning/scheduling/suite_test.go
@@ -112,6 +112,7 @@ var _ = AfterEach(func() {
 	cluster.Reset()
 	scheduling.QueueDepth.Reset()
 	scheduling.SchedulingDurationSeconds.Reset()
+	scheduling.UnschedulablePodsCount.Reset()
 })
 
 var _ = Context("Scheduling", func() {
@@ -3676,6 +3677,43 @@ var _ = Context("Scheduling", func() {
 		s.Solve(injection.WithControllerName(ctx, "provisioner"), pods)
 		wg.Wait()
 	})
+	It("should surface the UnschedulablePodsCount metric while executing the scheduling loop", func() {
+		nodePool := test.NodePool(v1.NodePool{
+			Spec: v1.NodePoolSpec{
+				Template: v1.NodeClaimTemplate{
+					Spec: v1.NodeClaimTemplateSpec{
+						Requirements: []v1.NodeSelectorRequirementWithMinValues{
+							{
+								NodeSelectorRequirement: corev1.NodeSelectorRequirement{
+									Key:      corev1.LabelInstanceTypeStable,
+									Operator: corev1.NodeSelectorOpIn,
+									Values: []string{
+										"default-instance-type",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		})
+		ExpectApplied(ctx, env.Client, nodePool)
+		// Creates 15 pods: 10 unschedulable and 5 schedulable
+		podsUnschedulable := test.UnschedulablePods(test.PodOptions{NodeSelector: map[string]string{corev1.LabelInstanceTypeStable: "unknown"}}, 10)
+		podsSchedulable := test.UnschedulablePods(test.PodOptions{NodeSelector: map[string]string{corev1.LabelInstanceTypeStable: "default-instance-type"}}, 5)
+		pods := append(podsUnschedulable, podsSchedulable...)
+		ExpectApplied(ctx, env.Client, nodePool)
+		// Adds a UID to each pod for the queue in Solve. Solve pushes any unschedulable pod back onto the queue and
+		// then maps the current length of the queue to the pod using the UID
+		for _, i := range pods {
+			ExpectApplied(ctx, env.Client, i)
+		}
+		_, err := prov.Schedule(injection.WithControllerName(ctx, "provisioner"))
+		m, ok := FindMetricWithLabelValues("karpenter_scheduler_unschedulable_pods_count", map[string]string{"controller": "provisioner"})
+		Expect(ok).To(BeTrue())
+		Expect(lo.FromPtr(m.Gauge.Value)).To(BeNumerically("==", 10))
+		Expect(err).To(BeNil())
+	})
 	It("should surface the schedulingDuration metric after executing a scheduling loop", func() {
 		nodePool := test.NodePool()
 		ExpectApplied(ctx, env.Client, nodePool)
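Note (not part of the patch): the change above follows the usual client_golang GaugeVec pattern of clearing per-controller series with DeletePartialMatch at the start of each scheduling loop and setting the gauge from len(results.PodErrors) at the end. The standalone Go sketch below illustrates that pattern; the variable name, namespace/subsystem values, registry choice, and the hard-coded pod-error count are illustrative assumptions, not code from the Karpenter tree beyond what the diff already shows.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// Illustrative stand-in for the metric added in metrics.go: a gauge keyed by controller name.
var unschedulablePodsCount = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Namespace: "karpenter",
		Subsystem: "scheduler",
		Name:      "unschedulable_pods_count",
		Help:      "The number of unschedulable Pods.",
	},
	[]string{"controller"},
)

func main() {
	// Register against a local registry (the patch registers against controller-runtime's registry instead).
	reg := prometheus.NewRegistry()
	reg.MustRegister(unschedulablePodsCount)

	// Start of a scheduling loop: drop any series previously recorded for this controller,
	// mirroring the DeletePartialMatch call added in scheduler.go.
	unschedulablePodsCount.DeletePartialMatch(prometheus.Labels{"controller": "provisioner"})

	// End of the loop: record how many pods could not be scheduled,
	// mirroring the Set(float64(len(results.PodErrors))) call added in provisioner.go.
	podErrors := 10 // illustrative stand-in for len(results.PodErrors)
	unschedulablePodsCount.With(prometheus.Labels{"controller": "provisioner"}).Set(float64(podErrors))

	fmt.Println("recorded unschedulable pod count for controller 'provisioner'")
}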