chore: Drop custom workqueue metrics provider (#1607)
jigisha620 authored Aug 29, 2024
1 parent 9df2d5f commit ee2f7d5
Showing 5 changed files with 7 additions and 126 deletions.
3 changes: 1 addition & 2 deletions pkg/controllers/disruption/orchestration/queue.go
@@ -129,8 +129,7 @@ func NewQueue(kubeClient client.Client, recorder events.Recorder, cluster *state
RateLimitingInterface: workqueue.NewRateLimitingQueueWithConfig(
workqueue.NewItemExponentialFailureRateLimiter(queueBaseDelay, queueMaxDelay),
workqueue.RateLimitingQueueConfig{
Name: "disruption.workqueue",
MetricsProvider: metrics.WorkqueueMetricsProvider{},
Name: "disruption.workqueue",
}),
providerIDToCommand: map[string]*Command{},
kubeClient: kubeClient,
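Note (not part of the commit): with MetricsProvider left out of RateLimitingQueueConfig, client-go's workqueue is expected to fall back to whatever provider was installed globally via workqueue.SetProvider, and controller-runtime's metrics package is generally understood to install such a provider, exporting the standard workqueue_* series on its registry. A minimal sketch under those assumptions, with placeholder delays rather than Karpenter's queueBaseDelay/queueMaxDelay:

// disruptionqueue_sketch.go: illustrative only, not part of this commit.
package disruption

import (
	"time"

	"k8s.io/client-go/util/workqueue"
	// Assumption: importing controller-runtime's metrics package registers the
	// default workqueue metrics provider as a side effect of its init().
	_ "sigs.k8s.io/controller-runtime/pkg/metrics"
)

// newQueue builds the same kind of rate-limited queue as the hunk above, with no
// explicit MetricsProvider: metrics come from the globally installed provider and
// carry the label name="disruption.workqueue".
func newQueue() workqueue.RateLimitingInterface {
	return workqueue.NewRateLimitingQueueWithConfig(
		workqueue.NewItemExponentialFailureRateLimiter(1*time.Second, 10*time.Second), // placeholder delays
		workqueue.RateLimitingQueueConfig{
			Name: "disruption.workqueue",
		})
}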
6 changes: 3 additions & 3 deletions pkg/controllers/node/termination/suite_test.go
@@ -861,13 +861,13 @@ var _ = Describe("Termination", func() {
}})
ExpectApplied(ctx, env.Client, lo.Map(pods, func(p *corev1.Pod, _ int) client.Object { return p })...)

-wqDepthBefore, _ := FindMetricWithLabelValues("karpenter_workqueue_depth", map[string]string{"name": "eviction.workqueue"})
+wqDepthBefore, _ := FindMetricWithLabelValues("workqueue_adds_total", map[string]string{"name": "eviction.workqueue"})
Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectObjectReconciled(ctx, env.Client, terminationController, node)
-wqDepthAfter, ok := FindMetricWithLabelValues("karpenter_workqueue_depth", map[string]string{"name": "eviction.workqueue"})
+wqDepthAfter, ok := FindMetricWithLabelValues("workqueue_adds_total", map[string]string{"name": "eviction.workqueue"})
Expect(ok).To(BeTrue())
-Expect(lo.FromPtr(wqDepthAfter.GetGauge().Value) - lo.FromPtr(wqDepthBefore.GetGauge().Value)).To(BeNumerically("==", 5))
+Expect(lo.FromPtr(wqDepthAfter.GetCounter().Value) - lo.FromPtr(wqDepthBefore.GetCounter().Value)).To(BeNumerically("==", 5))
})
})
})
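Note (not part of the commit): the old karpenter_workqueue_depth gauge came from the custom provider, so the test now asserts against the standard workqueue_adds_total counter instead, hence GetCounter and a before/after delta of 5 adds. A hypothetical helper, assuming the standard series end up on the controller-runtime registry, for reading that counter outside the test expectations package:

// Illustrative helper only; the metric and label names are taken from the test
// above and assumed to come from client-go's default workqueue metrics.
package termination

import (
	"fmt"

	crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
)

func readEvictionAdds() (float64, error) {
	families, err := crmetrics.Registry.Gather() // the registry is also a prometheus.Gatherer
	if err != nil {
		return 0, err
	}
	for _, mf := range families {
		if mf.GetName() != "workqueue_adds_total" {
			continue
		}
		for _, m := range mf.GetMetric() {
			for _, label := range m.GetLabel() {
				if label.GetName() == "name" && label.GetValue() == "eviction.workqueue" {
					// Counters only increase, which is why the test compares a
					// before/after delta rather than an absolute value.
					return m.GetCounter().GetValue(), nil
				}
			}
		}
	}
	return 0, fmt.Errorf("workqueue_adds_total{name=%q} not found", "eviction.workqueue")
}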
8 changes: 2 additions & 6 deletions pkg/controllers/node/termination/terminator/eviction.go
@@ -23,8 +23,6 @@ import (
"sync"
"time"

"sigs.k8s.io/karpenter/pkg/metrics"

"github.com/awslabs/operatorpkg/singleton"
"github.com/samber/lo"
corev1 "k8s.io/api/core/v1"
@@ -95,8 +93,7 @@ func NewQueue(kubeClient client.Client, recorder events.Recorder) *Queue {
RateLimitingInterface: workqueue.NewRateLimitingQueueWithConfig(
workqueue.NewItemExponentialFailureRateLimiter(evictionQueueBaseDelay, evictionQueueMaxDelay),
workqueue.RateLimitingQueueConfig{
Name: "eviction.workqueue",
MetricsProvider: metrics.WorkqueueMetricsProvider{},
Name: "eviction.workqueue",
}),
set: sets.New[QueueKey](),
kubeClient: kubeClient,
@@ -206,8 +203,7 @@ func (q *Queue) Reset() {
q.RateLimitingInterface = workqueue.NewRateLimitingQueueWithConfig(
workqueue.NewItemExponentialFailureRateLimiter(evictionQueueBaseDelay, evictionQueueMaxDelay),
workqueue.RateLimitingQueueConfig{
Name: "eviction.workqueue",
MetricsProvider: metrics.WorkqueueMetricsProvider{},
Name: "eviction.workqueue",
})
q.set = sets.New[QueueKey]()
}
2 changes: 1 addition & 1 deletion pkg/metrics/metrics.go
@@ -93,5 +93,5 @@ var (

func init() {
crmetrics.Registry.MustRegister(NodeClaimsCreatedTotal, NodeClaimsTerminatedTotal, NodeClaimsDisruptedTotal,
-NodesCreatedTotal, NodesTerminatedTotal, workqueueDepth, workqueueAdds, workqueueLatency, workDuration, workqueueUnfinished, workqueueLongestRunningProcessor, workqueueRetries)
+NodesCreatedTotal, NodesTerminatedTotal)
}
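Note (not part of the commit): with the custom collectors gone, this init only registers Karpenter's own node and nodeclaim counters; the workqueue series are expected to come from the default provider on the controller-runtime registry. A quick, standalone way to sanity-check that assumption is to serve that registry and grep for the standard names:

// Illustrative check only: serve the controller-runtime registry and inspect it
// for the standard workqueue_* series (Karpenter registers its own metrics on
// the same registry, as the init above shows).
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
)

func main() {
	http.Handle("/metrics", promhttp.HandlerFor(crmetrics.Registry, promhttp.HandlerOpts{}))
	// Once the process has created its workqueues:
	//   curl -s localhost:8080/metrics | grep '^workqueue_'
	// should list series such as workqueue_depth and workqueue_adds_total,
	// labeled with name="disruption.workqueue" and name="eviction.workqueue".
	_ = http.ListenAndServe(":8080", nil)
}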
114 changes: 0 additions & 114 deletions pkg/metrics/workqueue_provider.go

This file was deleted.
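Note (not part of the commit): the deleted file implemented the dropped provider. The sketch below is not its contents, which are not shown here, but a hypothetical illustration of the shape such a provider takes: an implementation of client-go's workqueue.MetricsProvider backed by prometheus collectors under a karpenter_workqueue_* prefix, matching the old karpenter_workqueue_depth metric seen in the test above.

// Hypothetical sketch only; not the deleted file.
package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"k8s.io/client-go/util/workqueue"
)

// WorkqueueMetricsProvider would satisfy workqueue.MetricsProvider once all seven
// methods (depth, adds, latency, work duration, unfinished work, longest running
// processor, retries) are implemented; only one is sketched here.
type WorkqueueMetricsProvider struct{}

// workqueueDepth is one of the collectors that the package init in metrics.go
// used to register (see the removed registration line above).
var workqueueDepth = prometheus.NewGaugeVec(prometheus.GaugeOpts{
	Namespace: "karpenter", // assumed prefix, matching karpenter_workqueue_depth
	Subsystem: "workqueue",
	Name:      "depth",
	Help:      "Current depth of the workqueue.",
}, []string{"name"})

// NewDepthMetric returns a per-queue gauge; prometheus.Gauge already satisfies
// workqueue.GaugeMetric (Inc/Dec).
func (WorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
	return workqueueDepth.WithLabelValues(name)
}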
