perf: Make list calls more consistent #239

Closed · wants to merge 1 commit
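
All seven files apply the same mechanical change: the list object is declared as a value (`v1.PodList{}` rather than `&v1.PodList{}`) and its address is taken at the `List` call site. A minimal before/after sketch of the pattern, assuming a controller-runtime `client.Client` named `kubeClient`; the helper names here are illustrative, not taken from the PR:

```go
package example

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listPodsOld: the style being removed — the list is declared as a pointer
// and passed directly to List.
func listPodsOld(ctx context.Context, kubeClient client.Client, nodeName string) ([]v1.Pod, error) {
	podList := &v1.PodList{}
	if err := kubeClient.List(ctx, podList, client.MatchingFields{"spec.nodeName": nodeName}); err != nil {
		return nil, fmt.Errorf("listing pods on node, %w", err)
	}
	return podList.Items, nil
}

// listPodsNew: the style this PR standardizes on — the list is declared as a
// value and its address is taken at the call site.
func listPodsNew(ctx context.Context, kubeClient client.Client, nodeName string) ([]v1.Pod, error) {
	podList := v1.PodList{}
	if err := kubeClient.List(ctx, &podList, client.MatchingFields{"spec.nodeName": nodeName}); err != nil {
		return nil, fmt.Errorf("listing pods on node, %w", err)
	}
	return podList.Items, nil
}
```

Both forms satisfy `client.ObjectList` (the interface is implemented on `*v1.PodList`), so either compiles and behaves the same; the diff below standardizes on the value declaration.
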
4 changes: 2 additions & 2 deletions pkg/controllers/machine/terminator/terminator.go
@@ -96,8 +96,8 @@ func (t *Terminator) Drain(ctx context.Context, node *v1.Node) error {

// getPods returns a list of evictable pods for the node
func (t *Terminator) getPods(ctx context.Context, node *v1.Node) ([]*v1.Pod, error) {
- podList := &v1.PodList{}
- if err := t.kubeClient.List(ctx, podList, client.MatchingFields{"spec.nodeName": node.Name}); err != nil {
+ podList := v1.PodList{}
+ if err := t.kubeClient.List(ctx, &podList, client.MatchingFields{"spec.nodeName": node.Name}); err != nil {
return nil, fmt.Errorf("listing pods on node, %w", err)
}
var pods []*v1.Pod
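
Background, not part of this diff: several of these List calls filter with `client.MatchingFields` ("spec.nodeName" here, "status.providerID" in the termination controller). With the manager's cached client, such field selectors only resolve if a matching field index was registered at startup. A sketch of that registration, assuming a `manager.Manager` named `mgr`; the actual index setup in this repository may differ:

```go
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// registerIndexes registers the pod field index that backs the
// client.MatchingFields{"spec.nodeName": ...} selector used above.
func registerIndexes(ctx context.Context, mgr manager.Manager) error {
	return mgr.GetFieldIndexer().IndexField(ctx, &v1.Pod{}, "spec.nodeName", func(o client.Object) []string {
		return []string{o.(*v1.Pod).Spec.NodeName}
	})
}
```
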
4 changes: 2 additions & 2 deletions pkg/controllers/node/controller.go
@@ -118,8 +118,8 @@ func (c *Controller) Builder(ctx context.Context, m manager.Manager) corecontrol
// Reconcile all nodes related to a provisioner when it changes.
&source.Kind{Type: &v1alpha5.Provisioner{}},
handler.EnqueueRequestsFromMapFunc(func(o client.Object) (requests []reconcile.Request) {
- nodes := &v1.NodeList{}
- if err := c.kubeClient.List(ctx, nodes, client.MatchingLabels(map[string]string{v1alpha5.ProvisionerNameLabelKey: o.GetName()})); err != nil {
+ nodes := v1.NodeList{}
+ if err := c.kubeClient.List(ctx, &nodes, client.MatchingLabels(map[string]string{v1alpha5.ProvisionerNameLabelKey: o.GetName()})); err != nil {
logging.FromContext(ctx).Errorf("Failed to list nodes when mapping expiration watch events, %s", err)
return requests
}
4 changes: 2 additions & 2 deletions pkg/controllers/node/emptiness.go
@@ -73,8 +73,8 @@ func (r *Emptiness) Reconcile(ctx context.Context, provisioner *v1alpha5.Provisi
}

func (r *Emptiness) isEmpty(ctx context.Context, n *v1.Node) (bool, error) {
- pods := &v1.PodList{}
- if err := r.kubeClient.List(ctx, pods, client.MatchingFields{"spec.nodeName": n.Name}); err != nil {
+ pods := v1.PodList{}
+ if err := r.kubeClient.List(ctx, &pods, client.MatchingFields{"spec.nodeName": n.Name}); err != nil {
return false, fmt.Errorf("listing pods for node, %w", err)
}
for i := range pods.Items {
4 changes: 2 additions & 2 deletions pkg/controllers/provisioning/provisioner.go
@@ -327,8 +327,8 @@ func (p *Provisioner) Launch(ctx context.Context, m *scheduler.Machine, opts ...
}

func (p *Provisioner) getDaemonSetPods(ctx context.Context) ([]*v1.Pod, error) {
- daemonSetList := &appsv1.DaemonSetList{}
- if err := p.kubeClient.List(ctx, daemonSetList); err != nil {
+ daemonSetList := appsv1.DaemonSetList{}
+ if err := p.kubeClient.List(ctx, &daemonSetList); err != nil {
return nil, fmt.Errorf("listing daemonsets, %w", err)
}

4 changes: 2 additions & 2 deletions pkg/controllers/provisioning/scheduling/topology.go
@@ -229,13 +229,13 @@ func (t *Topology) updateInverseAntiAffinity(ctx context.Context, pod *v1.Pod, d
// countDomains initializes the topology group by registereding any well known domains and performing pod counts
// against the cluster for any existing pods.
func (t *Topology) countDomains(ctx context.Context, tg *TopologyGroup) error {
- podList := &v1.PodList{}
+ podList := v1.PodList{}

// collect the pods from all the specified namespaces (don't see a way to query multiple namespaces
// simultaneously)
var pods []v1.Pod
for _, ns := range tg.namespaces.UnsortedList() {
- if err := t.kubeClient.List(ctx, podList, TopologyListOptions(ns, tg.selector)); err != nil {
+ if err := t.kubeClient.List(ctx, &podList, TopologyListOptions(ns, tg.selector)); err != nil {
return fmt.Errorf("listing pods, %w", err)
}
pods = append(pods, podList.Items...)
8 changes: 4 additions & 4 deletions pkg/controllers/state/cluster.go
@@ -80,8 +80,8 @@ func NewCluster(clk clock.Clock, client client.Client, cp cloudprovider.CloudPro
// of the cluster is as close to correct as it can be when we begin to perform operations
// utilizing the cluster state as our source of truth
func (c *Cluster) Synced(ctx context.Context) bool {
- machineList := &v1alpha5.MachineList{}
- if err := c.kubeClient.List(ctx, machineList); err != nil {
+ machineList := v1alpha5.MachineList{}
+ if err := c.kubeClient.List(ctx, &machineList); err != nil {
logging.FromContext(ctx).Errorf("checking cluster state sync, %v", err)
return false
}
@@ -310,8 +310,8 @@ func (c *Cluster) GetDaemonSetPod(daemonset *appsv1.DaemonSet) *v1.Pod {
}

func (c *Cluster) UpdateDaemonSet(ctx context.Context, daemonset *appsv1.DaemonSet) error {
- pods := &v1.PodList{}
- err := c.kubeClient.List(ctx, pods, client.InNamespace(daemonset.Namespace))
+ pods := v1.PodList{}
+ err := c.kubeClient.List(ctx, &pods, client.InNamespace(daemonset.Namespace))
if err != nil {
return err
}
4 changes: 2 additions & 2 deletions pkg/controllers/termination/controller.go
@@ -113,8 +113,8 @@ func (c *Controller) Finalize(ctx context.Context, node *v1.Node) (reconcile.Res
}

func (c *Controller) ensureMachinesRemoved(ctx context.Context, n *v1.Node) (allRemoved bool, err error) {
- machineList := &v1alpha5.MachineList{}
- if err = c.kubeClient.List(ctx, machineList, client.MatchingFields{"status.providerID": n.Spec.ProviderID}); err != nil {
+ machineList := v1alpha5.MachineList{}
+ if err = c.kubeClient.List(ctx, &machineList, client.MatchingFields{"status.providerID": n.Spec.ProviderID}); err != nil {
return false, err
}
if len(machineList.Items) == 0 {