From 3bfc525a464a7cde78d0d6acf072b01d6974710e Mon Sep 17 00:00:00 2001 From: stijndehaes Date: Mon, 13 Mar 2023 12:59:42 +0100 Subject: [PATCH] Make list calls more consistent This may also allow the list object to be created on the stack rather than on the heap (NOTE: unverified — the value's address is still passed to the List interface method, so escape analysis will likely heap-allocate it either way; the primary motivation is consistency) --- pkg/controllers/machine/terminator/terminator.go | 4 ++-- pkg/controllers/node/controller.go | 4 ++-- pkg/controllers/node/emptiness.go | 4 ++-- pkg/controllers/provisioning/provisioner.go | 4 ++-- pkg/controllers/provisioning/scheduling/topology.go | 4 ++-- pkg/controllers/state/cluster.go | 8 ++++---- pkg/controllers/termination/controller.go | 4 ++-- 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/pkg/controllers/machine/terminator/terminator.go b/pkg/controllers/machine/terminator/terminator.go index 61eecfda3a..c30757cc55 100644 --- a/pkg/controllers/machine/terminator/terminator.go +++ b/pkg/controllers/machine/terminator/terminator.go @@ -96,8 +96,8 @@ func (t *Terminator) Drain(ctx context.Context, node *v1.Node) error { // getPods returns a list of evictable pods for the node func (t *Terminator) getPods(ctx context.Context, node *v1.Node) ([]*v1.Pod, error) { - podList := &v1.PodList{} - if err := t.kubeClient.List(ctx, podList, client.MatchingFields{"spec.nodeName": node.Name}); err != nil { + podList := v1.PodList{} + if err := t.kubeClient.List(ctx, &podList, client.MatchingFields{"spec.nodeName": node.Name}); err != nil { return nil, fmt.Errorf("listing pods on node, %w", err) } var pods []*v1.Pod diff --git a/pkg/controllers/node/controller.go b/pkg/controllers/node/controller.go index 3304ed9ec5..a10b4c58ed 100644 --- a/pkg/controllers/node/controller.go +++ b/pkg/controllers/node/controller.go @@ -118,8 +118,8 @@ func (c *Controller) Builder(ctx context.Context, m manager.Manager) corecontrol // Reconcile all nodes related to a provisioner when it changes. 
&source.Kind{Type: &v1alpha5.Provisioner{}}, handler.EnqueueRequestsFromMapFunc(func(o client.Object) (requests []reconcile.Request) { - nodes := &v1.NodeList{} - if err := c.kubeClient.List(ctx, nodes, client.MatchingLabels(map[string]string{v1alpha5.ProvisionerNameLabelKey: o.GetName()})); err != nil { + nodes := v1.NodeList{} + if err := c.kubeClient.List(ctx, &nodes, client.MatchingLabels(map[string]string{v1alpha5.ProvisionerNameLabelKey: o.GetName()})); err != nil { logging.FromContext(ctx).Errorf("Failed to list nodes when mapping expiration watch events, %s", err) return requests } diff --git a/pkg/controllers/node/emptiness.go b/pkg/controllers/node/emptiness.go index eeb3656dae..2e1725ba5d 100644 --- a/pkg/controllers/node/emptiness.go +++ b/pkg/controllers/node/emptiness.go @@ -73,8 +73,8 @@ func (r *Emptiness) Reconcile(ctx context.Context, provisioner *v1alpha5.Provisi } func (r *Emptiness) isEmpty(ctx context.Context, n *v1.Node) (bool, error) { - pods := &v1.PodList{} - if err := r.kubeClient.List(ctx, pods, client.MatchingFields{"spec.nodeName": n.Name}); err != nil { + pods := v1.PodList{} + if err := r.kubeClient.List(ctx, &pods, client.MatchingFields{"spec.nodeName": n.Name}); err != nil { return false, fmt.Errorf("listing pods for node, %w", err) } for i := range pods.Items { diff --git a/pkg/controllers/provisioning/provisioner.go b/pkg/controllers/provisioning/provisioner.go index 44340bced6..8c704c8b14 100644 --- a/pkg/controllers/provisioning/provisioner.go +++ b/pkg/controllers/provisioning/provisioner.go @@ -327,8 +327,8 @@ func (p *Provisioner) Launch(ctx context.Context, m *scheduler.Machine, opts ... 
} func (p *Provisioner) getDaemonSetPods(ctx context.Context) ([]*v1.Pod, error) { - daemonSetList := &appsv1.DaemonSetList{} - if err := p.kubeClient.List(ctx, daemonSetList); err != nil { + daemonSetList := appsv1.DaemonSetList{} + if err := p.kubeClient.List(ctx, &daemonSetList); err != nil { return nil, fmt.Errorf("listing daemonsets, %w", err) } diff --git a/pkg/controllers/provisioning/scheduling/topology.go b/pkg/controllers/provisioning/scheduling/topology.go index 3a4cbc11a3..91108e39fe 100644 --- a/pkg/controllers/provisioning/scheduling/topology.go +++ b/pkg/controllers/provisioning/scheduling/topology.go @@ -229,13 +229,13 @@ func (t *Topology) updateInverseAntiAffinity(ctx context.Context, pod *v1.Pod, d // countDomains initializes the topology group by registereding any well known domains and performing pod counts // against the cluster for any existing pods. func (t *Topology) countDomains(ctx context.Context, tg *TopologyGroup) error { - podList := &v1.PodList{} + podList := v1.PodList{} // collect the pods from all the specified namespaces (don't see a way to query multiple namespaces // simultaneously) var pods []v1.Pod for _, ns := range tg.namespaces.UnsortedList() { - if err := t.kubeClient.List(ctx, podList, TopologyListOptions(ns, tg.selector)); err != nil { + if err := t.kubeClient.List(ctx, &podList, TopologyListOptions(ns, tg.selector)); err != nil { return fmt.Errorf("listing pods, %w", err) } pods = append(pods, podList.Items...) 
diff --git a/pkg/controllers/state/cluster.go b/pkg/controllers/state/cluster.go index 0e27aac08e..856ed4ff34 100644 --- a/pkg/controllers/state/cluster.go +++ b/pkg/controllers/state/cluster.go @@ -80,8 +80,8 @@ func NewCluster(clk clock.Clock, client client.Client, cp cloudprovider.CloudPro // of the cluster is as close to correct as it can be when we begin to perform operations // utilizing the cluster state as our source of truth func (c *Cluster) Synced(ctx context.Context) bool { - machineList := &v1alpha5.MachineList{} - if err := c.kubeClient.List(ctx, machineList); err != nil { + machineList := v1alpha5.MachineList{} + if err := c.kubeClient.List(ctx, &machineList); err != nil { logging.FromContext(ctx).Errorf("checking cluster state sync, %v", err) return false } @@ -310,8 +310,8 @@ func (c *Cluster) GetDaemonSetPod(daemonset *appsv1.DaemonSet) *v1.Pod { } func (c *Cluster) UpdateDaemonSet(ctx context.Context, daemonset *appsv1.DaemonSet) error { - pods := &v1.PodList{} - err := c.kubeClient.List(ctx, pods, client.InNamespace(daemonset.Namespace)) + pods := v1.PodList{} + err := c.kubeClient.List(ctx, &pods, client.InNamespace(daemonset.Namespace)) if err != nil { return err } diff --git a/pkg/controllers/termination/controller.go b/pkg/controllers/termination/controller.go index c39021b1e4..91576c3f80 100644 --- a/pkg/controllers/termination/controller.go +++ b/pkg/controllers/termination/controller.go @@ -113,8 +113,8 @@ func (c *Controller) Finalize(ctx context.Context, node *v1.Node) (reconcile.Res } func (c *Controller) ensureMachinesRemoved(ctx context.Context, n *v1.Node) (allRemoved bool, err error) { - machineList := &v1alpha5.MachineList{} - if err = c.kubeClient.List(ctx, machineList, client.MatchingFields{"status.providerID": n.Spec.ProviderID}); err != nil { + machineList := v1alpha5.MachineList{} + if err = c.kubeClient.List(ctx, &machineList, client.MatchingFields{"status.providerID": n.Spec.ProviderID}); err != nil { return false, err } if 
len(machineList.Items) == 0 {