From 2d339d65dca18fc89cbca5edd7d671ba606efc46 Mon Sep 17 00:00:00 2001 From: hjiajing Date: Wed, 3 Jan 2024 17:14:59 +0800 Subject: [PATCH] Upgrade k8s libraries to v0.29.2 1. Upgrade k8s libraries to v0.29.2 2. Upgrade controller-runtime to v0.16.3 Signed-off-by: hjiajing --- build/images/codegen/Dockerfile | 4 +- build/images/codegen/README.md | 1 + cmd/antrea-agent-simulator/simulator.go | 4 +- cmd/antrea-agent/agent.go | 4 +- cmd/antrea-agent/options.go | 8 +- cmd/antrea-agent/options_test.go | 12 +- go.mod | 101 +++--- go.sum | 321 +++++++----------- hack/update-codegen.sh | 2 +- .../v1alpha1/multiclusterconfig_types.go | 37 +- .../v1alpha1/zz_generated.deepcopy.go | 1 - .../antrea-multicluster-leader-global.yml | 113 +++--- .../antrea-multicluster-leader-namespaced.yml | 4 +- .../yamls/antrea-multicluster-leader.yml | 117 ++++--- .../yamls/antrea-multicluster-member.yml | 4 +- .../clusterset_webhook_test.go | 6 +- .../cmd/multicluster-controller/controller.go | 48 ++- .../gateway_webhook_test.go | 6 +- .../cmd/multicluster-controller/leader.go | 2 +- .../multicluster-controller/leader_test.go | 10 +- .../memberclusterannounce_webhook_test.go | 6 +- .../cmd/multicluster-controller/options.go | 46 ++- .../antrea-mc-config-with-empty-podcidrs.yml | 2 - ...-mc-config-with-invalid-endpointiptype.yml | 2 - ...antrea-mc-config-with-invalid-podcidrs.yml | 2 - .../antrea-mc-config-with-valid-podcidrs.yml | 2 - ...cluster.crd.antrea.io_resourceexports.yaml | 71 ++-- ...cluster.crd.antrea.io_resourceimports.yaml | 42 +-- .../configmap/controller_manager_config.yaml | 2 - .../commonarea/remote_common_area.go | 14 +- .../leader/clusterset_controller_test.go | 2 +- .../leader/resourceexport_controller_test.go | 7 +- .../multicluster/leader/stale_controller.go | 2 +- .../leader/stale_controller_test.go | 4 +- .../member/clusterset_controller_test.go | 4 +- .../multicluster/member/gateway_controller.go | 6 +- .../member/gateway_controller_test.go | 16 +- 
.../member/labelidentity_controller.go | 10 +- .../member/labelidentity_controller_test.go | 9 +- .../multicluster/member/node_controller.go | 6 +- .../member/node_controller_test.go | 9 +- .../member/serviceexport_controller.go | 20 +- .../member/serviceexport_controller_test.go | 28 +- .../multicluster/member/stale_controller.go | 2 +- multicluster/hack/update-codegen.sh | 2 +- multicluster/test/integration/suite_test.go | 2 +- .../mocks/mock_controller_runtime_manager.go | 142 +++++--- pkg/agent/agent.go | 79 ++--- pkg/agent/agent_linux.go | 24 +- pkg/agent/agent_test.go | 2 +- pkg/agent/apiserver/apiserver.go | 6 + .../interface_configuration_windows.go | 42 +-- pkg/agent/cniserver/ipam/antrea_ipam.go | 3 +- pkg/agent/cniserver/ipam/antrea_ipam_test.go | 24 +- pkg/agent/cniserver/server_windows_test.go | 15 +- .../egress/egress_controller_test.go | 41 +-- .../l7_flow_export_controller_test.go | 2 +- .../networkpolicy/allocator_test.go | 15 +- .../networkpolicy/l7engine/reconciler.go | 3 +- .../networkpolicy/networkpolicy_controller.go | 4 +- .../networkpolicy/status_controller_test.go | 16 +- pkg/agent/controller/traceflow/packetin.go | 4 +- .../controller/traceflow/packetin_test.go | 4 +- .../traceflow/traceflow_controller_test.go | 4 +- .../controller/trafficcontrol/controller.go | 22 +- .../trafficcontrol/controller_test.go | 2 +- .../externalnode/external_node_controller.go | 7 +- pkg/agent/multicast/mcast_controller_test.go | 2 +- .../multicluster/pod_route_controller_test.go | 4 +- .../stretched_networkpolicy_controller.go | 2 +- ...stretched_networkpolicy_controller_test.go | 8 +- pkg/agent/nodeportlocal/npl_agent_test.go | 4 +- pkg/agent/proxy/proxier_test.go | 14 +- pkg/agent/proxy/topology.go | 2 +- .../podwatch/controller_test.go | 13 +- pkg/agent/util/iptables/lock.go | 14 +- pkg/agent/util/net_linux.go | 18 +- pkg/agent/util/net_windows.go | 9 +- pkg/antctl/raw/multicluster/common/common.go | 5 +- pkg/antctl/raw/multicluster/join.go | 5 +- 
pkg/antctl/raw/traceflow/command.go | 4 +- .../transform/networkpolicy/transform_test.go | 14 +- pkg/apiserver/certificate/certificate.go | 22 +- pkg/apiserver/certificate/certificate_test.go | 2 +- .../registry/system/supportbundle/rest.go | 4 + .../ipsec_csr_signing_controller_test.go | 23 +- pkg/controller/egress/controller_test.go | 15 +- .../externalippool/controller_test.go | 2 +- .../externalnode/controller_test.go | 8 +- pkg/controller/grouping/controller.go | 5 +- pkg/controller/grouping/controller_test.go | 7 +- .../ipam/antrea_ipam_controller_test.go | 38 ++- pkg/controller/labelidentity/controller.go | 5 +- .../labelidentity/controller_test.go | 7 +- .../networkpolicy_controller_test.go | 8 +- pkg/controller/stats/aggregator_test.go | 6 +- .../controller_test.go | 4 +- pkg/controller/traceflow/controller_test.go | 4 +- .../clickhouseclient/clickhouseclient.go | 2 +- .../clickhouseclient/clickhouseclient_test.go | 4 +- pkg/flowaggregator/exporter/clickhouse.go | 3 +- pkg/flowaggregator/exporter/utils.go | 3 +- pkg/ipam/poolallocator/allocator_test.go | 5 +- pkg/monitor/controller.go | 4 +- pkg/ovs/ovsctl/ovsctl_others.go | 2 +- pkg/util/channel/channel_test.go | 9 +- test/e2e/antreaipam_test.go | 4 +- test/e2e/antreapolicy_test.go | 18 +- test/e2e/basic_test.go | 17 +- test/e2e/batch_test.go | 9 +- test/e2e/connectivity_test.go | 2 +- test/e2e/egress_test.go | 138 ++++---- test/e2e/flowaggregator_test.go | 8 +- test/e2e/framework.go | 52 +-- test/e2e/ipsec_test.go | 9 +- test/e2e/k8s_util.go | 4 +- test/e2e/l7networkpolicy_test.go | 46 +-- test/e2e/multicast_test.go | 6 +- test/e2e/networkpolicy_test.go | 4 +- test/e2e/nodeportlocal_test.go | 7 +- test/e2e/performance_test.go | 2 +- test/e2e/prometheus_test.go | 4 +- test/e2e/security_test.go | 6 +- test/e2e/service_externalip_test.go | 35 +- test/e2e/supportbundle_test.go | 4 +- test/e2e/traceflow_test.go | 6 +- test/e2e/vmagent_test.go | 18 +- test/integration/agent/route_test.go | 5 +- 
test/integration/ovs/ofctrl_test.go | 34 +- test/integration/ovs/openflow_test_utils.go | 40 +-- .../ipam/nodeipam/ipam/cidr_allocator.go | 27 +- third_party/proxy/service.go | 7 +- third_party/proxy/types.go | 2 +- 133 files changed, 1280 insertions(+), 1128 deletions(-) diff --git a/build/images/codegen/Dockerfile b/build/images/codegen/Dockerfile index a195c6b5306..b2c4863c04b 100644 --- a/build/images/codegen/Dockerfile +++ b/build/images/codegen/Dockerfile @@ -32,11 +32,11 @@ LABEL description="A Docker image based on the golang image, which includes code ENV GO111MODULE=on -ARG K8S_VERSION=1.26.4 +ARG K8S_VERSION=1.29.0 # The k8s.io/kube-openapi repo does not have tag, using a workable commit hash. # We use the version that is referenced in the Kubernetes go.mod (for the # correct K8s version). -ARG KUBEOPENAPI_VERSION=v0.0.0-20221012153701-172d655c2280 +ARG KUBEOPENAPI_VERSION=v0.0.0-20231010175941-2dd684a91f00 RUN go install k8s.io/code-generator/cmd/client-gen@kubernetes-$K8S_VERSION && \ go install k8s.io/code-generator/cmd/deepcopy-gen@kubernetes-$K8S_VERSION && \ diff --git a/build/images/codegen/README.md b/build/images/codegen/README.md index cfd31cefcd9..530a6265617 100644 --- a/build/images/codegen/README.md +++ b/build/images/codegen/README.md @@ -20,6 +20,7 @@ Here is the table of codegen images that have been uploaded: | Tag | Change | | :----------------------------- | ---------------------------------------------------- | +| kubernetes-1.29.0 | Upgraded K8s libraries to v1.29.0 | | kubernetes-1.26.4-build.1 | Replace github.com/golang/mock with go.uber.org/mock | | kubernetes-1.26.4-build.0 | Upgraded Go to v1.21 | | kubernetes-1.26.4 | Upgraded K8s libraries to v1.26.4 | diff --git a/cmd/antrea-agent-simulator/simulator.go b/cmd/antrea-agent-simulator/simulator.go index ec9ce22edf1..755642e6ac3 100644 --- a/cmd/antrea-agent-simulator/simulator.go +++ b/cmd/antrea-agent-simulator/simulator.go @@ -67,7 +67,7 @@ func run() error { // Add loop to 
check whether client is ready attempts := 0 - if err := wait.PollImmediateUntil(200*time.Millisecond, func() (bool, error) { + if err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), 200*time.Millisecond, true, func(ctx context.Context) (bool, error) { if attempts%10 == 0 { klog.Info("Waiting for Antrea client to be ready") } @@ -76,7 +76,7 @@ func run() error { return false, nil } return true, nil - }, stopCh); err != nil { + }); err != nil { klog.Info("Stopped waiting for Antrea client") return err } diff --git a/cmd/antrea-agent/agent.go b/cmd/antrea-agent/agent.go index dbefa8530ba..7c663f57e89 100644 --- a/cmd/antrea-agent/agent.go +++ b/cmd/antrea-agent/agent.go @@ -778,10 +778,10 @@ func run(o *Options) error { // Service would fail. if o.config.AntreaProxy.ProxyAll { klog.InfoS("Waiting for AntreaProxy to be ready") - if err := wait.PollUntil(time.Second, func() (bool, error) { + if err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), time.Second, false, func(ctx context.Context) (bool, error) { klog.V(2).InfoS("Checking if AntreaProxy is ready") return proxier.GetProxyProvider().SyncedOnce(), nil - }, stopCh); err != nil { + }); err != nil { return fmt.Errorf("error when waiting for AntreaProxy to be ready: %v", err) } klog.InfoS("AntreaProxy is ready") diff --git a/cmd/antrea-agent/options.go b/cmd/antrea-agent/options.go index 732a6abfb6c..63bf3c28760 100644 --- a/cmd/antrea-agent/options.go +++ b/cmd/antrea-agent/options.go @@ -27,7 +27,7 @@ import ( cliflag "k8s.io/component-base/cli/flag" "k8s.io/component-base/featuregate" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "antrea.io/antrea/pkg/agent/config" "antrea.io/antrea/pkg/apis" @@ -411,10 +411,10 @@ func (o *Options) setK8sNodeDefaultOptions() { o.config.HostProcPathPrefix = defaultHostProcPathPrefix } if o.config.AntreaProxy.Enable == nil { - o.config.AntreaProxy.Enable = pointer.Bool(true) + o.config.AntreaProxy.Enable = ptr.To(true) } if 
o.config.AntreaProxy.ProxyLoadBalancerIPs == nil { - o.config.AntreaProxy.ProxyLoadBalancerIPs = pointer.Bool(true) + o.config.AntreaProxy.ProxyLoadBalancerIPs = ptr.To(true) } if o.config.ServiceCIDR == "" { //It's okay to set the default value of this field even when AntreaProxy is enabled and the field is not used. @@ -427,7 +427,7 @@ func (o *Options) setK8sNodeDefaultOptions() { o.config.ClusterMembershipPort = apis.AntreaAgentClusterMembershipPort } if o.config.EnablePrometheusMetrics == nil { - o.config.EnablePrometheusMetrics = pointer.Bool(true) + o.config.EnablePrometheusMetrics = ptr.To(true) } if o.config.WireGuard.Port == 0 { o.config.WireGuard.Port = apis.WireGuardListenPort diff --git a/cmd/antrea-agent/options_test.go b/cmd/antrea-agent/options_test.go index 22c612d7c5f..083d2e48eec 100644 --- a/cmd/antrea-agent/options_test.go +++ b/cmd/antrea-agent/options_test.go @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" featuregatetesting "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "antrea.io/antrea/pkg/agent/config" agentconfig "antrea.io/antrea/pkg/config/agent" @@ -93,7 +93,7 @@ func TestOptionsValidateAntreaProxyConfig(t *testing.T) { name: "default", trafficEncapMode: config.TrafficEncapModeEncap, antreaProxyConfig: agentconfig.AntreaProxyConfig{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), DefaultLoadBalancerMode: config.LoadBalancerModeNAT.String(), }, expectedDefaultLoadBalancerMode: config.LoadBalancerModeNAT, @@ -103,7 +103,7 @@ func TestOptionsValidateAntreaProxyConfig(t *testing.T) { enabledDSR: true, trafficEncapMode: config.TrafficEncapModeEncap, antreaProxyConfig: agentconfig.AntreaProxyConfig{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), DefaultLoadBalancerMode: config.LoadBalancerModeDSR.String(), }, expectedDefaultLoadBalancerMode: config.LoadBalancerModeDSR, @@ -111,7 +111,7 @@ func TestOptionsValidateAntreaProxyConfig(t 
*testing.T) { { name: "LoadBalancerModeDSR disabled", antreaProxyConfig: agentconfig.AntreaProxyConfig{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), DefaultLoadBalancerMode: config.LoadBalancerModeDSR.String(), }, trafficEncapMode: config.TrafficEncapModeEncap, @@ -121,7 +121,7 @@ func TestOptionsValidateAntreaProxyConfig(t *testing.T) { name: "unsupported encap mode", enabledDSR: true, antreaProxyConfig: agentconfig.AntreaProxyConfig{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), DefaultLoadBalancerMode: config.LoadBalancerModeDSR.String(), }, trafficEncapMode: config.TrafficEncapModeNoEncap, @@ -131,7 +131,7 @@ func TestOptionsValidateAntreaProxyConfig(t *testing.T) { name: "invalid LoadBalancerMode", trafficEncapMode: config.TrafficEncapModeEncap, antreaProxyConfig: agentconfig.AntreaProxyConfig{ - Enable: pointer.Bool(true), + Enable: ptr.To(true), DefaultLoadBalancerMode: "drs", }, expectedErr: "LoadBalancerMode drs is unknown", diff --git a/go.mod b/go.mod index 95977e605da..264ae7ea8f9 100644 --- a/go.mod +++ b/go.mod @@ -67,19 +67,19 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.26.4 - k8s.io/apiextensions-apiserver v0.26.4 - k8s.io/apimachinery v0.26.4 - k8s.io/apiserver v0.26.4 - k8s.io/client-go v0.26.4 - k8s.io/component-base v0.26.4 - k8s.io/klog/v2 v2.100.1 - k8s.io/kube-aggregator v0.26.4 - k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 - k8s.io/kubectl v0.26.4 - k8s.io/kubelet v0.26.4 - k8s.io/utils v0.0.0-20230209194617-a36077c30491 - sigs.k8s.io/controller-runtime v0.14.6 + k8s.io/api v0.29.2 + k8s.io/apiextensions-apiserver v0.29.2 + k8s.io/apimachinery v0.29.2 + k8s.io/apiserver v0.29.2 + k8s.io/client-go v0.29.2 + k8s.io/component-base v0.29.2 + k8s.io/klog/v2 v2.110.1 + k8s.io/kube-aggregator v0.29.2 + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 + k8s.io/kubectl v0.29.2 + k8s.io/kubelet v0.29.2 + k8s.io/utils 
v0.0.0-20230726121419-3b25d923346b + sigs.k8s.io/controller-runtime v0.16.3 sigs.k8s.io/mcs-api v0.1.0 sigs.k8s.io/network-policy-api v0.1.1 sigs.k8s.io/yaml v1.3.0 @@ -93,8 +93,9 @@ require ( github.com/VividCortex/ewma v1.2.0 // indirect github.com/alexflint/go-filemutex v1.2.0 // indirect github.com/andybalholm/brotli v1.0.4 // indirect - github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.4 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.12.12 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.11 // indirect @@ -120,31 +121,31 @@ require ( github.com/containerd/cgroups v1.1.0 // indirect github.com/containerd/containerd v1.6.26 // indirect github.com/contiv/libovsdb v0.0.0-20170227191248-d0061a53e358 // indirect - github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd/v22 v22.3.2 // indirect - github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1 // indirect - github.com/emicklei/go-restful/v3 v3.10.1 // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/coreos/go-semver v0.3.1 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/color v1.15.0 // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fvbommel/sortorder v1.0.1 // indirect - github.com/go-errors/errors v1.0.1 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fvbommel/sortorder 
v1.1.0 // indirect + github.com/go-errors/errors v1.4.2 // indirect github.com/go-faster/city v1.0.1 // indirect github.com/go-faster/errors v0.6.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/cel-go v0.12.6 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/cel-go v0.17.7 // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect @@ -170,9 +171,9 @@ require ( github.com/mdlayher/genetlink v1.0.0 // indirect github.com/mdlayher/netlink v1.7.2 // indirect github.com/mdlayher/socket v0.4.1 // indirect - github.com/mitchellh/go-wordwrap v1.0.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/moby/spdystream v0.2.0 // indirect - github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect + github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect @@ -198,40 +199,40 @@ require ( 
github.com/stoewer/go-strcase v1.2.0 // indirect github.com/ti-mo/netfilter v0.5.0 // indirect github.com/vishvananda/netns v0.0.4 // indirect - github.com/xlab/treeprint v1.1.0 // indirect + github.com/xlab/treeprint v1.2.0 // indirect gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f // indirect - go.etcd.io/etcd/api/v3 v3.5.5 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect - go.etcd.io/etcd/client/v3 v3.5.5 // indirect + go.etcd.io/etcd/api/v3 v3.5.10 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect + go.etcd.io/etcd/client/v3 v3.5.10 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 // indirect - go.opentelemetry.io/otel v1.20.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect - go.opentelemetry.io/otel/metric v1.20.0 // indirect - go.opentelemetry.io/otel/sdk v1.20.0 // indirect - go.opentelemetry.io/otel/trace v1.20.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/sdk v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect - go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect - go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.9.0 // indirect - go.uber.org/zap v1.24.0 // indirect + go.starlark.net 
v0.0.0-20230525235612-a134d8f9ddca // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.25.0 // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/term v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.zx2c4.com/wireguard v0.0.0-20210427022245-097af6e1351b // indirect - gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - k8s.io/cli-runtime v0.26.4 // indirect - k8s.io/kms v0.26.4 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.36 // indirect + k8s.io/cli-runtime v0.29.2 // indirect + k8s.io/kms v0.29.2 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.12.1 // indirect - sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 5b438c02894..d5079e0f293 100644 --- a/go.sum +++ b/go.sum @@ -50,25 +50,22 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template 
v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alessio/shellescape v1.2.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/alexflint/go-filemutex v1.2.0 h1:1v0TJPDtlhgpW4nJ+GvxCLSlUDC3+gW0CQQvlmfDR/s= github.com/alexflint/go-filemutex v1.2.0/go.mod h1:mYyQSWvw9Tx2/H2n9qXPb52tTYfE0pZAWcBq5mK025c= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves= -github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod 
h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go-v2 v1.16.10 h1:+yDD0tcuHRQZgqONkpDwzepqmElQaSlFPymHRHR9mrc= github.com/aws/aws-sdk-go-v2 v1.16.10/go.mod h1:WTACcleLz6VZTp7fak4EO5b9Q4foxbn+8PIz3PmyKlo= @@ -107,8 +104,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.16.12 h1:YU9UHPukkCCnETHEExOptF/BxPv github.com/aws/aws-sdk-go-v2/service/sts v1.16.12/go.mod h1:b53qpmhHk7mTL2J/tfG6f38neZiyBQSiNXGCuNKq4+4= github.com/aws/smithy-go v1.12.1 h1:yQRC55aXN/y1W10HgwHle01DRuV9Dpf31iGkotjt3Ag= github.com/aws/smithy-go v1.12.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -129,7 +126,6 @@ github.com/cenkalti/rpc2 v0.0.0-20180727162946-9642ea02d0aa h1:t+iWhuJE2aropY4ux github.com/cenkalti/rpc2 v0.0.0-20180727162946-9642ea02d0aa/go.mod 
h1:v2npkhrXyk5BCnkNIiPdRI23Uq6uWPUQGL2hnRcRr/M= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= @@ -141,8 +137,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -165,12 +159,13 @@ github.com/coreos/go-iptables v0.7.0 h1:XWM3V+MPRr5/q51NuWSgU0fqMad64Zyxs8ZUoMsa github.com/coreos/go-iptables v0.7.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -179,8 +174,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsr github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= 
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -196,27 +191,20 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1 h1:yY9rWGoXv1U5pl4gxqlULARMQD7x0QG85lqEXTWysik= -github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane 
v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.0.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= @@ -225,44 +213,39 @@ github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZM github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod 
h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fvbommel/sortorder v1.0.1 h1:dSnXLt4mJYH25uDDGa3biZNQsozaUWDSWeKJ0qqFfzE= -github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/gammazero/deque v0.1.2 h1:WvbDJ3YaT4ELf9+Cq9lv4Ef0aPRyZeEpIoVkjOw9kes= github.com/gammazero/deque v0.1.2/go.mod h1:KQw7vFau1hHuM8xmI9RbgKFbAsQFWmBpqQ2KenFLk6M= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-errors/errors v1.0.1 
h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI= github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-logr/zapr v1.2.3 
h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= -github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= +github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -283,8 +266,8 @@ github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3Hfo github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= -github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -323,6 +306,8 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod 
h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= @@ -337,7 +322,6 @@ github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -354,16 +338,17 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M= -github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= 
+github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= +github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -371,6 +356,7 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -396,8 +382,9 @@ github.com/googleapis/gnostic v0.3.1/go.mod 
h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1a github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -457,7 +444,6 @@ github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/rasw github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= @@ -469,12 +455,10 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go 
v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.3.0 h1:MjRRgZyTGo90G+UrwlDQjU+uG4Z7By65qvQxGoILT/8= github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.3.0/go.mod h1:nqCI7aelBJU61wiBeeZWJ6oi4bJy5nrjkM6lWIMA4j0= github.com/k8snetworkplumbingwg/sriov-cni v2.1.0+incompatible h1:5comk9qUB9j99Oc+rvnm92RWWe9urdJ1TP3cXM3fmmc= @@ -489,12 +473,10 @@ github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCy github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty 
v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -562,13 +544,13 @@ github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPk github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI= -github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -582,7 +564,6 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -644,8 +625,6 @@ github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prY github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model 
v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -657,24 +636,18 @@ github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k= github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod 
h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -695,7 +668,6 @@ github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFR github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= @@ -747,8 +719,8 @@ github.com/ti-mo/netfilter v0.5.0/go.mod h1:nt+8B9hx/QpqHr7Hazq+2qMCCA8u2OTkyc/7 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy 
v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= @@ -763,79 +735,72 @@ github.com/vmware/go-ipfix v0.9.0 h1:4/N5eFliqULEaCUQV0lafOpN/1bItPE9OTAPGhrIXus github.com/vmware/go-ipfix v0.9.0/go.mod h1:MYEdL6Uel2ufOZyVCKvIAaw9hwnewK8aPr7rnwRbxMY= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= -github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f h1:Wku8eEdeJqIOFHtrfkYUByc4bCaTeA6fL0UJgfEiFMI= gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f/go.mod h1:Tiuhl+njh/JIg0uS/sOJVYi0x2HEa5rc1OAaVsb5tAs= go.etcd.io/bbolt 
v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= -go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= +go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd/api/v3 v3.5.5 h1:BX4JIbQ7hl7+jL+g+2j5UAr0o1bctCm6/Ct+ArBGkf0= -go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= -go.etcd.io/etcd/client/pkg/v3 v3.5.5 h1:9S0JUVvmrVl7wCF39iTQthdaaNIiAaQbmK75ogO6GU8= -go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= -go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI= -go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4= -go.etcd.io/etcd/client/v3 v3.5.5 h1:q++2WTJbUgpQu4B6hCuT7VkdwaTP7Qz6Daak3WzbrlI= -go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c= -go.etcd.io/etcd/pkg/v3 v3.5.5 h1:Ablg7T7OkR+AeeeU32kdVhw/AGDsitkKPl7aW73ssjU= -go.etcd.io/etcd/pkg/v3 v3.5.5/go.mod h1:6ksYFxttiUGzC2uxyqiyOEvhAiD0tuIqSZkX3TyPdaE= -go.etcd.io/etcd/raft/v3 v3.5.5 h1:Ibz6XyZ60OYyRopu73lLM/P+qco3YtlZMOhnXNS051I= -go.etcd.io/etcd/raft/v3 v3.5.5/go.mod h1:76TA48q03g1y1VpTue92jZLr9lIHKUNcYdZOOGyx8rI= -go.etcd.io/etcd/server/v3 v3.5.5 h1:jNjYm/9s+f9A9r6+SC4RvNaz6AqixpOvhrFdT0PvIj0= -go.etcd.io/etcd/server/v3 v3.5.5/go.mod h1:rZ95vDw/jrvsbj9XpTqPrTAB9/kzchVdhRirySPkUBc= +go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= +go.etcd.io/etcd/api/v3 v3.5.10/go.mod 
h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= +go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= +go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= +go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= +go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= +go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= +go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= +go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= +go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= +go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= +go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc= +go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg= +go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 h1:1eHu3/pUSWaOgltNK3WJFaywKsTIr/PwvHyDmi0lQA0= 
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0/go.mod h1:HyABWq60Uy1kjJSa2BVOxUVao8Cdick5AWSKPutqy6U= -go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= -go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 h1:DeFD0VgTZ+Cj6hxravYYZE2W4GlneVH81iAOPjZkzk8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0= -go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= -go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= -go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM= -go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0= -go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= -go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= +go.opentelemetry.io/otel v1.19.0 
h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= 
-go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= +go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -856,15 +821,14 @@ golang.org/x/crypto v0.5.0/go.mod 
h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg= golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= @@ -894,8 +858,6 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -903,7 +865,6 @@ golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210504132125-bbd867fde50d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -914,7 +875,6 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -925,7 +885,6 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= @@ -950,7 +909,6 @@ golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -964,8 
+922,6 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602100848-8d3cce7afc34/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -978,12 +934,8 @@ golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309040221-94ec62e08169/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1002,6 +954,7 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= @@ -1011,7 +964,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= @@ -1036,15 +988,12 @@ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= @@ -1057,8 +1006,8 @@ golang.zx2c4.com/wireguard v0.0.0-20210427022245-097af6e1351b/go.mod h1:a057zjmo golang.zx2c4.com/wireguard/wgctrl v0.0.0-20210506160403-92e472f520a5 h1:LpEwXnbN4q2EIPkqbG9KHBUrducJYDOOdL+eMcJAlFo= golang.zx2c4.com/wireguard/wgctrl v0.0.0-20210506160403-92e472f520a5/go.mod h1:+1XihzyZUBJcSc5WO9SwNA7v26puQwOEDwanaxfNXPQ= gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= 
-gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1069,10 +1018,7 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= @@ -1086,11 +1032,7 @@ google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= 
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1101,7 +1043,6 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= @@ -1128,9 +1069,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1139,92 +1078,86 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= -gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= k8s.io/api v0.18.4/go.mod h1:lOIQAKYgai1+vz9J7YcDZwC26Z0zQewYOGWdyIPUUQ4= -k8s.io/api v0.26.4 h1:qSG2PmtcD23BkYiWfoYAcak870eF/hE7NNYBYavTT94= -k8s.io/api v0.26.4/go.mod h1:WwKEXU3R1rgCZ77AYa7DFksd9/BAIKyOmRlbVxgvjCk= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= k8s.io/apiextensions-apiserver v0.18.4/go.mod h1:NYeyeYq4SIpFlPxSAB6jHPIdvu3hL0pc36wuRChybio= -k8s.io/apiextensions-apiserver v0.26.4 h1:9D2RTxYGxrG5uYg6D7QZRcykXvavBvcA59j5kTaedQI= -k8s.io/apiextensions-apiserver v0.26.4/go.mod h1:cd4uGFGIgzEqUghWpRsr9KE8j2KNTjY8Ji8pnMMazyw= +k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= +k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.18.4/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/apimachinery v0.26.4 h1:rZccKdBLg9vP6J09JD+z8Yr99Ce8gk3Lbi9TCx05Jzs= -k8s.io/apimachinery v0.26.4/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= k8s.io/apiserver v0.18.4/go.mod h1:q+zoFct5ABNnYkGIaGQ3bcbUNdmPyOCoEBcg51LChY8= -k8s.io/apiserver v0.26.4 h1:3Oq4mnJv0mzVX7BR/Nod+8KjlELf/3Ljvu9ZWDyLUoA= -k8s.io/apiserver v0.26.4/go.mod h1:yAY3O1vBM4/0OIGAGeWcdfzQvgdwJ188VirLcuSAVnw= -k8s.io/cli-runtime v0.26.4 h1:MgSU871KDzBDX7V9GtuqS6Ai9lhQCHgRzkurnXOWtZ0= -k8s.io/cli-runtime v0.26.4/go.mod h1:MjJ2DXMChw2zcG0/agzm17xwKpfVxOfuoCdfY9iOCOE= 
+k8s.io/apiserver v0.29.2 h1:+Z9S0dSNr+CjnVXQePG8TcBWHr3Q7BmAr7NraHvsMiQ= +k8s.io/apiserver v0.29.2/go.mod h1:B0LieKVoyU7ykQvPFm7XSdIHaCHSzCzQWPFa5bqbeMQ= +k8s.io/cli-runtime v0.29.2 h1:smfsOcT4QujeghsNjECKN3lwyX9AwcFU0nvJ7sFN3ro= +k8s.io/cli-runtime v0.29.2/go.mod h1:KLisYYfoqeNfO+MkTWvpqIyb1wpJmmFJhioA0xd4MW8= k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= k8s.io/client-go v0.18.4/go.mod h1:f5sXwL4yAZRkAtzOxRWUhA/N8XzGCb+nPZI8PfobZ9g= -k8s.io/client-go v0.26.4 h1:/7P/IbGBuT73A+G97trf44NTPSNqvuBREpOfdLbHvD4= -k8s.io/client-go v0.26.4/go.mod h1:6qOItWm3EwxJdl/8p5t7FWtWUOwyMdA8N9ekbW4idpI= +k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.18.4/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= k8s.io/component-base v0.18.4/go.mod h1:7jr/Ef5PGmKwQhyAz/pjByxJbC58mhKAhiaDu0vXfPk= -k8s.io/component-base v0.26.4 h1:Bg2xzyXNKL3eAuiTEu3XE198d6z22ENgFgGQv2GGOUk= -k8s.io/component-base v0.26.4/go.mod h1:lTuWL1Xz/a4e80gmIC3YZG2JCO4xNwtKWHJWeJmsq20= +k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= +k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= 
-k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kms v0.26.4 h1:mQ+DeOvgAHC6+heZcozPkEd3rWtP4DVVjo1hLSih9w4= -k8s.io/kms v0.26.4/go.mod h1:69qGnf1NsFOQP07fBYqNLZklqEHSJF024JqYCaeVxHg= -k8s.io/kube-aggregator v0.26.4 h1:iGljhq5exQkbuc3bnkwUx95RPCBDExg7DkX9XaYhg6w= -k8s.io/kube-aggregator v0.26.4/go.mod h1:eWfg4tU0+l57ebWiS5THOANIJUrKRxudSVDJ+63bqvQ= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kms v0.29.2 h1:MDsbp98gSlEQs7K7dqLKNNTwKFQRYYvO4UOlBOjNy6Y= +k8s.io/kms v0.29.2/go.mod h1:s/9RC4sYRZ/6Tn6yhNjbfJuZdb8LzlXhdlBnKizeFDo= +k8s.io/kube-aggregator v0.29.2 h1:z9qJn5wlGmGaX6EfM7OEhr6fq6SBjDKR6tPRZ/qgxeY= +k8s.io/kube-aggregator v0.29.2/go.mod h1:QEuwzmMJJsg0eg1Gv+u4cWcYeJG2+8vN8/nTXBzopUo= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/kubectl v0.26.4 h1:A0Oa0u/po4KxXnXsNCOwLojAe9cQR3TJNJabEIf7U1w= -k8s.io/kubectl v0.26.4/go.mod h1:cWtp/+I4p+h5En3s2zO1zCry9v3/6h37EQ2tF3jNRnM= -k8s.io/kubelet v0.26.4 h1:SEQPfjN4lu4uL9O8NdeN7Aum3liQ4kOnp/yC3jMRMUo= -k8s.io/kubelet v0.26.4/go.mod h1:ZMPGTCnrQ5UOlC7igXhbW9cgna1LtTRWLaHub4dA2FU= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kubectl v0.29.2 h1:uaDYaBhumvkwz0S2XHt36fK0v5IdNgL7HyUniwb2IUo= +k8s.io/kubectl v0.29.2/go.mod 
h1:BhizuYBGcKaHWyq+G7txGw2fXg576QbPrrnQdQDZgqI= +k8s.io/kubelet v0.29.2 h1:bQ2StqkUqPCFNLtGLsb3v3O2LKQHXNMju537zOGboRg= +k8s.io/kubelet v0.29.2/go.mod h1:i5orNPqW/fAMrqptbCXFW/vLBBP12TZZc41IrrvF7SY= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY= -k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.36 h1:PUuX1qIFv309AT8hF/CdPKDmsG/hn/L8zRX7VvISM3A= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.36/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/controller-runtime v0.6.1/go.mod h1:XRYBPdbf5XJu9kpS84VJiZ7h/u1hF3gEORz0efEja7A= -sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA= -sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= +sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= +sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd 
h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kind v0.8.1/go.mod h1:oNKTxUVPYkV9lWzY6CVMNluVq8cBsyq+UgPJdvA3uu4= -sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= -sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= -sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk= -sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= +sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= +sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= sigs.k8s.io/mcs-api v0.1.0 h1:edDbg0oRGfXw8TmZjKYep06LcJLv/qcYLidejnUp0PM= sigs.k8s.io/mcs-api v0.1.0/go.mod h1:gGiAryeFNB4GBsq2LBmVqSgKoobLxt+p7ii/WG5QYYw= sigs.k8s.io/network-policy-api v0.1.1 h1:KDW+AkvCCQI3h8yH8j0hurhvPLNtLeVvmZoqtMaG9ew= sigs.k8s.io/network-policy-api v0.1.1/go.mod h1:F7S5fsb7QEzlLjuMgTGfUT4LRHylRbx2xDDpHfJKKEs= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod 
h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 719d6b549c4..778ff753bb1 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -18,7 +18,7 @@ set -o errexit set -o pipefail ANTREA_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" && pwd )" -IMAGE_NAME="antrea/codegen:kubernetes-1.26.4-build.1" +IMAGE_NAME="antrea/codegen:kubernetes-1.29.0" # Recent versions of Git will not access .git directories which are owned by # another user (as a security measure), unless the directories are explicitly diff --git a/multicluster/apis/multicluster/v1alpha1/multiclusterconfig_types.go b/multicluster/apis/multicluster/v1alpha1/multiclusterconfig_types.go index af0ed8c8c3c..b66ce52639a 100644 --- a/multicluster/apis/multicluster/v1alpha1/multiclusterconfig_types.go +++ b/multicluster/apis/multicluster/v1alpha1/multiclusterconfig_types.go @@ -18,7 +18,6 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - config "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" ) // Precedence defines the precedence of Node IP type. @@ -37,8 +36,12 @@ const ( // +kubebuilder:printcolumn:name="Service CIDR",type=string,JSONPath=`.serviceCIDR`,description="Manually specified Service CIDR" type MultiClusterConfig struct { metav1.TypeMeta `json:",inline"` - // ControllerManagerConfigurationSpec defines the contfigurations for controllers. 
- config.ControllerManagerConfigurationSpec `json:",inline"` + // Metrics contains the controller metrics configuration + Metrics ControllerMetrics `json:"metrics,omitempty"` + // Health contains the controller health configuration + Health ControllerHealth `json:"health,omitempty"` + // Webhook contains the controllers webhook configuration + Webhook ControllerWebhook `json:"webhook,omitempty"` // ServiceCIDR allows user to set the ClusterIP range of the cluster manually. ServiceCIDR string `json:"serviceCIDR,omitempty"` // PodCIDRs is the Pod IP address CIDRs. @@ -59,6 +62,34 @@ type MultiClusterConfig struct { EnableStretchedNetworkPolicy bool `json:"enableStretchedNetworkPolicy,omitempty"` } +type ControllerMetrics struct { + // BindAddress is the TCP address that the controller should bind to + // for serving prometheus metrics. + // It can be set to "0" to disable the metrics serving. + BindAddress string `json:"bindAddress,omitempty"` +} + +type ControllerWebhook struct { + // Port is the port that the webhook server serves at. + // It is used to set webhook.Server.Port. + Port *int `json:"port,omitempty"` + // Host is the hostname that the webhook server binds to. + // It is used to set webhook.Server.Host. + Host string `json:"host,omitempty"` + // CertDir is the directory that contains the server key and certificate. + // if not set, webhook server would look up the server key and certificate in + // {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate + // must be named tls.key and tls.crt, respectively. + CertDir string `json:"certDir,omitempty"` +} + +type ControllerHealth struct { + // HealthProbeBindAddress is the TCP address that the controller should bind to + // for serving health probes + // It can be set to "0" or "" to disable serving the health probe. 
+ HealthProbeBindAddress string `json:"healthProbeBindAddress,omitempty"` +} + func init() { SchemeBuilder.Register(&MultiClusterConfig{}) } diff --git a/multicluster/apis/multicluster/v1alpha1/zz_generated.deepcopy.go b/multicluster/apis/multicluster/v1alpha1/zz_generated.deepcopy.go index ebebbdea7eb..21554204f37 100644 --- a/multicluster/apis/multicluster/v1alpha1/zz_generated.deepcopy.go +++ b/multicluster/apis/multicluster/v1alpha1/zz_generated.deepcopy.go @@ -626,7 +626,6 @@ func (in *MemberClusterAnnounceList) DeepCopyObject() runtime.Object { func (in *MultiClusterConfig) DeepCopyInto(out *MultiClusterConfig) { *out = *in out.TypeMeta = in.TypeMeta - in.ControllerManagerConfigurationSpec.DeepCopyInto(&out.ControllerManagerConfigurationSpec) if in.PodCIDRs != nil { in, out := &in.PodCIDRs, &out.PodCIDRs *out = make([]string, len(*in)) diff --git a/multicluster/build/yamls/antrea-multicluster-leader-global.yml b/multicluster/build/yamls/antrea-multicluster-leader-global.yml index ca5fcd07e5b..d282ae78aab 100644 --- a/multicluster/build/yamls/antrea-multicluster-leader-global.yml +++ b/multicluster/build/yamls/antrea-multicluster-leader-global.yml @@ -2703,13 +2703,10 @@ spec: description: The Hostname of this endpoint type: string ip: - description: 'The IP of this endpoint. May not be - loopback (127.0.0.0/8), link-local (169.254.0.0/16), - or link-local multicast ((224.0.0.0/24). IPv6 is - also accepted but not fully supported on all platforms. - Also, certain kubernetes components, like kube-proxy, - are not IPv6 ready. TODO: This should allow hostname - or IP, See #4447.' + description: The IP of this endpoint. May not be loopback + (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 + or fe80::/10), or link-local multicast (224.0.0.0/24 + or ff02::/16). type: string nodeName: description: 'Optional: Node hosting this endpoint. @@ -2777,13 +2774,10 @@ spec: description: The Hostname of this endpoint type: string ip: - description: 'The IP of this endpoint. 
May not be - loopback (127.0.0.0/8), link-local (169.254.0.0/16), - or link-local multicast ((224.0.0.0/24). IPv6 is - also accepted but not fully supported on all platforms. - Also, certain kubernetes components, like kube-proxy, - are not IPv6 ready. TODO: This should allow hostname - or IP, See #4447.' + description: The IP of this endpoint. May not be loopback + (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 + or fe80::/10), or link-local multicast (224.0.0.0/24 + or ff02::/16). type: string nodeName: description: 'Optional: Node hosting this endpoint. @@ -2845,12 +2839,22 @@ spec: single port. properties: appProtocol: - description: The application protocol for this port. + description: "The application protocol for this port. + This is used as a hint for implementations to offer + richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. - Un-prefixed names are reserved for IANA standard - service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). - Non-standard protocols should use prefixed names - such as mycompany.com/my-custom-protocol. + Valid values are either: \n * Un-prefixed protocol + names - reserved for IANA standard service names + (as per RFC-6335 and https://www.iana.org/assignments/service-names). + \n * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' + - HTTP/2 prior knowledge over cleartext as described + in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext + as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described + in https://www.rfc-editor.org/rfc/rfc6455 \n * Other + protocols should use implementation-defined prefixed + names such as mycompany.com/my-custom-protocol." type: string name: description: The name of this port. 
This must match @@ -3136,11 +3140,10 @@ spec: supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was - under-specified and its meaning varies across implementations, - and it cannot support dual-stack. As of Kubernetes v1.24, - users are encouraged to use implementation-specific annotations - when available. This field may be removed in a future API - version.' + under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations + when available.' type: string loadBalancerSourceRanges: description: 'If specified and supported by the platform, @@ -3159,12 +3162,22 @@ spec: port. properties: appProtocol: - description: The application protocol for this port. + description: "The application protocol for this port. + This is used as a hint for implementations to offer + richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. - Un-prefixed names are reserved for IANA standard service - names (as per RFC-6335 and https://www.iana.org/assignments/service-names). - Non-standard protocols should use prefixed names such - as mycompany.com/my-custom-protocol. + Valid values are either: \n * Un-prefixed protocol + names - reserved for IANA standard service names (as + per RFC-6335 and https://www.iana.org/assignments/service-names). 
+ \n * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' + - HTTP/2 prior knowledge over cleartext as described + in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as + described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described + in https://www.rfc-editor.org/rfc/rfc6455 \n * Other + protocols should use implementation-defined prefixed + names such as mycompany.com/my-custom-protocol." type: string name: description: The name of this port within the service. @@ -5614,13 +5627,10 @@ spec: description: The Hostname of this endpoint type: string ip: - description: 'The IP of this endpoint. May not be - loopback (127.0.0.0/8), link-local (169.254.0.0/16), - or link-local multicast ((224.0.0.0/24). IPv6 is - also accepted but not fully supported on all platforms. - Also, certain kubernetes components, like kube-proxy, - are not IPv6 ready. TODO: This should allow hostname - or IP, See #4447.' + description: The IP of this endpoint. May not be loopback + (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 + or fe80::/10), or link-local multicast (224.0.0.0/24 + or ff02::/16). type: string nodeName: description: 'Optional: Node hosting this endpoint. @@ -5688,13 +5698,10 @@ spec: description: The Hostname of this endpoint type: string ip: - description: 'The IP of this endpoint. May not be - loopback (127.0.0.0/8), link-local (169.254.0.0/16), - or link-local multicast ((224.0.0.0/24). IPv6 is - also accepted but not fully supported on all platforms. - Also, certain kubernetes components, like kube-proxy, - are not IPv6 ready. TODO: This should allow hostname - or IP, See #4447.' + description: The IP of this endpoint. May not be loopback + (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 + or fe80::/10), or link-local multicast (224.0.0.0/24 + or ff02::/16). type: string nodeName: description: 'Optional: Node hosting this endpoint. 
@@ -5756,12 +5763,22 @@ spec: single port. properties: appProtocol: - description: The application protocol for this port. + description: "The application protocol for this port. + This is used as a hint for implementations to offer + richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. - Un-prefixed names are reserved for IANA standard - service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). - Non-standard protocols should use prefixed names - such as mycompany.com/my-custom-protocol. + Valid values are either: \n * Un-prefixed protocol + names - reserved for IANA standard service names + (as per RFC-6335 and https://www.iana.org/assignments/service-names). + \n * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' + - HTTP/2 prior knowledge over cleartext as described + in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext + as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described + in https://www.rfc-editor.org/rfc/rfc6455 \n * Other + protocols should use implementation-defined prefixed + names such as mycompany.com/my-custom-protocol." type: string name: description: The name of this port. 
This must match diff --git a/multicluster/build/yamls/antrea-multicluster-leader-namespaced.yml b/multicluster/build/yamls/antrea-multicluster-leader-namespaced.yml index c1c8bb56785..b9f7e243bd8 100644 --- a/multicluster/build/yamls/antrea-multicluster-leader-namespaced.yml +++ b/multicluster/build/yamls/antrea-multicluster-leader-namespaced.yml @@ -298,8 +298,6 @@ data: bindAddress: "0" webhook: port: 9443 - leaderElection: - leaderElect: false serviceCIDR: "" podCIDRs: - "" @@ -356,7 +354,7 @@ spec: template: metadata: annotations: - checksum/config: 7eb0f1e65f7eb3e35b0739d6064b92b7621af0f4e41813c35bfdee71ceaefbe2 + checksum/config: 81ec1a33aace39ae40ac2f5d909b5d1d0208bbe6a1e8d1d9ada232bcc583b76a labels: app: antrea component: antrea-mc-controller diff --git a/multicluster/build/yamls/antrea-multicluster-leader.yml b/multicluster/build/yamls/antrea-multicluster-leader.yml index 8c8a4c10ad8..2e7e449c5f5 100644 --- a/multicluster/build/yamls/antrea-multicluster-leader.yml +++ b/multicluster/build/yamls/antrea-multicluster-leader.yml @@ -2703,13 +2703,10 @@ spec: description: The Hostname of this endpoint type: string ip: - description: 'The IP of this endpoint. May not be - loopback (127.0.0.0/8), link-local (169.254.0.0/16), - or link-local multicast ((224.0.0.0/24). IPv6 is - also accepted but not fully supported on all platforms. - Also, certain kubernetes components, like kube-proxy, - are not IPv6 ready. TODO: This should allow hostname - or IP, See #4447.' + description: The IP of this endpoint. May not be loopback + (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 + or fe80::/10), or link-local multicast (224.0.0.0/24 + or ff02::/16). type: string nodeName: description: 'Optional: Node hosting this endpoint. @@ -2777,13 +2774,10 @@ spec: description: The Hostname of this endpoint type: string ip: - description: 'The IP of this endpoint. May not be - loopback (127.0.0.0/8), link-local (169.254.0.0/16), - or link-local multicast ((224.0.0.0/24). 
IPv6 is - also accepted but not fully supported on all platforms. - Also, certain kubernetes components, like kube-proxy, - are not IPv6 ready. TODO: This should allow hostname - or IP, See #4447.' + description: The IP of this endpoint. May not be loopback + (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 + or fe80::/10), or link-local multicast (224.0.0.0/24 + or ff02::/16). type: string nodeName: description: 'Optional: Node hosting this endpoint. @@ -2845,12 +2839,22 @@ spec: single port. properties: appProtocol: - description: The application protocol for this port. + description: "The application protocol for this port. + This is used as a hint for implementations to offer + richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. - Un-prefixed names are reserved for IANA standard - service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). - Non-standard protocols should use prefixed names - such as mycompany.com/my-custom-protocol. + Valid values are either: \n * Un-prefixed protocol + names - reserved for IANA standard service names + (as per RFC-6335 and https://www.iana.org/assignments/service-names). + \n * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' + - HTTP/2 prior knowledge over cleartext as described + in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext + as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described + in https://www.rfc-editor.org/rfc/rfc6455 \n * Other + protocols should use implementation-defined prefixed + names such as mycompany.com/my-custom-protocol." type: string name: description: The name of this port. This must match @@ -3136,11 +3140,10 @@ spec: supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. 
Deprecated: This field was - under-specified and its meaning varies across implementations, - and it cannot support dual-stack. As of Kubernetes v1.24, - users are encouraged to use implementation-specific annotations - when available. This field may be removed in a future API - version.' + under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations + when available.' type: string loadBalancerSourceRanges: description: 'If specified and supported by the platform, @@ -3159,12 +3162,22 @@ spec: port. properties: appProtocol: - description: The application protocol for this port. + description: "The application protocol for this port. + This is used as a hint for implementations to offer + richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. - Un-prefixed names are reserved for IANA standard service - names (as per RFC-6335 and https://www.iana.org/assignments/service-names). - Non-standard protocols should use prefixed names such - as mycompany.com/my-custom-protocol. + Valid values are either: \n * Un-prefixed protocol + names - reserved for IANA standard service names (as + per RFC-6335 and https://www.iana.org/assignments/service-names). + \n * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' + - HTTP/2 prior knowledge over cleartext as described + in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as + described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described + in https://www.rfc-editor.org/rfc/rfc6455 \n * Other + protocols should use implementation-defined prefixed + names such as mycompany.com/my-custom-protocol." type: string name: description: The name of this port within the service. 
@@ -5614,13 +5627,10 @@ spec: description: The Hostname of this endpoint type: string ip: - description: 'The IP of this endpoint. May not be - loopback (127.0.0.0/8), link-local (169.254.0.0/16), - or link-local multicast ((224.0.0.0/24). IPv6 is - also accepted but not fully supported on all platforms. - Also, certain kubernetes components, like kube-proxy, - are not IPv6 ready. TODO: This should allow hostname - or IP, See #4447.' + description: The IP of this endpoint. May not be loopback + (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 + or fe80::/10), or link-local multicast (224.0.0.0/24 + or ff02::/16). type: string nodeName: description: 'Optional: Node hosting this endpoint. @@ -5688,13 +5698,10 @@ spec: description: The Hostname of this endpoint type: string ip: - description: 'The IP of this endpoint. May not be - loopback (127.0.0.0/8), link-local (169.254.0.0/16), - or link-local multicast ((224.0.0.0/24). IPv6 is - also accepted but not fully supported on all platforms. - Also, certain kubernetes components, like kube-proxy, - are not IPv6 ready. TODO: This should allow hostname - or IP, See #4447.' + description: The IP of this endpoint. May not be loopback + (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 + or fe80::/10), or link-local multicast (224.0.0.0/24 + or ff02::/16). type: string nodeName: description: 'Optional: Node hosting this endpoint. @@ -5756,12 +5763,22 @@ spec: single port. properties: appProtocol: - description: The application protocol for this port. + description: "The application protocol for this port. + This is used as a hint for implementations to offer + richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. - Un-prefixed names are reserved for IANA standard - service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). - Non-standard protocols should use prefixed names - such as mycompany.com/my-custom-protocol. 
+ Valid values are either: \n * Un-prefixed protocol + names - reserved for IANA standard service names + (as per RFC-6335 and https://www.iana.org/assignments/service-names). + \n * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' + - HTTP/2 prior knowledge over cleartext as described + in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext + as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described + in https://www.rfc-editor.org/rfc/rfc6455 \n * Other + protocols should use implementation-defined prefixed + names such as mycompany.com/my-custom-protocol." type: string name: description: The name of this port. This must match @@ -6334,8 +6351,6 @@ data: bindAddress: "0" webhook: port: 9443 - leaderElection: - leaderElect: false serviceCIDR: "" podCIDRs: - "" @@ -6392,7 +6407,7 @@ spec: template: metadata: annotations: - checksum/config: 7eb0f1e65f7eb3e35b0739d6064b92b7621af0f4e41813c35bfdee71ceaefbe2 + checksum/config: 81ec1a33aace39ae40ac2f5d909b5d1d0208bbe6a1e8d1d9ada232bcc583b76a labels: app: antrea component: antrea-mc-controller diff --git a/multicluster/build/yamls/antrea-multicluster-member.yml b/multicluster/build/yamls/antrea-multicluster-member.yml index 47f0adfc8c7..5642c80ac57 100644 --- a/multicluster/build/yamls/antrea-multicluster-member.yml +++ b/multicluster/build/yamls/antrea-multicluster-member.yml @@ -1152,8 +1152,6 @@ data: bindAddress: "0" webhook: port: 9443 - leaderElection: - leaderElect: false serviceCIDR: "" podCIDRs: - "" @@ -1199,7 +1197,7 @@ spec: template: metadata: annotations: - checksum/config: 7eb0f1e65f7eb3e35b0739d6064b92b7621af0f4e41813c35bfdee71ceaefbe2 + checksum/config: 81ec1a33aace39ae40ac2f5d909b5d1d0208bbe6a1e8d1d9ada232bcc583b76a labels: app: antrea component: antrea-mc-controller diff --git a/multicluster/cmd/multicluster-controller/clusterset_webhook_test.go 
b/multicluster/cmd/multicluster-controller/clusterset_webhook_test.go index e8de60a93f6..8e23f008544 100644 --- a/multicluster/cmd/multicluster-controller/clusterset_webhook_test.go +++ b/multicluster/cmd/multicluster-controller/clusterset_webhook_test.go @@ -25,7 +25,6 @@ import ( v1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -181,10 +180,7 @@ func TestWebhookClusterSetEvents(t *testing.T) { }, } - decoder, err := admission.NewDecoder(common.TestScheme) - if err != nil { - klog.ErrorS(err, "Error constructing a decoder") - } + decoder := admission.NewDecoder(common.TestScheme) for _, tt := range tests { objects := []client.Object{} diff --git a/multicluster/cmd/multicluster-controller/controller.go b/multicluster/cmd/multicluster-controller/controller.go index bf86deb45be..dee4249578c 100644 --- a/multicluster/cmd/multicluster-controller/controller.go +++ b/multicluster/cmd/multicluster-controller/controller.go @@ -18,7 +18,7 @@ import ( "fmt" "time" - // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
_ "k8s.io/client-go/plugin/pkg/client/auth" @@ -35,8 +35,10 @@ import ( aggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" + controllerruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/webhook" k8smcsv1alpha1 "sigs.k8s.io/mcs-api/pkg/apis/v1alpha1" mcv1alpha1 "antrea.io/antrea/multicluster/apis/multicluster/v1alpha1" @@ -148,7 +150,7 @@ func setupManagerAndCertController(isLeader bool, o *Options) (manager.Manager, secureServing := genericoptions.NewSecureServingOptions().WithLoopback() caCertController, err := certificate.ApplyServerCert(o.SelfSignedCert, client, aggregatorClient, apiExtensionClient, - secureServing, getCaConfig(isLeader, o.options.Namespace)) + secureServing, getCaConfig(isLeader, o.Namespace)) if err != nil { return nil, fmt.Errorf("error applying server cert: %v", err) } @@ -157,25 +159,31 @@ func setupManagerAndCertController(isLeader bool, o *Options) (manager.Manager, } if o.SelfSignedCert { - o.options.CertDir = selfSignedCertDir + o.options.Metrics.CertDir = selfSignedCertDir + o.WebhookConfig.CertDir = selfSignedCertDir } else { - o.options.CertDir = certDir + o.options.Metrics.CertDir = certDir + o.WebhookConfig.CertDir = certDir } + o.options.WebhookServer = webhook.NewServer(webhook.Options{ + Port: *o.WebhookConfig.Port, + Host: o.WebhookConfig.Host, + CertDir: o.WebhookConfig.CertDir, + }) namespaceFieldSelector := fields.SelectorFromSet(fields.Set{"metadata.namespace": env.GetPodNamespace()}) - o.options.NewCache = cache.BuilderWithOptions(cache.Options{ - SelectorsByObject: cache.SelectorsByObject{ - &mcv1alpha1.Gateway{}: { - Field: namespaceFieldSelector, - }, - &mcv1alpha2.ClusterSet{}: { - Field: namespaceFieldSelector, - }, - &mcv1alpha1.MemberClusterAnnounce{}: { - Field: 
namespaceFieldSelector, - }, + o.options.Cache.DefaultFieldSelector = namespaceFieldSelector + o.options.Cache.ByObject = map[controllerruntimeclient.Object]cache.ByObject{ + &mcv1alpha1.Gateway{}: { + Field: namespaceFieldSelector, }, - }) + &mcv1alpha2.ClusterSet{}: { + Field: namespaceFieldSelector, + }, + &mcv1alpha1.MemberClusterAnnounce{}: { + Field: namespaceFieldSelector, + }, + } // EndpointSlice is enabled in AntreaProxy by default since v1.11, so Antrea MC // will use EndpointSlice API by default to keep consistent with AntreaProxy. @@ -198,9 +206,13 @@ func setupManagerAndCertController(isLeader bool, o *Options) (manager.Manager, } o.ClusterCalimCRDAvailable = clusterClaimCRDAvailable - mgr, err := ctrl.NewManager(k8sConfig, o.options) + mgr, err := ctrl.NewManager(k8sConfig, manager.Options{ + Scheme: o.options.Scheme, + Metrics: o.options.Metrics, + HealthProbeBindAddress: o.options.HealthProbeBindAddress, + }) if err != nil { - return nil, fmt.Errorf("error starting manager: %v", err) + return nil, fmt.Errorf("error creating manager: %v", err) } //+kubebuilder:scaffold:builder diff --git a/multicluster/cmd/multicluster-controller/gateway_webhook_test.go b/multicluster/cmd/multicluster-controller/gateway_webhook_test.go index 210eb67e71f..b2bec75ca66 100644 --- a/multicluster/cmd/multicluster-controller/gateway_webhook_test.go +++ b/multicluster/cmd/multicluster-controller/gateway_webhook_test.go @@ -26,7 +26,6 @@ import ( authenticationv1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -173,10 +172,7 @@ func TestWebhookGatewayEvents(t *testing.T) { }, } - decoder, err := admission.NewDecoder(common.TestScheme) - if err != nil { - klog.ErrorS(err, "Error constructing a decoder") - } + decoder := admission.NewDecoder(common.TestScheme) for _, tt := range tests { 
fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects().Build() if tt.existingGateway != nil { diff --git a/multicluster/cmd/multicluster-controller/leader.go b/multicluster/cmd/multicluster-controller/leader.go index 2fee6958c43..a9ad3a01cf4 100644 --- a/multicluster/cmd/multicluster-controller/leader.go +++ b/multicluster/cmd/multicluster-controller/leader.go @@ -52,7 +52,7 @@ func newLeaderCommand() *cobra.Command { func runLeader(o *Options) error { // on the leader we want the reconciler to run for a given Namespace instead of cluster scope podNamespace := env.GetPodNamespace() - o.options.Namespace = podNamespace + o.Namespace = podNamespace stopCh := signals.RegisterSignalHandlers() mgr, err := setupManagerAndCertControllerFunc(true, o) diff --git a/multicluster/cmd/multicluster-controller/leader_test.go b/multicluster/cmd/multicluster-controller/leader_test.go index 26660b139f6..bdf73b033dc 100644 --- a/multicluster/cmd/multicluster-controller/leader_test.go +++ b/multicluster/cmd/multicluster-controller/leader_test.go @@ -29,7 +29,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache/informertest" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/config" "sigs.k8s.io/controller-runtime/pkg/webhook" "antrea.io/antrea/multicluster/controllers/multicluster/common" @@ -39,13 +39,13 @@ import ( func initMockManager(mockManager *mocks.MockManager) { fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects().Build() - mockManager.EXPECT().GetWebhookServer().Return(&webhook.Server{}).AnyTimes() - mockManager.EXPECT().GetWebhookServer().Return(&webhook.Server{}).AnyTimes() + mockManager.EXPECT().GetWebhookServer().Return(&webhook.DefaultServer{}).AnyTimes() + mockManager.EXPECT().GetWebhookServer().Return(&webhook.DefaultServer{}).AnyTimes() 
mockManager.EXPECT().GetClient().Return(fakeClient).AnyTimes() mockManager.EXPECT().GetScheme().Return(common.TestScheme).AnyTimes() - mockManager.EXPECT().GetControllerOptions().Return(v1alpha1.ControllerConfigurationSpec{}).AnyTimes() + mockManager.EXPECT().GetControllerOptions().Return(config.Controller{}).AnyTimes() + mockManager.EXPECT().GetCache().Return(&informertest.FakeInformers{}).AnyTimes() mockManager.EXPECT().GetLogger().Return(klog.NewKlogr()).AnyTimes() - mockManager.EXPECT().SetFields(gomock.Any()).Return(nil).AnyTimes() mockManager.EXPECT().Add(gomock.Any()).Return(nil).AnyTimes() mockManager.EXPECT().Start(gomock.Any()).Return(nil).AnyTimes() mockManager.EXPECT().GetConfig().Return(&rest.Config{}).AnyTimes() diff --git a/multicluster/cmd/multicluster-controller/memberclusterannounce_webhook_test.go b/multicluster/cmd/multicluster-controller/memberclusterannounce_webhook_test.go index 3d5bb19f7df..640e8e0b129 100644 --- a/multicluster/cmd/multicluster-controller/memberclusterannounce_webhook_test.go +++ b/multicluster/cmd/multicluster-controller/memberclusterannounce_webhook_test.go @@ -27,7 +27,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -252,10 +251,7 @@ func TestMemberClusterAnnounceWebhook(t *testing.T) { }, } - decoder, err := admission.NewDecoder(common.TestScheme) - if err != nil { - klog.ErrorS(err, "Error constructing a decoder") - } + decoder := admission.NewDecoder(common.TestScheme) for _, tt := range tests { fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects().WithLists(existingServiceAccounts).Build() if tt.existingClusterSet != nil { diff --git a/multicluster/cmd/multicluster-controller/options.go b/multicluster/cmd/multicluster-controller/options.go index f5b8358a489..f28e6439e52 100644 --- 
a/multicluster/cmd/multicluster-controller/options.go +++ b/multicluster/cmd/multicluster-controller/options.go @@ -17,10 +17,15 @@ package main import ( "fmt" "net" + "os" "github.com/spf13/pflag" + "gopkg.in/yaml.v2" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" mcsv1alpha1 "antrea.io/antrea/multicluster/apis/multicluster/v1alpha1" "antrea.io/antrea/multicluster/controllers/multicluster/common" @@ -31,6 +36,7 @@ type Options struct { configFile string SelfSignedCert bool options ctrl.Options + Namespace string // The Service ClusterIP range used in the member cluster. ServiceCIDR string // PodCIDRs is the Pod IP address CIDRs of the member cluster. @@ -49,6 +55,8 @@ type Options struct { // ClusterCalimCRDAvailable indicates if the ClusterClaim CRD is available or not // in the cluster. ClusterCalimCRDAvailable bool + // WebhookConfig contains the controllers webhook configuration + WebhookConfig mcsv1alpha1.ControllerWebhook } func newOptions() *Options { @@ -64,10 +72,8 @@ func (o *Options) complete(args []string) error { ctrlConfig := &mcsv1alpha1.MultiClusterConfig{} if len(o.configFile) > 0 { klog.InfoS("Loading config", "file", o.configFile) - options, err = options.AndFrom(ctrl.ConfigFile().AtPath(o.configFile).OfKind(ctrlConfig)) - if err != nil { - klog.ErrorS(err, "Failed to load options") - return fmt.Errorf("failed to load options from configuration file %s", o.configFile) + if err = o.loadConfigFromFile(ctrlConfig); err != nil { + return err } o.options = options if ctrlConfig.ServiceCIDR != "" { @@ -87,6 +93,7 @@ func (o *Options) complete(args []string) error { o.ServiceCIDR = ctrlConfig.ServiceCIDR o.PodCIDRs = cidrs o.GatewayIPPrecedence = ctrlConfig.GatewayIPPrecedence + o.WebhookConfig = ctrlConfig.Webhook if ctrlConfig.EndpointIPType == "" { o.EndpointIPType = 
common.EndpointIPTypeClusterIP } else { @@ -110,10 +117,33 @@ func (o *Options) addFlags(fs *pflag.FlagSet) { func (o *Options) setDefaults() { o.options = ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: "0", - Port: 9443, + Scheme: scheme, + Metrics: metricsserver.Options{ + BindAddress: "0", + }, HealthProbeBindAddress: ":8080", - LeaderElection: false, } } + +func (o *Options) loadConfigFromFile(multiclusterConfig *mcsv1alpha1.MultiClusterConfig) error { + data, err := os.ReadFile(o.configFile) + if err != nil { + return err + } + codecs := serializer.NewCodecFactory(scheme) + if err := yaml.Unmarshal(data, multiclusterConfig); err != nil { + return err + } + if err = runtime.DecodeInto(codecs.UniversalDecoder(), data, multiclusterConfig); err != nil { + return err + } + + if multiclusterConfig.Metrics.BindAddress != "" { + o.options.Metrics.BindAddress = multiclusterConfig.Metrics.BindAddress + } + if multiclusterConfig.Health.HealthProbeBindAddress != "" { + o.options.HealthProbeBindAddress = multiclusterConfig.Health.HealthProbeBindAddress + } + + return nil +} diff --git a/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-empty-podcidrs.yml b/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-empty-podcidrs.yml index decbb468ccb..a3fe15cadf3 100644 --- a/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-empty-podcidrs.yml +++ b/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-empty-podcidrs.yml @@ -6,8 +6,6 @@ metrics: bindAddress: "0" webhook: port: 9443 -leaderElection: - leaderElect: false serviceCIDR: "" podCIDRs: - "" diff --git a/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-invalid-endpointiptype.yml b/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-invalid-endpointiptype.yml index ccaeb9a2c25..de97c067924 100644 --- 
a/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-invalid-endpointiptype.yml +++ b/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-invalid-endpointiptype.yml @@ -6,8 +6,6 @@ metrics: bindAddress: "0" webhook: port: 9443 -leaderElection: - leaderElect: false serviceCIDR: "" podCIDRs: - "10.10.0.0/16" diff --git a/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-invalid-podcidrs.yml b/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-invalid-podcidrs.yml index 5a84017356e..18d827dcac4 100644 --- a/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-invalid-podcidrs.yml +++ b/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-invalid-podcidrs.yml @@ -6,8 +6,6 @@ metrics: bindAddress: "0" webhook: port: 9443 -leaderElection: - leaderElect: false serviceCIDR: "10.100.0.0/16" podCIDRs: - "10.10a.0.0/16" diff --git a/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-valid-podcidrs.yml b/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-valid-podcidrs.yml index 0638f8f284f..539ece2c0a7 100644 --- a/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-valid-podcidrs.yml +++ b/multicluster/cmd/multicluster-controller/testdata/antrea-mc-config-with-valid-podcidrs.yml @@ -6,8 +6,6 @@ metrics: bindAddress: "0" webhook: port: 9443 -leaderElection: - leaderElect: false serviceCIDR: "" podCIDRs: - "10.10.0.0/16" diff --git a/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceexports.yaml b/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceexports.yaml index 9140f80d34f..00aaf827a9b 100644 --- a/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceexports.yaml +++ b/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceexports.yaml @@ -2293,13 +2293,10 @@ spec: description: The Hostname of this endpoint type: string ip: - description: 
'The IP of this endpoint. May not be - loopback (127.0.0.0/8), link-local (169.254.0.0/16), - or link-local multicast ((224.0.0.0/24). IPv6 is - also accepted but not fully supported on all platforms. - Also, certain kubernetes components, like kube-proxy, - are not IPv6 ready. TODO: This should allow hostname - or IP, See #4447.' + description: The IP of this endpoint. May not be loopback + (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 + or fe80::/10), or link-local multicast (224.0.0.0/24 + or ff02::/16). type: string nodeName: description: 'Optional: Node hosting this endpoint. @@ -2367,13 +2364,10 @@ spec: description: The Hostname of this endpoint type: string ip: - description: 'The IP of this endpoint. May not be - loopback (127.0.0.0/8), link-local (169.254.0.0/16), - or link-local multicast ((224.0.0.0/24). IPv6 is - also accepted but not fully supported on all platforms. - Also, certain kubernetes components, like kube-proxy, - are not IPv6 ready. TODO: This should allow hostname - or IP, See #4447.' + description: The IP of this endpoint. May not be loopback + (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 + or fe80::/10), or link-local multicast (224.0.0.0/24 + or ff02::/16). type: string nodeName: description: 'Optional: Node hosting this endpoint. @@ -2435,12 +2429,22 @@ spec: single port. properties: appProtocol: - description: The application protocol for this port. + description: "The application protocol for this port. + This is used as a hint for implementations to offer + richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. - Un-prefixed names are reserved for IANA standard - service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). - Non-standard protocols should use prefixed names - such as mycompany.com/my-custom-protocol. 
+ Valid values are either: \n * Un-prefixed protocol + names - reserved for IANA standard service names + (as per RFC-6335 and https://www.iana.org/assignments/service-names). + \n * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' + - HTTP/2 prior knowledge over cleartext as described + in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext + as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described + in https://www.rfc-editor.org/rfc/rfc6455 \n * Other + protocols should use implementation-defined prefixed + names such as mycompany.com/my-custom-protocol." type: string name: description: The name of this port. This must match @@ -2726,11 +2730,10 @@ spec: supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was - under-specified and its meaning varies across implementations, - and it cannot support dual-stack. As of Kubernetes v1.24, - users are encouraged to use implementation-specific annotations - when available. This field may be removed in a future API - version.' + under-specified and its meaning varies across implementations. + Using it is non-portable and it may not support dual-stack. + Users are encouraged to use implementation-specific annotations + when available.' type: string loadBalancerSourceRanges: description: 'If specified and supported by the platform, @@ -2749,12 +2752,22 @@ spec: port. properties: appProtocol: - description: The application protocol for this port. + description: "The application protocol for this port. + This is used as a hint for implementations to offer + richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
- Un-prefixed names are reserved for IANA standard service - names (as per RFC-6335 and https://www.iana.org/assignments/service-names). - Non-standard protocols should use prefixed names such - as mycompany.com/my-custom-protocol. + Valid values are either: \n * Un-prefixed protocol + names - reserved for IANA standard service names (as + per RFC-6335 and https://www.iana.org/assignments/service-names). + \n * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' + - HTTP/2 prior knowledge over cleartext as described + in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext as + described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described + in https://www.rfc-editor.org/rfc/rfc6455 \n * Other + protocols should use implementation-defined prefixed + names such as mycompany.com/my-custom-protocol." type: string name: description: The name of this port within the service. diff --git a/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceimports.yaml b/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceimports.yaml index fafe9bec89f..34a5b22ab0a 100644 --- a/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceimports.yaml +++ b/multicluster/config/crd/bases/multicluster.crd.antrea.io_resourceimports.yaml @@ -2291,13 +2291,10 @@ spec: description: The Hostname of this endpoint type: string ip: - description: 'The IP of this endpoint. May not be - loopback (127.0.0.0/8), link-local (169.254.0.0/16), - or link-local multicast ((224.0.0.0/24). IPv6 is - also accepted but not fully supported on all platforms. - Also, certain kubernetes components, like kube-proxy, - are not IPv6 ready. TODO: This should allow hostname - or IP, See #4447.' + description: The IP of this endpoint. 
May not be loopback + (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 + or fe80::/10), or link-local multicast (224.0.0.0/24 + or ff02::/16). type: string nodeName: description: 'Optional: Node hosting this endpoint. @@ -2365,13 +2362,10 @@ spec: description: The Hostname of this endpoint type: string ip: - description: 'The IP of this endpoint. May not be - loopback (127.0.0.0/8), link-local (169.254.0.0/16), - or link-local multicast ((224.0.0.0/24). IPv6 is - also accepted but not fully supported on all platforms. - Also, certain kubernetes components, like kube-proxy, - are not IPv6 ready. TODO: This should allow hostname - or IP, See #4447.' + description: The IP of this endpoint. May not be loopback + (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 + or fe80::/10), or link-local multicast (224.0.0.0/24 + or ff02::/16). type: string nodeName: description: 'Optional: Node hosting this endpoint. @@ -2433,12 +2427,22 @@ spec: single port. properties: appProtocol: - description: The application protocol for this port. + description: "The application protocol for this port. + This is used as a hint for implementations to offer + richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. - Un-prefixed names are reserved for IANA standard - service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). - Non-standard protocols should use prefixed names - such as mycompany.com/my-custom-protocol. + Valid values are either: \n * Un-prefixed protocol + names - reserved for IANA standard service names + (as per RFC-6335 and https://www.iana.org/assignments/service-names). 
+ \n * Kubernetes-defined prefixed names: * 'kubernetes.io/h2c' + - HTTP/2 prior knowledge over cleartext as described + in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + * 'kubernetes.io/ws' - WebSocket over cleartext + as described in https://www.rfc-editor.org/rfc/rfc6455 + * 'kubernetes.io/wss' - WebSocket over TLS as described + in https://www.rfc-editor.org/rfc/rfc6455 \n * Other + protocols should use implementation-defined prefixed + names such as mycompany.com/my-custom-protocol." type: string name: description: The name of this port. This must match diff --git a/multicluster/config/default/configmap/controller_manager_config.yaml b/multicluster/config/default/configmap/controller_manager_config.yaml index 853e3d117a2..f811380d396 100644 --- a/multicluster/config/default/configmap/controller_manager_config.yaml +++ b/multicluster/config/default/configmap/controller_manager_config.yaml @@ -6,8 +6,6 @@ metrics: bindAddress: "0" webhook: port: 9443 -leaderElection: - leaderElect: false serviceCIDR: "" podCIDRs: - "" diff --git a/multicluster/controllers/multicluster/commonarea/remote_common_area.go b/multicluster/controllers/multicluster/commonarea/remote_common_area.go index 82d7e88df28..3f6384035d8 100644 --- a/multicluster/controllers/multicluster/commonarea/remote_common_area.go +++ b/multicluster/controllers/multicluster/commonarea/remote_common_area.go @@ -31,8 +31,10 @@ import ( "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" mcv1alpha1 "antrea.io/antrea/multicluster/apis/multicluster/v1alpha1" mcv1alpha2 "antrea.io/antrea/multicluster/apis/multicluster/v1alpha2" @@ -153,9 +155,15 @@ func GetRemoteConfigAndClient(secretObj *v1.Secret, url string, clusterID common config.QPS = 
common.ResourceExchangeQPS config.Burst = common.ResourceExchangeBurst remoteCommonAreaMgr, err := ctrl.NewManager(config, ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: "0", - Namespace: clusterSet.Spec.Namespace, + Scheme: scheme, + Metrics: metricsserver.Options{ + BindAddress: "0", + }, + Cache: cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + clusterSet.Spec.Namespace: {}, + }, + }, }) if err != nil { klog.ErrorS(err, "Error creating manager for RemoteCommonArea", "cluster", clusterID) diff --git a/multicluster/controllers/multicluster/leader/clusterset_controller_test.go b/multicluster/controllers/multicluster/leader/clusterset_controller_test.go index 0c6a0045264..c2d79465cff 100644 --- a/multicluster/controllers/multicluster/leader/clusterset_controller_test.go +++ b/multicluster/controllers/multicluster/leader/clusterset_controller_test.go @@ -81,7 +81,7 @@ var ( func createMockClients(t *testing.T, objects ...client.Object) (client.Client, *MockMemberClusterStatusManager) { fakeRemoteClient := fake.NewClientBuilder().WithScheme(common.TestScheme). 
- WithObjects(objects...).Build() + WithObjects(objects...).WithStatusSubresource(objects...).Build() mockCtrl := gomock.NewController(t) mockStatusManager := NewMockMemberClusterStatusManager(mockCtrl) diff --git a/multicluster/controllers/multicluster/leader/resourceexport_controller_test.go b/multicluster/controllers/multicluster/leader/resourceexport_controller_test.go index bc720ccf35b..c678eb270b2 100644 --- a/multicluster/controllers/multicluster/leader/resourceexport_controller_test.go +++ b/multicluster/controllers/multicluster/leader/resourceexport_controller_test.go @@ -167,7 +167,8 @@ func TestResourceExportReconciler_handleEndpointsExportDeleteEvent(t *testing.T) } expectedSubsets := common.EPNginxSubset2 namespacedName := types.NamespacedName{Namespace: "default", Name: "default-nginx-endpoints"} - fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(existingResExport1, existingResExport2, existResImport).Build() + fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(existingResExport1, existingResExport2, existResImport). + WithStatusSubresource(existingResExport1, existingResExport2, existResImport).Build() r := NewResourceExportReconciler(fakeClient, common.TestScheme) if _, err := r.Reconcile(common.TestCtx, epResReq); err != nil { t.Errorf("ResourceExport Reconciler should handle Endpoints ResourceExport delete event successfully but got error = %v", err) @@ -383,7 +384,7 @@ func TestResourceExportReconciler_handleSingleServiceUpdateEvent(t *testing.T) { } namespacedName := types.NamespacedName{Namespace: "default", Name: "default-nginx-service"} fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme). 
- WithObjects(newResExport, existResImport).Build() + WithObjects(newResExport, existResImport).WithStatusSubresource(newResExport, existResImport).Build() r := NewResourceExportReconciler(fakeClient, common.TestScheme) if _, err := r.Reconcile(common.TestCtx, svcResReq); err != nil { t.Errorf("ResourceExport Reconciler should handle Service ResourceExport update event successfully but got error = %v", err) @@ -419,7 +420,7 @@ func TestResourceExportReconciler_handleServiceUpdateEvent(t *testing.T) { } fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme). - WithObjects(newResExport, existingResExport2, existResImport).Build() + WithObjects(newResExport, existingResExport2, existResImport).WithStatusSubresource(newResExport, existingResExport2, existResImport).Build() r := NewResourceExportReconciler(fakeClient, common.TestScheme) if _, err := r.Reconcile(common.TestCtx, svcResReq); err != nil { if !assert.Contains(t, err.Error(), "don't match existing") { diff --git a/multicluster/controllers/multicluster/leader/stale_controller.go b/multicluster/controllers/multicluster/leader/stale_controller.go index 6b4fd9892a7..cf4ef7bee3c 100644 --- a/multicluster/controllers/multicluster/leader/stale_controller.go +++ b/multicluster/controllers/multicluster/leader/stale_controller.go @@ -97,7 +97,7 @@ func (c *StaleResCleanupController) Run(stopCh <-chan struct{}) { klog.InfoS("Starting StaleResCleanupController") defer klog.InfoS("Shutting down StaleResCleanupController") - ctx, _ := wait.ContextForChannel(stopCh) + ctx := wait.ContextForChannel(stopCh) go wait.UntilWithContext(ctx, c.cleanUpExpiredMemberClusterAnnounces, memberClusterAnnounceStaleTime/2) <-stopCh } diff --git a/multicluster/controllers/multicluster/leader/stale_controller_test.go b/multicluster/controllers/multicluster/leader/stale_controller_test.go index a16a66aaefc..8b54dc17879 100644 --- a/multicluster/controllers/multicluster/leader/stale_controller_test.go +++ 
b/multicluster/controllers/multicluster/leader/stale_controller_test.go @@ -76,6 +76,7 @@ func TestReconcile(t *testing.T) { Name: "member-announce-from-cluster-1", Namespace: "default", DeletionTimestamp: &now, + Finalizers: []string{"test-membercluster-announce-finalizer"}, }, ClusterID: "cluster-1", } @@ -120,7 +121,8 @@ func TestReconcile(t *testing.T) { defer func() { getResourceExportsByClusterIDFunc = getResourceExportsByClusterID }() - fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithLists(tt.existingResExports).WithObjects(tt.existingMemberAnnounce).Build() + fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithLists(tt.existingResExports). + WithObjects(tt.existingMemberAnnounce).WithStatusSubresource(tt.existingMemberAnnounce).Build() c := NewStaleResCleanupController(fakeClient, common.TestScheme) ctx := context.Background() _, err := c.Reconcile(ctx, ctrl.Request{ diff --git a/multicluster/controllers/multicluster/member/clusterset_controller_test.go b/multicluster/controllers/multicluster/member/clusterset_controller_test.go index f4abea90432..e4440ee8818 100644 --- a/multicluster/controllers/multicluster/member/clusterset_controller_test.go +++ b/multicluster/controllers/multicluster/member/clusterset_controller_test.go @@ -114,8 +114,8 @@ func TestMemberClusterStatus(t *testing.T) { ObservedGeneration: 1, }, } - fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(existingClusterSet).Build() - fakeRemoteClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(existingClusterSet).Build() + fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(existingClusterSet).WithStatusSubresource(existingClusterSet).Build() + fakeRemoteClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(existingClusterSet).WithStatusSubresource(existingClusterSet).Build() conditions := []mcv1alpha2.ClusterCondition{ { Message: "Member 
Connected", diff --git a/multicluster/controllers/multicluster/member/gateway_controller.go b/multicluster/controllers/multicluster/member/gateway_controller.go index a2e561f20f3..7c2ed5f5ef7 100644 --- a/multicluster/controllers/multicluster/member/gateway_controller.go +++ b/multicluster/controllers/multicluster/member/gateway_controller.go @@ -31,7 +31,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" "antrea.io/antrea/multicluster/apis/multicluster/constants" mcv1alpha1 "antrea.io/antrea/multicluster/apis/multicluster/v1alpha1" @@ -186,7 +185,7 @@ func (r *GatewayReconciler) createResourceExport(ctx context.Context, req ctrl.R func (r *GatewayReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&mcv1alpha1.Gateway{}). - Watches(&source.Kind{Type: &mcv1alpha2.ClusterSet{}}, handler.EnqueueRequestsFromMapFunc(r.clusterSetMapFunc), + Watches(&mcv1alpha2.ClusterSet{}, handler.EnqueueRequestsFromMapFunc(r.clusterSetMapFunc), builder.WithPredicates(statusReadyPredicate)). 
WithOptions(controller.Options{ // TODO: add a lock for r.serviceCIDR and r.localClusterID if @@ -196,13 +195,12 @@ func (r *GatewayReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *GatewayReconciler) clusterSetMapFunc(a client.Object) []reconcile.Request { +func (r *GatewayReconciler) clusterSetMapFunc(ctx context.Context, a client.Object) []reconcile.Request { clusterSet := &mcv1alpha2.ClusterSet{} requests := []reconcile.Request{} if a.GetNamespace() != r.namespace { return requests } - ctx := context.TODO() err := r.Client.Get(ctx, types.NamespacedName{Namespace: a.GetNamespace(), Name: a.GetName()}, clusterSet) if err == nil { if len(clusterSet.Status.Conditions) > 0 && clusterSet.Status.Conditions[0].Status == v1.ConditionTrue { diff --git a/multicluster/controllers/multicluster/member/gateway_controller_test.go b/multicluster/controllers/multicluster/member/gateway_controller_test.go index d8b11f28b35..95361c0a328 100644 --- a/multicluster/controllers/multicluster/member/gateway_controller_test.go +++ b/multicluster/controllers/multicluster/member/gateway_controller_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package member import ( + "context" "reflect" "testing" "time" @@ -81,6 +82,7 @@ func TestGatewayReconciler(t *testing.T) { gwNode1New.GatewayIP = "10.10.10.12" staleExistingResExport := existingResExport.DeepCopy() staleExistingResExport.DeletionTimestamp = &metav1.Time{Time: time.Now()} + staleExistingResExport.Finalizers = append(staleExistingResExport.Finalizers, constants.ResourceExportFinalizer) tests := []struct { name string namespacedName types.NamespacedName @@ -148,10 +150,10 @@ func TestGatewayReconciler(t *testing.T) { node := n obj = append(obj, &node) } - fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(obj...).Build() - fakeRemoteClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects().Build() + fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(obj...).WithStatusSubresource(obj...).Build() + fakeRemoteClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects().WithStatusSubresource().Build() if tt.resExport != nil { - fakeRemoteClient = fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(tt.resExport).Build() + fakeRemoteClient = fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(tt.resExport).WithStatusSubresource(tt.resExport).Build() } commonArea := commonarea.NewFakeRemoteCommonArea(fakeRemoteClient, "leader-cluster", common.LocalClusterID, common.LeaderNamespace, nil) mcReconciler := NewMemberClusterSetReconciler(fakeClient, common.TestScheme, "default", false, false, make(chan struct{})) @@ -254,15 +256,17 @@ func TestClusterSetMapFunc_Gateway(t *testing.T) { }, }, } + ctx := context.Background() + fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(clusterSet, gw1).Build() r := NewGatewayReconciler(fakeClient, common.TestScheme, "default", []string{"10.200.1.1/16"}, nil) - requests := r.clusterSetMapFunc(clusterSet) + requests := r.clusterSetMapFunc(ctx, clusterSet) assert.Equal(t, 
expectedReqs, requests) - requests = r.clusterSetMapFunc(deletedClusterSet) + requests = r.clusterSetMapFunc(ctx, deletedClusterSet) assert.Equal(t, []reconcile.Request{}, requests) r = NewGatewayReconciler(fakeClient, common.TestScheme, "mismatch_ns", []string{"10.200.1.1/16"}, nil) - requests = r.clusterSetMapFunc(clusterSet) + requests = r.clusterSetMapFunc(ctx, clusterSet) assert.Equal(t, []reconcile.Request{}, requests) } diff --git a/multicluster/controllers/multicluster/member/labelidentity_controller.go b/multicluster/controllers/multicluster/member/labelidentity_controller.go index 6d333e25021..254287d5af3 100644 --- a/multicluster/controllers/multicluster/member/labelidentity_controller.go +++ b/multicluster/controllers/multicluster/member/labelidentity_controller.go @@ -38,7 +38,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" "antrea.io/antrea/multicluster/apis/multicluster/constants" mcv1alpha1 "antrea.io/antrea/multicluster/apis/multicluster/v1alpha1" @@ -135,10 +134,10 @@ func (r *LabelIdentityReconciler) checkRemoteCommonArea() bool { func (r *LabelIdentityReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&v1.Pod{}, builder.WithPredicates(predicate.LabelChangedPredicate{})). - Watches(&source.Kind{Type: &v1.Namespace{}}, + Watches(&v1.Namespace{}, handler.EnqueueRequestsFromMapFunc(r.namespaceMapFunc), builder.WithPredicates(predicate.LabelChangedPredicate{})). - Watches(&source.Kind{Type: &mcv1alpha2.ClusterSet{}}, + Watches(&mcv1alpha2.ClusterSet{}, handler.EnqueueRequestsFromMapFunc(r.clusterSetMapFunc), builder.WithPredicates(statusReadyPredicate)). 
WithOptions(controller.Options{ @@ -147,13 +146,12 @@ func (r *LabelIdentityReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *LabelIdentityReconciler) clusterSetMapFunc(a client.Object) []reconcile.Request { +func (r *LabelIdentityReconciler) clusterSetMapFunc(ctx context.Context, a client.Object) []reconcile.Request { clusterSet := &mcv1alpha2.ClusterSet{} requests := []reconcile.Request{} if a.GetNamespace() != r.namespace { return requests } - ctx := context.TODO() err := r.Client.Get(ctx, types.NamespacedName{Namespace: a.GetNamespace(), Name: a.GetName()}, clusterSet) if err == nil { if len(clusterSet.Status.Conditions) > 0 && clusterSet.Status.Conditions[0].Status == v1.ConditionTrue { @@ -181,7 +179,7 @@ func (r *LabelIdentityReconciler) clusterSetMapFunc(a client.Object) []reconcile // namespaceMapFunc handles Namespace update events (Namespace label change) by enqueuing // all Pods in the Namespace into the reconciler processing queue. -func (r *LabelIdentityReconciler) namespaceMapFunc(ns client.Object) []reconcile.Request { +func (r *LabelIdentityReconciler) namespaceMapFunc(ctx context.Context, ns client.Object) []reconcile.Request { podList := &v1.PodList{} r.Client.List(context.TODO(), podList, client.InNamespace(ns.GetName())) requests := make([]reconcile.Request, len(podList.Items)) diff --git a/multicluster/controllers/multicluster/member/labelidentity_controller_test.go b/multicluster/controllers/multicluster/member/labelidentity_controller_test.go index 98b63ec22e2..dd3dedfc4a4 100644 --- a/multicluster/controllers/multicluster/member/labelidentity_controller_test.go +++ b/multicluster/controllers/multicluster/member/labelidentity_controller_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package member import ( + "context" "reflect" "testing" "time" @@ -246,7 +247,7 @@ func TestNamespaceMapFunc(t *testing.T) { mcReconciler.SetRemoteCommonArea(commonArea) r := NewLabelIdentityReconciler(fakeClient, common.TestScheme, mcReconciler, "default") - actualReq := r.namespaceMapFunc(ns) + actualReq := r.namespaceMapFunc(context.TODO(), ns) assert.ElementsMatch(t, expReq, actualReq) } @@ -342,18 +343,18 @@ func TestClusterSetMapFunc_LabelIdentity(t *testing.T) { } fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(clusterSet).WithLists(pods).Build() r := NewLabelIdentityReconciler(fakeClient, common.TestScheme, nil, clusterSet.Namespace) - requests := r.clusterSetMapFunc(clusterSet) + requests := r.clusterSetMapFunc(context.TODO(), clusterSet) assert.Equal(t, expectedReqs, requests) r = NewLabelIdentityReconciler(fakeClient, common.TestScheme, nil, "mismatch_ns") - requests = r.clusterSetMapFunc(clusterSet) + requests = r.clusterSetMapFunc(context.TODO(), clusterSet) assert.Equal(t, []reconcile.Request{}, requests) // non-existing ClusterSet r = NewLabelIdentityReconciler(fakeClient, common.TestScheme, nil, "default") r.labelToPodsCache["label"] = sets.New[string]("default/nginx") r.podLabelCache["default/nginx"] = "label" - requests = r.clusterSetMapFunc(clusterSet2) + requests = r.clusterSetMapFunc(context.TODO(), clusterSet2) assert.Equal(t, []reconcile.Request{}, requests) assert.Equal(t, 0, len(r.labelToPodsCache)) assert.Equal(t, 0, len(r.labelToPodsCache)) diff --git a/multicluster/controllers/multicluster/member/node_controller.go b/multicluster/controllers/multicluster/member/node_controller.go index 52a1eb14884..2a883943b67 100644 --- a/multicluster/controllers/multicluster/member/node_controller.go +++ b/multicluster/controllers/multicluster/member/node_controller.go @@ -36,7 +36,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" 
"sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" mcv1alpha1 "antrea.io/antrea/multicluster/apis/multicluster/v1alpha1" mcv1alpha2 "antrea.io/antrea/multicluster/apis/multicluster/v1alpha2" @@ -373,7 +372,7 @@ func (r *NodeReconciler) SetupWithManager(mgr ctrl.Manager) error { } return ctrl.NewControllerManagedBy(mgr). For(&corev1.Node{}). - Watches(&source.Kind{Type: &mcv1alpha2.ClusterSet{}}, + Watches(&mcv1alpha2.ClusterSet{}, handler.EnqueueRequestsFromMapFunc(r.clusterSetMapFunc), builder.WithPredicates(statusReadyPredicate)). WithOptions(controller.Options{ @@ -382,13 +381,12 @@ func (r *NodeReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *NodeReconciler) clusterSetMapFunc(a client.Object) []reconcile.Request { +func (r *NodeReconciler) clusterSetMapFunc(ctx context.Context, a client.Object) []reconcile.Request { clusterSet := &mcv1alpha2.ClusterSet{} requests := []reconcile.Request{} if a.GetNamespace() != r.namespace { return requests } - ctx := context.TODO() err := r.Client.Get(ctx, types.NamespacedName{Namespace: a.GetNamespace(), Name: a.GetName()}, clusterSet) if err == nil { if len(clusterSet.Status.Conditions) > 0 && clusterSet.Status.Conditions[0].Status == corev1.ConditionTrue { diff --git a/multicluster/controllers/multicluster/member/node_controller_test.go b/multicluster/controllers/multicluster/member/node_controller_test.go index 0484498960d..e8ac2c3a68f 100644 --- a/multicluster/controllers/multicluster/member/node_controller_test.go +++ b/multicluster/controllers/multicluster/member/node_controller_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package member import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -373,16 +374,18 @@ func TestClusterSetMapFunc(t *testing.T) { }, }, } + ctx := context.Background() + fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(clusterSet, node1).Build() r := NewNodeReconciler(fakeClient, common.TestScheme, "default", "10.200.1.1/16", "", nil) - requests := r.clusterSetMapFunc(clusterSet) + requests := r.clusterSetMapFunc(ctx, clusterSet) assert.Equal(t, expectedReqs, requests) - requests = r.clusterSetMapFunc(deletedClusterSet) + requests = r.clusterSetMapFunc(ctx, deletedClusterSet) assert.Equal(t, []reconcile.Request{}, requests) r = NewNodeReconciler(fakeClient, common.TestScheme, "mismatch_ns", "10.200.1.1/16", "", nil) - requests = r.clusterSetMapFunc(clusterSet) + requests = r.clusterSetMapFunc(ctx, clusterSet) assert.Equal(t, []reconcile.Request{}, requests) } diff --git a/multicluster/controllers/multicluster/member/serviceexport_controller.go b/multicluster/controllers/multicluster/member/serviceexport_controller.go index 3f8635008a8..557ad9faeb7 100644 --- a/multicluster/controllers/multicluster/member/serviceexport_controller.go +++ b/multicluster/controllers/multicluster/member/serviceexport_controller.go @@ -39,7 +39,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" k8smcsv1alpha1 "sigs.k8s.io/mcs-api/pkg/apis/v1alpha1" "antrea.io/antrea/multicluster/apis/multicluster/constants" @@ -506,9 +505,9 @@ func (r *ServiceExportReconciler) SetupWithManager(mgr ctrl.Manager) error { if r.endpointSliceEnabled { return ctrl.NewControllerManagedBy(mgr). For(&k8smcsv1alpha1.ServiceExport{}, versionChangePredicates). - Watches(&source.Kind{Type: &corev1.Service{}}, handler.EnqueueRequestsFromMapFunc(objectMapFunc), versionChangePredicates). 
- Watches(&source.Kind{Type: &discovery.EndpointSlice{}}, handler.EnqueueRequestsFromMapFunc(endpointSliceMapFunc), versionChangePredicates). - Watches(&source.Kind{Type: &mcv1alpha2.ClusterSet{}}, handler.EnqueueRequestsFromMapFunc(r.clusterSetMapFunc), + Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(objectMapFunc), versionChangePredicates). + Watches(&discovery.EndpointSlice{}, handler.EnqueueRequestsFromMapFunc(endpointSliceMapFunc), versionChangePredicates). + Watches(&mcv1alpha2.ClusterSet{}, handler.EnqueueRequestsFromMapFunc(r.clusterSetMapFunc), builder.WithPredicates(statusReadyPredicate)). WithOptions(controller.Options{ MaxConcurrentReconciles: common.DefaultWorkerCount, @@ -517,9 +516,9 @@ func (r *ServiceExportReconciler) SetupWithManager(mgr ctrl.Manager) error { } return ctrl.NewControllerManagedBy(mgr). For(&k8smcsv1alpha1.ServiceExport{}, versionChangePredicates). - Watches(&source.Kind{Type: &corev1.Service{}}, handler.EnqueueRequestsFromMapFunc(objectMapFunc), versionChangePredicates). - Watches(&source.Kind{Type: &corev1.Endpoints{}}, handler.EnqueueRequestsFromMapFunc(objectMapFunc), versionChangePredicates). - Watches(&source.Kind{Type: &mcv1alpha2.ClusterSet{}}, handler.EnqueueRequestsFromMapFunc(r.clusterSetMapFunc), + Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(objectMapFunc), versionChangePredicates). + Watches(&corev1.Endpoints{}, handler.EnqueueRequestsFromMapFunc(objectMapFunc), versionChangePredicates). + Watches(&mcv1alpha2.ClusterSet{}, handler.EnqueueRequestsFromMapFunc(r.clusterSetMapFunc), builder.WithPredicates(statusReadyPredicate)). WithOptions(controller.Options{ MaxConcurrentReconciles: common.DefaultWorkerCount, @@ -529,14 +528,13 @@ func (r *ServiceExportReconciler) SetupWithManager(mgr ctrl.Manager) error { // clusterSetMapFunc handles ClusterSet events by enqueuing all ServiceExports // into the reconciler processing queue. 
-func (r *ServiceExportReconciler) clusterSetMapFunc(a client.Object) []reconcile.Request { +func (r *ServiceExportReconciler) clusterSetMapFunc(ctx context.Context, a client.Object) []reconcile.Request { clusterSet := &mcv1alpha2.ClusterSet{} requests := []reconcile.Request{} if a.GetNamespace() != r.namespace { return requests } - ctx := context.TODO() err := r.Client.Get(ctx, types.NamespacedName{Namespace: a.GetNamespace(), Name: a.GetName()}, clusterSet) if err == nil { if len(clusterSet.Status.Conditions) > 0 && clusterSet.Status.Conditions[0].Status == corev1.ConditionTrue { @@ -566,7 +564,7 @@ func (r *ServiceExportReconciler) clusterSetMapFunc(a client.Object) []reconcile // When there are any Service or Endpoints changes, it might be reflected in ResourceExport // in leader cluster as well, so ServiceExportReconciler also needs to watch // Service and Endpoints events. -func objectMapFunc(a client.Object) []reconcile.Request { +func objectMapFunc(ctx context.Context, a client.Object) []reconcile.Request { return []reconcile.Request{ { NamespacedName: types.NamespacedName{ @@ -577,7 +575,7 @@ func objectMapFunc(a client.Object) []reconcile.Request { } } -func endpointSliceMapFunc(a client.Object) []reconcile.Request { +func endpointSliceMapFunc(ctx context.Context, a client.Object) []reconcile.Request { labels := a.GetLabels() svcName := labels[discovery.LabelServiceName] mappedObject := types.NamespacedName{} diff --git a/multicluster/controllers/multicluster/member/serviceexport_controller_test.go b/multicluster/controllers/multicluster/member/serviceexport_controller_test.go index 9ec9a6c2a8f..91e2027c8ea 100644 --- a/multicluster/controllers/multicluster/member/serviceexport_controller_test.go +++ b/multicluster/controllers/multicluster/member/serviceexport_controller_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package member import ( + "context" "reflect" "testing" @@ -271,7 +272,9 @@ func TestServiceExportReconciler_CheckExportStatus(t *testing.T) { } fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(mcsSvc, nginx0Svc, nginx1Svc, nginx3Svc, svcNoClusterIP, nginx1EP, svcNoClusterIPEP, - nginx2Svc, existSvcExport, nginx0SvcExport, nginx1SvcExportWithStatus, nginx2SvcExportWithStatus, nginx3SvcExport, mcsSvcExport, svcExpNoClusterIP).Build() + nginx2Svc, existSvcExport, nginx0SvcExport, nginx1SvcExportWithStatus, nginx2SvcExportWithStatus, nginx3SvcExport, mcsSvcExport, svcExpNoClusterIP). + WithStatusSubresource(mcsSvc, nginx0Svc, nginx1Svc, nginx3Svc, svcNoClusterIP, nginx1EP, svcNoClusterIPEP, nginx0SvcExport, nginx1SvcExportWithStatus, nginx2SvcExportWithStatus, nginx3SvcExport, mcsSvcExport, svcExpNoClusterIP). + Build() fakeRemoteClient := fake.NewClientBuilder().WithScheme(common.TestScheme).Build() commonArea := commonarea.NewFakeRemoteCommonArea(fakeRemoteClient, "leader-cluster", common.LocalClusterID, "default", nil) @@ -335,13 +338,15 @@ func TestServiceExportReconciler_handleServiceExportCreateEvent(t *testing.T) { fakeClient client.WithWatch }{ { - name: "with Endpoint API", - fakeClient: fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(common.SvcNginx, common.EPNginx, existSvcExport).Build(), + name: "with Endpoint API", + fakeClient: fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(common.SvcNginx, common.EPNginx, existSvcExport). + WithStatusSubresource(common.SvcNginx, common.EPNginx, existSvcExport).Build(), endpointIPType: "ClusterIP", }, { - name: "with EndpointSlice API", - fakeClient: fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(common.SvcNginx, epsNginx, existSvcExport).Build(), + name: "with EndpointSlice API", + fakeClient: fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(common.SvcNginx, epsNginx, existSvcExport). 
+ WithStatusSubresource(common.SvcNginx, epsNginx, existSvcExport).Build(), endpointIPType: "PodIP", endpointSliceEnabled: true, }, @@ -568,6 +573,7 @@ func TestServiceExportReconciler_handleUpdateEvent(t *testing.T) { } func Test_objectMapFunc(t *testing.T) { + ctx := context.Background() tests := []struct { name string obj client.Object @@ -593,7 +599,7 @@ func Test_objectMapFunc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := objectMapFunc(tt.obj); !reflect.DeepEqual(got, tt.want) { + if got := objectMapFunc(ctx, tt.obj); !reflect.DeepEqual(got, tt.want) { t.Errorf("Test_objectMapFunc() = %v, want %v", got, tt.want) } }) @@ -601,6 +607,7 @@ func Test_objectMapFunc(t *testing.T) { } func Test_endpointSliceMapFunc(t *testing.T) { + ctx := context.Background() tests := []struct { name string obj client.Object @@ -637,7 +644,7 @@ func Test_endpointSliceMapFunc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := endpointSliceMapFunc(tt.obj); !reflect.DeepEqual(got, tt.want) { + if got := endpointSliceMapFunc(ctx, tt.obj); !reflect.DeepEqual(got, tt.want) { t.Errorf("Test_endpointSliceMapFunc() = %v, want %v", got, tt.want) } }) @@ -696,20 +703,21 @@ func TestClusterSetMapFunc_ServiceExport(t *testing.T) { }, }, } + ctx := context.Background() fakeClient := fake.NewClientBuilder().WithScheme(common.TestScheme).WithObjects(clusterSet).WithLists(serviceExports).Build() r := NewServiceExportReconciler(fakeClient, common.TestScheme, nil, "PodIP", true, clusterSet.Namespace) - requests := r.clusterSetMapFunc(clusterSet) + requests := r.clusterSetMapFunc(ctx, clusterSet) assert.Equal(t, expectedReqs, requests) r = NewServiceExportReconciler(fakeClient, common.TestScheme, nil, "PodIP", true, "mismatch_ns") - requests = r.clusterSetMapFunc(clusterSet) + requests = r.clusterSetMapFunc(ctx, clusterSet) assert.Equal(t, []reconcile.Request{}, requests) // non-existing ClusterSet r = 
NewServiceExportReconciler(fakeClient, common.TestScheme, nil, "PodIP", true, "default") r.installedSvcs.Add(&svcInfo{name: "nginx-stale", namespace: "default"}) r.installedEps.Add(&epInfo{name: "nginx-stale", namespace: "default"}) - requests = r.clusterSetMapFunc(clusterSet2) + requests = r.clusterSetMapFunc(ctx, clusterSet2) assert.Equal(t, []reconcile.Request{}, requests) assert.Equal(t, 0, len(r.installedSvcs.List())) assert.Equal(t, 0, len(r.installedEps.List())) diff --git a/multicluster/controllers/multicluster/member/stale_controller.go b/multicluster/controllers/multicluster/member/stale_controller.go index cc1a62444c8..fcd4d0482df 100644 --- a/multicluster/controllers/multicluster/member/stale_controller.go +++ b/multicluster/controllers/multicluster/member/stale_controller.go @@ -371,7 +371,7 @@ func (c *StaleResCleanupController) Run(stopCh <-chan struct{}) { klog.InfoS("Starting StaleResCleanupController") defer klog.InfoS("Shutting down StaleResCleanupController") - ctx, _ := wait.ContextForChannel(stopCh) + ctx := wait.ContextForChannel(stopCh) go func() { retry.OnError(common.CleanUpRetry, func(err error) bool { return true }, diff --git a/multicluster/hack/update-codegen.sh b/multicluster/hack/update-codegen.sh index 2aff4bdbe22..42a8bfab588 100755 --- a/multicluster/hack/update-codegen.sh +++ b/multicluster/hack/update-codegen.sh @@ -18,7 +18,7 @@ set -o errexit set -o pipefail ANTREA_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../" && pwd )" -IMAGE_NAME="antrea/codegen:kubernetes-1.26.4-build.1" +IMAGE_NAME="antrea/codegen:kubernetes-1.29.0" # Recent versions of Git will not access .git directories which are owned by # another user (as a security measure), unless the directories are explicitly diff --git a/multicluster/test/integration/suite_test.go b/multicluster/test/integration/suite_test.go index fa795fc0802..a36fc8ff71e 100644 --- a/multicluster/test/integration/suite_test.go +++ b/multicluster/test/integration/suite_test.go @@ -134,7 
+134,7 @@ var _ = BeforeSuite(func() { k8sServerURL = testEnv.Config.Host stopCh := signals.RegisterSignalHandlers() - ctx, _ := wait.ContextForChannel(stopCh) + ctx := wait.ContextForChannel(stopCh) By("Creating MemberClusterSetReconciler") k8sClient.Create(ctx, leaderNS) diff --git a/multicluster/test/mocks/mock_controller_runtime_manager.go b/multicluster/test/mocks/mock_controller_runtime_manager.go index 193198fea01..b1af80f9eec 100644 --- a/multicluster/test/mocks/mock_controller_runtime_manager.go +++ b/multicluster/test/mocks/mock_controller_runtime_manager.go @@ -32,7 +32,7 @@ import ( record "k8s.io/client-go/tools/record" cache "sigs.k8s.io/controller-runtime/pkg/cache" client "sigs.k8s.io/controller-runtime/pkg/client" - v1alpha1 "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + config "sigs.k8s.io/controller-runtime/pkg/config" healthz "sigs.k8s.io/controller-runtime/pkg/healthz" manager "sigs.k8s.io/controller-runtime/pkg/manager" webhook "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -70,51 +70,37 @@ func (m *MockManager) Add(arg0 manager.Runnable) error { } // Add indicates an expected call of Add. -func (mr *MockManagerMockRecorder) Add(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Add(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockManager)(nil).Add), arg0) } // AddHealthzCheck mocks base method. -func (m *MockManager) AddHealthzCheck(arg0 string, arg1 healthz.Checker) error { +func (m *MockManager) AddHealthzCheck(name string, check healthz.Checker) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddHealthzCheck", arg0, arg1) + ret := m.ctrl.Call(m, "AddHealthzCheck", name, check) ret0, _ := ret[0].(error) return ret0 } // AddHealthzCheck indicates an expected call of AddHealthzCheck. 
-func (mr *MockManagerMockRecorder) AddHealthzCheck(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) AddHealthzCheck(name, check any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddHealthzCheck", reflect.TypeOf((*MockManager)(nil).AddHealthzCheck), arg0, arg1) -} - -// AddMetricsExtraHandler mocks base method. -func (m *MockManager) AddMetricsExtraHandler(arg0 string, arg1 http.Handler) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddMetricsExtraHandler", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddMetricsExtraHandler indicates an expected call of AddMetricsExtraHandler. -func (mr *MockManagerMockRecorder) AddMetricsExtraHandler(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddMetricsExtraHandler", reflect.TypeOf((*MockManager)(nil).AddMetricsExtraHandler), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddHealthzCheck", reflect.TypeOf((*MockManager)(nil).AddHealthzCheck), name, check) } // AddReadyzCheck mocks base method. -func (m *MockManager) AddReadyzCheck(arg0 string, arg1 healthz.Checker) error { +func (m *MockManager) AddReadyzCheck(name string, check healthz.Checker) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddReadyzCheck", arg0, arg1) + ret := m.ctrl.Call(m, "AddReadyzCheck", name, check) ret0, _ := ret[0].(error) return ret0 } // AddReadyzCheck indicates an expected call of AddReadyzCheck. 
-func (mr *MockManagerMockRecorder) AddReadyzCheck(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) AddReadyzCheck(name, check any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddReadyzCheck", reflect.TypeOf((*MockManager)(nil).AddReadyzCheck), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddReadyzCheck", reflect.TypeOf((*MockManager)(nil).AddReadyzCheck), name, check) } // Elected mocks base method. @@ -188,10 +174,10 @@ func (mr *MockManagerMockRecorder) GetConfig() *gomock.Call { } // GetControllerOptions mocks base method. -func (m *MockManager) GetControllerOptions() v1alpha1.ControllerConfigurationSpec { +func (m *MockManager) GetControllerOptions() config.Controller { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetControllerOptions") - ret0, _ := ret[0].(v1alpha1.ControllerConfigurationSpec) + ret0, _ := ret[0].(config.Controller) return ret0 } @@ -202,17 +188,17 @@ func (mr *MockManagerMockRecorder) GetControllerOptions() *gomock.Call { } // GetEventRecorderFor mocks base method. -func (m *MockManager) GetEventRecorderFor(arg0 string) record.EventRecorder { +func (m *MockManager) GetEventRecorderFor(name string) record.EventRecorder { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetEventRecorderFor", arg0) + ret := m.ctrl.Call(m, "GetEventRecorderFor", name) ret0, _ := ret[0].(record.EventRecorder) return ret0 } // GetEventRecorderFor indicates an expected call of GetEventRecorderFor. 
-func (mr *MockManagerMockRecorder) GetEventRecorderFor(arg0 interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetEventRecorderFor(name any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEventRecorderFor", reflect.TypeOf((*MockManager)(nil).GetEventRecorderFor), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEventRecorderFor", reflect.TypeOf((*MockManager)(nil).GetEventRecorderFor), name) } // GetFieldIndexer mocks base method. @@ -229,6 +215,20 @@ func (mr *MockManagerMockRecorder) GetFieldIndexer() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFieldIndexer", reflect.TypeOf((*MockManager)(nil).GetFieldIndexer)) } +// GetHTTPClient mocks base method. +func (m *MockManager) GetHTTPClient() *http.Client { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHTTPClient") + ret0, _ := ret[0].(*http.Client) + return ret0 +} + +// GetHTTPClient indicates an expected call of GetHTTPClient. +func (mr *MockManagerMockRecorder) GetHTTPClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHTTPClient", reflect.TypeOf((*MockManager)(nil).GetHTTPClient)) +} + // GetLogger mocks base method. func (m *MockManager) GetLogger() logr.Logger { m.ctrl.T.Helper() @@ -272,10 +272,10 @@ func (mr *MockManagerMockRecorder) GetScheme() *gomock.Call { } // GetWebhookServer mocks base method. -func (m *MockManager) GetWebhookServer() *webhook.Server { +func (m *MockManager) GetWebhookServer() webhook.Server { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetWebhookServer") - ret0, _ := ret[0].(*webhook.Server) + ret0, _ := ret[0].(webhook.Server) return ret0 } @@ -285,22 +285,45 @@ func (mr *MockManagerMockRecorder) GetWebhookServer() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWebhookServer", reflect.TypeOf((*MockManager)(nil).GetWebhookServer)) } -// SetFields mocks base method. 
-func (m *MockManager) SetFields(arg0 interface{}) error { +// Start mocks base method. +func (m *MockManager) Start(ctx context.Context) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetFields", arg0) + ret := m.ctrl.Call(m, "Start", ctx) ret0, _ := ret[0].(error) return ret0 } -// SetFields indicates an expected call of SetFields. -func (mr *MockManagerMockRecorder) SetFields(arg0 interface{}) *gomock.Call { +// Start indicates an expected call of Start. +func (mr *MockManagerMockRecorder) Start(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFields", reflect.TypeOf((*MockManager)(nil).SetFields), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockManager)(nil).Start), ctx) +} + +// MockRunnable is a mock of Runnable interface. +type MockRunnable struct { + ctrl *gomock.Controller + recorder *MockRunnableMockRecorder +} + +// MockRunnableMockRecorder is the mock recorder for MockRunnable. +type MockRunnableMockRecorder struct { + mock *MockRunnable +} + +// NewMockRunnable creates a new mock instance. +func NewMockRunnable(ctrl *gomock.Controller) *MockRunnable { + mock := &MockRunnable{ctrl: ctrl} + mock.recorder = &MockRunnableMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRunnable) EXPECT() *MockRunnableMockRecorder { + return m.recorder } // Start mocks base method. -func (m *MockManager) Start(arg0 context.Context) error { +func (m *MockRunnable) Start(arg0 context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Start", arg0) ret0, _ := ret[0].(error) @@ -308,7 +331,44 @@ func (m *MockManager) Start(arg0 context.Context) error { } // Start indicates an expected call of Start. 
-func (mr *MockManagerMockRecorder) Start(arg0 interface{}) *gomock.Call { +func (mr *MockRunnableMockRecorder) Start(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockManager)(nil).Start), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockRunnable)(nil).Start), arg0) +} + +// MockLeaderElectionRunnable is a mock of LeaderElectionRunnable interface. +type MockLeaderElectionRunnable struct { + ctrl *gomock.Controller + recorder *MockLeaderElectionRunnableMockRecorder +} + +// MockLeaderElectionRunnableMockRecorder is the mock recorder for MockLeaderElectionRunnable. +type MockLeaderElectionRunnableMockRecorder struct { + mock *MockLeaderElectionRunnable } + +// NewMockLeaderElectionRunnable creates a new mock instance. +func NewMockLeaderElectionRunnable(ctrl *gomock.Controller) *MockLeaderElectionRunnable { + mock := &MockLeaderElectionRunnable{ctrl: ctrl} + mock.recorder = &MockLeaderElectionRunnableMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockLeaderElectionRunnable) EXPECT() *MockLeaderElectionRunnableMockRecorder { + return m.recorder +} + +// NeedLeaderElection mocks base method. +func (m *MockLeaderElectionRunnable) NeedLeaderElection() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NeedLeaderElection") + ret0, _ := ret[0].(bool) + return ret0 +} + +// NeedLeaderElection indicates an expected call of NeedLeaderElection. 
+func (mr *MockLeaderElectionRunnableMockRecorder) NeedLeaderElection() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NeedLeaderElection", reflect.TypeOf((*MockLeaderElectionRunnable)(nil).NeedLeaderElection)) +} \ No newline at end of file diff --git a/pkg/agent/agent.go b/pkg/agent/agent.go index 350770574bc..57db78cae5c 100644 --- a/pkg/agent/agent.go +++ b/pkg/agent/agent.go @@ -606,12 +606,13 @@ func (i *Initializer) initOpenFlowPipeline() error { // happen that ovsBridgeClient's connection is not ready when ofClient completes flow replay. We retry it // with a timeout that is longer time than ovsBridgeClient's maximum connecting retry interval (8 seconds) // to ensure the flag can be removed successfully. - err = wait.PollImmediate(200*time.Millisecond, 10*time.Second, func() (done bool, err error) { - if err := i.FlowRestoreComplete(); err != nil { - return false, nil - } - return true, nil - }) + err = wait.PollUntilContextTimeout(context.TODO(), 200*time.Millisecond, 10*time.Second, true, + func(ctx context.Context) (done bool, err error) { + if err := i.FlowRestoreComplete(); err != nil { + return false, nil + } + return true, nil + }) // This shouldn't happen unless OVS is disconnected again after replaying flows. If it happens, we will try // to clean up the config again so an error log should be fine. if err != nil { @@ -639,21 +640,22 @@ func (i *Initializer) FlowRestoreComplete() error { } // "flow-restore-wait" is supposed to be true here. - err := wait.PollImmediate(200*time.Millisecond, 2*time.Second, func() (done bool, err error) { - flowRestoreWait, err := getFlowRestoreWait() - if err != nil { - return false, err - } - if !flowRestoreWait { - // If the log is seen and the config becomes true later, we should look at why "ovs-vsctl set --no-wait" - // doesn't take effect on ovsdb immediately. 
- klog.Warning("flow-restore-wait was not true before the delete call was made, will retry") - return false, nil - } - return true, nil - }) + err := wait.PollUntilContextTimeout(context.TODO(), 200*time.Millisecond, 2*time.Second, true, + func(ctx context.Context) (done bool, err error) { + flowRestoreWait, err := getFlowRestoreWait() + if err != nil { + return false, err + } + if !flowRestoreWait { + // If the log is seen and the config becomes true later, we should look at why "ovs-vsctl set --no-wait" + // doesn't take effect on ovsdb immediately. + klog.Warning("flow-restore-wait was not true before the delete call was made, will retry") + return false, nil + } + return true, nil + }) if err != nil { - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { // This could happen if the method is triggered by OVS disconnection event, in which OVS doesn't restart. klog.Info("flow-restore-wait was not true, skip cleaning it up") return nil @@ -913,24 +915,25 @@ func (i *Initializer) setTunnelCsum(tunnelPortName string, enable bool) error { // host gateway interface. func (i *Initializer) initK8sNodeLocalConfig(nodeName string) error { var node *v1.Node - if err := wait.PollImmediate(5*time.Second, getNodeTimeout, func() (bool, error) { - var err error - node, err = i.client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) - if err != nil { - return false, fmt.Errorf("failed to get Node with name %s from K8s: %w", nodeName, err) - } + if err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, getNodeTimeout, true, + func(ctx context.Context) (bool, error) { + var err error + node, err = i.client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("failed to get Node with name %s from K8s: %w", nodeName, err) + } - // Except in networkPolicyOnly mode, we need a PodCIDR for the Node. 
- if !i.networkConfig.TrafficEncapMode.IsNetworkPolicyOnly() { - // Validate that PodCIDR has been configured. - if node.Spec.PodCIDRs == nil && node.Spec.PodCIDR == "" { - klog.InfoS("Waiting for Node PodCIDR configuration to complete", "nodeName", nodeName) - return false, nil + // Except in networkPolicyOnly mode, we need a PodCIDR for the Node. + if !i.networkConfig.TrafficEncapMode.IsNetworkPolicyOnly() { + // Validate that PodCIDR has been configured. + if node.Spec.PodCIDRs == nil && node.Spec.PodCIDR == "" { + klog.InfoS("Waiting for Node PodCIDR configuration to complete", "nodeName", nodeName) + return false, nil + } } - } - return true, nil - }); err != nil { - if err == wait.ErrWaitTimeout { + return true, nil + }); err != nil { + if wait.Interrupted(err) { klog.ErrorS(err, "Spec.PodCIDR is empty for Node. Please make sure --allocate-node-cidrs is enabled "+ "for kube-controller-manager and --cluster-cidr specifies a sufficient CIDR range, or nodeIPAM is "+ "enabled for antrea-controller", "nodeName", nodeName) @@ -1313,13 +1316,13 @@ func (i *Initializer) initNodeLocalConfig() error { func (i *Initializer) initVMLocalConfig(nodeName string) error { var en *v1alpha1.ExternalNode klog.InfoS("Initializing VM config", "ExternalNode", nodeName) - if err := wait.PollImmediateUntil(10*time.Second, func() (done bool, err error) { + if err := wait.PollUntilContextCancel(wait.ContextForChannel(i.stopCh), 10*time.Second, true, func(ctx context.Context) (done bool, err error) { en, err = i.crdClient.CrdV1alpha1().ExternalNodes(i.externalNodeNamespace).Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { return false, nil } return true, nil - }, i.stopCh); err != nil { + }); err != nil { klog.Info("Stopped waiting for ExternalNode") return err } diff --git a/pkg/agent/agent_linux.go b/pkg/agent/agent_linux.go index cc150fa5f24..bc0b4fbeca9 100644 --- a/pkg/agent/agent_linux.go +++ b/pkg/agent/agent_linux.go @@ -18,6 +18,7 @@ package agent import ( + 
"context" "fmt" "net" "time" @@ -236,16 +237,17 @@ func (i *Initializer) ConnectUplinkToOVSBridge() error { // Move network configuration of uplink interface to OVS bridge local interface. // The net configuration of uplink will be restored by RestoreOVSBridge when shutting down. - wait.PollImmediate(100*time.Millisecond, 10000*time.Millisecond, func() (bool, error) { - // Wait a few seconds for OVS bridge local port. - link, err := netlink.LinkByName(uplinkName) - if err != nil { - klog.V(4).InfoS("OVS bridge local port is not ready", "port", uplinkName, "err", err) - return false, nil - } - klog.InfoS("OVS bridge local port is ready", "type", link.Type(), "attrs", link.Attrs()) - return true, nil - }) + wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, 10000*time.Millisecond, true, + func(ctx context.Context) (bool, error) { + // Wait a few seconds for OVS bridge local port. + link, err := netlink.LinkByName(uplinkName) + if err != nil { + klog.V(4).InfoS("OVS bridge local port is not ready", "port", uplinkName, "err", err) + return false, nil + } + klog.InfoS("OVS bridge local port is ready", "type", link.Type(), "attrs", link.Attrs()) + return true, nil + }) localLink, err := netlink.LinkByName(uplinkName) if err != nil { return err @@ -359,7 +361,7 @@ func (i *Initializer) prepareL7EngineInterfaces() error { if err != nil { return err } - if pollErr := wait.PollImmediate(time.Second, 5*time.Second, func() (bool, error) { + if pollErr := wait.PollUntilContextTimeout(context.TODO(), time.Second, 5*time.Second, true, func(ctx context.Context) (bool, error) { _, _, err := util.SetLinkUp(portName) if err == nil { return true, nil diff --git a/pkg/agent/agent_test.go b/pkg/agent/agent_test.go index 11c449aee54..be080849de6 100644 --- a/pkg/agent/agent_test.go +++ b/pkg/agent/agent_test.go @@ -949,7 +949,7 @@ func TestInitVMLocalConfig(t *testing.T) { name: "provided external Node unavailable", nodeName: "testNode", crdClient: 
fakeversioned.NewSimpleClientset(), - expectedErr: "timed out waiting for the condition", + expectedErr: "context canceled", }, } diff --git a/pkg/agent/apiserver/apiserver.go b/pkg/agent/apiserver/apiserver.go index d07cb43823a..ab16057f9cb 100644 --- a/pkg/agent/apiserver/apiserver.go +++ b/pkg/agent/apiserver/apiserver.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" k8sversion "k8s.io/apimachinery/pkg/version" + genericopenapi "k8s.io/apiserver/pkg/endpoints/openapi" "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/healthz" @@ -45,7 +46,9 @@ import ( agentquerier "antrea.io/antrea/pkg/agent/querier" systeminstall "antrea.io/antrea/pkg/apis/system/install" systemv1beta1 "antrea.io/antrea/pkg/apis/system/v1beta1" + "antrea.io/antrea/pkg/apiserver" "antrea.io/antrea/pkg/apiserver/handlers/loglevel" + "antrea.io/antrea/pkg/apiserver/openapi" "antrea.io/antrea/pkg/apiserver/registry/system/supportbundle" "antrea.io/antrea/pkg/ovs/ovsctl" "antrea.io/antrea/pkg/querier" @@ -199,6 +202,9 @@ func newConfig(aq agentquerier.AgentQuerier, return fmt.Errorf("disconnected from OFSwitch") }) serverConfig.LivezChecks = append(serverConfig.LivezChecks, ovsConnCheck) + serverConfig.OpenAPIV3Config = genericapiserver.DefaultOpenAPIV3Config( + openapi.GetOpenAPIDefinitions, + genericopenapi.NewDefinitionNamer(apiserver.Scheme)) completedServerCfg := serverConfig.Complete(nil) return &completedServerCfg, nil diff --git a/pkg/agent/cniserver/interface_configuration_windows.go b/pkg/agent/cniserver/interface_configuration_windows.go index a083354369d..1c83c6a4032 100644 --- a/pkg/agent/cniserver/interface_configuration_windows.go +++ b/pkg/agent/cniserver/interface_configuration_windows.go @@ -18,6 +18,7 @@ package cniserver import ( + "context" "errors" "fmt" "net" @@ -505,27 +506,28 @@ func (ic *ifConfigurator) addPostInterfaceCreateHook(containerID, 
endpointName s go func() { ifaceName := fmt.Sprintf("vEthernet (%s)", endpointName) var err error - pollErr := wait.PollImmediate(100*time.Millisecond, 60*time.Second, func() (bool, error) { - containerAccess.lockContainer(containerID) - defer containerAccess.unlockContainer(containerID) - currentEP, ok := ic.getEndpoint(endpointName) - if !ok { - klog.InfoS("HNSEndpoint doesn't exist in cache, exit current goroutine", "HNSEndpoint", endpointName) + pollErr := wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, 60*time.Second, true, + func(ctx context.Context) (bool, error) { + containerAccess.lockContainer(containerID) + defer containerAccess.unlockContainer(containerID) + currentEP, ok := ic.getEndpoint(endpointName) + if !ok { + klog.InfoS("HNSEndpoint doesn't exist in cache, exit current goroutine", "HNSEndpoint", endpointName) + return true, nil + } + if currentEP.Id != expectedEP.Id { + klog.InfoS("Detected HNSEndpoint change, exit current goroutine", "HNSEndpoint", endpointName) + return true, nil + } + if !hostInterfaceExistsFunc(ifaceName) { + klog.V(2).InfoS("Waiting for interface to be created", "interface", ifaceName) + return false, nil + } + if err = hook(); err != nil { + return false, err + } return true, nil - } - if currentEP.Id != expectedEP.Id { - klog.InfoS("Detected HNSEndpoint change, exit current goroutine", "HNSEndpoint", endpointName) - return true, nil - } - if !hostInterfaceExistsFunc(ifaceName) { - klog.V(2).InfoS("Waiting for interface to be created", "interface", ifaceName) - return false, nil - } - if err = hook(); err != nil { - return false, err - } - return true, nil - }) + }) if pollErr != nil { if err != nil { diff --git a/pkg/agent/cniserver/ipam/antrea_ipam.go b/pkg/agent/cniserver/ipam/antrea_ipam.go index 3aa3cfc1298..7172df15aab 100644 --- a/pkg/agent/cniserver/ipam/antrea_ipam.go +++ b/pkg/agent/cniserver/ipam/antrea_ipam.go @@ -15,6 +15,7 @@ package ipam import ( + "context" "fmt" "net" "sync" @@ -353,7 
+354,7 @@ func (d *AntreaIPAM) owns(k8sArgs *types.K8sArgs) (mineType, *poolallocator.IPPo } func (d *AntreaIPAM) waitForControllerReady() error { - err := wait.PollImmediate(500*time.Millisecond, 5*time.Second, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) { d.controllerMutex.RLock() defer d.controllerMutex.RUnlock() if d.controller == nil { diff --git a/pkg/agent/cniserver/ipam/antrea_ipam_test.go b/pkg/agent/cniserver/ipam/antrea_ipam_test.go index df517bce395..042a6dd0cf0 100644 --- a/pkg/agent/cniserver/ipam/antrea_ipam_test.go +++ b/pkg/agent/cniserver/ipam/antrea_ipam_test.go @@ -15,6 +15,7 @@ package ipam import ( + "context" "fmt" "regexp" "sync" @@ -376,7 +377,7 @@ func TestAntreaIPAMDriver(t *testing.T) { podNamespace := string(k8sArgsMap[test].K8S_POD_NAMESPACE) podName := string(k8sArgsMap[test].K8S_POD_NAME) - err = wait.Poll(time.Millisecond*200, time.Second, func() (bool, error) { + err = wait.PollUntilContextTimeout(context.Background(), time.Millisecond*200, time.Second, false, func(ctx context.Context) (bool, error) { ipPool, _ := antreaIPAMController.ipPoolLister.Get(podNamespace) found := false for _, ipAddress := range ipPool.Status.IPAddresses { @@ -410,7 +411,7 @@ func TestAntreaIPAMDriver(t *testing.T) { podNamespace := string(k8sArgsMap[test].K8S_POD_NAMESPACE) podName := string(k8sArgsMap[test].K8S_POD_NAME) - err = wait.Poll(time.Millisecond*200, time.Second, func() (bool, error) { + err = wait.PollUntilContextTimeout(context.Background(), time.Millisecond*200, time.Second, false, func(ctx context.Context) (bool, error) { ipPool, _ := antreaIPAMController.ipPoolLister.Get(podNamespace) found := false for _, ipAddress := range ipPool.Status.IPAddresses { @@ -492,14 +493,15 @@ func TestAntreaIPAMDriver(t *testing.T) { testDel("pear10", false) // Verify last update was propagated to informer - err = 
wait.PollImmediate(100*time.Millisecond, 1*time.Second, func() (bool, error) { - owns, err := testDriver.Check(cniArgsMap["orange2"], k8sArgsMap["orange2"], networkConfig) - if err != nil { - // container already relelased - return true, nil - } - return !owns, nil - }) + err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 1*time.Second, true, + func(ctx context.Context) (bool, error) { + owns, err := testDriver.Check(cniArgsMap["orange2"], k8sArgsMap["orange2"], networkConfig) + if err != nil { + // container already released + return true, nil + } + return !owns, nil + }) require.NoError(t, err, "orange2 pod was not released") @@ -575,7 +577,7 @@ func TestSecondaryNetworkAdd(t *testing.T) { }, }, }, - expectedRes: fmt.Errorf("Antrea IPAM driver not ready: timed out waiting for the condition"), + expectedRes: fmt.Errorf("Antrea IPAM driver not ready: context deadline exceeded"), }, { name: "Add secondary network successfully", diff --git a/pkg/agent/cniserver/server_windows_test.go b/pkg/agent/cniserver/server_windows_test.go index 600adb5bf8e..0fdd150315c 100644 --- a/pkg/agent/cniserver/server_windows_test.go +++ b/pkg/agent/cniserver/server_windows_test.go @@ -540,13 +540,14 @@ func TestCmdAdd(t *testing.T) { waiter.wait() // Wait for the completion of async function "setInterfaceMTUFunc", otherwise it may lead to the // race condition failure.
- wait.PollImmediate(time.Millisecond*10, time.Second, func() (done bool, err error) { - mtuSet, exist := hostIfaces.Load(ovsPortName) - if !exist { - return false, nil - } - return mtuSet.(bool), nil - }) + wait.PollUntilContextTimeout(context.Background(), time.Millisecond*10, time.Second, true, + func(ctx context.Context) (done bool, err error) { + mtuSet, exist := hostIfaces.Load(ovsPortName) + if !exist { + return false, nil + } + return mtuSet.(bool), nil + }) } waiter.close() }) diff --git a/pkg/agent/controller/egress/egress_controller_test.go b/pkg/agent/controller/egress/egress_controller_test.go index 8c66a0f2a1b..194c3c76a1e 100644 --- a/pkg/agent/controller/egress/egress_controller_test.go +++ b/pkg/agent/controller/egress/egress_controller_test.go @@ -1118,16 +1118,17 @@ func TestSyncEgress(t *testing.T) { if tt.newLocalIPs != nil { c.localIPDetector = &fakeLocalIPDetector{localIPs: tt.newLocalIPs} } - assert.NoError(t, wait.Poll(time.Millisecond*100, time.Second, func() (done bool, err error) { + assert.Eventually(t, func() bool { if tt.newExternalIPPool != nil { pool, _ := c.externalIPPoolLister.Get(tt.newExternalIPPool.Name) if !reflect.DeepEqual(pool, tt.newExternalIPPool) { - return false, nil + return false } } egress, _ := c.egressLister.Get(tt.newEgress.Name) - return reflect.DeepEqual(egress, tt.newEgress), nil - })) + return reflect.DeepEqual(egress, tt.newEgress) + }, time.Second, 100*time.Millisecond) + c.egressIPScheduler.schedule() err = c.syncEgress(tt.newEgress.Name) assert.NoError(t, err) @@ -1184,9 +1185,10 @@ func TestPodUpdateShouldSyncEgress(t *testing.T) { PodNamespace: "ns1", } c.podUpdateChannel.Notify(ev) - require.NoError(t, wait.PollImmediate(10*time.Millisecond, time.Second, func() (done bool, err error) { - return c.queue.Len() == 1, nil - })) + require.NoError(t, wait.PollUntilContextTimeout(context.Background(), 10*time.Millisecond, time.Second, true, + func(ctx context.Context) (done bool, err error) { + return 
c.queue.Len() == 1, nil + })) item, _ = c.queue.Get() require.Equal(t, egress.Name, item) require.NoError(t, c.syncEgress(item.(string))) @@ -1216,9 +1218,10 @@ func TestExternalIPPoolUpdateShouldSyncEgress(t *testing.T) { c.informerFactory.WaitForCacheSync(stopCh) assertItemsInQueue := func(items ...string) { - require.NoError(t, wait.Poll(10*time.Millisecond, time.Second, func() (done bool, err error) { - return c.queue.Len() == len(items), nil - })) + require.NoError(t, wait.PollUntilContextTimeout(context.Background(), 10*time.Millisecond, time.Second, false, + func(ctx context.Context) (done bool, err error) { + return c.queue.Len() == len(items), nil + })) expectedItems := sets.New[string](items...) for i := 0; i < len(items); i++ { item, _ := c.queue.Get() @@ -1325,10 +1328,10 @@ func TestSyncOverlappingEgress(t *testing.T) { c.mockOFClient.EXPECT().UninstallPodSNATFlows(uint32(1)) c.mockOFClient.EXPECT().UninstallPodSNATFlows(uint32(2)) c.crdClient.CrdV1beta1().Egresses().Delete(context.TODO(), egress1.Name, metav1.DeleteOptions{}) - assert.NoError(t, wait.Poll(time.Millisecond*100, time.Second, func() (bool, error) { + assert.Eventually(t, func() bool { _, err := c.egressLister.Get(egress1.Name) - return err != nil, nil - })) + return err != nil + }, time.Second, time.Millisecond*100) checkQueueItemExistence(t, c.queue, egress1.Name) c.mockIPAssigner.EXPECT().UnassignIP(fakeLocalEgressIP1) err = c.syncEgress(egress1.Name) @@ -1352,10 +1355,10 @@ func TestSyncOverlappingEgress(t *testing.T) { c.mockOFClient.EXPECT().UninstallPodSNATFlows(uint32(3)) c.crdClient.CrdV1beta1().Egresses().Delete(context.TODO(), egress2.Name, metav1.DeleteOptions{}) c.mockIPAssigner.EXPECT().UnassignIP(fakeRemoteEgressIP1) - assert.NoError(t, wait.Poll(time.Millisecond*100, time.Second, func() (bool, error) { + assert.Eventually(t, func() bool { _, err := c.egressLister.Get(egress2.Name) - return err != nil, nil - })) + return err != nil + }, time.Second, time.Millisecond*100) 
checkQueueItemExistence(t, c.queue, egress2.Name) err = c.syncEgress(egress2.Name) assert.NoError(t, err) @@ -1368,10 +1371,10 @@ func TestSyncOverlappingEgress(t *testing.T) { c.mockOFClient.EXPECT().UninstallPodSNATFlows(uint32(4)) c.crdClient.CrdV1beta1().Egresses().Delete(context.TODO(), egress3.Name, metav1.DeleteOptions{}) c.mockIPAssigner.EXPECT().UnassignIP(fakeLocalEgressIP1) - assert.NoError(t, wait.Poll(time.Millisecond*100, time.Second, func() (bool, error) { + assert.Eventually(t, func() bool { _, err := c.egressLister.Get(egress3.Name) - return err != nil, nil - })) + return err != nil + }, time.Second, time.Millisecond*100) checkQueueItemExistence(t, c.queue, egress3.Name) err = c.syncEgress(egress3.Name) assert.NoError(t, err) diff --git a/pkg/agent/controller/l7flowexporter/l7_flow_export_controller_test.go b/pkg/agent/controller/l7flowexporter/l7_flow_export_controller_test.go index 27429df4a0a..3115c5ef254 100644 --- a/pkg/agent/controller/l7flowexporter/l7_flow_export_controller_test.go +++ b/pkg/agent/controller/l7flowexporter/l7_flow_export_controller_test.go @@ -159,7 +159,7 @@ func newPodInterface(podName, podNamespace string, ofPort int32) *interfacestore } func waitEvents(t *testing.T, expectedEvents int, c *fakeController) { - require.NoError(t, wait.Poll(10*time.Millisecond, 5*time.Second, func() (done bool, err error) { + require.NoError(t, wait.PollUntilContextTimeout(context.Background(), 10*time.Millisecond, 5*time.Second, false, func(ctx context.Context) (done bool, err error) { return c.queue.Len() == expectedEvents, nil })) } diff --git a/pkg/agent/controller/networkpolicy/allocator_test.go b/pkg/agent/controller/networkpolicy/allocator_test.go index 15b12033718..b222a9a2926 100644 --- a/pkg/agent/controller/networkpolicy/allocator_test.go +++ b/pkg/agent/controller/networkpolicy/allocator_test.go @@ -15,6 +15,7 @@ package networkpolicy import ( + "context" "fmt" "testing" "time" @@ -234,9 +235,10 @@ func TestIdAllocatorWorker(t 
*testing.T) { fakeClock.SetTime(expectedDeleteTime.Add(-10 * time.Millisecond)) // We wait for a small duration and ensure that the rule is not deleted. - err := wait.PollImmediate(10*time.Millisecond, 100*time.Millisecond, func() (bool, error) { - return ruleHasBeenDeleted(), nil - }) + err := wait.PollUntilContextTimeout(context.Background(), 10*time.Millisecond, 100*time.Millisecond, true, + func(ctx context.Context) (bool, error) { + return ruleHasBeenDeleted(), nil + }) require.Error(t, err, "Rule ID was unexpectedly released") _, exists, err := a.getRuleFromAsyncCache(tt.expectedID) require.NoError(t, err) @@ -244,9 +246,10 @@ func TestIdAllocatorWorker(t *testing.T) { fakeClock.SetTime(expectedDeleteTime.Add(10 * time.Millisecond)) - err = wait.PollImmediate(10*time.Millisecond, 1*time.Second, func() (bool, error) { - return ruleHasBeenDeleted(), nil - }) + err = wait.PollUntilContextTimeout(context.Background(), 10*time.Millisecond, 1*time.Second, true, + func(ctx context.Context) (bool, error) { + return ruleHasBeenDeleted(), nil + }) require.NoError(t, err, "Rule ID was not released") _, exists, err = a.getRuleFromAsyncCache(tt.expectedID) require.NoError(t, err) diff --git a/pkg/agent/controller/networkpolicy/l7engine/reconciler.go b/pkg/agent/controller/networkpolicy/l7engine/reconciler.go index 1eb6b132f03..4d489199fdd 100644 --- a/pkg/agent/controller/networkpolicy/l7engine/reconciler.go +++ b/pkg/agent/controller/networkpolicy/l7engine/reconciler.go @@ -16,6 +16,7 @@ package l7engine import ( "bytes" + "context" "encoding/json" "fmt" "os" @@ -494,7 +495,7 @@ func (r *Reconciler) startSuricata() { r.startSuricataFn() // Wait Suricata command socket file to be ready. 
- err = wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) { + err = wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) { if _, err = defaultFS.Stat(suricataCommandSocket); err != nil { return false, nil } diff --git a/pkg/agent/controller/networkpolicy/networkpolicy_controller.go b/pkg/agent/controller/networkpolicy/networkpolicy_controller.go index 5565dad1f2a..c4f22e5d8a7 100644 --- a/pkg/agent/controller/networkpolicy/networkpolicy_controller.go +++ b/pkg/agent/controller/networkpolicy/networkpolicy_controller.go @@ -594,7 +594,7 @@ func (c *Controller) SetDenyConnStore(denyConnStore *connections.DenyConnectionS // Run will not return until stopCh is closed. func (c *Controller) Run(stopCh <-chan struct{}) { attempts := 0 - if err := wait.PollImmediateUntil(200*time.Millisecond, func() (bool, error) { + if err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), 200*time.Millisecond, true, func(ctx context.Context) (bool, error) { if attempts%10 == 0 { klog.Info("Waiting for Antrea client to be ready") } @@ -603,7 +603,7 @@ func (c *Controller) Run(stopCh <-chan struct{}) { return false, nil } return true, nil - }, stopCh); err != nil { + }); err != nil { klog.Info("Stopped waiting for Antrea client") return } diff --git a/pkg/agent/controller/networkpolicy/status_controller_test.go b/pkg/agent/controller/networkpolicy/status_controller_test.go index 2aa778edd7f..6e06d915549 100644 --- a/pkg/agent/controller/networkpolicy/status_controller_test.go +++ b/pkg/agent/controller/networkpolicy/status_controller_test.go @@ -15,6 +15,7 @@ package networkpolicy import ( + "context" "fmt" "sync" "testing" @@ -138,13 +139,14 @@ func TestSyncStatusUpForUpdatedPolicy(t *testing.T) { statusController.SetRuleRealization(rule1.ID, policy.UID) matchGeneration := func(generation int64) error { - return wait.PollImmediate(100*time.Millisecond, 1*time.Second, func() 
(done bool, err error) { - status := statusControl.getNetworkPolicyStatus() - if status == nil { - return false, nil - } - return status.Nodes[0].Generation == generation, nil - }) + return wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 1*time.Second, true, + func(ctx context.Context) (done bool, err error) { + status := statusControl.getNetworkPolicyStatus() + if status == nil { + return false, nil + } + return status.Nodes[0].Generation == generation, nil + }) } assert.NoError(t, matchGeneration(policy.Generation), "The generation should be updated to %v but was not updated", policy.Generation) diff --git a/pkg/agent/controller/traceflow/packetin.go b/pkg/agent/controller/traceflow/packetin.go index c60ec7b639b..f67b8fdac3d 100644 --- a/pkg/agent/controller/traceflow/packetin.go +++ b/pkg/agent/controller/traceflow/packetin.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/retry" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "antrea.io/antrea/pkg/agent/openflow" crdv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1" @@ -477,7 +477,7 @@ func parseCapturedPacket(pktIn *ofctrl.PacketIn) *crdv1beta1.Packet { capturedPacket.IPHeader = &crdv1beta1.IPHeader{Protocol: int32(pkt.IPProto), TTL: int32(pkt.TTL), Flags: int32(pkt.IPFlags)} } if pkt.IPProto == protocol.Type_TCP { - capturedPacket.TransportHeader.TCP = &crdv1beta1.TCPHeader{SrcPort: int32(pkt.SourcePort), DstPort: int32(pkt.DestinationPort), Flags: pointer.Int32(int32(pkt.TCPFlags))} + capturedPacket.TransportHeader.TCP = &crdv1beta1.TCPHeader{SrcPort: int32(pkt.SourcePort), DstPort: int32(pkt.DestinationPort), Flags: ptr.To(int32(pkt.TCPFlags))} } else if pkt.IPProto == protocol.Type_UDP { capturedPacket.TransportHeader.UDP = &crdv1beta1.UDPHeader{SrcPort: int32(pkt.SourcePort), DstPort: int32(pkt.DestinationPort)} } else if pkt.IPProto == protocol.Type_ICMP || pkt.IPProto == protocol.Type_IPv6ICMP { diff --git 
a/pkg/agent/controller/traceflow/packetin_test.go b/pkg/agent/controller/traceflow/packetin_test.go index 9cf6748da13..bd6e97d30dc 100644 --- a/pkg/agent/controller/traceflow/packetin_test.go +++ b/pkg/agent/controller/traceflow/packetin_test.go @@ -28,7 +28,7 @@ import ( "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "antrea.io/antrea/pkg/agent/config" "antrea.io/antrea/pkg/agent/openflow" @@ -145,7 +145,7 @@ func TestParseCapturedPacket(t *testing.T) { SrcIP: tcpPktIn.NWSrc.String(), DstIP: tcpPktIn.NWDst.String(), Length: int32(tcpPktIn.Length), IPHeader: &crdv1beta1.IPHeader{Protocol: int32(tcpPktIn.Protocol), TTL: int32(tcpPktIn.TTL), Flags: int32(tcpPktIn.Flags)}, TransportHeader: crdv1beta1.TransportHeader{ - TCP: &crdv1beta1.TCPHeader{SrcPort: int32(tcp.PortSrc), DstPort: int32(tcp.PortDst), Flags: pointer.Int32(int32(tcp.Code))}, + TCP: &crdv1beta1.TCPHeader{SrcPort: int32(tcp.PortSrc), DstPort: int32(tcp.PortDst), Flags: ptr.To(int32(tcp.Code))}, }, } diff --git a/pkg/agent/controller/traceflow/traceflow_controller_test.go b/pkg/agent/controller/traceflow/traceflow_controller_test.go index f82afe0132e..1311d19d16c 100644 --- a/pkg/agent/controller/traceflow/traceflow_controller_test.go +++ b/pkg/agent/controller/traceflow/traceflow_controller_test.go @@ -31,7 +31,7 @@ import ( "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "antrea.io/antrea/pkg/agent/config" "antrea.io/antrea/pkg/agent/interfacestore" @@ -227,7 +227,7 @@ func TestPreparePacket(t *testing.T) { TCP: &crdv1beta1.TCPHeader{ SrcPort: 80, DstPort: 81, - Flags: pointer.Int32(11), + Flags: ptr.To(int32(11)), }, }, }, diff --git a/pkg/agent/controller/trafficcontrol/controller.go b/pkg/agent/controller/trafficcontrol/controller.go index e084b0e162f..745c173be4e 100644 --- 
a/pkg/agent/controller/trafficcontrol/controller.go +++ b/pkg/agent/controller/trafficcontrol/controller.go @@ -15,6 +15,7 @@ package trafficcontrol import ( + "context" "crypto/sha1" // #nosec G505: not used for security purposes "encoding/binary" "encoding/hex" @@ -589,16 +590,17 @@ func (c *Controller) createOVSInternalPort(portName string) (string, error) { if err != nil { return "", err } - if pollErr := wait.PollImmediate(time.Second, 5*time.Second, func() (bool, error) { - _, _, err := util.SetLinkUp(portName) - if err == nil { - return true, nil - } - if _, ok := err.(util.LinkNotFound); ok { - return false, nil - } - return false, err - }); pollErr != nil { + if pollErr := wait.PollUntilContextTimeout(context.TODO(), time.Second, 5*time.Second, true, + func(ctx context.Context) (bool, error) { + _, _, err := util.SetLinkUp(portName) + if err == nil { + return true, nil + } + if _, ok := err.(util.LinkNotFound); ok { + return false, nil + } + return false, err + }); pollErr != nil { return "", pollErr } return portUUID, nil diff --git a/pkg/agent/controller/trafficcontrol/controller_test.go b/pkg/agent/controller/trafficcontrol/controller_test.go index 0c5e67bbe1a..8187d802e0e 100644 --- a/pkg/agent/controller/trafficcontrol/controller_test.go +++ b/pkg/agent/controller/trafficcontrol/controller_test.go @@ -288,7 +288,7 @@ func generateTrafficControlState(direction v1alpha2.Direction, } func waitEvents(t *testing.T, expectedEvents int, c *fakeController) { - require.NoError(t, wait.Poll(10*time.Millisecond, 5*time.Second, func() (done bool, err error) { + require.NoError(t, wait.PollUntilContextTimeout(context.Background(), 10*time.Millisecond, 5*time.Second, false, func(ctx context.Context) (done bool, err error) { return c.queue.Len() == expectedEvents, nil })) } diff --git a/pkg/agent/externalnode/external_node_controller.go b/pkg/agent/externalnode/external_node_controller.go index ae28d103c84..559e9bbfcd6 100644 --- 
a/pkg/agent/externalnode/external_node_controller.go +++ b/pkg/agent/externalnode/external_node_controller.go @@ -15,6 +15,7 @@ package externalnode import ( + "context" "fmt" "net" "reflect" @@ -126,13 +127,13 @@ func (c *ExternalNodeController) Run(stopCh <-chan struct{}) { klog.InfoS("Starting controller", "name", controllerName) defer klog.InfoS("Shutting down controller", "name", controllerName) - if err := wait.PollImmediateUntil(5*time.Second, func() (done bool, err error) { + if err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), 5*time.Second, true, func(ctx context.Context) (done bool, err error) { if err = c.reconcile(); err != nil { klog.ErrorS(err, "ExternalNodeController failed during reconciliation") return false, nil } return true, nil - }, stopCh); err != nil { + }); err != nil { klog.Info("Stopped ExternalNodeController reconciliation") return } @@ -600,7 +601,7 @@ func (c *ExternalNodeController) removeOVSPortsAndFlows(interfaceConfig *interfa }() // Wait until the host interface created by OVS is removed. 
- if err = wait.PollImmediate(50*time.Millisecond, 2*time.Second, func() (bool, error) { + if err = wait.PollUntilContextTimeout(context.TODO(), 50*time.Millisecond, 2*time.Second, true, func(ctx context.Context) (bool, error) { return !hostInterfaceExists(hostIFName), nil }); err != nil { return fmt.Errorf("failed to wait for host interface %s deletion in 2s, err %v", hostIFName, err) diff --git a/pkg/agent/multicast/mcast_controller_test.go b/pkg/agent/multicast/mcast_controller_test.go index a322a657a58..2956e6d1ea7 100644 --- a/pkg/agent/multicast/mcast_controller_test.go +++ b/pkg/agent/multicast/mcast_controller_test.go @@ -831,7 +831,7 @@ func TestEncapLocalReportAndNotifyRemote(t *testing.T) { mockController.addOrUpdateGroupEvent(tc.e) if tc.groupChanged { - err := wait.PollImmediate(time.Millisecond*100, time.Second*3, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), time.Millisecond*100, time.Second*3, true, func(ctx context.Context) (done bool, err error) { if tc.e.eType == groupJoin { return mockController.localGroupHasInstalled(groupKey) && mockController.groupHasInstalled(groupKey), nil } else { diff --git a/pkg/agent/multicluster/pod_route_controller_test.go b/pkg/agent/multicluster/pod_route_controller_test.go index ca76f6209ea..41dcf74dfed 100644 --- a/pkg/agent/multicluster/pod_route_controller_test.go +++ b/pkg/agent/multicluster/pod_route_controller_test.go @@ -319,7 +319,7 @@ func TestPodEvent(t *testing.T) { } func waitForGatewayRealized(gwLister mclisters.GatewayLister, gateway *mcv1alpha1.Gateway) error { - return wait.Poll(interval, timeout, func() (bool, error) { + return wait.PollUntilContextTimeout(context.Background(), interval, timeout, false, func(ctx context.Context) (bool, error) { _, err := gwLister.Gateways(gateway.Namespace).Get(gateway.Name) if err != nil { return false, nil @@ -329,7 +329,7 @@ func waitForGatewayRealized(gwLister mclisters.GatewayLister, gateway *mcv1alpha } func 
waitForPodIPUpdate(podLister v1.PodLister, pod *corev1.Pod) error { - return wait.Poll(interval, timeout, func() (bool, error) { + return wait.PollUntilContextTimeout(context.Background(), interval, timeout, false, func(ctx context.Context) (bool, error) { getPod, err := podLister.Pods(pod.Namespace).Get(pod.Name) if err != nil || pod.Status.PodIP != getPod.Status.PodIP || pod.Status.HostIP != getPod.Status.HostIP { return false, nil diff --git a/pkg/agent/multicluster/stretched_networkpolicy_controller.go b/pkg/agent/multicluster/stretched_networkpolicy_controller.go index 560ece18a61..8b455d64389 100644 --- a/pkg/agent/multicluster/stretched_networkpolicy_controller.go +++ b/pkg/agent/multicluster/stretched_networkpolicy_controller.go @@ -184,7 +184,7 @@ func (s *StretchedNetworkPolicyController) processNextWorkItem() bool { if podRef, ok := obj.(types.NamespacedName); !ok { s.queue.Forget(obj) - klog.Errorf("Expected type 'NamespacedName' in work queue but got object", "object", obj) + klog.ErrorS(nil, "Expected type 'NamespacedName' in work queue but got object", "object", obj) } else if err := s.syncPodClassifierFlow(podRef); err == nil { s.queue.Forget(podRef) } else { diff --git a/pkg/agent/multicluster/stretched_networkpolicy_controller_test.go b/pkg/agent/multicluster/stretched_networkpolicy_controller_test.go index f00d4a0beac..6d6fb65824b 100644 --- a/pkg/agent/multicluster/stretched_networkpolicy_controller_test.go +++ b/pkg/agent/multicluster/stretched_networkpolicy_controller_test.go @@ -558,7 +558,7 @@ func toPodAddEvent(pod *corev1.Pod) antreatypes.PodUpdate { } func waitForPodRealized(podLister v1.PodLister, pod *corev1.Pod) error { - return wait.Poll(interval, timeout, func() (bool, error) { + return wait.PollUntilContextTimeout(context.Background(), interval, timeout, false, func(ctx context.Context) (bool, error) { _, err := podLister.Pods(pod.Namespace).Get(pod.Name) if err != nil { return false, nil @@ -568,7 +568,7 @@ func 
waitForPodRealized(podLister v1.PodLister, pod *corev1.Pod) error { } func waitForPodLabelUpdate(podLister v1.PodLister, pod *corev1.Pod) error { - return wait.Poll(interval, timeout, func() (bool, error) { + return wait.PollUntilContextTimeout(context.Background(), interval, timeout, false, func(ctx context.Context) (bool, error) { getPod, err := podLister.Pods(pod.Namespace).Get(pod.Name) if err != nil || !reflect.DeepEqual(pod.Labels, getPod.Labels) { return false, nil @@ -578,7 +578,7 @@ func waitForPodLabelUpdate(podLister v1.PodLister, pod *corev1.Pod) error { } func waitForNSRealized(c *fakeStretchedNetworkPolicyController, ns *corev1.Namespace) error { - return wait.Poll(interval, timeout, func() (bool, error) { + return wait.PollUntilContextTimeout(context.Background(), interval, timeout, false, func(ctx context.Context) (bool, error) { _, err := c.namespaceLister.Get(ns.Name) if err != nil { return false, nil @@ -588,7 +588,7 @@ func waitForNSRealized(c *fakeStretchedNetworkPolicyController, ns *corev1.Names } func waitForLabelIdentityRealized(c *fakeStretchedNetworkPolicyController, labelIdentity *v1alpha1.LabelIdentity) error { - return wait.Poll(interval, timeout, func() (bool, error) { + return wait.PollUntilContextTimeout(context.Background(), interval, timeout, false, func(ctx context.Context) (bool, error) { _, err := c.labelIdentityLister.Get(labelIdentity.Name) if err != nil { return false, nil diff --git a/pkg/agent/nodeportlocal/npl_agent_test.go b/pkg/agent/nodeportlocal/npl_agent_test.go index 6d9e91ab946..a3bcdb43b86 100644 --- a/pkg/agent/nodeportlocal/npl_agent_test.go +++ b/pkg/agent/nodeportlocal/npl_agent_test.go @@ -310,7 +310,7 @@ func (t *testData) pollForPodAnnotation(podName string, found bool) ([]types.NPL var exists bool // do not use PollImmediate: 1 second is reserved for the controller to do his job and // update Pod NPL annotations as needed. 
- err := wait.Poll(time.Second, 20*time.Second, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) { updatedPod, err := t.k8sClient.CoreV1().Pods(defaultNS).Get(context.TODO(), podName, metav1.GetOptions{}) require.NoError(t, err, "Failed to get Pod") annotation := updatedPod.GetAnnotations() @@ -489,7 +489,7 @@ func TestPodDelete(t *testing.T) { require.NoError(t, err, "Pod deletion failed") t.Logf("Successfully deleted Pod: %s", testPod.Name) - err = wait.Poll(time.Second, 20*time.Second, func() (bool, error) { + err = wait.PollUntilContextTimeout(context.Background(), time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) { return !testData.portTable.RuleExists(defaultPodIP, defaultPort, protocolTCP), nil }) assert.NoError(t, err, "Error when polling for port table update") diff --git a/pkg/agent/proxy/proxier_test.go b/pkg/agent/proxy/proxier_test.go index 81065124f87..4ee213d00a5 100644 --- a/pkg/agent/proxy/proxier_test.go +++ b/pkg/agent/proxy/proxier_test.go @@ -36,7 +36,7 @@ import ( featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/component-base/metrics/legacyregistry" "k8s.io/component-base/metrics/testutil" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" mccommon "antrea.io/antrea/multicluster/controllers/multicluster/common" agentconfig "antrea.io/antrea/pkg/agent/config" @@ -1080,9 +1080,9 @@ func TestLoadBalancerServiceWithMultiplePorts(t *testing.T) { endpoint1Address, }, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, NodeName: &endpoint1NodeName, }, @@ -1091,9 +1091,9 @@ func TestLoadBalancerServiceWithMultiplePorts(t *testing.T) { endpoint2Address, }, Conditions: discovery.EndpointConditions{ - Ready: pointer.Bool(true), - Serving: 
pointer.Bool(true), - Terminating: pointer.Bool(false), + Ready: ptr.To(true), + Serving: ptr.To(true), + Terminating: ptr.To(false), }, NodeName: &endpoint2NodeName, }, diff --git a/pkg/agent/proxy/topology.go b/pkg/agent/proxy/topology.go index 4589c1d727f..7460082c201 100644 --- a/pkg/agent/proxy/topology.go +++ b/pkg/agent/proxy/topology.go @@ -127,7 +127,7 @@ func (p *proxier) canUseTopology(endpoints map[string]k8sproxy.Endpoint, svcInfo hintsAnnotation := svcInfo.HintsAnnotation() if hintsAnnotation != "Auto" && hintsAnnotation != "auto" { if hintsAnnotation != "" && hintsAnnotation != "Disabled" && hintsAnnotation != "disabled" { - klog.InfoS("Skipping topology aware Endpoint filtering since Service has unexpected value", "annotationTopologyAwareHints", v1.AnnotationTopologyAwareHints, "hints", hintsAnnotation) + klog.InfoS("Skipping topology aware Endpoint filtering since Service has unexpected value", "annotationTopologyAwareHints", v1.DeprecatedAnnotationTopologyAwareHints, "hints", hintsAnnotation) } return false } diff --git a/pkg/agent/secondarynetwork/podwatch/controller_test.go b/pkg/agent/secondarynetwork/podwatch/controller_test.go index 63572d4353c..3dd5ee499d6 100644 --- a/pkg/agent/secondarynetwork/podwatch/controller_test.go +++ b/pkg/agent/secondarynetwork/podwatch/controller_test.go @@ -39,7 +39,6 @@ import ( "go.uber.org/mock/gomock" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/util/workqueue" @@ -262,17 +261,17 @@ func TestPodControllerRun(t *testing.T) { // unfortunately, we cannot use the podcache being updated by the controller as a signal // here: the podcache is not thread-safe and is only meant to be accessed by the controller // event handlers (with the exception of the operations meant to be performed by the CNI server). 
- assert.NoError(t, wait.Poll(10*time.Millisecond, 1*time.Second, func() (bool, error) { - return atomic.LoadInt32(&interfaceConfigured) > 0, nil - })) + assert.Eventually(t, func() bool { + return atomic.LoadInt32(&interfaceConfigured) > 0 + }, 1*time.Second, 10*time.Millisecond) mockIPAM.EXPECT().SecondaryNetworkRelease(gomock.Any()) require.NotNil(t, podCache.GetCNIConfigInfoByContainerID(podName, testNamespace, containerID) == nil) require.NoError(t, client.CoreV1().Pods(testNamespace).Delete(context.Background(), podName, metav1.DeleteOptions{}), "error when deleting test Pod") - assert.NoError(t, wait.Poll(10*time.Millisecond, 1*time.Second, func() (bool, error) { - return podCache.GetCNIConfigInfoByContainerID(podName, testNamespace, containerID) == nil, nil - })) + assert.Eventually(t, func() bool { + return podCache.GetCNIConfigInfoByContainerID(podName, testNamespace, containerID) == nil + }, 1*time.Second, 10*time.Millisecond) close(stopCh) wg.Wait() diff --git a/pkg/agent/util/iptables/lock.go b/pkg/agent/util/iptables/lock.go index 3b17e5bef66..1b8e890ca93 100644 --- a/pkg/agent/util/iptables/lock.go +++ b/pkg/agent/util/iptables/lock.go @@ -18,6 +18,7 @@ package iptables import ( + "context" "fmt" "os" "time" @@ -40,12 +41,13 @@ func Lock(lockFilePath string, timeout time.Duration) (func() error, error) { } // Check whether the lock is available every 200ms. 
- if err := wait.PollImmediate(waitIntervalMicroSeconds*time.Microsecond, timeout, func() (bool, error) { - if err := unix.Flock(int(lockFile.Fd()), unix.LOCK_EX|unix.LOCK_NB); err != nil { - return false, nil - } - return true, nil - }); err != nil { + if err := wait.PollUntilContextTimeout(context.TODO(), waitIntervalMicroSeconds*time.Microsecond, timeout, true, + func(ctx context.Context) (bool, error) { + if err := unix.Flock(int(lockFile.Fd()), unix.LOCK_EX|unix.LOCK_NB); err != nil { + return false, nil + } + return true, nil + }); err != nil { lockFile.Close() return nil, fmt.Errorf("error acquiring xtables lock: %v", err) } diff --git a/pkg/agent/util/net_linux.go b/pkg/agent/util/net_linux.go index db1c5d8de8a..301691fb717 100644 --- a/pkg/agent/util/net_linux.go +++ b/pkg/agent/util/net_linux.go @@ -18,6 +18,7 @@ package util import ( + "context" "fmt" "net" "os" @@ -276,14 +277,15 @@ func GetInterfaceConfig(ifName string) (*net.Interface, []*net.IPNet, []interfac func RenameInterface(from, to string) error { klog.InfoS("Renaming interface", "oldName", from, "newName", to) var renameErr error - pollErr := wait.Poll(time.Millisecond*100, time.Second, func() (done bool, err error) { - renameErr = renameHostInterface(from, to) - if renameErr != nil { - klog.InfoS("Unable to rename host interface name with error, retrying", "oldName", from, "newName", to, "err", renameErr) - return false, nil - } - return true, nil - }) + pollErr := wait.PollUntilContextTimeout(context.TODO(), time.Millisecond*100, time.Second, false, + func(ctx context.Context) (done bool, err error) { + renameErr = renameHostInterface(from, to) + if renameErr != nil { + klog.InfoS("Unable to rename host interface name with error, retrying", "oldName", from, "newName", to, "err", renameErr) + return false, nil + } + return true, nil + }) if pollErr != nil { return fmt.Errorf("failed to rename host interface name %s to %s", from, to) } diff --git a/pkg/agent/util/net_windows.go 
b/pkg/agent/util/net_windows.go index 1848c2e9ac4..dc57c0a2dbf 100644 --- a/pkg/agent/util/net_windows.go +++ b/pkg/agent/util/net_windows.go @@ -20,6 +20,7 @@ package util import ( "bufio" "bytes" + "context" "encoding/json" "errors" "fmt" @@ -180,7 +181,7 @@ func EnableHostInterface(ifaceName string) error { // Enable-NetAdapter is not a blocking operation based on our testing. // It returns immediately no matter whether the interface has been enabled or not. // So we need to check the interface status to ensure it is up before returning. - if err := wait.PollImmediate(commandRetryInterval, commandRetryTimeout, func() (done bool, err error) { + if err := wait.PollUntilContextTimeout(context.TODO(), commandRetryInterval, commandRetryTimeout, true, func(ctx context.Context) (done bool, err error) { if _, err := runCommand(cmd); err != nil { klog.Errorf("Failed to run command %s: %v", cmd, err) return false, nil @@ -468,7 +469,7 @@ func PrepareHNSNetwork(subnetCIDR *net.IPNet, nodeIPNet *net.IPNet, uplinkAdapte var ipFound bool // On the current Windows testbed, it takes a maximum of 1.8 seconds to obtain a valid IP. // Therefore, we set the timeout limit to triple of that value, allowing a maximum wait of 6 seconds here. 
- err = wait.PollImmediate(1*time.Second, 6*time.Second, func() (bool, error) { + err = wait.PollUntilContextTimeout(context.TODO(), 1*time.Second, 6*time.Second, true, func(ctx context.Context) (bool, error) { var checkErr error adapter, ipFound, checkErr = adapterIPExists(nodeIPNet.IP, uplinkAdapter.HardwareAddr, ContainerVNICPrefix) if checkErr != nil { @@ -477,7 +478,7 @@ func PrepareHNSNetwork(subnetCIDR *net.IPNet, nodeIPNet *net.IPNet, uplinkAdapte return ipFound, nil }) if err != nil { - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { dhcpStatus, err := InterfaceIPv4DhcpEnabled(uplinkAdapter.Name) if err != nil { klog.ErrorS(err, "Failed to get IPv4 DHCP status on the network adapter", "adapter", uplinkAdapter.Name) @@ -1015,7 +1016,7 @@ func GetInterfaceConfig(ifName string) (*net.Interface, []*net.IPNet, []interfac func RenameInterface(from, to string) error { var renameErr error - pollErr := wait.Poll(time.Millisecond*100, time.Second, func() (done bool, err error) { + pollErr := wait.PollUntilContextTimeout(context.TODO(), time.Millisecond*100, time.Second, false, func(ctx context.Context) (done bool, err error) { renameErr = renameHostInterface(from, to) if renameErr != nil { klog.ErrorS(renameErr, "Failed to rename adapter, retrying") diff --git a/pkg/antctl/raw/multicluster/common/common.go b/pkg/antctl/raw/multicluster/common/common.go index 90ad241b770..b3be75559e3 100644 --- a/pkg/antctl/raw/multicluster/common/common.go +++ b/pkg/antctl/raw/multicluster/common/common.go @@ -328,10 +328,11 @@ func DeleteMemberToken(cmd *cobra.Command, k8sClient client.Client, name string, } func waitForSecretReady(client client.Client, secretName string, namespace string) error { - return wait.PollImmediate( + return wait.PollUntilContextTimeout(context.TODO(), 1*time.Second, 5*time.Second, - func() (bool, error) { + true, + func(ctx context.Context) (bool, error) { secret := &corev1.Secret{} if err := client.Get(context.TODO(), 
types.NamespacedName{Name: secretName, Namespace: namespace}, secret); err != nil { if apierrors.IsNotFound(err) { diff --git a/pkg/antctl/raw/multicluster/join.go b/pkg/antctl/raw/multicluster/join.go index cc1dfb90f47..bff278a97fe 100644 --- a/pkg/antctl/raw/multicluster/join.go +++ b/pkg/antctl/raw/multicluster/join.go @@ -276,10 +276,11 @@ func waitForMemberClusterReady(cmd *cobra.Command, k8sClient client.Client) erro } func waitForClusterSetReady(client client.Client, name string, namespace string, clusterID string) error { - return wait.PollImmediate( + return wait.PollUntilContextTimeout(context.TODO(), 1*time.Second, 1*time.Minute, - func() (bool, error) { + true, + func(ctx context.Context) (bool, error) { clusterSet := &mcv1alpha2.ClusterSet{} if err := client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, clusterSet); err != nil { if apierrors.IsNotFound(err) { diff --git a/pkg/antctl/raw/traceflow/command.go b/pkg/antctl/raw/traceflow/command.go index 1893835e00f..3dcc6c93497 100644 --- a/pkg/antctl/raw/traceflow/command.go +++ b/pkg/antctl/raw/traceflow/command.go @@ -184,7 +184,7 @@ func runE(cmd *cobra.Command, _ []string) error { } var res *v1beta1.Traceflow - err = wait.Poll(1*time.Second, option.timeout, func() (bool, error) { + err = wait.PollUntilContextTimeout(context.TODO(), 1*time.Second, option.timeout, false, func(ctx context.Context) (bool, error) { res, err = client.CrdV1beta1().Traceflows().Get(context.TODO(), tf.Name, metav1.GetOptions{}) if err != nil { return false, err @@ -194,7 +194,7 @@ func runE(cmd *cobra.Command, _ []string) error { } return true, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { err = errors.New("timeout waiting for Traceflow done") // Still output the Traceflow results if any. 
if res == nil { diff --git a/pkg/antctl/transform/networkpolicy/transform_test.go b/pkg/antctl/transform/networkpolicy/transform_test.go index b5fcdb411fe..61b1625606a 100644 --- a/pkg/antctl/transform/networkpolicy/transform_test.go +++ b/pkg/antctl/transform/networkpolicy/transform_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" cpv1beta "antrea.io/antrea/pkg/apis/controlplane/v1beta2" ) @@ -35,8 +35,8 @@ func TestListTransform(t *testing.T) { SourceRef: &cpv1beta.NetworkPolicyReference{ Name: "a", }, - TierPriority: pointer.Int32(260), - Priority: pointer.Float64(5.7), + TierPriority: ptr.To(int32(260)), + Priority: ptr.To(5.7), } var npB = cpv1beta.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ @@ -47,8 +47,8 @@ func TestListTransform(t *testing.T) { SourceRef: &cpv1beta.NetworkPolicyReference{ Name: "b", }, - TierPriority: pointer.Int32(260), - Priority: pointer.Float64(7.8), + TierPriority: ptr.To(int32(260)), + Priority: ptr.To(7.8), } var npC = cpv1beta.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ @@ -59,8 +59,8 @@ func TestListTransform(t *testing.T) { SourceRef: &cpv1beta.NetworkPolicyReference{ Name: "c", }, - TierPriority: pointer.Int32(200), - Priority: pointer.Float64(8), + TierPriority: ptr.To(int32(200)), + Priority: ptr.To(float64(8)), } var npList = &cpv1beta.NetworkPolicyList{ diff --git a/pkg/apiserver/certificate/certificate.go b/pkg/apiserver/certificate/certificate.go index 517547bf547..130b9ec2be6 100644 --- a/pkg/apiserver/certificate/certificate.go +++ b/pkg/apiserver/certificate/certificate.go @@ -15,6 +15,7 @@ package certificate import ( + "context" "fmt" "os" "path/filepath" @@ -67,17 +68,18 @@ func ApplyServerCert(selfSignedCert bool, tlsKeyPath := filepath.Join(caConfig.CertDir, TLSKeyFile) // The secret may be created after the Pod is created, for example, when cert-manager is used 
the secret // is created asynchronously. It waits for a while before it's considered to be failed. - if err = wait.PollImmediate(2*time.Second, caConfig.CertReadyTimeout, func() (bool, error) { - for _, path := range []string{caCertPath, tlsCertPath, tlsKeyPath} { - f, err := os.Open(path) - if err != nil { - klog.Warningf("Couldn't read %s when applying server certificate, retrying", path) - return false, nil + if err = wait.PollUntilContextTimeout(context.TODO(), 2*time.Second, caConfig.CertReadyTimeout, true, + func(ctx context.Context) (bool, error) { + for _, path := range []string{caCertPath, tlsCertPath, tlsKeyPath} { + f, err := os.Open(path) + if err != nil { + klog.Warningf("Couldn't read %s when applying server certificate, retrying", path) + return false, nil + } + f.Close() } - f.Close() - } - return true, nil - }); err != nil { + return true, nil + }); err != nil { return nil, fmt.Errorf("error reading TLS certificate and/or key. Please make sure the TLS CA (%s), cert (%s), and key (%s) files are present in \"%s\", when selfSignedCert is set to false", CACertFile, TLSCertFile, TLSKeyFile, caConfig.CertDir) } // Since 1.17.0 (https://github.com/kubernetes/kubernetes/commit/3f5fbfbfac281f40c11de2f57d58cc332affc37b), diff --git a/pkg/apiserver/certificate/certificate_test.go b/pkg/apiserver/certificate/certificate_test.go index 26d6c3b707d..988f230e595 100644 --- a/pkg/apiserver/certificate/certificate_test.go +++ b/pkg/apiserver/certificate/certificate_test.go @@ -203,7 +203,7 @@ func TestApplyServerCert(t *testing.T) { if tt.selfSignedCert && tt.testRotate { oldCertKeyContent := got.getCertificate() go got.Run(ctx, 1) - err := wait.Poll(time.Second, 8*time.Second, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), time.Second, 8*time.Second, false, func(ctx context.Context) (bool, error) { newCertKeyContent := got.getCertificate() equal := bytes.Equal(oldCertKeyContent, newCertKeyContent) return !equal, nil diff --git 
a/pkg/apiserver/registry/system/supportbundle/rest.go b/pkg/apiserver/registry/system/supportbundle/rest.go index b62ef81f20a..8a6323e1ddd 100644 --- a/pkg/apiserver/registry/system/supportbundle/rest.go +++ b/pkg/apiserver/registry/system/supportbundle/rest.go @@ -308,6 +308,10 @@ func (r *supportBundleREST) clean(ctx context.Context, bundlePath string, durati defaultFS.Remove(bundlePath) } +func (r *supportBundleREST) GetSingularName() string { + return "supportBundle" +} + var ( _ rest.Storage = new(downloadREST) _ rest.Getter = new(downloadREST) diff --git a/pkg/controller/certificatesigningrequest/ipsec_csr_signing_controller_test.go b/pkg/controller/certificatesigningrequest/ipsec_csr_signing_controller_test.go index 5100f5bc63a..a996e051c9d 100644 --- a/pkg/controller/certificatesigningrequest/ipsec_csr_signing_controller_test.go +++ b/pkg/controller/certificatesigningrequest/ipsec_csr_signing_controller_test.go @@ -108,17 +108,18 @@ func TestIPsecCertificateApproverAndSigner(t *testing.T) { csr, err := clientset.CertificatesV1().CertificateSigningRequests().Create(context.TODO(), tt.csr, metav1.CreateOptions{}) require.NoError(t, err) - err = wait.PollImmediate(200*time.Millisecond, 10*time.Second, func() (done bool, err error) { - csr, err = clientset.CertificatesV1().CertificateSigningRequests().Get(context.TODO(), tt.csr.Name, metav1.GetOptions{}) - require.NoError(t, err) - if !isCertificateRequestApproved(csr) { - return false, nil - } - if len(csr.Status.Certificate) == 0 { - return false, nil - } - return true, nil - }) + err = wait.PollUntilContextTimeout(context.Background(), 200*time.Millisecond, 10*time.Second, true, + func(ctx context.Context) (done bool, err error) { + csr, err = clientset.CertificatesV1().CertificateSigningRequests().Get(context.TODO(), tt.csr.Name, metav1.GetOptions{}) + require.NoError(t, err) + if !isCertificateRequestApproved(csr) { + return false, nil + } + if len(csr.Status.Certificate) == 0 { + return false, nil + } + 
return true, nil + }) require.NoError(t, err) issued := csr.Status.Certificate parsed, err := certutil.ParseCertsPEM(issued) diff --git a/pkg/controller/egress/controller_test.go b/pkg/controller/egress/controller_test.go index 356774503aa..61303439d42 100644 --- a/pkg/controller/egress/controller_test.go +++ b/pkg/controller/egress/controller_test.go @@ -759,13 +759,14 @@ func TestSyncEgressIP(t *testing.T) { func checkExternalIPPoolUsed(t *testing.T, controller *egressController, poolName string, used int) { exists := controller.externalIPAllocator.IPPoolExists(poolName) require.True(t, exists) - err := wait.PollImmediate(50*time.Millisecond, 2*time.Second, func() (found bool, err error) { - eip, err := controller.crdClient.CrdV1beta1().ExternalIPPools().Get(context.TODO(), poolName, metav1.GetOptions{}) - if err != nil { - return false, err - } - return eip.Status.Usage.Used == used, nil - }) + err := wait.PollUntilContextTimeout(context.Background(), 50*time.Millisecond, 2*time.Second, true, + func(ctx context.Context) (found bool, err error) { + eip, err := controller.crdClient.CrdV1beta1().ExternalIPPools().Get(context.TODO(), poolName, metav1.GetOptions{}) + if err != nil { + return false, err + } + return eip.Status.Usage.Used == used, nil + }) assert.NoError(t, err) } diff --git a/pkg/controller/externalippool/controller_test.go b/pkg/controller/externalippool/controller_test.go index 3e8a3c594fc..d0e1052e316 100644 --- a/pkg/controller/externalippool/controller_test.go +++ b/pkg/controller/externalippool/controller_test.go @@ -458,7 +458,7 @@ func TestIPPoolHasIP(t *testing.T) { func checkExternalIPPoolStatus(t *testing.T, controller *controller, poolName string, expectedStatus antreacrds.IPPoolUsage) { exists := controller.IPPoolExists(poolName) require.True(t, exists) - err := wait.PollImmediate(50*time.Millisecond, 2*time.Second, func() (found bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), 50*time.Millisecond, 
2*time.Second, true, func(ctx context.Context) (found bool, err error) { eip, err := controller.crdClient.CrdV1beta1().ExternalIPPools().Get(context.TODO(), poolName, metav1.GetOptions{}) if err != nil { return false, err diff --git a/pkg/controller/externalnode/controller_test.go b/pkg/controller/externalnode/controller_test.go index fa615abb1a7..04f5789deab 100644 --- a/pkg/controller/externalnode/controller_test.go +++ b/pkg/controller/externalnode/controller_test.go @@ -147,7 +147,7 @@ func TestAddExternalNode(t *testing.T) { defer close(stopCh) informerFactory.Start(stopCh) go controller.Run(stopCh) - err := wait.PollImmediate(time.Millisecond*50, time.Second, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), time.Millisecond*50, time.Second, true, func(ctx context.Context) (done bool, err error) { for _, ee := range tc.expectedEntities { ok, err := checkExternalEntityExists(controller.crdClient, ee) if err != nil { @@ -416,7 +416,7 @@ func TestUpdateExternalNode(t *testing.T) { defer close(stopCh) informerFactory.Start(stopCh) go controller.Run(stopCh) - err := wait.PollImmediate(time.Millisecond*50, time.Second, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), time.Millisecond*50, time.Second, true, func(ctx context.Context) (done bool, err error) { entities, listErr := controller.crdClient.CrdV1alpha2().ExternalEntities(tc.externalNode.Namespace).List(context.TODO(), metav1.ListOptions{}) if listErr != nil { return false, listErr @@ -437,7 +437,7 @@ func TestUpdateExternalNode(t *testing.T) { _, err = controller.crdClient.CrdV1alpha1().ExternalNodes(tc.externalNode.Namespace).Update(context.TODO(), tc.updatedExternalNode, metav1.UpdateOptions{}) require.NoError(t, err) - err = wait.PollImmediate(time.Millisecond*50, time.Second, func() (done bool, err error) { + err = wait.PollUntilContextTimeout(context.Background(), time.Millisecond*50, time.Second, true, func(ctx 
context.Context) (done bool, err error) { return checkExternalEntityExists(controller.crdClient, tc.expectedEntity) }) assert.NoError(t, err) @@ -488,7 +488,7 @@ func TestDeleteExternalNode(t *testing.T) { err := controller.crdClient.CrdV1alpha1().ExternalNodes(externalNode.Namespace).Delete(context.TODO(), externalNode.Name, metav1.DeleteOptions{}) require.NoError(t, err) key, _ := keyFunc(externalNode) - err = wait.PollImmediate(time.Millisecond*50, time.Second, func() (done bool, err error) { + err = wait.PollUntilContextTimeout(context.Background(), time.Millisecond*50, time.Second, true, func(ctx context.Context) (done bool, err error) { entities, listErr := controller.crdClient.CrdV1alpha2().ExternalEntities(externalNode.Namespace).List(context.TODO(), metav1.ListOptions{}) if listErr != nil { return false, listErr diff --git a/pkg/controller/grouping/controller.go b/pkg/controller/grouping/controller.go index b974e42b4db..1b6d6a009f8 100644 --- a/pkg/controller/grouping/controller.go +++ b/pkg/controller/grouping/controller.go @@ -15,6 +15,7 @@ package grouping import ( + "context" "fmt" "sync/atomic" "time" @@ -184,7 +185,7 @@ func (c *GroupEntityController) Run(stopCh <-chan struct{}) { } // Wait until all event handlers process the initial resources before setting groupEntityIndex as synced. 
- if err := wait.PollImmediateUntil(100*time.Millisecond, func() (done bool, err error) { + if err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), 100*time.Millisecond, true, func(ctx context.Context) (done bool, err error) { if uint64(initialPodCount) > c.podAddEvents.Load() { return false, nil } @@ -197,7 +198,7 @@ func (c *GroupEntityController) Run(stopCh <-chan struct{}) { } } return true, nil - }, stopCh); err == nil { + }); err == nil { c.groupEntityIndex.setSynced(true) } diff --git a/pkg/controller/grouping/controller_test.go b/pkg/controller/grouping/controller_test.go index 9fcc7f9e294..97d1c425e78 100644 --- a/pkg/controller/grouping/controller_test.go +++ b/pkg/controller/grouping/controller_test.go @@ -22,7 +22,6 @@ import ( "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" featuregatetesting "k8s.io/component-base/featuregate/testing" @@ -100,9 +99,9 @@ func TestGroupEntityControllerRun(t *testing.T) { go c.groupEntityIndex.Run(stopCh) go c.Run(stopCh) - assert.NoError(t, wait.Poll(10*time.Millisecond, time.Second, func() (done bool, err error) { - return index.HasSynced(), nil - }), "GroupEntityIndex hasn't been synced in 1 second after starting GroupEntityController") + assert.Eventually(t, func() bool { + return index.HasSynced() + }, time.Second, 10*time.Millisecond, "GroupEntityIndex hasn't been synced in 1 second after starting GroupEntityController") }) } } diff --git a/pkg/controller/ipam/antrea_ipam_controller_test.go b/pkg/controller/ipam/antrea_ipam_controller_test.go index 5b624285948..6fecc2a2624 100644 --- a/pkg/controller/ipam/antrea_ipam_controller_test.go +++ b/pkg/controller/ipam/antrea_ipam_controller_test.go @@ -125,17 +125,18 @@ func initTestObjects(annotateNamespace bool, annotateStatefulSet bool, replicas func verifyPoolAllocatedSize(t *testing.T, poolName string, 
poolLister listers.IPPoolLister, size int) { - err := wait.PollImmediate(100*time.Millisecond, 1*time.Second, func() (bool, error) { - pool, err := poolLister.Get(poolName) - if err != nil { - return false, nil - } - if len(pool.Status.IPAddresses) == size { - return true, nil - } + err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 1*time.Second, true, + func(ctx context.Context) (bool, error) { + pool, err := poolLister.Get(poolName) + if err != nil { + return false, nil + } + if len(pool.Status.IPAddresses) == size { + return true, nil + } - return false, nil - }) + return false, nil + }) require.NoError(t, err) } @@ -189,13 +190,14 @@ func TestStatefulSetLifecycle(t *testing.T) { var allocator *poolallocator.IPPoolAllocator var err error // Wait until pool propagates to the informer - pollErr := wait.PollImmediate(100*time.Millisecond, 3*time.Second, func() (bool, error) { - allocator, err = poolallocator.NewIPPoolAllocator(pool.Name, controller.crdClient, controller.poolLister) - if err != nil { - return false, nil - } - return true, nil - }) + pollErr := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 3*time.Second, true, + func(ctx context.Context) (bool, error) { + allocator, err = poolallocator.NewIPPoolAllocator(pool.Name, controller.crdClient, controller.poolLister) + if err != nil { + return false, nil + } + return true, nil + }) require.NoError(t, pollErr) defer allocator.ReleaseStatefulSet(statefulSet.Namespace, statefulSet.Name) @@ -262,7 +264,7 @@ func TestReleaseStaleAddresses(t *testing.T) { go controller.Run(stopCh) // verify two stale entries were deleted, one updated to Reserved status - err := wait.PollImmediate(100*time.Millisecond, 2*time.Second, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 2*time.Second, true, func(ctx context.Context) (bool, error) { pool, err := controller.poolLister.Get(pool.Name) if err != nil { return 
false, nil diff --git a/pkg/controller/labelidentity/controller.go b/pkg/controller/labelidentity/controller.go index 3accad43e7a..2722fa492bd 100644 --- a/pkg/controller/labelidentity/controller.go +++ b/pkg/controller/labelidentity/controller.go @@ -15,6 +15,7 @@ package labelidentity import ( + "context" "sync/atomic" "time" @@ -96,12 +97,12 @@ func (c *Controller) Run(stopCh <-chan struct{}) { } initialLabelCount := len(c.labelInformer.Informer().GetStore().List()) // Wait until initial label identities are processed before setting labelIdentityIndex as synced. - if err := wait.PollImmediateUntil(100*time.Millisecond, func() (done bool, err error) { + if err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), 100*time.Millisecond, true, func(ctx context.Context) (done bool, err error) { if uint64(initialLabelCount) > c.labelAddEvents.Load() { return false, nil } return true, nil - }, stopCh); err == nil { + }); err == nil { c.labelIdentityIndex.setSynced(true) } <-stopCh diff --git a/pkg/controller/labelidentity/controller_test.go b/pkg/controller/labelidentity/controller_test.go index bf091fc3ced..b3ed33a0d99 100644 --- a/pkg/controller/labelidentity/controller_test.go +++ b/pkg/controller/labelidentity/controller_test.go @@ -21,7 +21,6 @@ import ( "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" mcv1alpha1 "antrea.io/antrea/multicluster/apis/multicluster/v1alpha1" fakeversioned "antrea.io/antrea/multicluster/pkg/client/clientset/versioned/fake" @@ -78,7 +77,7 @@ func TestGroupEntityControllerRun(t *testing.T) { go c.labelIdentityIndex.Run(stopCh) go c.Run(stopCh) - assert.NoError(t, wait.Poll(10*time.Millisecond, time.Second, func() (done bool, err error) { - return index.HasSynced(), nil - }), "LabelIdentityIndex hasn't been synced in 1 second after starting LabelIdentityController") + assert.Eventually(t, func() bool { + return 
index.HasSynced() + }, 1*time.Second, 10*time.Millisecond, "LabelIdentityIndex hasn't been synced in 1 second after starting LabelIdentityController") } diff --git a/pkg/controller/networkpolicy/networkpolicy_controller_test.go b/pkg/controller/networkpolicy/networkpolicy_controller_test.go index 9459934d557..555963ea31e 100644 --- a/pkg/controller/networkpolicy/networkpolicy_controller_test.go +++ b/pkg/controller/networkpolicy/networkpolicy_controller_test.go @@ -2833,10 +2833,10 @@ func TestAddressGroupWithNodeSelector(t *testing.T) { if err != nil { return err } - assert.NoError(t, wait.Poll(time.Millisecond*100, time.Second, func() (done bool, err error) { + assert.Eventually(t, func() bool { newNode, err := c.nodeLister.Get(node.Name) - return reflect.DeepEqual(node, newNode), err - })) + return reflect.DeepEqual(node, newNode) && err == nil + }, time.Second, 100*time.Millisecond) return nil } fakeNode0.Labels = nodeSelectorA.MatchLabels @@ -3711,7 +3711,7 @@ func TestSyncInternalNetworkPolicyWithGroups(t *testing.T) { c.crdClient.CrdV1beta1().NetworkPolicies(tt.inputPolicy.Namespace).Create(context.TODO(), tt.inputPolicy, metav1.CreateOptions{}) var gotPolicy *antreatypes.NetworkPolicy - err := wait.PollImmediate(100*time.Millisecond, 3*time.Second, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 3*time.Second, true, func(ctx context.Context) (done bool, err error) { obj, exists, _ := c.internalNetworkPolicyStore.Get(tt.expectedPolicy.Name) if !exists { return false, nil diff --git a/pkg/controller/stats/aggregator_test.go b/pkg/controller/stats/aggregator_test.go index 9ccc5b5d4a3..da135e72c79 100644 --- a/pkg/controller/stats/aggregator_test.go +++ b/pkg/controller/stats/aggregator_test.go @@ -68,7 +68,7 @@ var ( func runWrapper(t *testing.T, a *Aggregator, policyCount int, summaries []*controlplane.NodeStatsSummary) { stopCh := make(chan struct{}) doneCh := make(chan struct{}) - err := 
wait.PollImmediate(100*time.Millisecond, time.Second, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, time.Second, true, func(ctx context.Context) (done bool, err error) { count := len(a.ListNetworkPolicyStats("")) + len(a.ListAntreaNetworkPolicyStats("")) + len(a.ListAntreaClusterNetworkPolicyStats()) return (count >= policyCount), nil }) @@ -81,7 +81,7 @@ func runWrapper(t *testing.T, a *Aggregator, policyCount int, summaries []*contr a.Collect(summary) } // Wait for all summaries to be consumed. - err = wait.PollImmediate(100*time.Millisecond, time.Second, func() (done bool, err error) { + err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, time.Second, true, func(ctx context.Context) (done bool, err error) { return len(a.dataCh) == 0, nil }) require.NoError(t, err, "Timeout while waiting for summaries to be consummed by Aggregator") @@ -622,7 +622,7 @@ func TestDeleteNetworkPolicy(t *testing.T) { crdClient.CrdV1beta1().ClusterNetworkPolicies().Delete(context.TODO(), acnp1.Name, metav1.DeleteOptions{}) crdClient.CrdV1beta1().NetworkPolicies(annp1.Namespace).Delete(context.TODO(), annp1.Name, metav1.DeleteOptions{}) // Event handlers are asynchronous, it's supposed to finish very soon. 
- err := wait.PollImmediate(100*time.Millisecond, time.Second, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, time.Second, true, func(ctx context.Context) (done bool, err error) { return len(a.ListNetworkPolicyStats("")) == 0 && len(a.ListAntreaClusterNetworkPolicyStats()) == 0 && len(a.ListAntreaNetworkPolicyStats("")) == 0, nil }) assert.NoError(t, err) diff --git a/pkg/controller/supportbundlecollection/controller_test.go b/pkg/controller/supportbundlecollection/controller_test.go index 0b9cefd871c..678215dadd5 100644 --- a/pkg/controller/supportbundlecollection/controller_test.go +++ b/pkg/controller/supportbundlecollection/controller_test.go @@ -865,7 +865,7 @@ func TestCreateAndDeleteInternalSupportBundleCollection(t *testing.T) { } bundle, err := testClient.crdClient.CrdV1alpha1().SupportBundleCollections().Create(context.TODO(), generateSupportBundleResource(bundleConfig), metav1.CreateOptions{}) require.Nil(t, err) - err = wait.PollImmediate(time.Millisecond*50, time.Second, func() (done bool, err error) { + err = wait.PollUntilContextTimeout(context.Background(), time.Millisecond*50, time.Second, true, func(ctx context.Context) (done bool, err error) { _, getErr := controller.supportBundleCollectionLister.Get(tc.bundleConfig.name) if getErr == nil { return true, nil @@ -1060,7 +1060,7 @@ func TestSyncSupportBundleCollection(t *testing.T) { go controller.worker() for _, tc := range testCases { - err := wait.PollImmediate(time.Millisecond*100, time.Second, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), time.Millisecond*100, time.Second, true, func(ctx context.Context) (done bool, err error) { _, exists, err := controller.supportBundleCollectionStore.Get(tc.bundleConfig.name) if err != nil { return false, err diff --git a/pkg/controller/traceflow/controller_test.go b/pkg/controller/traceflow/controller_test.go index 661357a5c69..c40a37d2602 100644 
--- a/pkg/controller/traceflow/controller_test.go +++ b/pkg/controller/traceflow/controller_test.go @@ -153,7 +153,7 @@ func TestTraceflow(t *testing.T) { func (tfc *traceflowController) waitForPodInNamespace(ns string, name string, timeout time.Duration) (*corev1.Pod, error) { var pod *corev1.Pod var err error - if err = wait.Poll(100*time.Millisecond, timeout, func() (bool, error) { + if err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, timeout, false, func(ctx context.Context) (bool, error) { // Make sure dummy Pod is synced by informer pod, err = tfc.podLister.Pods(ns).Get(name) if err != nil { @@ -169,7 +169,7 @@ func (tfc *traceflowController) waitForPodInNamespace(ns string, name string, ti func (tfc *traceflowController) waitForTraceflow(name string, phase crdv1beta1.TraceflowPhase, timeout time.Duration) (*crdv1beta1.Traceflow, error) { var tf *crdv1beta1.Traceflow var err error - if err = wait.Poll(100*time.Millisecond, timeout, func() (bool, error) { + if err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, timeout, false, func(ctx context.Context) (bool, error) { tf, err = tfc.client.CrdV1beta1().Traceflows().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil || tf.Status.Phase != phase { return false, nil diff --git a/pkg/flowaggregator/clickhouseclient/clickhouseclient.go b/pkg/flowaggregator/clickhouseclient/clickhouseclient.go index be21f777aaa..a53cd119973 100644 --- a/pkg/flowaggregator/clickhouseclient/clickhouseclient.go +++ b/pkg/flowaggregator/clickhouseclient/clickhouseclient.go @@ -427,7 +427,7 @@ func ConnectClickHouse(config *ClickHouseConfig) (*sql.DB, error) { connTimeout := 10 * time.Second // Connect to ClickHouse in a loop - if err := wait.PollImmediate(connRetryInterval, connTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.TODO(), connRetryInterval, connTimeout, true, func(ctx context.Context) (bool, error) { // Open the database and ping 
it opt := clickhouse.Options{ Addr: []string{addr}, diff --git a/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go b/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go index ce17f73d656..42a89d3372c 100644 --- a/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go +++ b/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go @@ -314,7 +314,7 @@ func TestUpdateCH(t *testing.T) { chExportProc.Start() defer chExportProc.Stop() - require.NoError(t, wait.Poll(commitInterval, time.Second, func() (bool, error) { + require.NoError(t, wait.PollUntilContextTimeout(context.Background(), commitInterval, time.Second, false, func(ctx context.Context) (bool, error) { err := mock1.ExpectationsWereMet() return (err == nil), nil }), "timeout while waiting for first flow record to be committed (before DB connection update)") @@ -332,7 +332,7 @@ func TestUpdateCH(t *testing.T) { chExportProc.deque.PushBack(recordRow) }() - require.NoError(t, wait.Poll(commitInterval, time.Second, func() (bool, error) { + require.NoError(t, wait.PollUntilContextTimeout(context.Background(), commitInterval, time.Second, false, func(ctx context.Context) (bool, error) { err := mock2.ExpectationsWereMet() return (err == nil), nil }), "timeout while waiting for second flow record to be committed (after DB connection update)") diff --git a/pkg/flowaggregator/exporter/clickhouse.go b/pkg/flowaggregator/exporter/clickhouse.go index 7276b20f3b4..239ad3e3757 100644 --- a/pkg/flowaggregator/exporter/clickhouse.go +++ b/pkg/flowaggregator/exporter/clickhouse.go @@ -15,6 +15,7 @@ package exporter import ( + "context" "fmt" "os" "path" @@ -62,7 +63,7 @@ func NewClickHouseExporter(k8sClient kubernetes.Interface, opt *options.Options) "compress", *chConfig.Compress, "commitInterval", chConfig.CommitInterval, "insecureSkipVerify", chConfig.InsecureSkipVerify, "caCert", chConfig.CACert) var errMessage error if chConfig.CACert { - err := wait.Poll(DefaultInterval, Timeout, func() (bool, 
error) { + err := wait.PollUntilContextTimeout(context.TODO(), DefaultInterval, Timeout, false, func(ctx context.Context) (bool, error) { caCertPath := path.Join(CertDir, CACertFile) certificate, err := os.ReadFile(caCertPath) if err != nil { diff --git a/pkg/flowaggregator/exporter/utils.go b/pkg/flowaggregator/exporter/utils.go index ced42593a41..7a1d0596323 100644 --- a/pkg/flowaggregator/exporter/utils.go +++ b/pkg/flowaggregator/exporter/utils.go @@ -15,6 +15,7 @@ package exporter import ( + "context" "fmt" "time" @@ -41,7 +42,7 @@ func getClusterUUID(k8sClient kubernetes.Interface) (uuid.UUID, error) { k8sClient, ) var clusterUUID uuid.UUID - if err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.TODO(), retryInterval, timeout, true, func(ctx context.Context) (bool, error) { clusterIdentity, _, err := clusterIdentityProvider.Get() if err != nil { return false, nil diff --git a/pkg/ipam/poolallocator/allocator_test.go b/pkg/ipam/poolallocator/allocator_test.go index 78faa87aab5..207e168a783 100644 --- a/pkg/ipam/poolallocator/allocator_test.go +++ b/pkg/ipam/poolallocator/allocator_test.go @@ -15,6 +15,7 @@ package poolallocator import ( + "context" "fmt" "net" "testing" @@ -60,7 +61,7 @@ func newTestIPPoolAllocator(pool *crdv1a2.IPPool, stopCh <-chan struct{}) *IPPoo var allocator *IPPoolAllocator var err error - wait.PollImmediate(100*time.Millisecond, 1*time.Second, func() (bool, error) { + wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 1*time.Second, true, func(ctx context.Context) (bool, error) { allocator, err = NewIPPoolAllocator(pool.Name, crdClient, pools.Lister()) if err != nil { return false, nil @@ -377,7 +378,7 @@ func TestHas(t *testing.T) { _, _, err := allocator.AllocateNext(crdv1a2.IPAddressPhaseAllocated, owner) require.NoError(t, err) - err = wait.PollImmediate(100*time.Millisecond, 1*time.Second, func() (bool, error) { + err = 
wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 1*time.Second, true, func(ctx context.Context) (bool, error) { has, _ := allocator.hasPod(testNamespace, "fakePod") return has, nil }) diff --git a/pkg/monitor/controller.go b/pkg/monitor/controller.go index e0436938b28..321f28d182f 100644 --- a/pkg/monitor/controller.go +++ b/pkg/monitor/controller.go @@ -390,7 +390,7 @@ func (monitor *controllerMonitor) antreaAgentInfoAPIAvailable(stopCh <-chan stru } found := false - if err := wait.PollImmediateUntil(time.Second*10, func() (done bool, err error) { + if err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), time.Second*10, true, func(ctx context.Context) (done bool, err error) { var checkErr error found, checkErr = checkFunc() if checkErr != nil { @@ -398,7 +398,7 @@ func (monitor *controllerMonitor) antreaAgentInfoAPIAvailable(stopCh <-chan stru return false, nil } return true, nil - }, stopCh); err != nil { + }); err != nil { klog.ErrorS(err, "Failed to get server resources for GroupVersion", "groupVersion", groupVersion) found = false } diff --git a/pkg/ovs/ovsctl/ovsctl_others.go b/pkg/ovs/ovsctl/ovsctl_others.go index c4a58ff2a39..26e55138fa5 100644 --- a/pkg/ovs/ovsctl/ovsctl_others.go +++ b/pkg/ovs/ovsctl/ovsctl_others.go @@ -53,7 +53,7 @@ func ovsVSwitchdUDS(ctx context.Context) (string, error) { var readErr error startTime := time.Now() hasFailure := false - err := wait.PollImmediateWithContext(ctx, 50*time.Millisecond, 5*time.Second, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(ctx, 50*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) { pid, readErr = readOVSVSwitchdPID() if readErr != nil { hasFailure = true diff --git a/pkg/util/channel/channel_test.go b/pkg/util/channel/channel_test.go index 12efc053e6c..2891e20030f 100644 --- a/pkg/util/channel/channel_test.go +++ b/pkg/util/channel/channel_test.go @@ -22,7 +22,6 @@ import ( 
"github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" ) type eventReceiver struct { @@ -71,17 +70,17 @@ func TestSubscribe(t *testing.T) { var errReceiver int var errReceivedEvents sets.Set[string] - assert.NoError(t, wait.PollImmediate(10*time.Millisecond, 100*time.Millisecond, func() (done bool, err error) { + assert.Eventually(t, func() bool { for i, r := range eventReceivers { receivedEvents := r.received() if !receivedEvents.Equal(desiredEvents) { errReceiver = i errReceivedEvents = receivedEvents - return false, nil + return false } } - return true, nil - }), "Receiver %d failed to receive all events, expected %d events, got %d events", errReceiver, len(desiredEvents), len(errReceivedEvents)) + return true + }, 100*time.Millisecond, 10*time.Millisecond, "Receiver %d failed to receive all events, expected %d events, got %d events", errReceiver, len(desiredEvents), len(errReceivedEvents)) } func TestNotify(t *testing.T) { diff --git a/test/e2e/antreaipam_test.go b/test/e2e/antreaipam_test.go index da84884ed0f..db5fc8f2a9e 100644 --- a/test/e2e/antreaipam_test.go +++ b/test/e2e/antreaipam_test.go @@ -477,7 +477,7 @@ func checkStatefulSetIPPoolAllocation(tb testing.TB, data *TestData, name string expectedIPAddressJson, _ := json.Marshal(expectedIPAddressMap) tb.Logf("expectedIPAddressMap: %s", expectedIPAddressJson) - err = wait.Poll(time.Second*3, time.Second*15, func() (bool, error) { + err = wait.PollUntilContextTimeout(context.Background(), time.Second*3, time.Second*15, false, func(ctx context.Context) (bool, error) { ipPool, err := data.crdClient.CrdV1alpha2().IPPools().Get(context.TODO(), ipPoolName, metav1.GetOptions{}) if err != nil { tb.Fatalf("Failed to get IPPool %s, err: %+v", ipPoolName, err) @@ -574,7 +574,7 @@ func deleteIPPoolWrapper(tb testing.TB, data *TestData, name string) { func checkIPPoolsEmpty(tb testing.TB, data *TestData, names []string) { count := 0 - err := 
wait.PollImmediate(3*time.Second, defaultTimeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, defaultTimeout, true, func(ctx context.Context) (bool, error) { for _, name := range names { ipPool, _ := data.crdClient.CrdV1alpha2().IPPools().Get(context.TODO(), name, metav1.GetOptions{}) if len(ipPool.Status.IPAddresses) > 0 { diff --git a/test/e2e/antreapolicy_test.go b/test/e2e/antreapolicy_test.go index cef8e0b4639..e4f0ce66ef6 100644 --- a/test/e2e/antreapolicy_test.go +++ b/test/e2e/antreapolicy_test.go @@ -3917,7 +3917,7 @@ func testACNPIGMPQuery(t *testing.T, data *TestData, acnpName, caseName, groupAd } // check if IGMP can be sent to Pod - if err := wait.Poll(3*time.Second, defaultTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, defaultTimeout, false, func(ctx context.Context) (bool, error) { captured, err := checkPacketCaptureResult(t, data, tcpdumpName, cmd) if captured { return true, nil @@ -4000,7 +4000,7 @@ func testACNPMulticastEgress(t *testing.T, data *TestData, acnpName, caseName, g t.Fatalf("failed to call generateConnCheckCmd: %v", err) } - if err := wait.Poll(3*time.Second, defaultTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, defaultTimeout, false, func(ctx context.Context) (bool, error) { captured, err := checkPacketCaptureResult(t, data, tcpdumpName, cmd) if captured { return true, nil @@ -4050,7 +4050,7 @@ func checkAuditLoggingResult(t *testing.T, data *TestData, nodeName, logLocator } cmd := []string{"cat", logDir + logfileName} - if err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 10*time.Second, false, func(ctx context.Context) (bool, error) { stdout, stderr, err := data.RunCommandFromPod(antreaNamespace, antreaPodName, "antrea-agent", cmd) if err != nil || stderr != 
"" { // file may not exist yet @@ -4652,7 +4652,7 @@ func TestAntreaPolicyStatusWithAppliedToUnsupportedGroup(t *testing.T) { } func checkANNPStatus(t *testing.T, data *TestData, annp *crdv1beta1.NetworkPolicy, expectedStatus crdv1beta1.NetworkPolicyStatus) *crdv1beta1.NetworkPolicy { - err := wait.Poll(100*time.Millisecond, policyRealizedTimeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, policyRealizedTimeout, false, func(ctx context.Context) (bool, error) { var err error annp, err = data.crdClient.CrdV1beta1().NetworkPolicies(annp.Namespace).Get(context.TODO(), annp.Name, metav1.GetOptions{}) if err != nil { @@ -4665,7 +4665,7 @@ func checkANNPStatus(t *testing.T, data *TestData, annp *crdv1beta1.NetworkPolic } func checkACNPStatus(t *testing.T, data *TestData, acnp *crdv1beta1.ClusterNetworkPolicy, expectedStatus crdv1beta1.NetworkPolicyStatus) *crdv1beta1.ClusterNetworkPolicy { - err := wait.Poll(100*time.Millisecond, policyRealizedTimeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, policyRealizedTimeout, false, func(ctx context.Context) (bool, error) { var err error acnp, err = data.crdClient.CrdV1beta1().ClusterNetworkPolicies().Get(context.TODO(), acnp.Name, metav1.GetOptions{}) if err != nil { @@ -4682,7 +4682,7 @@ func checkACNPStatus(t *testing.T, data *TestData, acnp *crdv1beta1.ClusterNetwo // resource's Generation and the Phase is set to Realized. 
func (data *TestData) waitForANNPRealized(t *testing.T, namespace string, name string, timeout time.Duration) error { t.Logf("Waiting for ANNP '%s/%s' to be realized", namespace, name) - if err := wait.Poll(100*time.Millisecond, timeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, timeout, false, func(ctx context.Context) (bool, error) { annp, err := data.crdClient.CrdV1beta1().NetworkPolicies(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err @@ -4699,7 +4699,7 @@ func (data *TestData) waitForANNPRealized(t *testing.T, namespace string, name s // resource's Generation and the Phase is set to Realized. func (data *TestData) waitForACNPRealized(t *testing.T, name string, timeout time.Duration) error { t.Logf("Waiting for ACNP '%s' to be realized", name) - if err := wait.Poll(100*time.Millisecond, timeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, timeout, false, func(ctx context.Context) (bool, error) { acnp, err := data.crdClient.CrdV1beta1().ClusterNetworkPolicies().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err @@ -4820,7 +4820,7 @@ func testANNPNetworkPolicyStatsWithDropAction(t *testing.T, data *TestData) { totalSessionsPerRule += sessionsPerAddressFamily } - if err := wait.Poll(5*time.Second, defaultTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, defaultTimeout, false, func(ctx context.Context) (bool, error) { stats, err := data.crdClient.StatsV1alpha1().AntreaNetworkPolicyStats(data.testNamespace).Get(context.TODO(), "np1", metav1.GetOptions{}) if err != nil { return false, err @@ -4955,7 +4955,7 @@ func testAntreaClusterNetworkPolicyStats(t *testing.T, data *TestData) { totalSessionsPerRule += sessionsPerAddressFamily } - if err := wait.Poll(5*time.Second, defaultTimeout, func() (bool, error) { + 
if err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, defaultTimeout, false, func(ctx context.Context) (bool, error) { stats, err := data.crdClient.StatsV1alpha1().AntreaClusterNetworkPolicyStats().Get(context.TODO(), "cnp1", metav1.GetOptions{}) if err != nil { return false, err diff --git a/test/e2e/basic_test.go b/test/e2e/basic_test.go index 058d4c54567..0f012678a77 100644 --- a/test/e2e/basic_test.go +++ b/test/e2e/basic_test.go @@ -15,6 +15,7 @@ package e2e import ( + "context" "encoding/json" "fmt" "net" @@ -379,7 +380,7 @@ func testReconcileGatewayRoutesOnStartup(t *testing.T, data *TestData, isIPv6 bo t.Logf("Retrieving gateway routes on Node '%s'", nodeName) var routes []Route - if err := wait.PollImmediate(defaultInterval, defaultTimeout, func() (found bool, err error) { + if err := wait.PollUntilContextTimeout(context.Background(), defaultInterval, defaultTimeout, true, func(ctx context.Context) (found bool, err error) { var llRoute *Route routes, _, llRoute, err = getGatewayRoutes(t, data, antreaGWName, nodeName, isIPv6) if err != nil { @@ -396,7 +397,7 @@ func testReconcileGatewayRoutesOnStartup(t *testing.T, data *TestData, isIPv6 bo return false, fmt.Errorf("IPv6 link-local route not found") } return true, nil - }); err == wait.ErrWaitTimeout { + }); wait.Interrupted(err) { t.Fatalf("Not enough gateway routes after %v", defaultTimeout) } else if err != nil { t.Fatalf("Error while waiting for gateway routes: %v", err) @@ -477,7 +478,7 @@ func testReconcileGatewayRoutesOnStartup(t *testing.T, data *TestData, isIPv6 bo // We expect the agent to delete the extra route we added and add back the route we deleted t.Logf("Waiting for gateway routes to converge") - if err := wait.Poll(defaultInterval, defaultTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), defaultInterval, defaultTimeout, false, func(ctx context.Context) (bool, error) { var llRoute *Route newRoutes, _, llRoute, err := 
getGatewayRoutes(t, data, antreaGWName, nodeName, isIPv6) if err != nil { @@ -508,7 +509,7 @@ func testReconcileGatewayRoutesOnStartup(t *testing.T, data *TestData, isIPv6 bo } // We haven't found the deleted route, keep trying return false, nil - }); err == wait.ErrWaitTimeout { + }); wait.Interrupted(err) { t.Errorf("Gateway routes did not converge after %v", defaultTimeout) } else if err != nil { t.Fatalf("Error while waiting for gateway routes to converge: %v", err) @@ -566,7 +567,7 @@ func testCleanStaleClusterIPRoutes(t *testing.T, data *TestData, isIPv6 bool) { t.Fatalf("Failed to detect gateway interface name from ConfigMap: %v", err) } var routes []Route - if err := wait.PollImmediate(defaultInterval, defaultTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), defaultInterval, defaultTimeout, true, func(ctx context.Context) (bool, error) { _, routes, _, err = getGatewayRoutes(t, data, antreaGWName, nodeName, isIPv6) if err != nil { t.Logf("Failed to get Service gateway routes: %v", err) @@ -738,7 +739,7 @@ func testDeletePreviousRoundFlowsOnStartup(t *testing.T, data *TestData) { waitForNextRoundNum := func(roundNum uint64) uint64 { var nextRoundNum uint64 - if err := wait.Poll(defaultInterval, defaultTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), defaultInterval, defaultTimeout, false, func(ctx context.Context) (bool, error) { nextRoundNum = roundNumber(podName) if nextRoundNum != roundNum { return true, nil @@ -813,7 +814,7 @@ func testDeletePreviousRoundFlowsOnStartup(t *testing.T, data *TestData) { // In theory there should be no need to poll here because the agent only persists the new // round number after stale flows have been deleted, but it is probably better not to make // this assumption in an e2e test. 
- if err := wait.PollImmediate(defaultInterval, smallTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), defaultInterval, smallTimeout, true, func(ctx context.Context) (bool, error) { return !checkFlow(), nil }); err != nil { @@ -879,7 +880,7 @@ func testClusterIdentity(t *testing.T, data *TestData) { const retryInterval = time.Second const timeout = 10 * time.Second var clusterUUID uuid.UUID - err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), retryInterval, timeout, true, func(ctx context.Context) (bool, error) { clusterIdentity, _, err := clusterIdentityProvider.Get() if err != nil { return false, nil diff --git a/test/e2e/batch_test.go b/test/e2e/batch_test.go index e076c300488..64d026e9e14 100644 --- a/test/e2e/batch_test.go +++ b/test/e2e/batch_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" ) // TestBatchCreatePods verifies there is no FD leak after batched Pod creation. @@ -67,10 +66,8 @@ func TestBatchCreatePods(t *testing.T) { // reasons (health probes, CNI invocations, ...). In that case, the new set of FDs can // contain additional entries compared to the old set of FDs. However, eventually, getFDs() // should return a subset of oldFDs. - // Most of the time, wait.PollImmediate will return immediately, after the first call to the - // condition function. 
- assert.NoError(t, wait.PollImmediate(100*time.Millisecond, 2*time.Second, func() (bool, error) { + assert.Eventually(t, func() bool { newFDs := getFDs() - return oldFDs.IsSuperset(newFDs), nil - }), "Batched Pod creation allocated new FDs") + return oldFDs.IsSuperset(newFDs) + }, 2*time.Second, 100*time.Millisecond, "Batched Pod creation allocated new FDs") } diff --git a/test/e2e/connectivity_test.go b/test/e2e/connectivity_test.go index 2c081b60803..87df12205af 100644 --- a/test/e2e/connectivity_test.go +++ b/test/e2e/connectivity_test.go @@ -236,7 +236,7 @@ func createPodsOnDifferentNodes(t *testing.T, data *TestData, namespace, tag str return fmt.Errorf("error deleting DaemonSet") } // Wait for all Pods managed by DaemonSet to be deleted to avoid affecting following tests. - err := wait.Poll(defaultInterval, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), defaultInterval, timeout, false, func(ctx context.Context) (bool, error) { pods, err := getDaemonSetPods() if err != nil { return false, fmt.Errorf("error getting Pods managed by DaemonSet") diff --git a/test/e2e/egress_test.go b/test/e2e/egress_test.go index 63ea8bfc16e..57ad33eeaca 100644 --- a/test/e2e/egress_test.go +++ b/test/e2e/egress_test.go @@ -172,13 +172,14 @@ func testEgressClientIP(t *testing.T, data *TestData) { assertClientIP(data, t, remotePod, toolboxContainerName, serverIPStr, egressNodeIP) var err error - err = wait.Poll(time.Millisecond*100, time.Second, func() (bool, error) { - egress, err = data.crdClient.CrdV1beta1().Egresses().Get(context.TODO(), egress.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - return egress.Status.EgressNode == egressNode, nil - }) + err = wait.PollUntilContextTimeout(context.Background(), time.Millisecond*100, time.Second, false, + func(ctx context.Context) (bool, error) { + egress, err = data.crdClient.CrdV1beta1().Egresses().Get(context.TODO(), egress.Name, metav1.GetOptions{}) + if err != 
nil { + return false, err + } + return egress.Status.EgressNode == egressNode, nil + }) assert.NoError(t, err, "Egress failed to reach expected status") t.Log("Checking the client IP of a Pod whose Egress has been created in advance") @@ -317,7 +318,7 @@ func testEgressClientIPFromVLANSubnet(t *testing.T, data *TestData) { egress := data.createEgress(t, "egress-vlan", nil, map[string]string{"antrea-e2e": clientPod1}, pool.Name, "", nil) defer data.crdClient.CrdV1beta1().Egresses().Delete(context.TODO(), egress.Name, metav1.DeleteOptions{}) - err := wait.PollImmediate(500*time.Millisecond, 3*time.Second, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), 500*time.Millisecond, 3*time.Second, true, func(ctx context.Context) (done bool, err error) { egress, err = data.crdClient.CrdV1beta1().Egresses().Get(context.TODO(), egress.Name, metav1.GetOptions{}) if err != nil { return false, err @@ -460,7 +461,7 @@ func testEgressCRUD(t *testing.T, data *TestData) { defer data.crdClient.CrdV1beta1().Egresses().Delete(context.TODO(), egress.Name, metav1.DeleteOptions{}) // Use Poll to wait the interval before the first run to detect the case that the IP is assigned to any Node // when it's not supposed to. 
- err := wait.Poll(500*time.Millisecond, 3*time.Second, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), 500*time.Millisecond, 3*time.Second, false, func(ctx context.Context) (done bool, err error) { egress, err = data.crdClient.CrdV1beta1().Egresses().Get(context.TODO(), egress.Name, metav1.GetOptions{}) if err != nil { return false, err @@ -499,20 +500,21 @@ func testEgressCRUD(t *testing.T, data *TestData) { checkEIPStatus := func(expectedUsed int) { var gotUsed, gotTotal int - err := wait.PollImmediate(200*time.Millisecond, 2*time.Second, func() (done bool, err error) { - pool, err := data.crdClient.CrdV1beta1().ExternalIPPools().Get(context.TODO(), pool.Name, metav1.GetOptions{}) - if err != nil { - return false, fmt.Errorf("failed to get ExternalIPPool: %v", err) - } - gotUsed, gotTotal = pool.Status.Usage.Used, pool.Status.Usage.Total - if expectedUsed != pool.Status.Usage.Used { - return false, nil - } - if tt.expectedTotal != pool.Status.Usage.Total { - return false, nil - } - return true, nil - }) + err := wait.PollUntilContextTimeout(context.Background(), 200*time.Millisecond, 2*time.Second, true, + func(ctx context.Context) (done bool, err error) { + pool, err := data.crdClient.CrdV1beta1().ExternalIPPools().Get(context.TODO(), pool.Name, metav1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("failed to get ExternalIPPool: %v", err) + } + gotUsed, gotTotal = pool.Status.Usage.Used, pool.Status.Usage.Total + if expectedUsed != pool.Status.Usage.Used { + return false, nil + } + if tt.expectedTotal != pool.Status.Usage.Total { + return false, nil + } + return true, nil + }) require.NoError(t, err, "ExternalIPPool status not match: expectedTotal=%d, got=%d, expectedUsed=%d, got=%d", tt.expectedTotal, gotTotal, expectedUsed, gotUsed) } checkEIPStatus(1) @@ -520,13 +522,14 @@ func testEgressCRUD(t *testing.T, data *TestData) { err = data.crdClient.CrdV1beta1().Egresses().Delete(context.TODO(), egress.Name, 
metav1.DeleteOptions{}) require.NoError(t, err, "Failed to delete Egress") if egress.Status.EgressNode != "" { - err := wait.PollImmediate(200*time.Millisecond, timeout, func() (done bool, err error) { - exists, err := hasIP(data, egress.Status.EgressNode, egress.Spec.EgressIP) - if err != nil { - return false, fmt.Errorf("check ip error: %v", err) - } - return !exists, nil - }) + err := wait.PollUntilContextTimeout(context.Background(), 200*time.Millisecond, timeout, true, + func(ctx context.Context) (done bool, err error) { + exists, err := hasIP(data, egress.Status.EgressNode, egress.Spec.EgressIP) + if err != nil { + return false, fmt.Errorf("check ip error: %v", err) + } + return !exists, nil + }) require.NoError(t, err, "Found stale IP (%s) exists on Node (%s)", egress.Spec.EgressIP, egress.Status.EgressNode) } checkEIPStatus(0) @@ -604,13 +607,14 @@ func testEgressUpdateEgressIP(t *testing.T, data *TestData) { _, err = data.checkEgressState(egress.Name, tt.newEgressIP, tt.newNode, "", time.Second) require.NoError(t, err) - err = wait.PollImmediate(200*time.Millisecond, timeout, func() (done bool, err error) { - exists, err := hasIP(data, tt.originalNode, tt.originalEgressIP) - if err != nil { - return false, fmt.Errorf("check ip error: %v", err) - } - return !exists, nil - }) + err = wait.PollUntilContextTimeout(context.Background(), 200*time.Millisecond, timeout, true, + func(ctx context.Context) (done bool, err error) { + exists, err := hasIP(data, tt.originalNode, tt.originalEgressIP) + if err != nil { + return false, fmt.Errorf("check ip error: %v", err) + } + return !exists, nil + }) require.NoError(t, err, "Found stale IP (%s) exists on Node (%s)", tt.originalEgressIP, tt.originalNode) }) } @@ -835,7 +839,7 @@ func testEgressUpdateBandwidth(t *testing.T, data *TestData) { func (data *TestData) checkEgressState(egressName, expectedIP, expectedNode, otherNode string, timeout time.Duration) (*v1beta1.Egress, error) { var egress *v1beta1.Egress var 
expectedNodeHasIP, otherNodeHasIP bool - pollErr := wait.PollImmediate(200*time.Millisecond, timeout, func() (bool, error) { + pollErr := wait.PollUntilContextTimeout(context.Background(), 200*time.Millisecond, timeout, true, func(ctx context.Context) (bool, error) { var err error egress, err = data.crdClient.CrdV1beta1().Egresses().Get(context.TODO(), egressName, metav1.GetOptions{}) if err != nil { @@ -980,16 +984,17 @@ func (data *TestData) createEgress(t *testing.T, generateName string, matchExpre } func (data *TestData) waitForEgressRealized(egress *v1beta1.Egress) (*v1beta1.Egress, error) { - err := wait.PollImmediate(200*time.Millisecond, waitEgressRealizedTimeout, func() (done bool, err error) { - egress, err = data.crdClient.CrdV1beta1().Egresses().Get(context.TODO(), egress.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - if egress.Spec.EgressIP == "" || egress.Status.EgressNode == "" { - return false, nil - } - return true, nil - }) + err := wait.PollUntilContextTimeout(context.Background(), 200*time.Millisecond, waitEgressRealizedTimeout, true, + func(ctx context.Context) (done bool, err error) { + egress, err = data.crdClient.CrdV1beta1().Egresses().Get(context.TODO(), egress.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + if egress.Spec.EgressIP == "" || egress.Status.EgressNode == "" { + return false, nil + } + return true, nil + }) if err != nil { return nil, fmt.Errorf("wait for Egress %#v realized failed: %v", egress, err) } @@ -1000,25 +1005,26 @@ func (data *TestData) waitForEgressRealized(egress *v1beta1.Egress) (*v1beta1.Eg func assertClientIP(data *TestData, t *testing.T, pod, container, server string, clientIPs ...string) { var exeErr error var stdout, stderr string - err := wait.Poll(100*time.Millisecond, 5*time.Second, func() (done bool, err error) { - url := fmt.Sprintf("%s:8080/clientip", server) - stdout, stderr, exeErr = data.runWgetCommandFromTestPodWithRetry(pod, data.testNamespace, 
container, url, 5) - if exeErr != nil { - return false, nil - } + err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, false, + func(ctx context.Context) (done bool, err error) { + url := fmt.Sprintf("%s:8080/clientip", server) + stdout, stderr, exeErr = data.runWgetCommandFromTestPodWithRetry(pod, data.testNamespace, container, url, 5) + if exeErr != nil { + return false, nil + } - // The stdout return is in this format: x.x.x.x:port or [xx:xx:xx::x]:port - host, _, err := net.SplitHostPort(stdout) - if err != nil { - return false, nil - } - for _, cip := range clientIPs { - if cip == host { - return true, nil + // The stdout return is in this format: x.x.x.x:port or [xx:xx:xx::x]:port + host, _, err := net.SplitHostPort(stdout) + if err != nil { + return false, nil } - } - return false, nil - }) + for _, cip := range clientIPs { + if cip == host { + return true, nil + } + } + return false, nil + }) require.NoError(t, err, "Failed to get expected client IPs %s for Pod %s, stdout: %s, stderr: %s, err: %v", clientIPs, pod, stdout, stderr, exeErr) } @@ -1026,7 +1032,7 @@ func assertClientIP(data *TestData, t *testing.T, pod, container, server string, func assertConnError(data *TestData, t *testing.T, pod, container, server string) { var exeErr error var stdout, stderr string - err := wait.Poll(100*time.Millisecond, 2*time.Second, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 2*time.Second, false, func(ctx context.Context) (done bool, err error) { url := fmt.Sprintf("%s:8080/clientip", server) stdout, stderr, exeErr = data.runWgetCommandFromTestPodWithRetry(pod, data.testNamespace, url, container, 5) if exeErr != nil { diff --git a/test/e2e/flowaggregator_test.go b/test/e2e/flowaggregator_test.go index dc9ebe048e5..db0b161dd01 100644 --- a/test/e2e/flowaggregator_test.go +++ b/test/e2e/flowaggregator_test.go @@ -1437,7 +1437,7 @@ func getCollectorOutput(t 
*testing.T, srcIP, dstIP, srcPort string, isDstService var recordSlices []string // In the ToExternalFlows test, flow record will arrive 5.5s (exporterActiveFlowExportTimeout+aggregatorActiveFlowRecordTimeout) after executing wget command // We set the timeout to 9s (5.5s plus one more aggregatorActiveFlowRecordTimeout) to make the ToExternalFlows test more stable - err := wait.PollImmediate(500*time.Millisecond, exporterActiveFlowExportTimeout+aggregatorActiveFlowRecordTimeout*2, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), 500*time.Millisecond, exporterActiveFlowExportTimeout+aggregatorActiveFlowRecordTimeout*2, true, func(ctx context.Context) (bool, error) { var rc int var err error var cmd string @@ -1499,7 +1499,7 @@ func getClickHouseOutput(t *testing.T, data *TestData, srcIP, dstIP, srcPort str } // ClickHouse output expected to be checked after IPFIX collector. // Waiting additional 4x commit interval to be adequate for 3 commit attempts. - err := wait.PollImmediate(500*time.Millisecond, aggregatorClickHouseCommitInterval*4, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), 500*time.Millisecond, aggregatorClickHouseCommitInterval*4, true, func(ctx context.Context) (bool, error) { queryOutput, _, err := data.RunCommandFromPod(flowVisibilityNamespace, clickHousePodName, "clickhouse", cmd) if err != nil { return false, err @@ -1791,7 +1791,7 @@ func addLabelToTestPods(t *testing.T, data *TestData, label string, podNames []s testPod.Labels["targetLabel"] = label _, err = data.clientset.CoreV1().Pods(data.testNamespace).Update(context.TODO(), testPod, metav1.UpdateOptions{}) require.NoErrorf(t, err, "Error when adding label to %s", testPod.Name) - err = wait.Poll(defaultInterval, timeout, func() (bool, error) { + err = wait.PollUntilContextTimeout(context.Background(), defaultInterval, timeout, false, func(ctx context.Context) (bool, error) { pod, err := 
data.clientset.CoreV1().Pods(data.testNamespace).Get(context.TODO(), testPod.Name, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { @@ -1863,7 +1863,7 @@ func getAndCheckFlowAggregatorMetrics(t *testing.T, data *TestData) error { } podName := flowAggPod.Name command := []string{"antctl", "get", "recordmetrics", "-o", "json"} - if err := wait.Poll(defaultInterval, 2*defaultTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), defaultInterval, 2*defaultTimeout, false, func(ctx context.Context) (bool, error) { stdout, _, err := runAntctl(podName, command, data) if err != nil { t.Logf("Error when requesting recordmetrics, %v", err) diff --git a/test/e2e/framework.go b/test/e2e/framework.go index 4ec22253ca3..c873c4be2b1 100644 --- a/test/e2e/framework.go +++ b/test/e2e/framework.go @@ -53,7 +53,7 @@ import ( aggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" "k8s.io/kubectl/pkg/util/podutils" utilnet "k8s.io/utils/net" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "antrea.io/antrea/pkg/agent/config" crdclientset "antrea.io/antrea/pkg/client/clientset/versioned" @@ -801,7 +801,7 @@ func (data *TestData) DeleteNamespace(namespace string, timeout time.Duration) e return fmt.Errorf("error when deleting '%s' Namespace: %v", namespace, err) } if timeout >= 0 { - return wait.Poll(defaultInterval, timeout, func() (bool, error) { + return wait.PollUntilContextTimeout(context.TODO(), defaultInterval, timeout, false, func(ctx context.Context) (bool, error) { if ns, err := data.clientset.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); err != nil { if errors.IsNotFound(err) { // Success @@ -865,7 +865,7 @@ func (data *TestData) deployFlowVisibilityClickHouse(o flowVisibilityTestOptions if err != nil || rc != 0 { return "", fmt.Errorf("error when deploying the ClickHouse Operator YML; %s not available on the control-plane Node", chOperatorYML) } - if err := 
wait.Poll(2*time.Second, 10*time.Second, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.TODO(), 2*time.Second, 10*time.Second, false, func(ctx context.Context) (bool, error) { rc, stdout, stderr, err := data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl apply -f %s", visibilityYML)) if err != nil || rc != 0 { // ClickHouseInstallation CRD from ClickHouse Operator install bundle applied soon before @@ -888,7 +888,7 @@ func (data *TestData) deployFlowVisibilityClickHouse(o flowVisibilityTestOptions // check clickhouse service http port for service connectivity var chSvc *corev1.Service - if err := wait.PollImmediate(defaultInterval, defaultTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.TODO(), defaultInterval, defaultTimeout, true, func(ctx context.Context) (bool, error) { chSvc, err = data.GetService(flowVisibilityNamespace, "clickhouse-clickhouse") if err != nil { return false, nil @@ -899,7 +899,7 @@ func (data *TestData) deployFlowVisibilityClickHouse(o flowVisibilityTestOptions return "", fmt.Errorf("timeout waiting for ClickHouse Service: %v", err) } - if err := wait.PollImmediate(defaultInterval, defaultTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.TODO(), defaultInterval, defaultTimeout, true, func(ctx context.Context) (bool, error) { rc, stdout, stderr, err := testData.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("curl -Ss %s:%s", chSvc.Spec.ClusterIP, clickHouseHTTPPort)) if rc != 0 || err != nil { @@ -1075,7 +1075,7 @@ func (data *TestData) getAgentContainersRestartCount() (int, error) { // waitForAntreaDaemonSetPods waits for the K8s apiserver to report that all the Antrea Pods are // available, i.e. all the Nodes have one or more of the Antrea daemon Pod running and available. 
func (data *TestData) waitForAntreaDaemonSetPods(timeout time.Duration) error { - err := wait.Poll(defaultInterval, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.TODO(), defaultInterval, timeout, false, func(ctx context.Context) (bool, error) { getDS := func(dsName string, os string) (*appsv1.DaemonSet, error) { ds, err := data.clientset.AppsV1().DaemonSets(antreaNamespace).Get(context.TODO(), dsName, metav1.GetOptions{}) if err != nil { @@ -1126,7 +1126,7 @@ func (data *TestData) waitForAntreaDaemonSetPods(timeout time.Duration) error { } return true, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { _, stdout, _, _ := data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s describe pod", antreaNamespace)) return fmt.Errorf("antrea-agent DaemonSet not ready within %v; kubectl describe pod output: %v", defaultTimeout, stdout) } else if err != nil { @@ -1138,7 +1138,7 @@ func (data *TestData) waitForAntreaDaemonSetPods(timeout time.Duration) error { // waitForCoreDNSPods waits for the K8s apiserver to report that all the CoreDNS Pods are available. 
func (data *TestData) waitForCoreDNSPods(timeout time.Duration) error { - err := wait.PollImmediate(defaultInterval, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.TODO(), defaultInterval, timeout, true, func(ctx context.Context) (bool, error) { deployment, err := data.clientset.AppsV1().Deployments("kube-system").Get(context.TODO(), "coredns", metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("error when retrieving CoreDNS deployment: %v", err) @@ -1149,7 +1149,7 @@ func (data *TestData) waitForCoreDNSPods(timeout time.Duration) error { // Keep trying return false, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { return fmt.Errorf("some CoreDNS replicas are still unavailable after %v", defaultTimeout) } else if err != nil { return err @@ -1240,7 +1240,7 @@ func (data *TestData) deleteAntrea(timeout time.Duration) error { } return fmt.Errorf("error when trying to delete Antrea DaemonSet: %v", err) } - err := wait.Poll(defaultInterval, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.TODO(), defaultInterval, timeout, false, func(ctx context.Context) (bool, error) { if _, err := data.clientset.AppsV1().DaemonSets(antreaNamespace).Get(context.TODO(), ds, metav1.GetOptions{}); err != nil { if errors.IsNotFound(err) { // Antrea DaemonSet does not exist any more, success @@ -1448,7 +1448,7 @@ func (b *PodBuilder) Create(data *TestData) error { HostNetwork: b.HostNetwork, ServiceAccountName: b.ServiceAccountName, // Set it to 1s for immediate shutdown to reduce test run time and to avoid affecting subsequent tests. 
- TerminationGracePeriodSeconds: pointer.Int64(1), + TerminationGracePeriodSeconds: ptr.To(int64(1)), } if b.NodeName != "" { podSpec.NodeSelector = map[string]string{ @@ -1579,7 +1579,7 @@ func (data *TestData) DeletePodAndWait(timeout time.Duration, name string, ns st if err := data.DeletePod(ns, name); err != nil { return err } - err := wait.Poll(defaultInterval, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.TODO(), defaultInterval, timeout, false, func(ctx context.Context) (bool, error) { if _, err := data.clientset.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { if errors.IsNotFound(err) { return true, nil @@ -1589,7 +1589,7 @@ func (data *TestData) DeletePodAndWait(timeout time.Duration, name string, ns st // Keep trying return false, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { return fmt.Errorf("Pod '%s' still visible to client after %v", name, timeout) } return err @@ -1601,7 +1601,7 @@ type PodCondition func(*corev1.Pod) (bool, error) // the condition predicate is met (or until the provided timeout expires). 
func (data *TestData) PodWaitFor(timeout time.Duration, name, namespace string, condition PodCondition) (*corev1.Pod, error) { var pod *corev1.Pod - err := wait.Poll(defaultInterval, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.TODO(), defaultInterval, timeout, false, func(ctx context.Context) (bool, error) { var err error pod, err = data.clientset.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { @@ -1613,7 +1613,7 @@ func (data *TestData) PodWaitFor(timeout time.Duration, name, namespace string, return condition(pod) }) if err != nil { - if err == wait.ErrWaitTimeout && pod != nil { + if wait.Interrupted(err) && pod != nil { return nil, fmt.Errorf("timed out waiting for the condition, Pod.Status: %s", pod.Status.String()) } return nil, err @@ -1743,7 +1743,7 @@ func (data *TestData) deleteAntreaAgentOnNode(nodeName string, gracePeriodSecond return 0, fmt.Errorf("error when deleting antrea-agent Pods on Node '%s': %v", nodeName, err) } - if err := wait.Poll(defaultInterval, timeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.TODO(), defaultInterval, timeout, false, func(ctx context.Context) (bool, error) { for _, pod := range pods.Items { if _, err := data.clientset.CoreV1().Pods(antreaNamespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}); err != nil { if errors.IsNotFound(err) { @@ -1762,7 +1762,7 @@ func (data *TestData) deleteAntreaAgentOnNode(nodeName string, gracePeriodSecond delay := time.Since(start) // wait for new antrea-agent Pod - if err := wait.Poll(defaultInterval, timeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.TODO(), defaultInterval, timeout, false, func(ctx context.Context) (bool, error) { pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions) if err != nil { return false, fmt.Errorf("failed to list antrea-agent Pods on Node '%s': %v", nodeName, err) @@ -1857,7 +1857,7 @@ 
func (data *TestData) restartAntreaControllerPod(timeout time.Duration) (*corev1 var newPod *corev1.Pod // wait for new antrea-controller Pod - if err := wait.Poll(defaultInterval, timeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.TODO(), defaultInterval, timeout, false, func(ctx context.Context) (bool, error) { pods, err := data.clientset.CoreV1().Pods(antreaNamespace).List(context.TODO(), listOptions) if err != nil { return false, fmt.Errorf("failed to list antrea-controller Pods: %v", err) @@ -2075,7 +2075,7 @@ func (data *TestData) deleteServiceAndWait(timeout time.Duration, name, namespac if err := data.deleteService(namespace, name); err != nil { return err } - err := wait.Poll(defaultInterval, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.TODO(), defaultInterval, timeout, false, func(ctx context.Context) (bool, error) { if _, err := data.clientset.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { if errors.IsNotFound(err) { return true, nil @@ -2085,7 +2085,7 @@ func (data *TestData) deleteServiceAndWait(timeout time.Duration, name, namespac // Keep trying return false, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { return fmt.Errorf("Service '%s' still visible to client after %v", name, timeout) } return err @@ -2660,7 +2660,7 @@ func (data *TestData) killProcessAndCollectCovFiles(namespace, podName, containe } log.Infof("Copying coverage files from Pod '%s'", podName) - if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Second, 5*time.Second, true, func(ctx context.Context) (bool, error) { if err = data.copyPodFiles(podName, containerName, namespace, covFile, covDir); err != nil { log.Infof("Coverage file not available yet for copy: %v", err) return false, nil @@ -2863,7 +2863,7 @@ func (data *TestData) createDaemonSet(name string, ns 
string, ctrName string, im podSpec := corev1.PodSpec{ Tolerations: controlPlaneNoScheduleTolerations(), // Set it to 1s for immediate shutdown to reduce test run time and to avoid affecting subsequent tests. - TerminationGracePeriodSeconds: pointer.Int64(1), + TerminationGracePeriodSeconds: ptr.To(int64(1)), Containers: []corev1.Container{ { Name: ctrName, @@ -2917,7 +2917,7 @@ func (data *TestData) createDaemonSet(name string, ns string, ctrName string, im } func (data *TestData) waitForDaemonSetPods(timeout time.Duration, dsName string, namespace string) error { - err := wait.Poll(defaultInterval, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.TODO(), defaultInterval, timeout, false, func(ctx context.Context) (bool, error) { ds, err := data.clientset.AppsV1().DaemonSets(namespace).Get(context.TODO(), dsName, metav1.GetOptions{}) if err != nil { return false, err @@ -2946,7 +2946,7 @@ func (data *TestData) createStatefulSet(name string, ns string, size int32, ctrN }, }, // Set it to 1s for immediate shutdown to reduce test run time and to avoid affecting subsequent tests. 
- TerminationGracePeriodSeconds: pointer.Int64(1), + TerminationGracePeriodSeconds: ptr.To(int64(1)), } stsSpec := appsv1.StatefulSetSpec{ Selector: &metav1.LabelSelector{ @@ -3019,7 +3019,7 @@ func (data *TestData) restartStatefulSet(name string, ns string) (*appsv1.Statef } func (data *TestData) waitForStatefulSetPods(timeout time.Duration, stsName string, namespace string) error { - err := wait.Poll(defaultInterval, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.TODO(), defaultInterval, timeout, false, func(ctx context.Context) (bool, error) { sts, err := data.clientset.AppsV1().StatefulSets(namespace).Get(context.TODO(), stsName, metav1.GetOptions{}) if err != nil { return false, err @@ -3046,7 +3046,7 @@ func retryOnConnectionLostError(backoff wait.Backoff, fn func() error) error { } func (data *TestData) checkAntreaAgentInfo(interval time.Duration, timeout time.Duration, name string) error { - err := wait.PollImmediate(interval, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.TODO(), interval, timeout, true, func(ctx context.Context) (bool, error) { aai, err := data.crdClient.CrdV1beta1().AntreaAgentInfos().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { diff --git a/test/e2e/ipsec_test.go b/test/e2e/ipsec_test.go index 417a6e382a5..b972f18ecdf 100644 --- a/test/e2e/ipsec_test.go +++ b/test/e2e/ipsec_test.go @@ -15,6 +15,7 @@ package e2e import ( + "context" "fmt" "regexp" "strconv" @@ -180,9 +181,9 @@ func testIPSecDeleteStaleTunnelPorts(t *testing.T, data *TestData) { } t.Logf("Checking that tunnel port has been created") - if err := wait.PollImmediate(defaultInterval, defaultTimeout, func() (found bool, err error) { + if err := wait.PollUntilContextTimeout(context.Background(), defaultInterval, defaultTimeout, true, func(ctx context.Context) (found bool, err error) { return doesOVSPortExist(), nil - }); err == wait.ErrWaitTimeout { + }); 
wait.Interrupted(err) { t.Fatalf("Timed out while waiting for OVS tunnel port to be created") } else if err != nil { t.Fatalf("Error while waiting for OVS tunnel port to be created") @@ -192,9 +193,9 @@ func testIPSecDeleteStaleTunnelPorts(t *testing.T, data *TestData) { data.redeployAntrea(t, deployAntreaDefault) t.Logf("Checking that tunnel port has been deleted") - if err := wait.PollImmediate(defaultInterval, defaultTimeout, func() (found bool, err error) { + if err := wait.PollUntilContextTimeout(context.Background(), defaultInterval, defaultTimeout, true, func(ctx context.Context) (found bool, err error) { return !doesOVSPortExist(), nil - }); err == wait.ErrWaitTimeout { + }); wait.Interrupted(err) { t.Fatalf("Timed out while waiting for OVS tunnel port to be deleted") } else if err != nil { t.Fatalf("Error while waiting for OVS tunnel port to be deleted") diff --git a/test/e2e/k8s_util.go b/test/e2e/k8s_util.go index ef0720afc6d..c4c36d12415 100644 --- a/test/e2e/k8s_util.go +++ b/test/e2e/k8s_util.go @@ -938,7 +938,7 @@ func (data *TestData) CleanANNPs(namespaces []string) error { func (data *TestData) WaitForANNPCreationAndRealization(t *testing.T, namespace string, name string, timeout time.Duration) error { t.Logf("Waiting for ANNP '%s/%s' to be realized", namespace, name) - if err := wait.Poll(100*time.Millisecond, timeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, timeout, false, func(ctx context.Context) (bool, error) { annp, err := data.crdClient.CrdV1beta1().NetworkPolicies(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, nil @@ -952,7 +952,7 @@ func (data *TestData) WaitForANNPCreationAndRealization(t *testing.T, namespace func (data *TestData) WaitForACNPCreationAndRealization(t *testing.T, name string, timeout time.Duration) error { t.Logf("Waiting for ACNP '%s' to be created and realized", name) - if err := wait.Poll(100*time.Millisecond, timeout, 
func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, timeout, false, func(ctx context.Context) (bool, error) { acnp, err := data.crdClient.CrdV1beta1().ClusterNetworkPolicies().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, nil diff --git a/test/e2e/l7networkpolicy_test.go b/test/e2e/l7networkpolicy_test.go index 378834c80f5..dbc8f349de7 100644 --- a/test/e2e/l7networkpolicy_test.go +++ b/test/e2e/l7networkpolicy_test.go @@ -25,7 +25,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" crdv1beta1 "antrea.io/antrea/pkg/apis/crd/v1beta1" agentconfig "antrea.io/antrea/pkg/config/agent" @@ -129,59 +128,42 @@ func probeL7NetworkPolicyHTTP(t *testing.T, data *TestData, serverPodName, clien baseURL := net.JoinHostPort(ip.String(), "8080") // Verify that access to path /clientip is as expected. - assert.NoError(t, wait.PollImmediate(time.Second, 5*time.Second, func() (bool, error) { + assert.Eventually(t, func() bool { _, err := probeClientIPFromPod(data, clientPodName, agnhostContainerName, baseURL) - if (allowHTTPPathClientIP && err != nil) || (!allowHTTPPathClientIP && err == nil) { - return false, nil - } - return true, nil - })) + return (allowHTTPPathClientIP && err == nil) || (!allowHTTPPathClientIP && err != nil) + }, 5*time.Second, time.Second) // Verify that access to path /hostname is as expected. 
- assert.NoError(t, wait.PollImmediate(time.Second, 5*time.Second, func() (bool, error) { + assert.Eventually(t, func() bool { hostname, err := probeHostnameFromPod(data, clientPodName, agnhostContainerName, baseURL) - if (allowHTTPPathHostname && err != nil) || (!allowHTTPPathHostname && err == nil) { - return false, nil - } - if allowHTTPPathHostname && serverPodName != hostname { - return false, nil - } - return true, nil - })) + return (allowHTTPPathHostname && err == nil && serverPodName == hostname) || (!allowHTTPPathHostname && err != nil) + }, 5*time.Second, time.Second) // For IPv4, non-HTTP connections should be rejected by Suricata. For IPv6, there is an issue that reject // packet cannot be generated by Suricata and sent back to client. if ip.To4() != nil { cmd := []string{"bash", "-c", fmt.Sprintf("dig @%s google.com a +tcp -p 8080", ip)} - assert.NoError(t, wait.PollImmediate(time.Second, 5*time.Second, func() (bool, error) { + assert.Eventually(t, func() bool { stdout, _, err := data.RunCommandFromPod(data.testNamespace, clientPodName, agnhostContainerName, cmd) - // For the client Pod which is selected by the L7 NetworkPolicy, the expected output returned - // from Suricata should contain "connection reset". 
- if err != nil { - return false, nil - } - if !strings.Contains(stdout, fmt.Sprintf("communications error to %s#8080: connection reset", ip)) { - return false, nil - } - return true, nil - })) + return err == nil && strings.Contains(stdout, fmt.Sprintf("communications error to %s#8080: connection reset", ip)) + }, 5*time.Second, time.Second) } } } func probeL7NetworkPolicyTLS(t *testing.T, data *TestData, clientPodName string, serverName string, canAccess bool) { url := fmt.Sprintf("https://%s", serverName) - assert.NoError(t, wait.PollImmediate(time.Second, 5*time.Second, func() (bool, error) { + assert.Eventually(t, func() bool { stdout, stderr, err := data.runWgetCommandFromTestPodWithRetry(clientPodName, data.testNamespace, agnhostContainerName, url, 5) if canAccess && err != nil { t.Logf("Failed to access %s: %v\nStdout: %s\nStderr: %s\n", url, err, stdout, stderr) - return false, err + return false } else if !canAccess && err == nil { t.Logf("Expected not to access the server, but the request succeeded.\nStdout: %s\nStderr: %s\n", stdout, stderr) - return false, fmt.Errorf("expected not to access the server %s, but the request succeeded", url) + return false } - return true, nil - })) + return true + }, 5*time.Second, time.Second) } func testL7NetworkPolicyHTTP(t *testing.T, data *TestData) { diff --git a/test/e2e/multicast_test.go b/test/e2e/multicast_test.go index 72f6bcf8666..8134467a4d7 100644 --- a/test/e2e/multicast_test.go +++ b/test/e2e/multicast_test.go @@ -465,7 +465,7 @@ func testMulticastStatsWithSendersReceivers(t *testing.T, data *TestData, testNa } wg.Wait() - if err := wait.Poll(5*time.Second, defaultTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, defaultTimeout, false, func(ctx context.Context) (bool, error) { for _, senderConfig := range mc.senderConfigs { stats := mc.antctlResults[senderConfig.name] t.Logf("Checking antctl get podmulticaststats result for %s", senderConfig.name) 
@@ -571,7 +571,7 @@ func testMulticastForwardToMultipleInterfaces(t *testing.T, data *TestData, send data.RunCommandFromPod(data.testNamespace, senderName, mcjoinContainerName, sendMulticastCommand) }() - if err := wait.Poll(5*time.Second, defaultTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, defaultTimeout, false, func(ctx context.Context) (bool, error) { // Check whether multicast interfaces can receive multicast traffic in the server side. // The check is needed for verifying external interfaces acting as multicast interfaces are able to forward multicast traffic. // If multicast traffic is sent from non-HostNetwork pods, all multicast interfaces in senders should receive multicast traffic. @@ -644,7 +644,7 @@ func runTestMulticastBetweenPods(t *testing.T, data *TestData, mc multicastTestc readyReceivers := sets.New[int]() senderReady := false - if err := wait.Poll(3*time.Second, defaultTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), 3*time.Second, defaultTimeout, false, func(ctx context.Context) (bool, error) { if checkSenderRoute && !senderReady { // Sender pods should add an outbound multicast route except when running as HostNetwork. 
mRoutesResult, err := getMroutes(nodeName(mc.senderConfig.nodeIdx), gatewayInterface, mc.group.String(), strings.Join(nodeMulticastInterfaces[mc.senderConfig.nodeIdx], " ")) diff --git a/test/e2e/networkpolicy_test.go b/test/e2e/networkpolicy_test.go index 24995915d26..1a5e3199752 100644 --- a/test/e2e/networkpolicy_test.go +++ b/test/e2e/networkpolicy_test.go @@ -193,7 +193,7 @@ func testNetworkPolicyStats(t *testing.T, data *TestData) { totalSessions += sessionsPerAddressFamily } - if err := wait.Poll(5*time.Second, defaultTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, defaultTimeout, false, func(ctx context.Context) (bool, error) { var ingressStats *v1alpha1.NetworkPolicyStats for _, np := range []string{"test-networkpolicy-ingress", "test-networkpolicy-egress"} { stats, err := data.crdClient.StatsV1alpha1().NetworkPolicyStats(data.testNamespace).Get(context.TODO(), np, metav1.GetOptions{}) @@ -1133,7 +1133,7 @@ func createAndWaitForPodWithLabels(t *testing.T, data *TestData, createFunc func } func waitForAgentCondition(t *testing.T, data *TestData, podName string, conditionType v1beta1.AgentConditionType, expectedStatus corev1.ConditionStatus) { - if err := wait.Poll(defaultInterval, defaultTimeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), defaultInterval, defaultTimeout, false, func(ctx context.Context) (bool, error) { cmds := []string{"antctl", "get", "agentinfo", "-o", "json"} t.Logf("cmds: %s", cmds) diff --git a/test/e2e/nodeportlocal_test.go b/test/e2e/nodeportlocal_test.go index 3a95ed3bf96..6aa801c38c9 100644 --- a/test/e2e/nodeportlocal_test.go +++ b/test/e2e/nodeportlocal_test.go @@ -16,6 +16,7 @@ package e2e import ( "bufio" + "context" "encoding/json" "fmt" "regexp" @@ -207,7 +208,7 @@ func checkForNPLRuleInIPTables(t *testing.T, data *TestData, r *require.Assertio cmd := []string{"iptables", "-t", "nat", "-S"} t.Logf("Verifying iptables rules 
%v, present: %v", rules, present) const timeout = 60 * time.Second - err := wait.Poll(time.Second, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), time.Second, timeout, false, func(ctx context.Context) (bool, error) { stdout, _, err := data.RunCommandFromPod(antreaNamespace, antreaPod, agentContainerName, cmd) if err != nil { t.Logf("Error while checking rules in iptables: %v", err) @@ -251,7 +252,7 @@ func checkForNPLRuleInNetNat(t *testing.T, data *TestData, r *require.Assertions defaultnodeIP := "0.0.0.0" t.Logf("Verifying NetNat rules %v, present: %v", rules, present) const timeout = 60 * time.Second - err := wait.Poll(time.Second, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), time.Second, timeout, false, func(ctx context.Context) (bool, error) { _, _, _, err := data.RunCommandOnNode(nodeName, "Get-NetNatStaticMapping") if err != nil { t.Logf("Error while checking NPL rules on Windows Node: %v", err) @@ -294,7 +295,7 @@ func checkForNPLRuleInNetNat(t *testing.T, data *TestData, r *require.Assertions func checkForNPLListeningSockets(t *testing.T, data *TestData, r *require.Assertions, antreaPod string, rules []nplRuleData, present bool) { t.Logf("Verifying NPL listening sockets") const timeout = 30 * time.Second - err := wait.Poll(time.Second, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), time.Second, timeout, false, func(ctx context.Context) (bool, error) { for _, rule := range rules { protocolOption := "--" + rule.protocol cmd := []string{"ss", "--listening", protocolOption, "-H", "-n"} diff --git a/test/e2e/performance_test.go b/test/e2e/performance_test.go index 947fe2395ac..da64de14754 100644 --- a/test/e2e/performance_test.go +++ b/test/e2e/performance_test.go @@ -278,7 +278,7 @@ func networkPolicyRealize(policyRules int, data *TestData, b *testing.B) { } func WaitNetworkPolicyRealize(nodeName string, table 
*openflow.Table, policyRules int, data *TestData) error { - return wait.PollImmediate(50*time.Millisecond, *realizeTimeout, func() (bool, error) { + return wait.PollUntilContextTimeout(context.Background(), 50*time.Millisecond, *realizeTimeout, true, func(ctx context.Context) (bool, error) { return checkRealize(nodeName, table, policyRules, data) }) } diff --git a/test/e2e/prometheus_test.go b/test/e2e/prometheus_test.go index 25ce5354ba0..216bfbf0c09 100644 --- a/test/e2e/prometheus_test.go +++ b/test/e2e/prometheus_test.go @@ -143,7 +143,7 @@ func getMetricsFromAPIServer(t *testing.T, url string, token string) string { } var body []byte - err = wait.PollImmediate(defaultInterval, defaultTimeout, func() (bool, error) { + err = wait.PollUntilContextTimeout(context.Background(), defaultInterval, defaultTimeout, true, func(ctx context.Context) (bool, error) { // Query metrics via HTTPS from Pod resp, err := client.Do(req) if err != nil { @@ -286,7 +286,7 @@ func testMetricsFromPrometheusServer(t *testing.T, data *TestData, prometheusJob client := &http.Client{} var output prometheusServerOutput - err := wait.PollImmediate(defaultInterval, defaultTimeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), defaultInterval, defaultTimeout, true, func(ctx context.Context) (bool, error) { resp, err := client.Get(queryURL) if err != nil { // Retry when accessing prometheus failed for flexible-ipam diff --git a/test/e2e/security_test.go b/test/e2e/security_test.go index 9401e5e4905..b8adff2a570 100644 --- a/test/e2e/security_test.go +++ b/test/e2e/security_test.go @@ -166,7 +166,7 @@ func testCert(t *testing.T, data *TestData, expectedCABundle string, restartPod var caBundle string var configMap *v1.ConfigMap - if err := wait.Poll(2*time.Second, timeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, false, func(ctx context.Context) (bool, error) { var err error configMap, err = 
data.clientset.CoreV1().ConfigMaps(caConfigMapNamespace).Get(context.TODO(), certificate.AntreaCAConfigMapName, metav1.GetOptions{}) if err != nil { @@ -214,7 +214,7 @@ func testCert(t *testing.T, data *TestData, expectedCABundle string, restartPod require.NoError(t, NewPodBuilder(clientName, data.testNamespace, agnhostImage).WithContainerName(getImageName(agnhostImage)).MountConfigMap(configMapCopy.Name, "/etc/config/", "config-volume").WithHostNetwork(false).Create(data)) defer data.DeletePodAndWait(defaultTimeout, clientName, data.testNamespace) require.NoError(t, data.podWaitForRunning(defaultTimeout, clientName, data.testNamespace)) - if err := wait.Poll(2*time.Second, timeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, false, func(ctx context.Context) (bool, error) { stdout, stderr, err := data.RunCommandFromPod(data.testNamespace, clientName, agnhostContainerName, cmd) if err != nil { t.Logf("error when running cmd: %v , stdout: <%v>, stderr: <%v>", err, stdout, stderr) @@ -241,7 +241,7 @@ func testCert(t *testing.T, data *TestData, expectedCABundle string, restartPod } // antrea-agents reconnect every 5 seconds, we expect their connections are restored in a few seconds. 
- if err := wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) { cmds := []string{"antctl", "get", "controllerinfo", "-o", "json"} stdout, _, err := runAntctl(antreaController.Name, cmds, data) if err != nil { diff --git a/test/e2e/service_externalip_test.go b/test/e2e/service_externalip_test.go index aaae30c6b8f..d8704995802 100644 --- a/test/e2e/service_externalip_test.go +++ b/test/e2e/service_externalip_test.go @@ -344,20 +344,21 @@ func testServiceWithExternalIPCRUD(t *testing.T, data *TestData) { checkEIPStatus := func(expectedUsed int) { var gotUsed, gotTotal int - err := wait.PollImmediate(200*time.Millisecond, 2*time.Second, func() (done bool, err error) { - pool, err := data.crdClient.CrdV1alpha2().ExternalIPPools().Get(context.TODO(), ipPool.Name, metav1.GetOptions{}) - if err != nil { - return false, fmt.Errorf("failed to get ExternalIPPool: %v", err) - } - gotUsed, gotTotal = pool.Status.Usage.Used, pool.Status.Usage.Total - if expectedUsed != pool.Status.Usage.Used { - return false, nil - } - if tt.expectedTotal != pool.Status.Usage.Total { - return false, nil - } - return true, nil - }) + err := wait.PollUntilContextTimeout(context.Background(), 200*time.Millisecond, 2*time.Second, true, + func(ctx context.Context) (done bool, err error) { + pool, err := data.crdClient.CrdV1alpha2().ExternalIPPools().Get(context.TODO(), ipPool.Name, metav1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("failed to get ExternalIPPool: %v", err) + } + gotUsed, gotTotal = pool.Status.Usage.Used, pool.Status.Usage.Total + if expectedUsed != pool.Status.Usage.Used { + return false, nil + } + if tt.expectedTotal != pool.Status.Usage.Total { + return false, nil + } + return true, nil + }) require.NoError(t, err, "ExternalIPPool status not match: expectedTotal=%d, got=%d, expectedUsed=%d, got=%d", 
tt.expectedTotal, gotTotal, expectedUsed, gotUsed) } checkEIPStatus(1) @@ -521,7 +522,7 @@ func testServiceNodeFailure(t *testing.T, data *TestData) { expectedMigratedNode = nodeName(0) } // The Agent on the original Node is paused. Run antctl from the expected migrated Node instead. - err = wait.PollImmediate(200*time.Millisecond, 15*time.Second, func() (done bool, err error) { + err = wait.PollUntilContextTimeout(context.Background(), 200*time.Millisecond, 15*time.Second, true, func(ctx context.Context) (done bool, err error) { assignedNode, err := data.getServiceAssignedNode(expectedMigratedNode, service) if err != nil { return false, nil @@ -601,7 +602,7 @@ func testExternalIPAccess(t *testing.T, data *TestData) { waitExternalIPConfigured := func(service *v1.Service) (string, string, error) { var ip string var assignedNode string - err := wait.PollImmediate(200*time.Millisecond, 5*time.Second, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), 200*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (done bool, err error) { service, err = data.clientset.CoreV1().Services(service.Namespace).Get(context.TODO(), service.Name, metav1.GetOptions{}) if err != nil { return false, err @@ -714,7 +715,7 @@ func (data *TestData) getServiceAssignedNode(node string, service *v1.Service) ( func (data *TestData) waitForServiceConfigured(service *v1.Service, expectedExternalIP string, expectedNodeName string) (*v1.Service, string, error) { var assignedNode string - err := wait.PollImmediate(200*time.Millisecond, 15*time.Second, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), 200*time.Millisecond, 15*time.Second, true, func(ctx context.Context) (done bool, err error) { service, err = data.clientset.CoreV1().Services(service.Namespace).Get(context.TODO(), service.Name, metav1.GetOptions{}) if err != nil { return false, err diff --git a/test/e2e/supportbundle_test.go 
b/test/e2e/supportbundle_test.go index a7a51e07cd0..08633112efb 100644 --- a/test/e2e/supportbundle_test.go +++ b/test/e2e/supportbundle_test.go @@ -111,7 +111,7 @@ func getAndCheckSupportBundle(t *testing.T, name, podIP string, podPort int, tok require.Equal(t, systemv1beta1.SupportBundleStatusCollecting, bundle.Status) // Waiting for the generation to be completed. ddl := time.After(defaultTimeout) - err = wait.PollImmediateUntil(200*time.Millisecond, func() (done bool, err error) { + err = wait.PollUntilContextCancel(context.TODO(), 200*time.Millisecond, true, func(ctx context.Context) (done bool, err error) { select { case <-ddl: return false, fmt.Errorf("collecting timeout") @@ -120,7 +120,7 @@ func getAndCheckSupportBundle(t *testing.T, name, podIP string, podPort int, tok bundle, err = clients.SystemV1beta1().SupportBundles().Get(context.TODO(), name, metav1.GetOptions{}) require.NoError(t, err) return bundle.Status == systemv1beta1.SupportBundleStatusCollected, nil - }, nil) + }) require.NoError(t, err) // Checking the complete status. bundle, err = clients.SystemV1beta1().SupportBundles().Get(context.TODO(), name, metav1.GetOptions{}) diff --git a/test/e2e/traceflow_test.go b/test/e2e/traceflow_test.go index 98bc03e52f4..f8df547836e 100644 --- a/test/e2e/traceflow_test.go +++ b/test/e2e/traceflow_test.go @@ -2315,7 +2315,7 @@ func (data *TestData) waitForTraceflow(t *testing.T, name string, phase v1beta1. 
var tf *v1beta1.Traceflow var err error timeout := 15 * time.Second - if err = wait.PollImmediate(defaultInterval, timeout, func() (bool, error) { + if err = wait.PollUntilContextTimeout(context.Background(), defaultInterval, timeout, true, func(ctx context.Context) (bool, error) { tf, err = data.crdClient.CrdV1beta1().Traceflows().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil || tf.Status.Phase != phase { return false, nil @@ -2442,7 +2442,7 @@ func (data *TestData) waitForNetworkpolicyRealized(pod string, node string, isWi if npType == v1beta2.AntreaNetworkPolicy { npOption = "ANNP" } - if err := wait.Poll(200*time.Millisecond, 5*time.Second, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), 200*time.Millisecond, 5*time.Second, false, func(ctx context.Context) (bool, error) { var stdout, stderr string var err error if isWindows { @@ -2458,7 +2458,7 @@ func (data *TestData) waitForNetworkpolicyRealized(pod string, node string, isWi return false, fmt.Errorf("Error when executing antctl get NetworkPolicy, stdout: %s, stderr: %s, err: %v", stdout, stderr, err) } return strings.Contains(stdout, fmt.Sprintf("%s:%s/%s", npType, data.testNamespace, networkpolicy)), nil - }); err == wait.ErrWaitTimeout { + }); wait.Interrupted(err) { return fmt.Errorf("NetworkPolicy %s isn't realized in time", networkpolicy) } else if err != nil { return err diff --git a/test/e2e/vmagent_test.go b/test/e2e/vmagent_test.go index 9b5551d9007..94bbf55cd54 100644 --- a/test/e2e/vmagent_test.go +++ b/test/e2e/vmagent_test.go @@ -84,14 +84,14 @@ func TestVMAgent(t *testing.T) { func (data *TestData) waitForDeploymentReady(t *testing.T, namespace string, name string, timeout time.Duration) error { t.Logf("Waiting for Deployment '%s/%s' to be ready", namespace, name) - err := wait.Poll(1*time.Second, timeout, func() (bool, error) { + err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, timeout, false, func(ctx context.Context) 
(bool, error) { dp, err := data.clientset.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return false, err } return dp.Status.ObservedGeneration == dp.Generation && dp.Status.ReadyReplicas == *dp.Spec.Replicas, nil }) - if err == wait.ErrWaitTimeout { + if wait.Interrupted(err) { _, stdout, _, _ := data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s describe pod -l app=sftp", namespace)) return fmt.Errorf("some replicas for Deployment '%s/%s' are not ready after %v:\n%v", namespace, name, timeout, stdout) } else if err != nil { @@ -103,7 +103,7 @@ func (data *TestData) waitForDeploymentReady(t *testing.T, namespace string, nam func (data *TestData) waitForSupportBundleCollectionRealized(t *testing.T, name string, timeout time.Duration) error { t.Logf("Waiting for SupportBundleCollection '%s' to be realized", name) var sbc *crdv1alpha1.SupportBundleCollection - if err := wait.Poll(100*time.Millisecond, timeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, timeout, false, func(ctx context.Context) (bool, error) { var getErr error sbc, getErr = data.crdClient.CrdV1alpha1().SupportBundleCollections().Get(context.TODO(), name, metav1.GetOptions{}) if getErr != nil { @@ -265,7 +265,7 @@ func setupVMAgentTest(t *testing.T, data *TestData) ([]vmInfo, error) { // and verifies uplink configuration is restored. 
func teardownVMAgentTest(t *testing.T, data *TestData, vmList []vmInfo) { verifyUpLinkAfterCleanup := func(vm vmInfo) { - err := wait.PollImmediate(10*time.Second, 1*time.Minute, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 1*time.Minute, true, func(ctx context.Context) (done bool, err error) { var tempVM vmInfo if vm.osType == linuxOS { tempVM = getVMInfo(t, data, vm.nodeName) @@ -294,7 +294,7 @@ func teardownVMAgentTest(t *testing.T, data *TestData, vmList []vmInfo) { } func verifyExternalEntityExistence(t *testing.T, data *TestData, eeName string, vmNodeName string, expectExists bool) { - if err := wait.PollImmediate(10*time.Second, 1*time.Minute, func() (done bool, err error) { + if err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 1*time.Minute, true, func(ctx context.Context) (done bool, err error) { t.Logf("Verifying ExternalEntity %s, expectExists %t", eeName, expectExists) _, err = data.crdClient.CrdV1alpha2().ExternalEntities(namespace).Get(context.TODO(), eeName, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { @@ -325,7 +325,7 @@ func verifyExternalEntityExistence(t *testing.T, data *TestData, eeName string, func testExternalNode(t *testing.T, data *TestData, vmList []vmInfo) { verifyExternalNodeRealization := func(vm vmInfo) { - err := wait.PollImmediate(10*time.Second, 1*time.Minute, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 1*time.Minute, true, func(ctx context.Context) (done bool, err error) { t.Logf("Verify host interface configuration for VM: %s", vm.nodeName) exists, err := verifyInterfaceIsInOVS(t, data, vm) return exists, err @@ -665,7 +665,7 @@ func runPingCommandOnVM(data *TestData, dstVM vmInfo, connected bool) error { expOutput := fmt.Sprintf("%d packets transmitted, %d received", pingCount, expCount) // Use master Node to run ping command. 
pingClient := nodeName(0) - err := wait.PollImmediate(time.Second*5, time.Second*20, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), time.Second*5, time.Second*20, true, func(ctx context.Context) (done bool, err error) { if err := runCommandAndCheckResult(data, pingClient, cmdStr, expOutput, ""); err != nil { return false, nil } @@ -676,7 +676,7 @@ func runPingCommandOnVM(data *TestData, dstVM vmInfo, connected bool) error { func runIperfCommandOnVMs(t *testing.T, data *TestData, srcVM vmInfo, dstVM vmInfo, connected bool, isUDP bool, ruleAction crdv1beta1.RuleAction) error { svrIP := net.ParseIP(dstVM.ip) - err := wait.PollImmediate(time.Second*5, time.Second*20, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), time.Second*5, time.Second*20, true, func(ctx context.Context) (done bool, err error) { if err := runIperfClient(t, data, srcVM, svrIP, iperfPort, isUDP, connected, ruleAction); err != nil { return false, nil } @@ -769,7 +769,7 @@ func runCurlCommandOnVM(data *TestData, targetVM vmInfo, url string, action crdv case crdv1beta1.RuleActionReject: expectedErr = "Connection refused" } - err := wait.PollImmediate(time.Second*5, time.Second*20, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(context.Background(), time.Second*5, time.Second*20, true, func(ctx context.Context) (done bool, err error) { if err := runCommandAndCheckResult(data, targetVM.nodeName, cmdStr, expectedOutput, expectedErr); err != nil { return false, nil } diff --git a/test/integration/agent/route_test.go b/test/integration/agent/route_test.go index 974fb3f7919..63ed6adcab8 100644 --- a/test/integration/agent/route_test.go +++ b/test/integration/agent/route_test.go @@ -18,6 +18,7 @@ package agent import ( + "context" "fmt" "net" "os" @@ -472,7 +473,7 @@ func TestSyncGatewayKernelRoute(t *testing.T) { listCmd := fmt.Sprintf("ip route show table 0 exact %s", podCIDR) - err = 
wait.PollImmediate(100*time.Millisecond, 2*time.Second, func() (done bool, err error) { + err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 2*time.Second, true, func(ctx context.Context) (done bool, err error) { expOutput, err := exec.Command("bash", "-c", listCmd).Output() if err != nil { return false, err @@ -490,7 +491,7 @@ func TestSyncGatewayKernelRoute(t *testing.T) { route.SyncInterval = 2 * time.Second go routeClient.Run(stopCh) - err = wait.Poll(1*time.Second, 2*route.SyncInterval, func() (done bool, err error) { + err = wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 2*route.SyncInterval, false, func(ctx context.Context) (done bool, err error) { expOutput, err := exec.Command("bash", "-c", listCmd).Output() if err != nil { return false, err diff --git a/test/integration/ovs/ofctrl_test.go b/test/integration/ovs/ofctrl_test.go index 0faa93f50d4..b7c92d35851 100644 --- a/test/integration/ovs/ofctrl_test.go +++ b/test/integration/ovs/ofctrl_test.go @@ -15,6 +15,7 @@ package ovs import ( + "context" "fmt" "net" "strings" @@ -240,11 +241,12 @@ func TestOFctrlFlow(t *testing.T) { if err != nil { t.Errorf("Failed to DeleteFlowsByCookie: %v", err) } - require.NoError(t, wait.PollImmediate(time.Millisecond*100, time.Second, func() (done bool, err error) { - flowList, err = OfctlDumpTableFlowsWithoutName(ovsCtlClient, myTable.GetID()) - require.Nil(t, err) - return len(flowList) == 0, nil - }), "Failed to delete flows by CookieID") + require.NoError(t, wait.PollUntilContextTimeout(context.Background(), time.Millisecond*100, time.Second, true, + func(ctx context.Context) (done bool, err error) { + flowList, err = OfctlDumpTableFlowsWithoutName(ovsCtlClient, myTable.GetID()) + require.Nil(t, err) + return len(flowList) == 0, nil + }), "Failed to delete flows by CookieID") } } @@ -312,11 +314,12 @@ func TestOFctrlGroup(t *testing.T) { // Check if the group could be added. 
require.Nil(t, group.Add()) var groups [][]string - require.NoError(t, wait.PollImmediate(openFlowCheckInterval, openFlowCheckTimeout, func() (done bool, err error) { - groups, err = OfCtlDumpGroups(ovsCtlClient) - require.Nil(t, err) - return len(groups) == 1, nil - }), "Failed to install group") + require.NoError(t, wait.PollUntilContextTimeout(context.Background(), openFlowCheckInterval, openFlowCheckTimeout, true, + func(ctx context.Context) (done bool, err error) { + groups, err = OfCtlDumpGroups(ovsCtlClient) + require.Nil(t, err) + return len(groups) == 1, nil + }), "Failed to install group") dumpedGroup := groups[0] for i, bucket := range buckets { if name == "Normal" { @@ -342,11 +345,12 @@ func TestOFctrlGroup(t *testing.T) { } // Check if the group could be deleted. require.Nil(t, group.Delete()) - require.NoError(t, wait.PollImmediate(openFlowCheckInterval, openFlowCheckTimeout, func() (done bool, err error) { - groups, err = OfCtlDumpGroups(ovsCtlClient) - require.Nil(t, err) - return len(groups) == 0, nil - }), "Failed to delete group") + require.NoError(t, wait.PollUntilContextTimeout(context.Background(), openFlowCheckInterval, openFlowCheckTimeout, true, + func(ctx context.Context) (done bool, err error) { + groups, err = OfCtlDumpGroups(ovsCtlClient) + require.Nil(t, err) + return len(groups) == 0, nil + }), "Failed to delete group") }) id++ } diff --git a/test/integration/ovs/openflow_test_utils.go b/test/integration/ovs/openflow_test_utils.go index 81b3abbc811..168cd42eab7 100644 --- a/test/integration/ovs/openflow_test_utils.go +++ b/test/integration/ovs/openflow_test_utils.go @@ -15,6 +15,7 @@ package ovs import ( + "context" "fmt" "os/exec" "strings" @@ -71,7 +72,7 @@ func CheckFlowExists(t *testing.T, ovsCtlClient ovsctl.OVSCtlClient, tableName s if table == "" { table = fmt.Sprintf("%d", tableID) } - if err := wait.PollImmediate(openFlowCheckInterval, openFlowCheckTimeout, func() (done bool, err error) { + if err := 
wait.PollUntilContextTimeout(context.TODO(), openFlowCheckInterval, openFlowCheckTimeout, true, func(ctx context.Context) (done bool, err error) { unexpectedFlows = unexpectedFlows[:0] if tableName != "" { flowList, err = OfctlDumpTableFlows(ovsCtlClient, tableName) @@ -108,25 +109,26 @@ func CheckGroupExists(t *testing.T, ovsCtlClient ovsctl.OVSCtlClient, groupID bi } groupStr := fmt.Sprintf("group_id=%d,type=%s,%s", groupID, groupType, strings.Join(bucketStrs, ",")) var groupList [][]string - if err := wait.PollImmediate(openFlowCheckInterval, openFlowCheckTimeout, func() (done bool, err error) { - groupList, err = OfCtlDumpGroups(ovsCtlClient) - require.NoError(t, err, "Error dumping groups") - found := false - for _, groupElems := range groupList { - groupEntry := fmt.Sprintf("%s,bucket=", groupElems[0]) - var groupElemStrs []string - for _, elem := range groupElems[1:] { - elemStr := strings.Join(strings.Split(elem, ",")[1:], ",") - groupElemStrs = append(groupElemStrs, elemStr) + if err := wait.PollUntilContextTimeout(context.TODO(), openFlowCheckInterval, openFlowCheckTimeout, true, + func(ctx context.Context) (done bool, err error) { + groupList, err = OfCtlDumpGroups(ovsCtlClient) + require.NoError(t, err, "Error dumping groups") + found := false + for _, groupElems := range groupList { + groupEntry := fmt.Sprintf("%s,bucket=", groupElems[0]) + var groupElemStrs []string + for _, elem := range groupElems[1:] { + elemStr := strings.Join(strings.Split(elem, ",")[1:], ",") + groupElemStrs = append(groupElemStrs, elemStr) + } + groupEntry = fmt.Sprintf("%s%s", groupEntry, strings.Join(groupElemStrs, ",bucket=")) + if strings.Contains(groupEntry, groupStr) { + found = true + break + } } - groupEntry = fmt.Sprintf("%s%s", groupEntry, strings.Join(groupElemStrs, ",bucket=")) - if strings.Contains(groupEntry, groupStr) { - found = true - break - } - } - return found == expectFound, nil - }); err != nil { + return found == expectFound, nil + }); err != nil { if 
expectFound { t.Errorf("Failed to install group: %s", groupStr) } else { diff --git a/third_party/ipam/nodeipam/ipam/cidr_allocator.go b/third_party/ipam/nodeipam/ipam/cidr_allocator.go index 06bab81ad10..4b0fa301fa9 100644 --- a/third_party/ipam/nodeipam/ipam/cidr_allocator.go +++ b/third_party/ipam/nodeipam/ipam/cidr_allocator.go @@ -41,7 +41,7 @@ import ( "net" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -144,18 +144,19 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) { var nodeList *v1.NodeList // We must poll because apiserver might not be up. This error causes // controller manager to restart. - if pollErr := wait.Poll(nodePollInterval, apiserverStartupGracePeriod, func() (bool, error) { - var err error - nodeList, err = kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ - FieldSelector: fields.Everything().String(), - LabelSelector: labels.Everything().String(), - }) - if err != nil { - klog.Errorf("Failed to list all nodes: %v", err) - return false, nil - } - return true, nil - }); pollErr != nil { + if pollErr := wait.PollUntilContextTimeout(context.TODO(), nodePollInterval, apiserverStartupGracePeriod, false, + func(ctx context.Context) (bool, error) { + var err error + nodeList, err = kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ + FieldSelector: fields.Everything().String(), + LabelSelector: labels.Everything().String(), + }) + if err != nil { + klog.Errorf("Failed to list all nodes: %v", err) + return false, nil + } + return true, nil + }); pollErr != nil { return nil, fmt.Errorf("failed to list all nodes in %v, cannot proceed without updating CIDR map", apiserverStartupGracePeriod) } diff --git a/third_party/proxy/service.go b/third_party/proxy/service.go index 53c8c72c745..625a63804b5 100644 --- a/third_party/proxy/service.go +++ b/third_party/proxy/service.go @@ 
-244,12 +244,11 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic internalPolicyLocal: internalPolicyLocal, internalTrafficPolicy: service.Spec.InternalTrafficPolicy, } - // TODO: Switch to v1.DeprecatedAnnotationTopologyAwareHints and v1.AnnotationTopologyMode after - // upgrading Antrea K8s API to at least 1.27 + var ok bool - info.hintsAnnotation, ok = service.Annotations[v1.AnnotationTopologyAwareHints] + info.hintsAnnotation, ok = service.Annotations[v1.DeprecatedAnnotationTopologyAwareHints] if !ok { - info.hintsAnnotation, _ = service.Annotations["service.kubernetes.io/topology-mode"] + info.hintsAnnotation = service.Annotations[v1.AnnotationTopologyMode] } loadBalancerSourceRanges := make([]string, len(service.Spec.LoadBalancerSourceRanges)) diff --git a/third_party/proxy/types.go b/third_party/proxy/types.go index 85632ee6425..4ac809e148b 100644 --- a/third_party/proxy/types.go +++ b/third_party/proxy/types.go @@ -108,7 +108,7 @@ type ServicePort interface { InternalPolicyLocal() bool // InternalTrafficPolicy returns service InternalTrafficPolicy InternalTrafficPolicy() *v1.ServiceInternalTrafficPolicyType - // HintsAnnotation returns the value of the v1.AnnotationTopologyAwareHints annotation or + // HintsAnnotation returns the value of the v1.DeprecatedAnnotationTopologyAwareHints annotation or // service.kubernetes.io/topology-mode annotation. HintsAnnotation() string // ExternallyAccessible returns true if the service port is reachable via something