diff --git a/e2e/genpolicy/genpolicy_test.go b/e2e/genpolicy/genpolicy_test.go
index 8939460e2..854f4c685 100644
--- a/e2e/genpolicy/genpolicy_test.go
+++ b/e2e/genpolicy/genpolicy_test.go
@@ -66,7 +66,7 @@ func TestGenpolicy(t *testing.T) {
 			ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
 			t.Cleanup(cancel)
 
-			require.NoError(t, ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, name))
+			require.NoError(t, ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, name))
 		})
 	}
 }
diff --git a/e2e/getdents/getdents_test.go b/e2e/getdents/getdents_test.go
index c90158b99..4b01cffad 100644
--- a/e2e/getdents/getdents_test.go
+++ b/e2e/getdents/getdents_test.go
@@ -76,7 +76,7 @@ func TestGetDEnts(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), ct.FactorPlatformTimeout(30*time.Second))
 	defer cancel()
 
-	require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, getdent))
+	require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, getdent))
 
 	pods, err := ct.Kubeclient.PodsFromDeployment(ctx, ct.Namespace, getdent)
 	require.NoError(err)
diff --git a/e2e/internal/contrasttest/contrasttest.go b/e2e/internal/contrasttest/contrasttest.go
index d51260a51..4b790ccff 100644
--- a/e2e/internal/contrasttest/contrasttest.go
+++ b/e2e/internal/contrasttest/contrasttest.go
@@ -313,15 +313,15 @@ func (ct *ContrastTest) installRuntime(t *testing.T) {
 	require.NoError(ct.Kubeclient.Apply(ctx, unstructuredResources...))
 
-	require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.DaemonSet{}, ct.Namespace, "contrast-node-installer"))
+	require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.DaemonSet{}, ct.Namespace, "contrast-node-installer"))
 }
 
 // runAgainstCoordinator forwards the coordinator port and executes the command against it.
 func (ct *ContrastTest) runAgainstCoordinator(ctx context.Context, cmd *cobra.Command, args ...string) error {
-	if err := ct.Kubeclient.WaitFor(ctx, kubeclient.StatefulSet{}, ct.Namespace, "coordinator"); err != nil {
+	if err := ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.StatefulSet{}, ct.Namespace, "coordinator"); err != nil {
 		return fmt.Errorf("waiting for coordinator: %w", err)
 	}
-	if err := ct.Kubeclient.WaitFor(ctx, kubeclient.Pod{}, ct.Namespace, "port-forwarder-coordinator"); err != nil {
+	if err := ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Pod{}, ct.Namespace, "port-forwarder-coordinator"); err != nil {
 		return fmt.Errorf("waiting for port-forwarder-coordinator: %w", err)
 	}
 
diff --git a/e2e/internal/kubeclient/deploy.go b/e2e/internal/kubeclient/deploy.go
index 464f6c65e..05c03d069 100644
--- a/e2e/internal/kubeclient/deploy.go
+++ b/e2e/internal/kubeclient/deploy.go
@@ -22,6 +22,17 @@ import (
 	"k8s.io/client-go/kubernetes"
 )
 
+// WaitCondition is an enum type for the possible wait conditions when using `kubeclient.WaitFor`.
+type WaitCondition int
+
+const (
+	_ WaitCondition = iota
+	// Ready waits until the resource becomes ready.
+	Ready
+	// InitContainersRunning waits until all init containers of all pods of the resource are running.
+	InitContainersRunning
+)
+
 // ResourceWaiter is implemented by resources that can be waited for with WaitFor.
 type ResourceWaiter interface {
 	kind() string
@@ -161,12 +172,62 @@ func (c *Kubeclient) WaitForPod(ctx context.Context, namespace, name string) err
 	}
 }
 
+func (c *Kubeclient) checkIfReady(ctx context.Context, name string, namespace string, evt watch.Event, resource ResourceWaiter) (bool, error) {
+	switch evt.Type {
+	case watch.Added:
+		fallthrough
+	case watch.Modified:
+		pods, err := resource.getPods(ctx, c, namespace, name)
+		if err != nil {
+			return false, err
+		}
+		numPodsReady := 0
+		for _, pod := range pods {
+			if isPodReady(&pod) {
+				numPodsReady++
+			}
+		}
+		desiredPods, err := resource.numDesiredPods(evt.Object)
+		if err != nil {
+			return false, err
+		}
+		if desiredPods <= numPodsReady {
+			// Wait for 5 more seconds just to be *really* sure that
+			// the pods are actually up.
+			sleep, cancel := context.WithTimeout(ctx, time.Second*5)
+			defer cancel()
+			<-sleep.Done()
+			return true, nil
+		}
+	case watch.Deleted:
+		return false, fmt.Errorf("%s %s/%s was deleted while waiting for it", resource.kind(), namespace, name)
+	default:
+		return false, fmt.Errorf("unexpected watch event while waiting for %s %s/%s: type=%s, object=%#v", resource.kind(), namespace, name, evt.Type, evt.Object)
+	}
+	return false, nil
+}
+
+func (c *Kubeclient) checkIfRunning(ctx context.Context, name string, namespace string, resource ResourceWaiter) (bool, error) {
+	pods, err := resource.getPods(ctx, c, namespace, name)
+	if err != nil {
+		return false, err
+	}
+	for _, pod := range pods {
+		// Check whether all init containers of the pod are running.
+		containers := pod.Status.InitContainerStatuses
+
+		for _, container := range containers {
+			if container.State.Running == nil {
+				return false, nil
+			}
+		}
+	}
+	return true, nil
+}
+
-// WaitFor watches the given resource kind and blocks until the desired number of pods are
-// ready or the context expires (is cancelled or times out).
-func (c *Kubeclient) WaitFor(ctx context.Context, resource ResourceWaiter, namespace, name string) error {
-	logger := c.log.With("namespace", namespace)
-	logger.Info(fmt.Sprintf("Waiting for %s %s/%s to become ready", resource.kind(), namespace, name))
-
+// WaitFor watches the given resource kind and blocks until the given wait condition
+// is met or the context expires (is cancelled or times out).
+func (c *Kubeclient) WaitFor(ctx context.Context, condition WaitCondition, resource ResourceWaiter, namespace, name string) error {
 	// When the node-installer restarts K3s, the watcher fails. The watcher has
 	// a retry loop internally, but it only retries starting the request, once
 	// it has established a request and that request dies spuriously, the
@@ -201,7 +262,8 @@ retryLoop:
 			}
 			return fmt.Errorf("watcher for %s %s/%s unexpectedly closed", resource.kind(), namespace, name)
 		}
-		logger.Error("resource did not become ready", "kind", resource, "name", name, "contextErr", ctx.Err())
+		logger := c.log.With("namespace", namespace)
+		logger.Error("failed to wait for resource", "condition", condition, "kind", resource, "name", name, "contextErr", ctx.Err())
 		if ctx.Err() != context.DeadlineExceeded {
 			return ctx.Err()
 		}
@@ -220,36 +282,23 @@ retryLoop:
 		}
 		return origErr
 	}
-		switch evt.Type {
-		case watch.Added:
-			fallthrough
-		case watch.Modified:
-			pods, err := resource.getPods(ctx, c, namespace, name)
+		switch condition {
+		case Ready:
+			ready, err := c.checkIfReady(ctx, name, namespace, evt, resource)
 			if err != nil {
 				return err
 			}
-			numPodsReady := 0
-			for _, pod := range pods {
-				if isPodReady(&pod) {
-					numPodsReady++
-				}
+			if ready {
+				return nil
 			}
-			desiredPods, err := resource.numDesiredPods(evt.Object)
+		case InitContainersRunning:
+			running, err := c.checkIfRunning(ctx, name, namespace, resource)
 			if err != nil {
 				return err
 			}
-			if desiredPods <= numPodsReady {
-				// Wait for 5 more seconds just to be *really* sure that
-				// the pods are actually up.
-				sleep, cancel := context.WithTimeout(ctx, time.Second*5)
-				defer cancel()
-				<-sleep.Done()
+			if running {
 				return nil
 			}
-		case watch.Deleted:
-			return fmt.Errorf("%s %s/%s was deleted while waiting for it", resource.kind(), namespace, name)
-		default:
-			return fmt.Errorf("unexpected watch event while waiting for %s %s/%s: type=%s, object=%#v", resource.kind(), namespace, name, evt.Type, evt.Object)
 		}
 	}
 }
diff --git a/e2e/internal/kubeclient/kubeclient.go b/e2e/internal/kubeclient/kubeclient.go
index 0ecf675e6..36e8a4c5f 100644
--- a/e2e/internal/kubeclient/kubeclient.go
+++ b/e2e/internal/kubeclient/kubeclient.go
@@ -171,7 +171,7 @@ func (c *Kubeclient) Exec(ctx context.Context, namespace, pod string, argv []str
 
 // ExecDeployment executes a process in one of the deployment's pods.
 func (c *Kubeclient) ExecDeployment(ctx context.Context, namespace, deployment string, argv []string) (stdout string, stderr string, err error) {
-	if err := c.WaitFor(ctx, Deployment{}, namespace, deployment); err != nil {
+	if err := c.WaitFor(ctx, Ready, Deployment{}, namespace, deployment); err != nil {
 		return "", "", fmt.Errorf("deployment not ready: %w", err)
 	}
 
diff --git a/e2e/openssl/openssl_test.go b/e2e/openssl/openssl_test.go
index adfaf345d..a69728b01 100644
--- a/e2e/openssl/openssl_test.go
+++ b/e2e/openssl/openssl_test.go
@@ -73,7 +73,7 @@ func TestOpenSSL(t *testing.T) {
 
 		require := require.New(t)
 
-		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
+		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
 
 		frontendPods, err := ct.Kubeclient.PodsFromDeployment(ctx, ct.Namespace, opensslFrontend)
 		require.NoError(err)
@@ -98,8 +98,8 @@ func TestOpenSSL(t *testing.T) {
 
 		require := require.New(t)
 
-		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
-		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Pod{}, ct.Namespace, "port-forwarder-openssl-frontend"))
+		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
+		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Pod{}, ct.Namespace, "port-forwarder-openssl-frontend"))
 
 		require.NoError(ct.Kubeclient.WithForwardedPort(ctx, ct.Namespace, "port-forwarder-openssl-frontend", "443", func(addr string) error {
 			dialer := &tls.Dialer{Config: &tls.Config{RootCAs: pool}}
@@ -121,7 +121,7 @@ func TestOpenSSL(t *testing.T) {
 
 		c := kubeclient.NewForTest(t)
 
-		require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
+		require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
 
 		// Call the backend server from the frontend. If this command produces no TLS error, we verified that
 		// - the certificate in the frontend pod can be used as a client certificate
@@ -164,7 +164,7 @@ func TestOpenSSL(t *testing.T) {
 
 		// Restart one deployment so it has the new certificates.
 		require.NoError(c.Restart(ctx, kubeclient.Deployment{}, ct.Namespace, deploymentToRestart))
-		require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, deploymentToRestart))
+		require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, deploymentToRestart))
 
 		// This should not succeed because the certificates have changed.
 		stdout, stderr, err := c.ExecDeployment(ctx, ct.Namespace, opensslFrontend, []string{"/bin/sh", "-c", opensslConnectCmd("openssl-backend:443", meshCAFile)})
@@ -184,7 +184,7 @@ func TestOpenSSL(t *testing.T) {
 			d = opensslFrontend
 		}
 		require.NoError(c.Restart(ctx, kubeclient.Deployment{}, ct.Namespace, d))
-		require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, d))
+		require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, d))
 
 		// This should succeed since both workloads now have updated certificates.
 		stdout, stderr, err = c.ExecDeployment(ctx, ct.Namespace, opensslFrontend, []string{"/bin/sh", "-c", opensslConnectCmd("openssl-backend:443", meshCAFile)})
@@ -202,7 +202,7 @@ func TestOpenSSL(t *testing.T) {
 		c := kubeclient.NewForTest(t)
 
 		require.NoError(c.Restart(ctx, kubeclient.StatefulSet{}, ct.Namespace, "coordinator"))
-		require.NoError(c.WaitFor(ctx, kubeclient.StatefulSet{}, ct.Namespace, "coordinator"))
+		require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.StatefulSet{}, ct.Namespace, "coordinator"))
 
 		// TODO(freax13): The following verify sometimes fails spuriously due to
 		//                connection issues. Waiting a little bit longer makes
@@ -216,7 +216,7 @@ func TestOpenSSL(t *testing.T) {
 		require.True(t.Run("contrast verify", ct.Verify))
 
 		require.NoError(c.Restart(ctx, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
-		require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
+		require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
 
 		t.Run("root CA is still accepted after coordinator recovery", func(t *testing.T) {
 			stdout, stderr, err := c.ExecDeployment(ctx, ct.Namespace, opensslBackend, []string{"/bin/sh", "-c", opensslConnectCmd("openssl-frontend:443", rootCAFile)})
@@ -232,7 +232,7 @@ func TestOpenSSL(t *testing.T) {
 		})
 
 		require.NoError(c.Restart(ctx, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
-		require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
+		require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
 
 		t.Run("mesh CA after coordinator recovery is accepted when workloads are restarted", func(t *testing.T) {
 			stdout, stderr, err := c.ExecDeployment(ctx, ct.Namespace, opensslBackend, []string{"/bin/sh", "-c", opensslConnectCmd("openssl-frontend:443", meshCAFile)})
diff --git a/e2e/policy/policy_test.go b/e2e/policy/policy_test.go
index 773a6fe62..dc4eccd94 100644
--- a/e2e/policy/policy_test.go
+++ b/e2e/policy/policy_test.go
@@ -74,8 +74,8 @@ func TestPolicy(t *testing.T) {
 		c := kubeclient.NewForTest(t)
 
 		t.Log("Waiting for deployments")
-		require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
-		require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
+		require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
+		require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
 
 		// get the attestation failures before removing a policy
 		initialFailures := getFailures(ctx, t, ct)
@@ -128,18 +128,10 @@ func TestPolicy(t *testing.T) {
 		// restart the deployments
 		require.NoError(c.Restart(ctx, kubeclient.Deployment{}, ct.Namespace, opensslFrontend)) // not waiting since it would fail
 		require.NoError(c.Restart(ctx, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
-		require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
+		require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
 
 		// wait for the init container of the openssl-frontend pod to enter the running state
-		ready := false
-		for !ready {
-			time.Sleep(1 * time.Second)
-			pods, err := ct.Kubeclient.PodsFromDeployment(ctx, ct.Namespace, opensslFrontend)
-			require.NoError(err)
-			require.NotEmpty(pods, "pod not found: %s/%s", ct.Namespace, opensslFrontend)
-			require.NotEmpty(pods[0].Status.InitContainerStatuses, "pod doesn't contain init container statuses: %s/%s", ct.Namespace, opensslFrontend)
-			ready = pods[0].Status.InitContainerStatuses[0].State.Running != nil
-		}
+		require.NoError(c.WaitFor(ctx, kubeclient.InitContainersRunning, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
 
 		newFailures := getFailures(ctx, t, ct)
 		t.Log("New failures:", newFailures)
 		// errors should happen
diff --git a/e2e/regression/regression_test.go b/e2e/regression/regression_test.go
index 5708cbe9d..e5461754d 100644
--- a/e2e/regression/regression_test.go
+++ b/e2e/regression/regression_test.go
@@ -91,7 +91,7 @@ func TestRegression(t *testing.T) {
 			ctx, cancel := context.WithTimeout(context.Background(), ct.FactorPlatformTimeout(3*time.Minute))
 			defer cancel()
 
-			require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, deploymentName))
+			require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, deploymentName))
 		})
 	}
 }
diff --git a/e2e/release/release_test.go b/e2e/release/release_test.go
index 4d20622e2..78f8a8e36 100644
--- a/e2e/release/release_test.go
+++ b/e2e/release/release_test.go
@@ -105,7 +105,7 @@ func TestRelease(t *testing.T) {
 		require.NoError(err)
 		require.NoError(k.Apply(ctx, resources...))
 
-		require.NoError(k.WaitFor(ctx, kubeclient.DaemonSet{}, "kube-system", "contrast-node-installer"))
+		require.NoError(k.WaitFor(ctx, kubeclient.Ready, kubeclient.DaemonSet{}, "kube-system", "contrast-node-installer"))
 	}), "the runtime is required for subsequent tests to run")
 
 	var coordinatorIP string
@@ -120,7 +120,7 @@ func TestRelease(t *testing.T) {
 		require.NoError(err)
 		require.NoError(k.Apply(ctx, resources...))
 
-		require.NoError(k.WaitFor(ctx, kubeclient.StatefulSet{}, "default", "coordinator"))
+		require.NoError(k.WaitFor(ctx, kubeclient.Ready, kubeclient.StatefulSet{}, "default", "coordinator"))
 		coordinatorIP, err = k.WaitForService(ctx, "default", "coordinator", hasLoadBalancer)
 		require.NoError(err)
 	}), "the coordinator is required for subsequent tests to run")
@@ -175,10 +175,10 @@ func TestRelease(t *testing.T) {
 			require.NoError(k.Apply(ctx, resources...))
 		}
 
-		require.NoError(k.WaitFor(ctx, kubeclient.Deployment{}, "default", "vote-bot"))
-		require.NoError(k.WaitFor(ctx, kubeclient.Deployment{}, "default", "voting"))
-		require.NoError(k.WaitFor(ctx, kubeclient.Deployment{}, "default", "emoji"))
-		require.NoError(k.WaitFor(ctx, kubeclient.Deployment{}, "default", "web"))
+		require.NoError(k.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, "default", "vote-bot"))
+		require.NoError(k.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, "default", "voting"))
+		require.NoError(k.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, "default", "emoji"))
+		require.NoError(k.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, "default", "web"))
 	}), "applying the demo is required for subsequent tests to run")
 
 	t.Run("test-demo", func(t *testing.T) {
diff --git a/e2e/servicemesh/servicemesh_test.go b/e2e/servicemesh/servicemesh_test.go
index c4a519898..20068f332 100644
--- a/e2e/servicemesh/servicemesh_test.go
+++ b/e2e/servicemesh/servicemesh_test.go
@@ -65,11 +65,10 @@ func TestIngressEgress(t *testing.T) {
 		ctx, cancel := context.WithTimeout(context.Background(), ct.FactorPlatformTimeout(1*time.Minute))
 		defer cancel()
 
-		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "vote-bot"))
-		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "emoji"))
-		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "voting"))
-		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "web"))
-		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Pod{}, ct.Namespace, "port-forwarder-web-svc"))
+		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "vote-bot"))
+		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "emoji"))
+		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "voting"))
+		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "web"))
 	}), "deployments need to be ready for subsequent tests")
 
 	certs := map[string]*x509.CertPool{
diff --git a/e2e/volumestatefulset/volumestatefulset_test.go b/e2e/volumestatefulset/volumestatefulset_test.go
index c8488728e..e9bd93956 100644
--- a/e2e/volumestatefulset/volumestatefulset_test.go
+++ b/e2e/volumestatefulset/volumestatefulset_test.go
@@ -61,7 +61,7 @@ func TestVolumeStatefulSet(t *testing.T) {
 		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
 		defer cancel()
 
-		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.StatefulSet{}, ct.Namespace, "volume-tester"))
+		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.StatefulSet{}, ct.Namespace, "volume-tester"))
 	}), "deployments need to be ready for subsequent tests")
 
 	filePath := "/srv/state/test"
@@ -93,7 +93,7 @@ func TestVolumeStatefulSet(t *testing.T) {
 		defer cancel()
 
 		require.NoError(ct.Kubeclient.Restart(ctx, kubeclient.StatefulSet{}, ct.Namespace, "volume-tester"))
-		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.StatefulSet{}, ct.Namespace, "volume-tester"))
+		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.StatefulSet{}, ct.Namespace, "volume-tester"))
 
 		pods, err := ct.Kubeclient.PodsFromOwner(ctx, ct.Namespace, "StatefulSet", "volume-tester")
 		require.NoError(err)
diff --git a/e2e/workloadsecret/workloadsecret_test.go b/e2e/workloadsecret/workloadsecret_test.go
index 6d5a830be..8257558f2 100644
--- a/e2e/workloadsecret/workloadsecret_test.go
+++ b/e2e/workloadsecret/workloadsecret_test.go
@@ -64,10 +64,10 @@ func TestWorkloadSecrets(t *testing.T) {
 		ctx, cancel := context.WithTimeout(context.Background(), ct.FactorPlatformTimeout(1*time.Minute))
 		defer cancel()
 
-		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "vote-bot"))
-		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "emoji"))
-		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "voting"))
-		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "web"))
+		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "vote-bot"))
+		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "emoji"))
+		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "voting"))
+		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "web"))
 	}), "deployments need to be ready for subsequent tests")
 
 	// Scale web deployment to 2 replicas.
@@ -78,7 +78,7 @@ func TestWorkloadSecrets(t *testing.T) {
 		defer cancel()
 
 		require.NoError(ct.Kubeclient.ScaleDeployment(ctx, ct.Namespace, "web", 2))
-		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "web"))
+		require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "web"))
 	}), "web deployment needs to be scaled for subsequent tests")
 
 	var webWorkloadSecretBytes []byte
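
Migration note for callers not covered by this patch: the new WaitCondition argument
slots in between the context and the resource, and the hand-rolled init-container
polling loop removed from policy_test.go collapses into a single WaitFor call. A
minimal sketch, assuming the repository's module path for the kubeclient package
and an illustrative test helper; neither the helper nor the "web" name below is
part of this diff.

package example

import (
	"context"
	"testing"
	"time"

	"github.com/edgelesssys/contrast/e2e/internal/kubeclient" // assumed import path
	"github.com/stretchr/testify/require"
)

// waitExamples contrasts the old and new call shapes (hypothetical helper).
func waitExamples(t *testing.T, c *kubeclient.Kubeclient, namespace string) {
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancel()

	// Old: c.WaitFor(ctx, kubeclient.Deployment{}, namespace, "web")
	// New: the wait condition is passed explicitly.
	require.NoError(t, c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, namespace, "web"))

	// Waiting for init containers no longer needs a sleep-and-poll loop.
	require.NoError(t, c.WaitFor(ctx, kubeclient.InitContainersRunning, kubeclient.Deployment{}, namespace, "web"))
}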