e2e: add ability to wait for different conditions in kubeclient.WaitFor #888

Open
wants to merge 14 commits into base: main
2 changes: 1 addition & 1 deletion e2e/genpolicy/genpolicy_test.go
@@ -66,7 +66,7 @@ func TestGenpolicy(t *testing.T) {

ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
t.Cleanup(cancel)
require.NoError(t, ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, name))
require.NoError(t, ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, name))
})
}
}
2 changes: 1 addition & 1 deletion e2e/getdents/getdents_test.go
@@ -76,7 +76,7 @@ func TestGetDEnts(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), ct.FactorPlatformTimeout(30*time.Second))
defer cancel()

require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, getdent))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, getdent))

pods, err := ct.Kubeclient.PodsFromDeployment(ctx, ct.Namespace, getdent)
require.NoError(err)
6 changes: 3 additions & 3 deletions e2e/internal/contrasttest/contrasttest.go
@@ -313,15 +313,15 @@ func (ct *ContrastTest) installRuntime(t *testing.T) {

require.NoError(ct.Kubeclient.Apply(ctx, unstructuredResources...))

require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.DaemonSet{}, ct.Namespace, "contrast-node-installer"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.DaemonSet{}, ct.Namespace, "contrast-node-installer"))
}

// runAgainstCoordinator forwards the coordinator port and executes the command against it.
func (ct *ContrastTest) runAgainstCoordinator(ctx context.Context, cmd *cobra.Command, args ...string) error {
if err := ct.Kubeclient.WaitFor(ctx, kubeclient.StatefulSet{}, ct.Namespace, "coordinator"); err != nil {
if err := ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.StatefulSet{}, ct.Namespace, "coordinator"); err != nil {
return fmt.Errorf("waiting for coordinator: %w", err)
}
if err := ct.Kubeclient.WaitFor(ctx, kubeclient.Pod{}, ct.Namespace, "port-forwarder-coordinator"); err != nil {
if err := ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Pod{}, ct.Namespace, "port-forwarder-coordinator"); err != nil {
return fmt.Errorf("waiting for port-forwarder-coordinator: %w", err)
}

96 changes: 70 additions & 26 deletions e2e/internal/kubeclient/deploy.go
@@ -22,6 +22,17 @@ import (
"k8s.io/client-go/kubernetes"
)

// WaitCondition is an enum type for the possible wait conditions when using `kubeclient.WaitFor`.
type WaitCondition int

const (
_ WaitCondition = iota
// Ready waits until the resource becomes ready.
Ready
// InitContainersRunning waits until all init containers of all pods of the resource are running.
InitContainersRunning
)
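
// Example usage (an illustrative sketch mirroring the call sites updated in
// this PR; the namespace and deployment name are placeholders):
//
//	// Block until all pods of the deployment report ready.
//	err := c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, "default", "web")
//
//	// Block until every init container of the deployment's pods is running.
//	err = c.WaitFor(ctx, kubeclient.InitContainersRunning, kubeclient.Deployment{}, "default", "web")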

// ResourceWaiter is implemented by resources that can be waited for with WaitFor.
type ResourceWaiter interface {
kind() string
@@ -161,12 +172,57 @@ func (c *Kubeclient) WaitForPod(ctx context.Context, namespace, name string) err
}
}

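// checkIfReady inspects a watch event and reports whether the number of ready
// pods has reached the resource's desired pod count.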
func (c *Kubeclient) checkIfReady(ctx context.Context, name string, namespace string, evt watch.Event, resource ResourceWaiter) (bool, error) {
switch evt.Type {
case watch.Added:
fallthrough
case watch.Modified:
pods, err := resource.getPods(ctx, c, namespace, name)
if err != nil {
return false, err
}
numPodsReady := 0
for _, pod := range pods {
if isPodReady(&pod) {
numPodsReady++
}
}
desiredPods, err := resource.numDesiredPods(evt.Object)
if err != nil {
return false, err
}
if desiredPods <= numPodsReady {
return true, nil
}
case watch.Deleted:
return false, fmt.Errorf("%s %s/%s was deleted while waiting for it", resource.kind(), namespace, name)
default:
return false, fmt.Errorf("unexpected watch event while waiting for %s %s/%s: type=%s, object=%#v", resource.kind(), namespace, name, evt.Type, evt.Object)
}
return false, nil
}

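// checkIfRunning reports whether all init containers of all pods belonging to
// the given resource are currently running.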
func (c *Kubeclient) checkIfRunning(ctx context.Context, name string, namespace string, resource ResourceWaiter) (bool, error) {
pods, err := resource.getPods(ctx, c, namespace, name)
if err != nil {
return false, err
}
for _, pod := range pods {
// check whether all init containers in the pod are running
containers := pod.Status.InitContainerStatuses

for _, container := range containers {
if container.State.Running == nil {
return false, nil
}
}
}
return true, nil
}

// WaitFor watches the given resource kind and blocks until the given wait
// condition is met or the context expires (is cancelled or times out).
func (c *Kubeclient) WaitFor(ctx context.Context, resource ResourceWaiter, namespace, name string) error {
logger := c.log.With("namespace", namespace)
logger.Info(fmt.Sprintf("Waiting for %s %s/%s to become ready", resource.kind(), namespace, name))

func (c *Kubeclient) WaitFor(ctx context.Context, condition WaitCondition, resource ResourceWaiter, namespace, name string) error {
// When the node-installer restarts K3s, the watcher fails. The watcher has
// a retry loop internally, but it only retries starting the request; once
// it has established a request and that request dies spuriously, the
@@ -201,7 +257,8 @@ retryLoop:
}
return fmt.Errorf("watcher for %s %s/%s unexpectedly closed", resource.kind(), namespace, name)
}
logger.Error("resource did not become ready", "kind", resource, "name", name, "contextErr", ctx.Err())
logger := c.log.With("namespace", namespace)
logger.Error("failed to wait for resource", "condition", condition, "kind", resource, "name", name, "contextErr", ctx.Err())
if ctx.Err() != context.DeadlineExceeded {
return ctx.Err()
}
@@ -220,36 +277,23 @@
}
return origErr
}
switch evt.Type {
case watch.Added:
fallthrough
case watch.Modified:
pods, err := resource.getPods(ctx, c, namespace, name)
switch condition {
case Ready:
ready, err := c.checkIfReady(ctx, name, namespace, evt, resource)
if err != nil {
return err
}
numPodsReady := 0
for _, pod := range pods {
if isPodReady(&pod) {
numPodsReady++
}
if ready {
return nil
}
desiredPods, err := resource.numDesiredPods(evt.Object)
case InitContainersRunning:
running, err := c.checkIfRunning(ctx, name, namespace, resource)
if err != nil {
return err
}
if desiredPods <= numPodsReady {
// Wait for 5 more seconds just to be *really* sure that
// the pods are actually up.
sleep, cancel := context.WithTimeout(ctx, time.Second*5)
defer cancel()
<-sleep.Done()
if running {
return nil
}
case watch.Deleted:
return fmt.Errorf("%s %s/%s was deleted while waiting for it", resource.kind(), namespace, name)
default:
return fmt.Errorf("unexpected watch event while waiting for %s %s/%s: type=%s, object=%#v", resource.kind(), namespace, name, evt.Type, evt.Object)
}
}
}
2 changes: 1 addition & 1 deletion e2e/internal/kubeclient/kubeclient.go
@@ -171,7 +171,7 @@ func (c *Kubeclient) Exec(ctx context.Context, namespace, pod string, argv []str

// ExecDeployment executes a process in one of the deployment's pods.
func (c *Kubeclient) ExecDeployment(ctx context.Context, namespace, deployment string, argv []string) (stdout string, stderr string, err error) {
if err := c.WaitFor(ctx, Deployment{}, namespace, deployment); err != nil {
if err := c.WaitFor(ctx, Ready, Deployment{}, namespace, deployment); err != nil {
return "", "", fmt.Errorf("deployment not ready: %w", err)
}

18 changes: 9 additions & 9 deletions e2e/openssl/openssl_test.go
@@ -73,7 +73,7 @@ func TestOpenSSL(t *testing.T) {

require := require.New(t)

require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))

frontendPods, err := ct.Kubeclient.PodsFromDeployment(ctx, ct.Namespace, opensslFrontend)
require.NoError(err)
@@ -98,8 +98,8 @@ func TestOpenSSL(t *testing.T) {

require := require.New(t)

require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Pod{}, ct.Namespace, "port-forwarder-openssl-frontend"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Pod{}, ct.Namespace, "port-forwarder-openssl-frontend"))

require.NoError(ct.Kubeclient.WithForwardedPort(ctx, ct.Namespace, "port-forwarder-openssl-frontend", "443", func(addr string) error {
dialer := &tls.Dialer{Config: &tls.Config{RootCAs: pool}}
@@ -121,7 +121,7 @@ func TestOpenSSL(t *testing.T) {

c := kubeclient.NewForTest(t)

require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslBackend))

// Call the backend server from the frontend. If this command produces no TLS error, we verified that
// - the certificate in the frontend pod can be used as a client certificate
@@ -164,7 +164,7 @@ func TestOpenSSL(t *testing.T) {

// Restart one deployment so it has the new certificates.
require.NoError(c.Restart(ctx, kubeclient.Deployment{}, ct.Namespace, deploymentToRestart))
require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, deploymentToRestart))
require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, deploymentToRestart))

// This should not succeed because the certificates have changed.
stdout, stderr, err := c.ExecDeployment(ctx, ct.Namespace, opensslFrontend, []string{"/bin/sh", "-c", opensslConnectCmd("openssl-backend:443", meshCAFile)})
@@ -184,7 +184,7 @@ func TestOpenSSL(t *testing.T) {
d = opensslFrontend
}
require.NoError(c.Restart(ctx, kubeclient.Deployment{}, ct.Namespace, d))
require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, d))
require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, d))

// This should succeed since both workloads now have updated certificates.
stdout, stderr, err = c.ExecDeployment(ctx, ct.Namespace, opensslFrontend, []string{"/bin/sh", "-c", opensslConnectCmd("openssl-backend:443", meshCAFile)})
@@ -202,7 +202,7 @@ func TestOpenSSL(t *testing.T) {
c := kubeclient.NewForTest(t)

require.NoError(c.Restart(ctx, kubeclient.StatefulSet{}, ct.Namespace, "coordinator"))
require.NoError(c.WaitFor(ctx, kubeclient.StatefulSet{}, ct.Namespace, "coordinator"))
require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.StatefulSet{}, ct.Namespace, "coordinator"))

// TODO(freax13): The following verify sometimes fails spuriously due to
// connection issues. Waiting a little bit longer makes
@@ -216,7 +216,7 @@ func TestOpenSSL(t *testing.T) {
require.True(t.Run("contrast verify", ct.Verify))

require.NoError(c.Restart(ctx, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))

t.Run("root CA is still accepted after coordinator recovery", func(t *testing.T) {
stdout, stderr, err := c.ExecDeployment(ctx, ct.Namespace, opensslBackend, []string{"/bin/sh", "-c", opensslConnectCmd("openssl-frontend:443", rootCAFile)})
@@ -232,7 +232,7 @@ func TestOpenSSL(t *testing.T) {
})

require.NoError(c.Restart(ctx, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslBackend))

t.Run("mesh CA after coordinator recovery is accepted when workloads are restarted", func(t *testing.T) {
stdout, stderr, err := c.ExecDeployment(ctx, ct.Namespace, opensslBackend, []string{"/bin/sh", "-c", opensslConnectCmd("openssl-frontend:443", meshCAFile)})
16 changes: 4 additions & 12 deletions e2e/policy/policy_test.go
@@ -74,8 +74,8 @@ func TestPolicy(t *testing.T) {
c := kubeclient.NewForTest(t)

t.Log("Waiting for deployments")
require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))

// get the attestation failures before removing a policy
initialFailures := getFailures(ctx, t, ct)
@@ -128,18 +128,10 @@ func TestPolicy(t *testing.T) {
// restart the deployments
require.NoError(c.Restart(ctx, kubeclient.Deployment{}, ct.Namespace, opensslFrontend)) // not waiting since it would fail
require.NoError(c.Restart(ctx, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, opensslBackend))
require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, opensslBackend))

// wait for the init container of the openssl-frontend pod to enter the running state
ready := false
for !ready {
time.Sleep(1 * time.Second)
pods, err := ct.Kubeclient.PodsFromDeployment(ctx, ct.Namespace, opensslFrontend)
require.NoError(err)
require.NotEmpty(pods, "pod not found: %s/%s", ct.Namespace, opensslFrontend)
require.NotEmpty(pods[0].Status.InitContainerStatuses, "pod doesn't contain init container statuses: %s/%s", ct.Namespace, opensslFrontend)
ready = pods[0].Status.InitContainerStatuses[0].State.Running != nil
}
require.NoError(c.WaitFor(ctx, kubeclient.InitContainersRunning, kubeclient.Deployment{}, ct.Namespace, opensslFrontend))
newFailures := getFailures(ctx, t, ct)
t.Log("New failures:", newFailures)
// errors should happen
2 changes: 1 addition & 1 deletion e2e/regression/regression_test.go
@@ -91,7 +91,7 @@ func TestRegression(t *testing.T) {

ctx, cancel := context.WithTimeout(context.Background(), ct.FactorPlatformTimeout(3*time.Minute))
defer cancel()
require.NoError(c.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, deploymentName))
require.NoError(c.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, deploymentName))
})
}
}
12 changes: 6 additions & 6 deletions e2e/release/release_test.go
@@ -105,7 +105,7 @@ func TestRelease(t *testing.T) {
require.NoError(err)

require.NoError(k.Apply(ctx, resources...))
require.NoError(k.WaitFor(ctx, kubeclient.DaemonSet{}, "kube-system", "contrast-node-installer"))
require.NoError(k.WaitFor(ctx, kubeclient.Ready, kubeclient.DaemonSet{}, "kube-system", "contrast-node-installer"))
}), "the runtime is required for subsequent tests to run")

var coordinatorIP string
@@ -120,7 +120,7 @@ func TestRelease(t *testing.T) {
require.NoError(err)

require.NoError(k.Apply(ctx, resources...))
require.NoError(k.WaitFor(ctx, kubeclient.StatefulSet{}, "default", "coordinator"))
require.NoError(k.WaitFor(ctx, kubeclient.Ready, kubeclient.StatefulSet{}, "default", "coordinator"))
coordinatorIP, err = k.WaitForService(ctx, "default", "coordinator", hasLoadBalancer)
require.NoError(err)
}), "the coordinator is required for subsequent tests to run")
@@ -175,10 +175,10 @@ func TestRelease(t *testing.T) {
require.NoError(k.Apply(ctx, resources...))
}

require.NoError(k.WaitFor(ctx, kubeclient.Deployment{}, "default", "vote-bot"))
require.NoError(k.WaitFor(ctx, kubeclient.Deployment{}, "default", "voting"))
require.NoError(k.WaitFor(ctx, kubeclient.Deployment{}, "default", "emoji"))
require.NoError(k.WaitFor(ctx, kubeclient.Deployment{}, "default", "web"))
require.NoError(k.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, "default", "vote-bot"))
require.NoError(k.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, "default", "voting"))
require.NoError(k.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, "default", "emoji"))
require.NoError(k.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, "default", "web"))
}), "applying the demo is required for subsequent tests to run")

t.Run("test-demo", func(t *testing.T) {
9 changes: 4 additions & 5 deletions e2e/servicemesh/servicemesh_test.go
@@ -65,11 +65,10 @@ func TestIngressEgress(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), ct.FactorPlatformTimeout(1*time.Minute))
defer cancel()

require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "vote-bot"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "emoji"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "voting"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "web"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Pod{}, ct.Namespace, "port-forwarder-web-svc"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "vote-bot"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "emoji"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "voting"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "web"))
}), "deployments need to be ready for subsequent tests")

certs := map[string]*x509.CertPool{
4 changes: 2 additions & 2 deletions e2e/volumestatefulset/volumestatefulset_test.go
@@ -61,7 +61,7 @@ func TestVolumeStatefulSet(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()

require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.StatefulSet{}, ct.Namespace, "volume-tester"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.StatefulSet{}, ct.Namespace, "volume-tester"))
}), "deployments need to be ready for subsequent tests")

filePath := "/srv/state/test"
Expand Down Expand Up @@ -93,7 +93,7 @@ func TestVolumeStatefulSet(t *testing.T) {
defer cancel()

require.NoError(ct.Kubeclient.Restart(ctx, kubeclient.StatefulSet{}, ct.Namespace, "volume-tester"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.StatefulSet{}, ct.Namespace, "volume-tester"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.StatefulSet{}, ct.Namespace, "volume-tester"))

pods, err := ct.Kubeclient.PodsFromOwner(ctx, ct.Namespace, "StatefulSet", "volume-tester")
require.NoError(err)
10 changes: 5 additions & 5 deletions e2e/workloadsecret/workloadsecret_test.go
@@ -64,10 +64,10 @@ func TestWorkloadSecrets(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), ct.FactorPlatformTimeout(1*time.Minute))
defer cancel()

require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "vote-bot"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "emoji"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "voting"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "web"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "vote-bot"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "emoji"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "voting"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "web"))
}), "deployments need to be ready for subsequent tests")

// Scale web deployment to 2 replicas.
@@ -78,7 +78,7 @@ func TestWorkloadSecrets(t *testing.T) {
defer cancel()

require.NoError(ct.Kubeclient.ScaleDeployment(ctx, ct.Namespace, "web", 2))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Deployment{}, ct.Namespace, "web"))
require.NoError(ct.Kubeclient.WaitFor(ctx, kubeclient.Ready, kubeclient.Deployment{}, ct.Namespace, "web"))
}), "web deployment needs to be scaled for subsequent tests")

var webWorkloadSecretBytes []byte