diff --git a/e2e/scenario_helpers_test.go b/e2e/scenario_helpers_test.go
index 0d4698c82b2..db5db4358ff 100644
--- a/e2e/scenario_helpers_test.go
+++ b/e2e/scenario_helpers_test.go
@@ -10,6 +10,7 @@ import (
 	"testing"
 
 	"github.com/Azure/agentbaker/pkg/agent/datamodel"
+	nbcontractv1 "github.com/Azure/agentbaker/pkg/proto/nbcontract/v1"
 	"github.com/Azure/agentbakere2e/config"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -111,7 +112,10 @@ func createAndValidateVM(ctx context.Context, t *testing.T, scenario *Scenario)
 
 	// skip when outbound type is block as the wasm will create pod from gcr, however, network isolated cluster scenario will block egress traffic of gcr.
 	// TODO(xinhl): add another way to validate
-	if scenario.Runtime.NBC.AgentPoolProfile.WorkloadRuntime == datamodel.WasmWasi && (scenario.Runtime.NBC.OutboundType != datamodel.OutboundTypeBlock && scenario.Runtime.NBC.OutboundType != datamodel.OutboundTypeNone) {
+	if scenario.Runtime.NBC != nil && scenario.Runtime.NBC.AgentPoolProfile.WorkloadRuntime == datamodel.WasmWasi && scenario.Runtime.NBC.OutboundType != datamodel.OutboundTypeBlock && scenario.Runtime.NBC.OutboundType != datamodel.OutboundTypeNone {
+		validateWasm(ctx, t, scenario.Runtime.Cluster.Kube, nodeName)
+	}
+	if scenario.Runtime.AKSNodeConfig.WorkloadRuntime == nbcontractv1.WorkloadRuntime_WASM_WASI {
 		validateWasm(ctx, t, scenario.Runtime.Cluster.Kube, nodeName)
 	}
 
diff --git a/e2e/validation.go b/e2e/validation.go
index 589d8efa090..013287f0211 100644
--- a/e2e/validation.go
+++ b/e2e/validation.go
@@ -159,22 +159,29 @@ func commonLiveVMValidators(scenario *Scenario) []*LiveVMValidator {
 }
 
 func leakedSecretsValidators(scenario *Scenario) []*LiveVMValidator {
-	logPath := "/var/log/azure/cluster-provision.log"
-	nbc := scenario.Runtime.NBC
-	clientPrivateKey := nbc.ContainerService.Properties.CertificateProfile.ClientPrivateKey
-	spSecret := nbc.ContainerService.Properties.ServicePrincipalProfile.Secret
-	bootstrapToken := *nbc.KubeletClientTLSBootstrapToken
-
+	var secrets map[string]string
 	b64Encoded := func(val string) string {
 		return base64.StdEncoding.EncodeToString([]byte(val))
 	}
-	return []*LiveVMValidator{
-		// Base64 encoded in baker.go (GetKubeletClientKey)
-		FileExcludesContentsValidator(logPath, b64Encoded(clientPrivateKey), "client private key"),
-		// Base64 encoded in baker.go (GetServicePrincipalSecret)
-		FileExcludesContentsValidator(logPath, b64Encoded(spSecret), "service principal secret"),
-		// Bootstrap token is already encoded so we don't need to
-		// encode it again here.
-		FileExcludesContentsValidator(logPath, bootstrapToken, "bootstrap token"),
+	if scenario.Runtime.NBC != nil {
+		secrets = map[string]string{
+			"client private key":       b64Encoded(scenario.Runtime.NBC.ContainerService.Properties.CertificateProfile.ClientPrivateKey),
+			"service principal secret": b64Encoded(scenario.Runtime.NBC.ContainerService.Properties.ServicePrincipalProfile.Secret),
+			"bootstrap token":          *scenario.Runtime.NBC.KubeletClientTLSBootstrapToken,
+		}
+	} else {
+		secrets = map[string]string{
+			"client private key":       b64Encoded(scenario.Runtime.AKSNodeConfig.KubeletConfig.KubeletClientKey),
+			"service principal secret": b64Encoded(scenario.Runtime.AKSNodeConfig.AuthConfig.ServicePrincipalSecret),
+			"bootstrap token":          scenario.Runtime.AKSNodeConfig.TlsBootstrappingConfig.TlsBootstrappingToken,
+		}
 	}
+
+	validators := make([]*LiveVMValidator, 0)
+	for _, logFile := range []string{"/var/log/azure/cluster-provision.log", "/var/log/azure/node-bootstrapper.log"} {
+		for secretName, secretValue := range secrets {
+			validators = append(validators, FileExcludesContentsValidator(logFile, secretValue, secretName))
+		}
+	}
+	return validators
 }
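For reference, a minimal standalone sketch of the check the reworked leakedSecretsValidators now fans out: every secret is asserted absent from every bootstrap log file. The log paths match the ones in the diff; the helper name fileExcludesContents and the placeholder secret values are hypothetical and not part of the e2e framework.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"os"
	"strings"
)

// fileExcludesContents returns an error if the file at path contains needle.
// This mirrors (but is not) the FileExcludesContentsValidator used in the e2e suite.
func fileExcludesContents(path, needle, secretName string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return fmt.Errorf("reading %s: %w", path, err)
	}
	if strings.Contains(string(data), needle) {
		return fmt.Errorf("%s leaked into %s", secretName, path)
	}
	return nil
}

func main() {
	b64 := func(s string) string { return base64.StdEncoding.EncodeToString([]byte(s)) }

	// Hypothetical secret values; in the diff these come from NBC or AKSNodeConfig.
	secrets := map[string]string{
		"client private key":       b64("fake-client-key"),
		"service principal secret": b64("fake-sp-secret"),
		"bootstrap token":          "fake-bootstrap-token", // already encoded upstream
	}

	// Same fan-out as the new validator list: every secret checked against every log file.
	for _, logFile := range []string{"/var/log/azure/cluster-provision.log", "/var/log/azure/node-bootstrapper.log"} {
		for name, value := range secrets {
			if err := fileExcludesContents(logFile, value, name); err != nil {
				fmt.Println(err)
			}
		}
	}
}
```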