Commit

fix scriptless tests
r2k1 committed Oct 26, 2024
1 parent 508b925 commit 99eec2d
Showing 2 changed files with 26 additions and 15 deletions.
6 changes: 5 additions & 1 deletion e2e/scenario_helpers_test.go
@@ -10,6 +10,7 @@ import (
 	"testing"
 
 	"github.com/Azure/agentbaker/pkg/agent/datamodel"
+	nbcontractv1 "github.com/Azure/agentbaker/pkg/proto/nbcontract/v1"
 	"github.com/Azure/agentbakere2e/config"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -111,7 +112,10 @@ func createAndValidateVM(ctx context.Context, t *testing.T, scenario *Scenario)
 
 	// skip when outbound type is block as the wasm will create pod from gcr, however, network isolated cluster scenario will block egress traffic of gcr.
 	// TODO(xinhl): add another way to validate
-	if scenario.Runtime.NBC.AgentPoolProfile.WorkloadRuntime == datamodel.WasmWasi && (scenario.Runtime.NBC.OutboundType != datamodel.OutboundTypeBlock && scenario.Runtime.NBC.OutboundType != datamodel.OutboundTypeNone) {
+	if scenario.Runtime.NBC != nil && scenario.Runtime.NBC.AgentPoolProfile.WorkloadRuntime == datamodel.WasmWasi && scenario.Runtime.NBC.OutboundType != datamodel.OutboundTypeBlock && scenario.Runtime.NBC.OutboundType != datamodel.OutboundTypeNone {
 		validateWasm(ctx, t, scenario.Runtime.Cluster.Kube, nodeName)
 	}
+	if scenario.Runtime.AKSNodeConfig.WorkloadRuntime == nbcontractv1.WorkloadRuntime_WASM_WASI {
+		validateWasm(ctx, t, scenario.Runtime.Cluster.Kube, nodeName)
+	}
 
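The change above reflects that Runtime now carries either the legacy datamodel NBC or the newer nbcontractv1 AKSNodeConfig, so each check must guard against the path that is unset. A hypothetical helper (not part of this commit) sketching that pattern in one place:

// isWasmScenario is a hypothetical helper, not in the commit, illustrating
// the dual-config pattern the diff introduces: nil-guard the legacy NBC
// first, then fall back to the scriptless AKSNodeConfig.
func isWasmScenario(s *Scenario) bool {
	if s.Runtime.NBC != nil {
		return s.Runtime.NBC.AgentPoolProfile.WorkloadRuntime == datamodel.WasmWasi
	}
	if s.Runtime.AKSNodeConfig != nil {
		return s.Runtime.AKSNodeConfig.WorkloadRuntime == nbcontractv1.WorkloadRuntime_WASM_WASI
	}
	return false
}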
35 changes: 21 additions & 14 deletions e2e/validation.go
@@ -159,22 +159,29 @@ func commonLiveVMValidators(scenario *Scenario) []*LiveVMValidator {
 }
 
 func leakedSecretsValidators(scenario *Scenario) []*LiveVMValidator {
-	logPath := "/var/log/azure/cluster-provision.log"
-	nbc := scenario.Runtime.NBC
-	clientPrivateKey := nbc.ContainerService.Properties.CertificateProfile.ClientPrivateKey
-	spSecret := nbc.ContainerService.Properties.ServicePrincipalProfile.Secret
-	bootstrapToken := *nbc.KubeletClientTLSBootstrapToken
-
+	var secrets map[string]string
 	b64Encoded := func(val string) string {
 		return base64.StdEncoding.EncodeToString([]byte(val))
 	}
-	return []*LiveVMValidator{
-		// Base64 encoded in baker.go (GetKubeletClientKey)
-		FileExcludesContentsValidator(logPath, b64Encoded(clientPrivateKey), "client private key"),
-		// Base64 encoded in baker.go (GetServicePrincipalSecret)
-		FileExcludesContentsValidator(logPath, b64Encoded(spSecret), "service principal secret"),
-		// Bootstrap token is already encoded so we don't need to
-		// encode it again here.
-		FileExcludesContentsValidator(logPath, bootstrapToken, "bootstrap token"),
-	}
+	if scenario.Runtime.NBC != nil {
+		secrets = map[string]string{
+			"client private key":       b64Encoded(scenario.Runtime.NBC.ContainerService.Properties.CertificateProfile.ClientPrivateKey),
+			"service principal secret": b64Encoded(scenario.Runtime.NBC.ContainerService.Properties.ServicePrincipalProfile.Secret),
+			"bootstrap token":          *scenario.Runtime.NBC.KubeletClientTLSBootstrapToken,
+		}
+	} else {
+		secrets = map[string]string{
+			"client private key":       b64Encoded(scenario.Runtime.AKSNodeConfig.KubeletConfig.KubeletClientKey),
+			"service principal secret": b64Encoded(scenario.Runtime.AKSNodeConfig.AuthConfig.ServicePrincipalSecret),
+			"bootstrap token":          scenario.Runtime.AKSNodeConfig.TlsBootstrappingConfig.TlsBootstrappingToken,
+		}
+	}
+
+	validators := make([]*LiveVMValidator, 0)
+	for _, logFile := range []string{"/var/log/azure/cluster-provision.log", "/var/log/azure/node-bootstrapper.log"} {
+		for secretName, secretValue := range secrets {
+			validators = append(validators, FileExcludesContentsValidator(logFile, secretValue, secretName))
+		}
+	}
+	return validators
 }
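Per the comments in the removed code, the client key and service principal secret are written base64-encoded by baker.go (GetKubeletClientKey, GetServicePrincipalSecret), so the validators must search the logs for the encoded form; the bootstrap token is already encoded and is checked verbatim. A minimal standalone sketch, using hypothetical values, of why a raw-string search would miss the leak:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Hypothetical secret and log line, for illustration only.
	secret := "sp-secret-value"
	encoded := base64.StdEncoding.EncodeToString([]byte(secret))
	logLine := "CSE output: ... " + encoded + " ..."

	fmt.Println(strings.Contains(logLine, secret))  // false: the raw value never appears in the log
	fmt.Println(strings.Contains(logLine, encoded)) // true: the encoded form is what leaks
}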
