diff --git a/e2e/cluster.go b/e2e/cluster.go index ead67f33bb6..f10341479b5 100644 --- a/e2e/cluster.go +++ b/e2e/cluster.go @@ -12,6 +12,7 @@ import ( "time" "github.com/Azure/agentbaker/pkg/agent/datamodel" + nbcontractv1 "github.com/Azure/agentbaker/pkg/proto/nbcontract/v1" "github.com/Azure/agentbakere2e/config" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" @@ -39,6 +40,7 @@ type Cluster struct { Kube *Kubeclient SubnetID string NodeBootstrappingConfiguration *datamodel.NodeBootstrappingConfiguration + AKSNodeConfig *nbcontractv1.Configuration Maintenance *armcontainerservice.MaintenanceConfiguration } @@ -85,20 +87,6 @@ func ClusterAzureNetwork(ctx context.Context, t *testing.T) (*Cluster, error) { return clusterAzureNetwork, clusterAzureNetworkError } -func nodeBootstrappingConfig(ctx context.Context, t *testing.T, kube *Kubeclient) (*datamodel.NodeBootstrappingConfiguration, error) { - clusterParams, err := extractClusterParameters(ctx, t, kube) - if err != nil { - return nil, fmt.Errorf("extract cluster parameters: %w", err) - } - - baseNodeBootstrappingConfig, err := getBaseNodeBootstrappingConfiguration(clusterParams) - if err != nil { - return nil, fmt.Errorf("get base node bootstrapping configuration: %w", err) - } - - return baseNodeBootstrappingConfig, nil -} - func prepareCluster(ctx context.Context, t *testing.T, cluster *armcontainerservice.ManagedCluster, isAirgap bool) (*Cluster, error) { cluster.Name = to.Ptr(fmt.Sprintf("%s-%s", *cluster.Name, hash(cluster))) @@ -141,14 +129,25 @@ func prepareCluster(ctx context.Context, t *testing.T, cluster *armcontainerserv return nil, fmt.Errorf("ensure debug daemonsets for %q: %w", *cluster.Name, err) } - // nodeBootstrappingConfig requires the debug deamonset to already be created t.Log("getting the node bootstrapping configuration for cluster") - nbc, err := nodeBootstrappingConfig(ctx, t, kube) + clusterParams, err := extractClusterParameters(ctx, t, 
kube) + if err != nil { + return nil, fmt.Errorf("extract cluster parameters: %w", err) + } + + nbc, err := getBaseNodeBootstrappingConfiguration(clusterParams) if err != nil { - return nil, fmt.Errorf("get node bootstrapping configuration: %w", err) + return nil, fmt.Errorf("get base node bootstrapping configuration: %w", err) } - return &Cluster{Model: cluster, Kube: kube, SubnetID: subnetID, NodeBootstrappingConfiguration: nbc, Maintenance: maintenance}, nil + return &Cluster{ + Model: cluster, + Kube: kube, + SubnetID: subnetID, + NodeBootstrappingConfiguration: nbc, + Maintenance: maintenance, + AKSNodeConfig: nbcToNbcContractV1(nbc), // TODO: replace with base template + }, nil } func hash(cluster *armcontainerservice.ManagedCluster) string { diff --git a/e2e/exec.go b/e2e/exec.go index 8869106a7b3..7dd2cf2a3ae 100644 --- a/e2e/exec.go +++ b/e2e/exec.go @@ -93,34 +93,36 @@ func extractLogsFromVM(ctx context.Context, t *testing.T, vmssName, privateIP, s return result, nil } -func extractClusterParameters(ctx context.Context, t *testing.T, kube *Kubeclient) (map[string]string, error) { - commandList := map[string]string{ - "/etc/kubernetes/azure.json": "cat /etc/kubernetes/azure.json", - "/etc/kubernetes/certs/ca.crt": "cat /etc/kubernetes/certs/ca.crt", - "/var/lib/kubelet/bootstrap-kubeconfig": "cat /var/lib/kubelet/bootstrap-kubeconfig", - } +type ClusterParams struct { + AzureJSON []byte + CACert []byte + BootstrapKubeconfig []byte +} +func extractClusterParameters(ctx context.Context, t *testing.T, kube *Kubeclient) (ClusterParams, error) { podName, err := getHostNetworkDebugPodName(ctx, kube, t) if err != nil { - return nil, err + return ClusterParams{}, err } - var result = map[string]string{} - for file, sourceCmd := range commandList { - t.Logf("executing privileged command on pod %s/%s: %q", defaultNamespace, podName, sourceCmd) - - execResult, err := execOnPrivilegedPod(ctx, kube, defaultNamespace, podName, sourceCmd) + var resultErr error + exec := 
func(command string) *podExecResult { + t.Logf("executing privileged command on pod %s/%s: %q", defaultNamespace, podName, command) + execResult, err := execOnPrivilegedPod(ctx, kube, defaultNamespace, podName, command) if execResult != nil { execResult.dumpStderr(t) } if err != nil { - return nil, err + resultErr = err } - - result[file] = execResult.stdout.String() + return execResult } - return result, nil + return ClusterParams{ + AzureJSON: exec("cat /etc/kubernetes/azure.json").stdout.Bytes(), + CACert: exec("cat /etc/kubernetes/certs/ca.crt").stdout.Bytes(), + BootstrapKubeconfig: exec("cat /var/lib/kubelet/bootstrap-kubeconfig").stdout.Bytes(), + }, resultErr } func execOnVM(ctx context.Context, kube *Kubeclient, vmPrivateIP, jumpboxPodName, sshPrivateKey, command string, isShellBuiltIn bool) (*podExecResult, error) { diff --git a/e2e/node_bootstrapper_test.go b/e2e/node_bootstrapper_test.go index dfb39a8b542..af7ce7baad6 100644 --- a/e2e/node_bootstrapper_test.go +++ b/e2e/node_bootstrapper_test.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/agentbaker/pkg/agent" "github.com/Azure/agentbaker/pkg/agent/datamodel" + nbcontractv1 "github.com/Azure/agentbaker/pkg/proto/nbcontract/v1" "github.com/Azure/agentbakere2e/config" "github.com/barkimedes/go-deepcopy" "github.com/stretchr/testify/require" @@ -52,10 +53,10 @@ func Test_ubuntu2204NodeBootstrapper(t *testing.T) { mobyComponentVersionValidator("runc", getExpectedPackageVersions("runc", "ubuntu", "r2204")[0], "apt"), FileHasContentsValidator("/var/log/azure/node-bootstrapper.log", "node-bootstrapper finished successfully"), }, - CSEOverride: CSENodeBootstrapper(ctx, t, cluster), - DisableCustomData: true, + CSEOverride: CSENodeBootstrapper(ctx, t, cluster), + DisableCustomData: true, + AKSNodeConfigMutator: func(config *nbcontractv1.Configuration) {}, }, - Tags: Tags{Scriptless: true}, }) } diff --git a/e2e/nodebootstrapping.go b/e2e/nodebootstrapping.go index 038767cfbf9..c58dcc23bd4 100644 --- 
a/e2e/nodebootstrapping.go +++ b/e2e/nodebootstrapping.go @@ -9,11 +9,11 @@ import ( "github.com/Azure/agentbakere2e/config" ) -func getBaseNodeBootstrappingConfiguration(clusterParams map[string]string) (*datamodel.NodeBootstrappingConfiguration, error) { +func getBaseNodeBootstrappingConfiguration(clusterParams ClusterParams) (*datamodel.NodeBootstrappingConfiguration, error) { nbc := baseTemplate(config.Config.Location) - nbc.ContainerService.Properties.CertificateProfile.CaCertificate = clusterParams["/etc/kubernetes/certs/ca.crt"] + nbc.ContainerService.Properties.CertificateProfile.CaCertificate = string(clusterParams.CACert) - bootstrapKubeconfig := clusterParams["/var/lib/kubelet/bootstrap-kubeconfig"] + bootstrapKubeconfig := string(clusterParams.BootstrapKubeconfig) bootstrapToken, err := extractKeyValuePair("token", bootstrapKubeconfig) if err != nil { diff --git a/e2e/scenario_helpers_test.go b/e2e/scenario_helpers_test.go index 81c158ec482..0d4698c82b2 100644 --- a/e2e/scenario_helpers_test.go +++ b/e2e/scenario_helpers_test.go @@ -56,7 +56,7 @@ func RunScenario(t *testing.T, s *Scenario) { ensureResourceGroupOnce(ctx) maybeSkipScenario(ctx, t, s) s.PrepareRuntime(ctx, t) - executeScenario(ctx, t, s) + createAndValidateVM(ctx, t, s) } func maybeSkipScenario(ctx context.Context, t *testing.T, s *Scenario) { @@ -95,7 +95,7 @@ func maybeSkipScenario(ctx context.Context, t *testing.T, s *Scenario) { t.Logf("running scenario %q with vhd: %q, tags %+v", t.Name(), vhd, s.Tags) } -func executeScenario(ctx context.Context, t *testing.T, scenario *Scenario) { +func createAndValidateVM(ctx context.Context, t *testing.T, scenario *Scenario) { rid, _ := scenario.VHD.VHDResourceID(ctx, t) t.Logf("running scenario %q with image %q in aks cluster %q", t.Name(), rid, *scenario.Runtime.Cluster.Model.ID) diff --git a/e2e/scenario_test.go b/e2e/scenario_test.go index 515958b6c21..e3c3ed1e0c6 100644 --- a/e2e/scenario_test.go +++ b/e2e/scenario_test.go @@ -7,6 +7,7 @@ 
import ( "testing" "github.com/Azure/agentbaker/pkg/agent/datamodel" + nbcontractv1 "github.com/Azure/agentbaker/pkg/proto/nbcontract/v1" "github.com/Azure/agentbakere2e/config" "github.com/Azure/agentbakere2e/toolkit" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" @@ -577,18 +578,7 @@ func Test_ubuntu2204ScriptlessInstaller(t *testing.T) { LiveVMValidators: []*LiveVMValidator{ FileHasContentsValidator("/var/log/azure/node-bootstrapper.log", "node-bootstrapper finished successfully"), }, - // TODO: replace it with nbccontract - BootstrapConfigMutator: func(nbc *datamodel.NodeBootstrappingConfiguration) { - nbc.ContainerService.Properties.AgentPoolProfiles[0].Distro = "aks-ubuntu-containerd-22.04-gen2" - nbc.AgentPoolProfile.Distro = "aks-ubuntu-containerd-22.04-gen2" - // Check that we don't leak these secrets if they're - // set (which they mostly aren't in these scenarios). - nbc.ContainerService.Properties.CertificateProfile.ClientPrivateKey = "client cert private key" - nbc.ContainerService.Properties.ServicePrincipalProfile.Secret = "SP secret" - }, - }, - Tags: Tags{ - Scriptless: true, + AKSNodeConfigMutator: func(config *nbcontractv1.Configuration) {}, }, }) } diff --git a/e2e/template.go b/e2e/template.go index 95547ab8cdd..cf2c266d8f1 100644 --- a/e2e/template.go +++ b/e2e/template.go @@ -14,6 +14,8 @@ import ( func nbcToNbcContractV1(nbc *datamodel.NodeBootstrappingConfiguration) *nbcontractv1.Configuration { cs := nbc.ContainerService agentPool := nbc.AgentPoolProfile + // TODO: delete me + agent.ValidateAndSetLinuxNodeBootstrappingConfiguration(nbc) config := &nbcontractv1.Configuration{ Version: "v0", diff --git a/e2e/types.go b/e2e/types.go index 068fd29452c..a4923b2a6bd 100644 --- a/e2e/types.go +++ b/e2e/types.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/Azure/agentbaker/pkg/agent/datamodel" + nbcontractv1 "github.com/Azure/agentbaker/pkg/proto/nbcontract/v1" "github.com/Azure/agentbakere2e/config" 
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" @@ -115,8 +116,9 @@ type Scenario struct { } type ScenarioRuntime struct { - NBC *datamodel.NodeBootstrappingConfiguration - Cluster *Cluster + NBC *datamodel.NodeBootstrappingConfiguration + AKSNodeConfig *nbcontractv1.Configuration + Cluster *Cluster } // Config represents the configuration of an AgentBaker E2E scenario @@ -130,6 +132,9 @@ type Config struct { // BootstrapConfigMutator is a function which mutates the base NodeBootstrappingConfig according to the scenario's requirements BootstrapConfigMutator func(*datamodel.NodeBootstrappingConfiguration) + // AKSNodeConfigMutator if defined then aks-node-controller will be used to provision nodes + AKSNodeConfigMutator func(*nbcontractv1.Configuration) + // VMConfigMutator is a function which mutates the base VMSS model according to the scenario's requirements VMConfigMutator func(*armcompute.VirtualMachineScaleSet) @@ -167,19 +172,8 @@ type LiveVMValidator struct { IsPodNetwork bool } -// PrepareNodeBootstrappingConfiguration mutates the input NodeBootstrappingConfiguration by calling the -// scenario's BootstrapConfigMutator on it, if configured. -func (s *Scenario) PrepareNodeBootstrappingConfiguration(nbc *datamodel.NodeBootstrappingConfiguration) (*datamodel.NodeBootstrappingConfiguration, error) { - // avoid mutating cluster config - nbcAny, err := deepcopy.Anything(nbc) - if err != nil { - return nil, fmt.Errorf("deep copy NodeBootstrappingConfiguration: %w", err) - } - nbc = nbcAny.(*datamodel.NodeBootstrappingConfiguration) - if s.BootstrapConfigMutator != nil { - s.BootstrapConfigMutator(nbc) - } - return nbc, nil +func (s *Scenario) PrepareAKSNodeConfig() { + } // PrepareVMSSModel mutates the input VirtualMachineScaleSet based on the scenario's VMConfigMutator, if configured. 
@@ -224,10 +218,41 @@ func (s *Scenario) PrepareVMSSModel(ctx context.Context, t *testing.T, vmss *arm func (s *Scenario) PrepareRuntime(ctx context.Context, t *testing.T) { cluster, err := s.Config.Cluster(ctx, t) require.NoError(t, err) - nbc, err := s.PrepareNodeBootstrappingConfiguration(cluster.NodeBootstrappingConfiguration) - require.NoError(t, err) + s.Runtime = &ScenarioRuntime{ - NBC: nbc, Cluster: cluster, } + + if (s.BootstrapConfigMutator == nil) == (s.AKSNodeConfigMutator == nil) { + t.Fatalf("exactly one of BootstrapConfigMutator or AKSNodeConfigMutator must be set") + } + + if s.BootstrapConfigMutator != nil { + nbcAny, err := deepcopy.Anything(cluster.NodeBootstrappingConfiguration) + require.NoError(t, err) + nbc := nbcAny.(*datamodel.NodeBootstrappingConfiguration) + s.BootstrapConfigMutator(nbc) + s.Runtime.NBC = nbc + } + if s.AKSNodeConfigMutator != nil { + configAny, err := deepcopy.Anything(cluster.AKSNodeConfig) + require.NoError(t, err) + config := configAny.(*nbcontractv1.Configuration) + s.AKSNodeConfigMutator(config) + s.Runtime.AKSNodeConfig = config + } +} + +// PrepareNodeBootstrappingConfiguration applies the scenario's BootstrapConfigMutator, if configured, to a deep copy of the given configuration. 
+func (s *Scenario) PrepareNodeBootstrappingConfiguration(nbc *datamodel.NodeBootstrappingConfiguration) (*datamodel.NodeBootstrappingConfiguration, error) { + // avoid mutating cluster config + nbcAny, err := deepcopy.Anything(nbc) + if err != nil { + return nil, fmt.Errorf("deep copy NodeBootstrappingConfiguration: %w", err) + } + nbc = nbcAny.(*datamodel.NodeBootstrappingConfiguration) + if s.BootstrapConfigMutator != nil { + s.BootstrapConfigMutator(nbc) + } + return nbc, nil } diff --git a/e2e/vmss.go b/e2e/vmss.go index 8f18428f808..d544b6369fb 100644 --- a/e2e/vmss.go +++ b/e2e/vmss.go @@ -33,17 +33,15 @@ const ( func createVMSS(ctx context.Context, t *testing.T, vmssName string, scenario *Scenario, privateKeyBytes []byte, publicKeyBytes []byte) *armcompute.VirtualMachineScaleSet { cluster := scenario.Runtime.Cluster - nbc := scenario.Runtime.NBC t.Logf("creating VMSS %q in resource group %q", vmssName, *cluster.Model.Properties.NodeResourceGroup) var nodeBootstrapping *datamodel.NodeBootstrapping ab, err := agent.NewAgentBaker() require.NoError(t, err) - if scenario.Tags.Scriptless { - agent.ValidateAndSetLinuxNodeBootstrappingConfiguration(nbc) - nodeBootstrapping, err = ab.GetNodeBootstrappingForScriptless(ctx, nbcToNbcContractV1(nbc), scenario.VHD.Distro, datamodel.AzurePublicCloud) + if scenario.AKSNodeConfigMutator != nil { + nodeBootstrapping, err = ab.GetNodeBootstrappingForScriptless(ctx, scenario.Runtime.AKSNodeConfig, scenario.VHD.Distro, datamodel.AzurePublicCloud) require.NoError(t, err) } else { - nodeBootstrapping, err = ab.GetNodeBootstrapping(ctx, nbc) + nodeBootstrapping, err = ab.GetNodeBootstrapping(ctx, scenario.Runtime.NBC) require.NoError(t, err) }