Merge pull request #11667 from sbueringer/pr-extend-scale-test
✨ Extend scale test and make ExtensionConfig name in RuntimeSDK test configurable
k8s-ci-robot authored Jan 23, 2025
2 parents a090145 + 94af8e9 commit c0cf7c6
Showing 8 changed files with 331 additions and 181 deletions.
58 changes: 38 additions & 20 deletions test/e2e/cluster_upgrade_runtimesdk.go
@@ -21,6 +21,7 @@ import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"

@@ -90,9 +91,15 @@ type ClusterUpgradeWithRuntimeSDKSpecInput struct {
// If not specified, this is a no-op.
PostUpgrade func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace, workloadClusterName string)

// ExtensionConfigName is the name of the ExtensionConfig. Defaults to "k8s-upgrade-with-runtimesdk".
// This value is provided to clusterctl as the "EXTENSION_CONFIG_NAME" variable and can be used to template the
// name of the ExtensionConfig into the ClusterClass.
ExtensionConfigName string

// ExtensionServiceNamespace is the namespace where the service for the Runtime SDK is located
// and is used to configure the Service reference in the test-namespace scoped ExtensionConfig.
ExtensionServiceNamespace string

// ExtensionServiceName is the name of the service to configure in the test-namespace scoped ExtensionConfig.
ExtensionServiceName string
}
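
For orientation, a minimal sketch of how a spec could opt into the new field. The Ginkgo wiring and suite-level variables (ctx, e2eConfig, clusterctlConfigPath, bootstrapClusterProxy, artifactFolder, skipCleanup) are assumed to match the other e2e specs, and all concrete values below are illustrative, not taken from this commit:

// Sketch only: assumes the usual e2e suite globals defined in the suite setup.
var _ = Describe("When upgrading a workload cluster using ClusterClass with RuntimeSDK [ClusterClass]", func() {
	ClusterUpgradeWithRuntimeSDKSpec(ctx, func() ClusterUpgradeWithRuntimeSDKSpecInput {
		return ClusterUpgradeWithRuntimeSDKSpecInput{
			E2EConfig:             e2eConfig,
			ClusterctlConfigPath:  clusterctlConfigPath,
			BootstrapClusterProxy: bootstrapClusterProxy,
			ArtifactFolder:        artifactFolder,
			SkipCleanup:           skipCleanup,
			// New: a custom ExtensionConfig name; it is passed to clusterctl as the
			// EXTENSION_CONFIG_NAME variable and templated into the ClusterClass external patches.
			ExtensionConfigName:       "my-upgrade-extension",       // illustrative value
			ExtensionServiceNamespace: "test-extension-system",      // illustrative value
			ExtensionServiceName:      "test-extension-webhook-service", // illustrative value
		}
	})
})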
@@ -133,6 +140,9 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl

Expect(input.ExtensionServiceNamespace).ToNot(BeEmpty())
Expect(input.ExtensionServiceName).ToNot(BeEmpty())
if input.ExtensionConfigName == "" {
input.ExtensionConfigName = specName
}

if input.ControlPlaneMachineCount == nil {
controlPlaneMachineCount = 1
@@ -161,8 +171,11 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl

By("Deploy Test Extension ExtensionConfig")

// In this test we are defaulting all handlers to blocking because we expect the handlers to block the
// cluster lifecycle by default. Setting defaultAllHandlersToBlocking to true enforces that the test-extension
// automatically creates the ConfigMap with blocking preloaded responses.
Expect(input.BootstrapClusterProxy.GetClient().Create(ctx,
extensionConfig(specName, namespace.Name, input.ExtensionServiceNamespace, input.ExtensionServiceName))).
extensionConfig(input.ExtensionConfigName, input.ExtensionServiceNamespace, input.ExtensionServiceName, true, namespace.Name))).
To(Succeed(), "Failed to create the extension config")

By("Creating a workload cluster; creation waits for BeforeClusterCreateHook to gate the operation")
@@ -177,6 +190,11 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
infrastructureProvider = *input.InfrastructureProvider
}

variables := map[string]string{
// This is used to template the name of the ExtensionConfig into the ClusterClass.
"EXTENSION_CONFIG_NAME": input.ExtensionConfigName,
}

clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
@@ -190,6 +208,7 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom),
ControlPlaneMachineCount: ptr.To[int64](controlPlaneMachineCount),
WorkerMachineCount: ptr.To[int64](workerMachineCount),
ClusterctlVariables: variables,
},
PreWaitForCluster: func() {
beforeClusterCreateTestHandler(ctx,
@@ -304,8 +323,8 @@ func ClusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() Cl
if !input.SkipCleanup {
// Delete the extensionConfig first to ensure the BeforeDeleteCluster hook doesn't block deletion.
Eventually(func() error {
return input.BootstrapClusterProxy.GetClient().Delete(ctx, extensionConfig(specName, namespace.Name, input.ExtensionServiceNamespace, input.ExtensionServiceName))
}, 10*time.Second, 1*time.Second).Should(Succeed(), "delete extensionConfig failed")
return input.BootstrapClusterProxy.GetClient().Delete(ctx, extensionConfig(input.ExtensionConfigName, input.ExtensionServiceNamespace, input.ExtensionServiceName, true, namespace.Name))
}, 10*time.Second, 1*time.Second).Should(Succeed(), "Deleting ExtensionConfig failed")

Byf("Deleting cluster %s", klog.KObj(clusterResources.Cluster))
// While https://github.com/kubernetes-sigs/cluster-api/issues/2955 is addressed in future iterations, there is a chance
@@ -429,8 +448,8 @@ func machineSetPreflightChecksTestHandler(ctx context.Context, c client.Client,
// We make sure this cluster-wide object does not conflict with others by using a randomly generated
// name and a NamespaceSelector selecting on the namespace of the current test.
// Thus, this object is "namespaced" to the current test even though it's a cluster-wide object.
func extensionConfig(name, namespace, extensionServiceNamespace, extensionServiceName string) *runtimev1.ExtensionConfig {
return &runtimev1.ExtensionConfig{
func extensionConfig(name, extensionServiceNamespace, extensionServiceName string, defaultAllHandlersToBlocking bool, namespaces ...string) *runtimev1.ExtensionConfig {
cfg := &runtimev1.ExtensionConfig{
ObjectMeta: metav1.ObjectMeta{
// Note: We have to use a constant name here as we have to be able to reference it in the ClusterClass
// when configuring external patches.
@@ -448,25 +467,24 @@ func extensionConfig(name, namespace, extensionServiceNamespace, extensionServic
Namespace: extensionServiceNamespace,
},
},
NamespaceSelector: &metav1.LabelSelector{
// Note: we are limiting the test extension to be used by the namespace where the test is run.
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "kubernetes.io/metadata.name",
Operator: metav1.LabelSelectorOpIn,
Values: []string{namespace},
},
},
},
Settings: map[string]string{
// In the E2E test we are defaulting all handlers to blocking because cluster_upgrade_runtimesdk_test
// expects the handlers to block the cluster lifecycle by default.
// Setting this value to true enforces that the test-extension automatically creates the ConfigMap with
// blocking preloaded responses.
"defaultAllHandlersToBlocking": "true",
"defaultAllHandlersToBlocking": strconv.FormatBool(defaultAllHandlersToBlocking),
},
},
}
if len(namespaces) > 0 {
cfg.Spec.NamespaceSelector = &metav1.LabelSelector{
// Note: we are limiting the test extension to be used by the namespace where the test is run.
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "kubernetes.io/metadata.name",
Operator: metav1.LabelSelectorOpIn,
Values: namespaces,
},
},
}
}
return cfg
}
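
To illustrate the refactored helper, here is a hedged sketch of the two call shapes the new signature allows. The first matches the RuntimeSDK upgrade spec above; the cluster-wide, non-blocking variant is an assumed usage (e.g. for the extended scale test) and is not shown in this diff, with service names purely illustrative:

// Test-namespace scoped ExtensionConfig with all handlers defaulted to blocking,
// as used by the RuntimeSDK upgrade spec above.
scoped := extensionConfig(input.ExtensionConfigName, input.ExtensionServiceNamespace, input.ExtensionServiceName, true, namespace.Name)

// Cluster-wide ExtensionConfig (no NamespaceSelector) without defaulting handlers to blocking —
// an illustrative call, e.g. for a scale test that only needs the external patches.
clusterWide := extensionConfig("scale", "test-extension-system", "test-extension-webhook-service", false)

_ = scoped
_ = clusterWide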

// Check that each hook in hooks has been called at least once by checking if its actualResponseStatus is in the hook response configmap.
@@ -54,9 +54,9 @@ spec:
patches:
- name: test-patch
external:
generateExtension: generate-patches.k8s-upgrade-with-runtimesdk
validateExtension: validate-topology.k8s-upgrade-with-runtimesdk
discoverVariablesExtension: discover-variables.k8s-upgrade-with-runtimesdk
generateExtension: generate-patches.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"}
validateExtension: validate-topology.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"}
discoverVariablesExtension: discover-variables.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerClusterTemplate
@@ -12,6 +12,7 @@ spec:
serviceDomain: ${SERVICE_DOMAIN:="cluster.local"}
topology:
class: in-memory
classNamespace: ${NAMESPACE}
version: ${KUBERNETES_VERSION}
controlPlane:
replicas: ${CONTROL_PLANE_MACHINE_COUNT}
@@ -20,3 +21,8 @@ spec:
- class: default-worker
name: md-0
replicas: ${WORKER_MACHINE_COUNT}
variables:
- name: kubeadmControlPlaneMaxSurge
value: "1"
- name: imageRepository
value: "kindest"
129 changes: 67 additions & 62 deletions test/e2e/data/infrastructure-inmemory/main/clusterclass-in-memory.yaml
@@ -1,56 +1,3 @@
apiVersion: cluster.x-k8s.io/v1beta1
kind: ClusterClass
metadata:
name: in-memory
spec:
controlPlane:
metadata:
annotations:
machineInfrastructure:
ref:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: InMemoryMachineTemplate
name: in-memory-control-plane
ref:
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlaneTemplate
name: in-memory-control-plane
machineHealthCheck:
unhealthyConditions:
- type: Ready
status: Unknown
timeout: 300s
- type: Ready
status: "False"
timeout: 300s
infrastructure:
ref:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: InMemoryClusterTemplate
name: in-memory-cluster
workers:
machineDeployments:
- class: default-worker
template:
bootstrap:
ref:
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
name: in-memory-default-worker-bootstraptemplate
infrastructure:
ref:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: InMemoryMachineTemplate
name: in-memory-default-worker-machinetemplate
machineHealthCheck:
unhealthyConditions:
- type: Ready
status: Unknown
timeout: 300s
- type: Ready
status: "False"
timeout: 300s
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: InMemoryClusterTemplate
metadata:
@@ -95,19 +42,19 @@ spec:
behaviour:
vm:
provisioning:
startupDuration: "30s"
startupDuration: "10s"
startupJitter: "0.2"
node:
provisioning:
startupDuration: "10s"
startupDuration: "2s"
startupJitter: "0.2"
apiServer:
provisioning:
startupDuration: "10s"
startupDuration: "2s"
startupJitter: "0.2"
etcd:
provisioning:
startupDuration: "10s"
startupDuration: "2s"
startupJitter: "0.2"
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
@@ -120,19 +67,19 @@ spec:
behaviour:
vm:
provisioning:
startupDuration: "30s"
startupDuration: "10s"
startupJitter: "0.2"
node:
provisioning:
startupDuration: "10s"
startupDuration: "2s"
startupJitter: "0.2"
apiServer:
provisioning:
startupDuration: "10s"
startupDuration: "2s"
startupJitter: "0.2"
etcd:
provisioning:
startupDuration: "10s"
startupDuration: "2s"
startupJitter: "0.2"
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
@@ -146,4 +93,62 @@ spec:
nodeRegistration:
criSocket: unix:///var/run/containerd/containerd.sock
kubeletExtraArgs:
eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: ClusterClass
metadata:
name: in-memory
spec:
controlPlane:
metadata:
annotations:
machineInfrastructure:
ref:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: InMemoryMachineTemplate
name: in-memory-control-plane
ref:
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlaneTemplate
name: in-memory-control-plane
machineHealthCheck:
unhealthyConditions:
- type: Ready
status: Unknown
timeout: 300s
- type: Ready
status: "False"
timeout: 300s
infrastructure:
ref:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: InMemoryClusterTemplate
name: in-memory-cluster
workers:
machineDeployments:
- class: default-worker
template:
bootstrap:
ref:
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
name: in-memory-default-worker-bootstraptemplate
infrastructure:
ref:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: InMemoryMachineTemplate
name: in-memory-default-worker-machinetemplate
machineHealthCheck:
unhealthyConditions:
- type: Ready
status: Unknown
timeout: 300s
- type: Ready
status: "False"
timeout: 300s
patches:
- name: test-patch
external:
generateExtension: generate-patches.${EXTENSION_CONFIG_NAME:-"scale"}
discoverVariablesExtension: discover-variables.${EXTENSION_CONFIG_NAME:-"scale"}