From 075f6490c5f68642e8b4073ad69d4fbb41335836 Mon Sep 17 00:00:00 2001
From: Matthias Teich
Date: Fri, 30 Aug 2024 11:17:39 +0200
Subject: [PATCH] :sparkles: Add IPAM for nodes (#142)

**What is the purpose of this pull request/Why do we need it?**

We would like to get IPs from a fixed pool of IPs instead of relying on DHCP.

**Issue #, if available:** #130

**Description of changes:**

- added IPv4PoolRef/IPv6PoolRef to both IonosCloudMachine and Network
- workflow: the IonosCloudMachine controller checks for PoolRefs and creates
  IPAddressClaims when needed. It then waits for an external controller to
  create IPAddress objects from the IPAddressClaims, and finally uses the IPs
  from the IPAddress objects to create a server via the IONOS Cloud API.

**Special notes for your reviewer:**

I have not written tests yet, as I am waiting for #137 to be merged, and I
would like to get some feedback on this PR first.

I am also unsure where to put the docs; I did not find anything for the other
API features besides the API definition itself. Maybe this is already enough?

**Checklist:**

- [ ] Documentation updated
- [x] Unit Tests added
- [ ] E2E Tests added
- [x] Includes [emojis](https://github.com/kubernetes-sigs/kubebuilder-release-tools?tab=readme-ov-file#kubebuilder-project-versioning)
---
 api/v1alpha1/ionoscloudmachine_types.go       |  27 +-
 api/v1alpha1/ionoscloudmachine_types_test.go  |  53 +-
 api/v1alpha1/suite_test.go                    |   2 +
 api/v1alpha1/types.go                         |  21 +
 api/v1alpha1/zz_generated.deepcopy.go         |  50 +-
 cmd/main.go                                   |   3 +
 ...e.cluster.x-k8s.io_ionoscloudmachines.yaml | 135 ++++-
 ...r.x-k8s.io_ionoscloudmachinetemplates.yaml | 126 +++++
 config/rbac/role.yaml                         |  19 +
 docs/ipam.md                                  |  47 ++
 .../ionoscloudmachine_controller.go           |  11 +-
 internal/service/cloud/server.go              |  47 +-
 internal/service/cloud/suite_test.go          |   2 +
 internal/service/k8s/ipam.go                  | 309 ++++++++++++
 internal/service/k8s/ipam_test.go             | 405 +++++++++++++++
 test/e2e/capic_test.go                        |  25 +
 test/e2e/config/ionoscloud.yaml               |  14 +
 .../cluster-template-ipam.yaml                | 465 ++++++++++++++++++
 .../data/shared/capi-ipam/v0.1/metadata.yaml  |  12 +
 test/e2e/env_test.go                          |  34 +-
 test/e2e/suite_test.go                        |   3 +
 21 files changed, 1744 insertions(+), 66 deletions(-)
 create mode 100644 docs/ipam.md
 create mode 100644 internal/service/k8s/ipam.go
 create mode 100644 internal/service/k8s/ipam_test.go
 create mode 100644 test/e2e/data/infrastructure-ionoscloud/cluster-template-ipam.yaml
 create mode 100644 test/e2e/data/shared/capi-ipam/v0.1/metadata.yaml

diff --git a/api/v1alpha1/ionoscloudmachine_types.go b/api/v1alpha1/ionoscloudmachine_types.go
index b7e591f0..0a4abae3 100644
--- a/api/v1alpha1/ionoscloudmachine_types.go
+++ b/api/v1alpha1/ionoscloudmachine_types.go
@@ -151,8 +151,12 @@ type IonosCloudMachineSpec struct {
 	Disk *Volume `json:"disk"`
 
 	// AdditionalNetworks defines the additional network configurations for the VM.
+	//
 	//+optional
-	AdditionalNetworks Networks `json:"additionalNetworks,omitempty"`
+	AdditionalNetworks []Network `json:"additionalNetworks,omitempty"`
+
+	// IPAMConfig allows obtaining IP addresses from existing IP pools instead of using DHCP.
+	IPAMConfig `json:",inline"`
 
 	// FailoverIP can be set to enable failover for VMs in the same MachineDeployment.
 	// It can be either set to an already reserved IPv4 address, or it can be set to "AUTO"
@@ -172,10 +176,6 @@ type IonosCloudMachineSpec struct {
 	Type ServerType `json:"type,omitempty"`
 }
 
-// Networks contains a list of additional LAN IDs
-// that should be attached to the VM.
-type Networks []Network
-
 // Network contains the config for additional LANs.
 type Network struct {
 	// NetworkID represents an ID an existing LAN in the data center.
@@ -192,6 +192,9 @@ type Network struct {
 	//+kubebuilder:default=true
 	//+optional
 	DHCP *bool `json:"dhcp,omitempty"`
+
+	// IPAMConfig allows obtaining IP addresses from existing IP pools instead of using DHCP.
+	IPAMConfig `json:",inline"`
 }
 
 // Volume is the physical storage on the VM.
@@ -259,7 +262,7 @@ type IonosCloudMachineStatus struct {
 	Ready bool `json:"ready"`
 
 	// MachineNetworkInfo contains information about the network configuration of the VM.
-	// This information is only available after the VM has been provisioned.
+	//+optional
 	MachineNetworkInfo *MachineNetworkInfo `json:"machineNetworkInfo,omitempty"`
 
 	// FailureReason will be set in the event that there is a terminal problem
@@ -315,6 +318,8 @@ type IonosCloudMachineStatus struct {
 }
 
 // MachineNetworkInfo contains information about the network configuration of the VM.
+// Before provisioning, MachineNetworkInfo may contain the IP addresses that will be used for provisioning.
+// After provisioning, this information is complete.
 type MachineNetworkInfo struct {
 	// NICInfo holds information about the NICs, which are attached to the VM.
 	//+optional
@@ -324,10 +329,16 @@ type MachineNetworkInfo struct {
 // NICInfo provides information about the NIC of the VM.
 type NICInfo struct {
 	// IPv4Addresses contains the IPv4 addresses of the NIC.
-	IPv4Addresses []string `json:"ipv4Addresses"`
+	// By default, we enable dual-stack, but as we are storing the IP obtained from AddressClaims here before
+	// creating the VM this can be temporarily empty, e.g. we use DHCP for IPv4 and fixed IP for IPv6.
+	//+optional
+	IPv4Addresses []string `json:"ipv4Addresses,omitempty"`
 
 	// IPv6Addresses contains the IPv6 addresses of the NIC.
-	IPv6Addresses []string `json:"ipv6Addresses"`
+	// By default, we enable dual-stack, but as we are storing the IP obtained from AddressClaims here before
+	// creating the VM this can be temporarily empty, e.g. we use DHCP for IPv6 and fixed IP for IPv4.
+	//+optional
+	IPv6Addresses []string `json:"ipv6Addresses,omitempty"`
 
 	// NetworkID is the ID of the LAN to which the NIC is connected.
NetworkID int32 `json:"networkID"` diff --git a/api/v1alpha1/ionoscloudmachine_types_test.go b/api/v1alpha1/ionoscloudmachine_types_test.go index 94a5c833..8ce11396 100644 --- a/api/v1alpha1/ionoscloudmachine_types_test.go +++ b/api/v1alpha1/ionoscloudmachine_types_test.go @@ -55,7 +55,7 @@ func defaultMachine() *IonosCloudMachine { ID: "1eef-48ec-a246-a51a33aa4f3a", }, }, - AdditionalNetworks: Networks{ + AdditionalNetworks: []Network{ { NetworkID: 1, }, @@ -64,6 +64,20 @@ func defaultMachine() *IonosCloudMachine { } } +func setInvalidPoolRef(m *IonosCloudMachine, poolType string, kind, apiGroup, name string) { + ref := &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To(apiGroup), + Kind: kind, + Name: name, + } + switch poolType { + case "IPv6": + m.Spec.AdditionalNetworks[0].IPv6PoolRef = ref + case "IPv4": + m.Spec.AdditionalNetworks[0].IPv4PoolRef = ref + } +} + var _ = Describe("IonosCloudMachine Tests", func() { AfterEach(func() { m := &IonosCloudMachine{ @@ -354,6 +368,43 @@ var _ = Describe("IonosCloudMachine Tests", func() { m.Spec.AdditionalNetworks[0].NetworkID = -1 Expect(k8sClient.Create(context.Background(), m)).ToNot(Succeed()) }) + DescribeTable("should allow IPv4PoolRef.Kind GlobalInClusterIPPool and InClusterIPPool", func(kind string) { + m := defaultMachine() + m.Spec.AdditionalNetworks[0].IPv4PoolRef = &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: kind, + Name: "ipv4-pool", + } + Expect(k8sClient.Create(context.Background(), m)).To(Succeed()) + }, + Entry("GlobalInClusterIPPool", "GlobalInClusterIPPool"), + Entry("InClusterIPPool", "InClusterIPPool"), + ) + DescribeTable("should allow IPv6PoolRef.Kind GlobalInClusterIPPool and InClusterIPPool", func(kind string) { + m := defaultMachine() + m.Spec.AdditionalNetworks[0].IPv6PoolRef = &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: kind, + Name: "ipv6-pool", + } + Expect(k8sClient.Create(context.Background(), m)).To(Succeed()) + }, + Entry("GlobalInClusterIPPool", "GlobalInClusterIPPool"), + Entry("InClusterIPPool", "InClusterIPPool"), + ) + DescribeTable("must not allow invalid pool references", + func(poolType, kind, apiGroup, name string) { + m := defaultMachine() + setInvalidPoolRef(m, poolType, kind, apiGroup, name) + Expect(k8sClient.Create(context.Background(), m)).ToNot(Succeed()) + }, + Entry("invalid IPv6PoolRef with invalid kind", "IPv6", "SomeOtherIPPoolKind", "ipam.cluster.x-k8s.io", "ipv6-pool"), + Entry("invalid IPv6PoolRef with invalid apiGroup", "IPv6", "InClusterIPPool", "SomeWrongAPIGroup", "ipv6-pool"), + Entry("invalid IPv6PoolRef with empty name", "IPv6", "InClusterIPPool", "ipam.cluster.x-k8s.io", ""), + Entry("invalid IPv4PoolRef with invalid kind", "IPv4", "SomeOtherIPPoolKind", "ipam.cluster.x-k8s.io", "ipv4-pool"), + Entry("invalid IPv4PoolRef with invalid apiGroup", "IPv4", "InClusterIPPool", "SomeWrongAPIGroup", "ipv4-pool"), + Entry("invalid IPv4PoolRef with empty name", "IPv4", "InClusterIPPool", "ipam.cluster.x-k8s.io", ""), + ) It("DHCP should default to true", func() { m := defaultMachine() Expect(k8sClient.Create(context.Background(), m)).To(Succeed()) diff --git a/api/v1alpha1/suite_test.go b/api/v1alpha1/suite_test.go index 96f12c78..f90453bf 100644 --- a/api/v1alpha1/suite_test.go +++ b/api/v1alpha1/suite_test.go @@ -21,6 +21,7 @@ import ( "testing" "k8s.io/apimachinery/pkg/runtime" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" 
"sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -53,6 +54,7 @@ var _ = BeforeSuite(func() { scheme := runtime.NewScheme() Expect(AddToScheme(scheme)).To(Succeed()) + Expect(ipamv1.AddToScheme(scheme)).To(Succeed()) cfg, err := testEnv.Start() Expect(err).ToNot(HaveOccurred()) diff --git a/api/v1alpha1/types.go b/api/v1alpha1/types.go index 4fd6c6b5..9eafdceb 100644 --- a/api/v1alpha1/types.go +++ b/api/v1alpha1/types.go @@ -16,6 +16,8 @@ limitations under the License. package v1alpha1 +import corev1 "k8s.io/api/core/v1" + // ProvisioningRequest is a definition of a provisioning request // in the IONOS Cloud. type ProvisioningRequest struct { @@ -30,3 +32,22 @@ type ProvisioningRequest struct { //+optional State string `json:"state,omitempty"` } + +// IPAMConfig optionally defines which IP Pools to use. +type IPAMConfig struct { + // IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. + // The NIC will use an available IP address from the referenced pool. + // +kubebuilder:validation:XValidation:rule="self.apiGroup == 'ipam.cluster.x-k8s.io'",message="ipv4PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io" + // +kubebuilder:validation:XValidation:rule="self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'",message="ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool" + // +kubebuilder:validation:XValidation:rule="self.name != ''",message="ipv4PoolRef.name is required" + // +optional + IPv4PoolRef *corev1.TypedLocalObjectReference `json:"ipv4PoolRef,omitempty"` + + // IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. + // The NIC will use an available IP address from the referenced pool. + // +kubebuilder:validation:XValidation:rule="self.apiGroup == 'ipam.cluster.x-k8s.io'",message="ipv6PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io" + // +kubebuilder:validation:XValidation:rule="self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'",message="ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool" + // +kubebuilder:validation:XValidation:rule="self.name != ''",message="ipv6PoolRef.name is required" + // +optional + IPv6PoolRef *corev1.TypedLocalObjectReference `json:"ipv6PoolRef,omitempty"` +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index a01890bb..736fcfaf 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -27,6 +27,31 @@ import ( "sigs.k8s.io/cluster-api/errors" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAMConfig) DeepCopyInto(out *IPAMConfig) { + *out = *in + if in.IPv4PoolRef != nil { + in, out := &in.IPv4PoolRef, &out.IPv4PoolRef + *out = new(v1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + if in.IPv6PoolRef != nil { + in, out := &in.IPv6PoolRef, &out.IPv6PoolRef + *out = new(v1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfig. +func (in *IPAMConfig) DeepCopy() *IPAMConfig { + if in == nil { + return nil + } + out := new(IPAMConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ImageSelector) DeepCopyInto(out *ImageSelector) { *out = *in @@ -358,11 +383,12 @@ func (in *IonosCloudMachineSpec) DeepCopyInto(out *IonosCloudMachineSpec) { } if in.AdditionalNetworks != nil { in, out := &in.AdditionalNetworks, &out.AdditionalNetworks - *out = make(Networks, len(*in)) + *out = make([]Network, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } + in.IPAMConfig.DeepCopyInto(&out.IPAMConfig) if in.FailoverIP != nil { in, out := &in.FailoverIP, &out.FailoverIP *out = new(string) @@ -573,6 +599,7 @@ func (in *Network) DeepCopyInto(out *Network) { *out = new(bool) **out = **in } + in.IPAMConfig.DeepCopyInto(&out.IPAMConfig) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. @@ -585,27 +612,6 @@ func (in *Network) DeepCopy() *Network { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in Networks) DeepCopyInto(out *Networks) { - { - in := &in - *out = make(Networks, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networks. -func (in Networks) DeepCopy() Networks { - if in == nil { - return nil - } - out := new(Networks) - in.DeepCopyInto(out) - return *out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProvisioningRequest) DeepCopyInto(out *ProvisioningRequest) { *out = *in diff --git a/cmd/main.go b/cmd/main.go index fb9dc11a..7a7042e8 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -28,6 +28,7 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/cluster-api/util/flags" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -53,6 +54,8 @@ func init() { utilruntime.Must(clusterv1.AddToScheme(scheme)) utilruntime.Must(infrav1.AddToScheme(scheme)) + utilruntime.Must(ipamv1.AddToScheme(scheme)) + //+kubebuilder:scaffold:scheme } diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml index 588f4084..2072c4d2 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachines.yaml @@ -75,6 +75,64 @@ spec: DHCP indicates whether DHCP is enabled for the LAN. The primary NIC will always have DHCP enabled. type: boolean + ipv4PoolRef: + description: |- + IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. + The NIC will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv4PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' + - message: ipv4PoolRef.name is required + rule: self.name != '' + ipv6PoolRef: + description: |- + IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. + The NIC will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv6PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' + - message: ipv6PoolRef.name is required + rule: self.name != '' networkID: description: |- NetworkID represents an ID an existing LAN in the data center. @@ -192,6 +250,64 @@ spec: rule: self == oldSelf - message: failoverIP must be either 'AUTO' or a valid IPv4 address rule: self == "AUTO" || self.matches("((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}$") + ipv4PoolRef: + description: |- + IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. + The NIC will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv4PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' + - message: ipv4PoolRef.name is required + rule: self.name != '' + ipv6PoolRef: + description: |- + IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. + The NIC will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv6PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' + - message: ipv6PoolRef.name is required + rule: self.name != '' memoryMB: default: 3072 description: |- @@ -346,9 +462,8 @@ spec: provisioned in. type: string machineNetworkInfo: - description: |- - MachineNetworkInfo contains information about the network configuration of the VM. - This information is only available after the VM has been provisioned. + description: MachineNetworkInfo contains information about the network + configuration of the VM. properties: nicInfo: description: NICInfo holds information about the NICs, which are @@ -358,14 +473,18 @@ spec: VM. properties: ipv4Addresses: - description: IPv4Addresses contains the IPv4 addresses of - the NIC. + description: |- + IPv4Addresses contains the IPv4 addresses of the NIC. + By default, we enable dual-stack, but as we are storing the IP obtained from AddressClaims here before + creating the VM this can be temporarily empty, e.g. we use DHCP for IPv4 and fixed IP for IPv6. items: type: string type: array ipv6Addresses: - description: IPv6Addresses contains the IPv6 addresses of - the NIC. + description: |- + IPv6Addresses contains the IPv6 addresses of the NIC. + By default, we enable dual-stack, but as we are storing the IP obtained from AddressClaims here before + creating the VM this can be temporarily empty, e.g. we use DHCP for IPv6 and fixed IP for IPv4. items: type: string type: array @@ -379,8 +498,6 @@ spec: NIC of the VM. type: boolean required: - - ipv4Addresses - - ipv6Addresses - networkID - primary type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml index 793490ca..2565b2a7 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudmachinetemplates.yaml @@ -85,6 +85,74 @@ spec: DHCP indicates whether DHCP is enabled for the LAN. The primary NIC will always have DHCP enabled. type: boolean + ipv4PoolRef: + description: |- + IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. + The NIC will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv4PoolRef allows only IPAMConfig apiGroup + ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv4PoolRef allows either InClusterIPPool + or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind + == 'GlobalInClusterIPPool' + - message: ipv4PoolRef.name is required + rule: self.name != '' + ipv6PoolRef: + description: |- + IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. + The NIC will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv6PoolRef allows only IPAMConfig apiGroup + ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv6PoolRef allows either InClusterIPPool + or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind + == 'GlobalInClusterIPPool' + - message: ipv6PoolRef.name is required + rule: self.name != '' networkID: description: |- NetworkID represents an ID an existing LAN in the data center. @@ -205,6 +273,64 @@ spec: - message: failoverIP must be either 'AUTO' or a valid IPv4 address rule: self == "AUTO" || self.matches("((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}$") + ipv4PoolRef: + description: |- + IPv4PoolRef is a reference to an IPAMConfig Pool resource, which exposes IPv4 addresses. + The NIC will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv4PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' + - message: ipv4PoolRef.name is required + rule: self.name != '' + ipv6PoolRef: + description: |- + IPv6PoolRef is a reference to an IPAMConfig pool resource, which exposes IPv6 addresses. + The NIC will use an available IP address from the referenced pool. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: ipv6PoolRef allows only IPAMConfig apiGroup ipam.cluster.x-k8s.io + rule: self.apiGroup == 'ipam.cluster.x-k8s.io' + - message: ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool + rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool' + - message: ipv6PoolRef.name is required + rule: self.name != '' memoryMB: default: 3072 description: |- diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index dc0f6a9a..f7dc8d00 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -76,3 +76,22 @@ rules: - get - patch - update +- apiGroups: + - ipam.cluster.x-k8s.io + resources: + - ipaddressclaims + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - ipam.cluster.x-k8s.io + resources: + - ipaddresses + verbs: + - get + - list + - watch diff --git a/docs/ipam.md b/docs/ipam.md new file mode 100644 index 00000000..157e0a7a --- /dev/null +++ b/docs/ipam.md @@ -0,0 +1,47 @@ +# What is IPAM? +IPAM (IP Address Management) is a system used to manage IP address allocation and tracking within a network. In Kubernetes, IPAM is crucial for managing IP addresses across dynamic and often ephemeral workloads, ensuring each network interface within the cluster is assigned a unique and valid IP address. + +## Why Use IPAM? +- **Automation**: Simplifies network configuration by automating IP assignment. +- **Scalability**: Supports dynamic scaling of clusters by efficiently managing IP addresses. +- **Flexibility**: Works with various network topologies and can integrate with both cloud-based and on-premises IPAM solutions. +- **Security**: Reduces the risk of IP conflicts and unauthorized access by ensuring each node and pod has a unique IP address. + +## Prerequisites for Using IPAM in Kubernetes +To use IPAM, you need an IPAM provider. One such provider is the [Cluster API IPAM Provider In-Cluster](https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster). This provider allows Kubernetes to integrate IPAM functionalities directly into its cluster management workflow. + +## Setting Up an IPAM Provider +- **Install the IPAM Provider**: Deploy the IPAM provider in your Kubernetes cluster. This will typically involve deploying custom controllers and CRDs (Custom Resource Definitions) to manage IP pools. +- **Create IP Pools**: Define IP pools that will be used for assigning IPs to network interfaces. These can be specific to a cluster (InClusterIPPool) or shared across clusters (GlobalInClusterIPPool). + +## Using IPAM with IonosCloudMachine +### Example YAML Configuration +Let's explore how to integrate IPAM with your IonosCloudMachine resource in Kubernetes. 
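+Before referencing a pool from a machine, the pool itself has to exist. The following is a minimal sketch, assuming the v1alpha2 API of the [Cluster API IPAM Provider In-Cluster](https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster); the address range, prefix, and gateway are purely illustrative values:
+
+```yaml
+apiVersion: ipam.cluster.x-k8s.io/v1alpha2
+kind: InClusterIPPool
+metadata:
+  name: primary-node-ips
+spec:
+  addresses:
+    - 203.0.113.10-203.0.113.20
+  prefix: 24
+  gateway: 203.0.113.1
+```
+
+With the pool in place, reference it from the machine: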
+```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: IonosCloudMachine +metadata: + name: example-machine +spec: + ipv4PoolRef: + apiGroup: ipam.cluster.x-k8s.io + kind: InClusterIPPool + name: primary-node-ips + additionalNetworks: + - networkID: 3 + ipv4PoolRef: + apiGroup: ipam.cluster.x-k8s.io + kind: InClusterIPPool + name: additional-node-ips +``` + ### Explanation of Configuration + +```yaml +ipv4PoolRef: + apiGroup: ipam.cluster.x-k8s.io + kind: InClusterIPPool + name: primary-node-ips +``` +- **apiGroup**: Specifies the API group for the IPAM configuration, ensuring the correct API resources are targeted. +- **kind**: The type of IP pool being referenced. In this case, it's InClusterIPPool, which is specific to the current cluster. +- **name**: The name of the IP pool (primary-node-ips) from which the primary NIC will obtain its IP address. diff --git a/internal/controller/ionoscloudmachine_controller.go b/internal/controller/ionoscloudmachine_controller.go index f3cbdea3..16e4c8e2 100644 --- a/internal/controller/ionoscloudmachine_controller.go +++ b/internal/controller/ionoscloudmachine_controller.go @@ -36,6 +36,7 @@ import ( infrav1 "github.com/ionos-cloud/cluster-api-provider-ionoscloud/api/v1alpha1" "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/service/cloud" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/service/k8s" "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/util/locker" "github.com/ionos-cloud/cluster-api-provider-ionoscloud/scope" ) @@ -62,6 +63,8 @@ func NewIonosCloudMachineReconciler(mgr ctrl.Manager) *IonosCloudMachineReconcil //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=ionoscloudmachines/finalizers,verbs=update //+kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch +//+kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddresses,verbs=get;list;watch +//+kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddressclaims,verbs=get;list;watch;create;update;delete //+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;update //+kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch @@ -135,11 +138,11 @@ func (r *IonosCloudMachineReconciler) Reconcile( return r.reconcileDelete(ctx, machineScope, cloudService) } - return r.reconcileNormal(ctx, cloudService, machineScope) + return r.reconcileNormal(ctx, machineScope, cloudService) } func (r *IonosCloudMachineReconciler) reconcileNormal( - ctx context.Context, cloudService *cloud.Service, machineScope *scope.Machine, + ctx context.Context, machineScope *scope.Machine, cloudService *cloud.Service, ) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) log.V(4).Info("Reconciling IonosCloudMachine") @@ -175,8 +178,10 @@ func (r *IonosCloudMachineReconciler) reconcileNormal( return ctrl.Result{RequeueAfter: defaultReconcileDuration}, nil } + k8sHelper := k8s.NewHelper(r.Client, log) reconcileSequence := []serviceReconcileStep[scope.Machine]{ {"ReconcileLAN", cloudService.ReconcileLAN}, + {"ReconcileIPAddressClaims", k8sHelper.ReconcileIPAddresses}, {"ReconcileServer", cloudService.ReconcileServer}, {"ReconcileIPFailover", cloudService.ReconcileIPFailover}, {"FinalizeMachineProvisioning", cloudService.FinalizeMachineProvisioning}, @@ -215,6 +220,7 @@ func (r *IonosCloudMachineReconciler) reconcileDelete( return ctrl.Result{RequeueAfter: reducedReconcileDuration}, nil } + ipamHelper := k8s.NewHelper(r.Client, log) reconcileSequence 
:= []serviceReconcileStep[scope.Machine]{ // NOTE(avorima): NICs, which are configured in an IP failover configuration, cannot be deleted // by a request to delete the server. Therefore, during deletion, we need to remove the NIC from @@ -223,6 +229,7 @@ func (r *IonosCloudMachineReconciler) reconcileDelete( {"ReconcileServerDeletion", cloudService.ReconcileServerDeletion}, {"ReconcileLANDeletion", cloudService.ReconcileLANDeletion}, {"ReconcileFailoverIPBlockDeletion", cloudService.ReconcileFailoverIPBlockDeletion}, + {"ReconcileIPAddressClaimsDeletion", ipamHelper.ReconcileIPAddressClaimsDeletion}, } for _, step := range reconcileSequence { diff --git a/internal/service/cloud/server.go b/internal/service/cloud/server.go index 3fff5ea8..7bbda6c5 100644 --- a/internal/service/cloud/server.go +++ b/internal/service/cloud/server.go @@ -412,29 +412,48 @@ func (s *Service) buildServerEntities(ms *scope.Machine, params serverEntityPara Items: &[]sdk.Volume{bootVolume}, } - // As we want to retrieve a public IP from the DHCP, we need to + primaryNIC := sdk.Nic{ + Properties: &sdk.NicProperties{ + Lan: ¶ms.lanID, + Name: ptr.To(s.nicName(ms.IonosMachine)), + Dhcp: ptr.To(true), + }, + } + + if ms.IonosMachine.Status.MachineNetworkInfo != nil { + nicInfo := ms.IonosMachine.Status.MachineNetworkInfo.NICInfo[0] + primaryNIC.Properties.Ips = ptr.To(nicInfo.IPv4Addresses) + primaryNIC.Properties.Ipv6Ips = ptr.To(nicInfo.IPv6Addresses) + } + + // In case we want to retrieve a public IP from the DHCP, we need to // create a NIC with empty IP addresses and patch the NIC afterward. + // To simplify the code we also follow this approach when using IP pools. serverNICs := sdk.Nics{ Items: &[]sdk.Nic{ - { - Properties: &sdk.NicProperties{ - Dhcp: ptr.To(true), - Lan: ¶ms.lanID, - Name: ptr.To(s.nicName(ms.IonosMachine)), - }, - }, + primaryNIC, }, } // Attach server to additional LANs if any. 
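+	// The NIC IPs were reconciled into status.machineNetworkInfo before this
+	// request is built: NICInfo[0] belongs to the primary NIC, so the NIC for
+	// AdditionalNetworks[i] reads its addresses from NICInfo[i+1] below.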
items := *serverNICs.Items - for _, nic := range ms.IonosMachine.Spec.AdditionalNetworks { - items = append(items, sdk.Nic{Properties: &sdk.NicProperties{ - Lan: &nic.NetworkID, - Vnet: nic.VNET, - Dhcp: nic.DHCP, - }}) + for i, nw := range ms.IonosMachine.Spec.AdditionalNetworks { + nic := sdk.Nic{ + Properties: &sdk.NicProperties{ + Lan: &nw.NetworkID, + Vnet: nw.VNET, + Dhcp: nw.DHCP, + }, + } + + if ms.IonosMachine.Status.MachineNetworkInfo != nil { + nicInfo := ms.IonosMachine.Status.MachineNetworkInfo.NICInfo[i+1] + nic.Properties.Ips = ptr.To(nicInfo.IPv4Addresses) + nic.Properties.Ipv6Ips = ptr.To(nicInfo.IPv6Addresses) + } + + items = append(items, nic) } serverNICs.Items = &items diff --git a/internal/service/cloud/suite_test.go b/internal/service/cloud/suite_test.go index 633e7760..c6463d9b 100644 --- a/internal/service/cloud/suite_test.go +++ b/internal/service/cloud/suite_test.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -173,6 +174,7 @@ func (s *ServiceTestSuite) SetupTest() { scheme := runtime.NewScheme() s.NoError(clusterv1.AddToScheme(scheme), "failed to extend scheme with Cluster API types") + s.NoError(ipamv1.AddToScheme(scheme), "failed to extend scheme with Cluster API ipam types") s.NoError(infrav1.AddToScheme(scheme), "failed to extend scheme with IonosCloud types") s.NoError(clientgoscheme.AddToScheme(scheme)) diff --git a/internal/service/k8s/ipam.go b/internal/service/k8s/ipam.go new file mode 100644 index 00000000..e0b6207f --- /dev/null +++ b/internal/service/k8s/ipam.go @@ -0,0 +1,309 @@ +/* +Copyright 2024 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package k8s offers services to interact with kubernetes. +package k8s + +import ( + "context" + "errors" + "fmt" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + infrav1 "github.com/ionos-cloud/cluster-api-provider-ionoscloud/api/v1alpha1" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/scope" +) + +const ( + // primaryNICFormat is the format used for IPAddressClaims for the primary NIC. + primaryNICFormat = "nic-%s" + + // additionalNICFormat is the format used for IPAddressClaims for additional nics. + additionalNICFormat = "nic-%s-%d" + + // ipV4Format is the IP v4 format. + ipV4Format = "ipv4" + + // ipV6Format is the IP v6 format. + ipV6Format = "ipv6" +) + +// Helper offers IP address management services for IONOS Cloud machine reconciliation. 
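+// It creates IPAddressClaims for every NIC that references an IP pool, waits
+// for an IPAM provider to allocate IPAddress objects for those claims, and
+// mirrors the allocated addresses into the IonosCloudMachine status.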
+type Helper struct {
+	logger logr.Logger
+	client client.Client
+}
+
+// NewHelper creates a new Helper.
+func NewHelper(c client.Client, log logr.Logger) *Helper {
+	h := new(Helper)
+	h.client = c
+	h.logger = log
+
+	return h
+}
+
+// ReconcileIPAddresses prevents successful reconciliation of an IonosCloudMachine
+// until an IPAM provider updates each IPAddressClaim associated with the
+// IonosCloudMachine with a reference to an IPAddress. The IPAddress is stored in the status.
+// This function is a no-op if the IonosCloudMachine has no associated IPAddressClaims.
+func (h *Helper) ReconcileIPAddresses(ctx context.Context, machineScope *scope.Machine) (requeue bool, err error) {
+	log := h.logger.WithName("reconcileIPAddresses")
+	log.V(4).Info("reconciling IPAddresses.")
+
+	networkInfos := &[]infrav1.NICInfo{}
+
+	// primary NIC.
+	requeue, err = h.handlePrimaryNIC(ctx, machineScope, networkInfos)
+	if err != nil {
+		return true, errors.Join(err, errors.New("unable to handle primary nic"))
+	}
+
+	if machineScope.IonosMachine.Spec.AdditionalNetworks != nil {
+		waitForAdditionalIP, err := h.handleAdditionalNICs(ctx, machineScope, networkInfos)
+		if err != nil {
+			return true, errors.Join(err, errors.New("unable to handle additional nics"))
+		}
+		requeue = requeue || waitForAdditionalIP
+	}
+
+	// update the status
+	log.V(4).Info("updating IonosMachine.status.machineNetworkInfo.")
+	machineScope.IonosMachine.Status.MachineNetworkInfo = &infrav1.MachineNetworkInfo{NICInfo: *networkInfos}
+
+	return requeue, nil
+}
+
+// ReconcileIPAddressClaimsDeletion removes the MachineFinalizer from the IPAddressClaims.
+func (h *Helper) ReconcileIPAddressClaimsDeletion(ctx context.Context, machineScope *scope.Machine) (requeue bool, err error) {
+	log := h.logger.WithName("reconcileIPAddressClaimsDeletion")
+	log.V(4).Info("removing finalizers from IPAddressClaims.")
+
+	formats := []string{ipV4Format, ipV6Format}
+	nicNames := []string{fmt.Sprintf(primaryNICFormat, machineScope.IonosMachine.Name)}
+
+	for _, network := range machineScope.IonosMachine.Spec.AdditionalNetworks {
+		nicName := fmt.Sprintf(additionalNICFormat, machineScope.IonosMachine.Name, network.NetworkID)
+		nicNames = append(nicNames, nicName)
+	}
+
+	for _, format := range formats {
+		for _, nicName := range nicNames {
+			key := client.ObjectKey{
+				Namespace: machineScope.IonosMachine.Namespace,
+				Name:      fmt.Sprintf("%s-%s", nicName, format),
+			}
+
+			claim, err := h.GetIPAddressClaim(ctx, key)
+			if err != nil {
+				if apierrors.IsNotFound(err) {
+					continue
+				}
+				return true, err
+			}
+
+			if updated := controllerutil.RemoveFinalizer(claim, infrav1.MachineFinalizer); updated {
+				if err = h.client.Update(ctx, claim); err != nil {
+					return true, err
+				}
+			}
+		}
+	}
+
+	return false, nil
+}
+
+func (h *Helper) handlePrimaryNIC(ctx context.Context, machineScope *scope.Machine, nics *[]infrav1.NICInfo) (waitForIP bool, err error) {
+	nic := infrav1.NICInfo{Primary: true}
+	ipamConfig := machineScope.IonosMachine.Spec.IPAMConfig
+	nicName := fmt.Sprintf(primaryNICFormat, machineScope.IonosMachine.Name)
+
+	// default NIC IPv4.
+	if ipamConfig.IPv4PoolRef != nil {
+		ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, ipV4Format, ipamConfig.IPv4PoolRef)
+		if err != nil {
+			return false, err
+		}
+		if ip == "" {
+			waitForIP = true
+		} else {
+			nic.IPv4Addresses = []string{ip}
+		}
+	}
+
+	// default NIC IPv6.
+ if ipamConfig.IPv6PoolRef != nil { + ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, ipV6Format, ipamConfig.IPv6PoolRef) + if err != nil { + return false, err + } + if ip == "" { + waitForIP = true + } else { + nic.IPv6Addresses = []string{ip} + } + } + + *nics = append(*nics, nic) + + return waitForIP, nil +} + +func (h *Helper) handleAdditionalNICs(ctx context.Context, machineScope *scope.Machine, nics *[]infrav1.NICInfo) (waitForIP bool, err error) { + for _, net := range machineScope.IonosMachine.Spec.AdditionalNetworks { + nic := infrav1.NICInfo{Primary: false} + nicName := fmt.Sprintf(additionalNICFormat, machineScope.IonosMachine.Name, net.NetworkID) + if net.IPv4PoolRef != nil { + ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, ipV4Format, net.IPv4PoolRef) + if err != nil { + return false, errors.Join(err, fmt.Errorf("unable to handle IPv4Address for nic %s", nicName)) + } + if ip == "" { + waitForIP = true + } else { + nic.IPv4Addresses = []string{ip} + } + } + + if net.IPv6PoolRef != nil { + ip, err := h.handleIPAddressForNIC(ctx, machineScope, nicName, ipV6Format, net.IPv6PoolRef) + if err != nil { + return false, errors.Join(err, fmt.Errorf("unable to handle IPv6Address for nic %s", nicName)) + } + if ip == "" { + waitForIP = true + } else { + nic.IPv6Addresses = []string{ip} + } + } + + *nics = append(*nics, nic) + } + + return waitForIP, nil +} + +// handleIPAddressForNIC checks for an IPAddressClaim. If there is one it extracts the ip from the corresponding IPAddress object, otherwise it creates the IPAddressClaim and returns early. +func (h *Helper) handleIPAddressForNIC(ctx context.Context, machineScope *scope.Machine, nic, suffix string, poolRef *corev1.TypedLocalObjectReference) (ip string, err error) { + log := h.logger.WithName("handleIPAddressForNIC") + + key := client.ObjectKey{ + Namespace: machineScope.IonosMachine.Namespace, + Name: fmt.Sprintf("%s-%s", nic, suffix), + } + + claim, err := h.GetIPAddressClaim(ctx, key) + if err != nil { + if !apierrors.IsNotFound(err) { + return "", err + } + log.V(4).Info("IPAddressClaim not found, creating it.", "nic", nic) + err = h.CreateIPAddressClaim(ctx, machineScope.IonosMachine, key.Name, machineScope.ClusterScope.Cluster.Name, poolRef) + if err != nil { + return "", errors.Join(err, fmt.Errorf("unable to create IPAddressClaim for machine %s", machineScope.IonosMachine.Name)) + } + // we just created the claim, so we can return early and wait for the creation of the IPAddress. + return "", nil + } + + // we found a claim, lets see if there is an IPAddress + ipAddrName := claim.Status.AddressRef.Name + if ipAddrName == "" { + log.V(4).Info("No IPAddress found yet.", "nic", nic) + return "", nil + } + + ipAddrKey := types.NamespacedName{ + Namespace: machineScope.IonosMachine.Namespace, + Name: ipAddrName, + } + ipAddr, err := h.GetIPAddress(ctx, ipAddrKey) + if err != nil { + return "", errors.Join(err, fmt.Errorf("unable to get IPAddress specified in claim %s", claim.Name)) + } + + ip = ipAddr.Spec.Address + + log.V(4).Info("IPAddress found, ", "ip", ip, "nic", nic) + + return ip, nil +} + +// CreateIPAddressClaim creates an IPAddressClaim for a given object. 
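+// The claim is labeled with the cluster name, carries the machine finalizer so
+// that ReconcileIPAddressClaimsDeletion can release it on deletion, and is
+// owned by the given object. If the claim already exists, this is a no-op.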
+func (h *Helper) CreateIPAddressClaim(ctx context.Context, owner client.Object, name string, cluster string, poolRef *corev1.TypedLocalObjectReference) error { + claimRef := types.NamespacedName{ + Namespace: owner.GetNamespace(), + Name: name, + } + ipAddrClaim := &ipamv1.IPAddressClaim{} + var err error + if err = h.client.Get(ctx, claimRef, ipAddrClaim); err != nil && !apierrors.IsNotFound(err) { + return err + } + + if !apierrors.IsNotFound(err) { + // IPAddressClaim already exists + return nil + } + + desired := &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: claimRef.Name, + Namespace: claimRef.Namespace, + Labels: map[string]string{clusterv1.ClusterNameLabel: cluster}, + }, + Spec: ipamv1.IPAddressClaimSpec{ + PoolRef: *poolRef, + }, + } + _, err = controllerutil.CreateOrUpdate(ctx, h.client, desired, func() error { + controllerutil.AddFinalizer(desired, infrav1.MachineFinalizer) + return controllerutil.SetControllerReference(owner, desired, h.client.Scheme()) + }) + + return err +} + +// GetIPAddress attempts to retrieve the IPAddress. +func (h *Helper) GetIPAddress(ctx context.Context, key client.ObjectKey) (*ipamv1.IPAddress, error) { + out := &ipamv1.IPAddress{} + err := h.client.Get(ctx, key, out) + if err != nil { + return nil, err + } + + return out, nil +} + +// GetIPAddressClaim attempts to retrieve the IPAddressClaim. +func (h *Helper) GetIPAddressClaim(ctx context.Context, key client.ObjectKey) (*ipamv1.IPAddressClaim, error) { + out := &ipamv1.IPAddressClaim{} + err := h.client.Get(ctx, key, out) + if err != nil { + return nil, err + } + + return out, nil +} diff --git a/internal/service/k8s/ipam_test.go b/internal/service/k8s/ipam_test.go new file mode 100644 index 00000000..68e0506f --- /dev/null +++ b/internal/service/k8s/ipam_test.go @@ -0,0 +1,405 @@ +/* +Copyright 2024 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package k8s + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + infrav1 "github.com/ionos-cloud/cluster-api-provider-ionoscloud/api/v1alpha1" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/ionoscloud/clienttest" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/service/cloud" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/util/locker" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/util/ptr" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/scope" +) + +type IpamTestSuite struct { + *require.Assertions + suite.Suite + k8sClient client.Client + ctx context.Context + machineScope *scope.Machine + clusterScope *scope.Cluster + log logr.Logger + service *cloud.Service + ipamHelper *Helper + capiCluster *clusterv1.Cluster + capiMachine *clusterv1.Machine + infraCluster *infrav1.IonosCloudCluster + infraMachine *infrav1.IonosCloudMachine + ionosClient *clienttest.MockClient +} + +func (s *IpamTestSuite) SetupSuite() { + s.log = logr.Discard() + s.ctx = context.Background() + s.Assertions = s.Require() +} + +func (s *IpamTestSuite) SetupTest() { + var err error + s.ionosClient = clienttest.NewMockClient(s.T()) + + s.capiCluster = &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-cluster", + UID: "uid", + }, + Spec: clusterv1.ClusterSpec{}, + } + s.infraCluster = &infrav1.IonosCloudCluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: s.capiCluster.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: s.capiCluster.Name, + }, + }, + Spec: infrav1.IonosCloudClusterSpec{ + Location: "de/txl", + }, + Status: infrav1.IonosCloudClusterStatus{}, + } + s.capiMachine = &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-machine", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: s.capiCluster.Name, + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: s.capiCluster.Name, + Version: ptr.To("v1.26.12"), + ProviderID: ptr.To("ionos://dd426c63-cd1d-4c02-aca3-13b4a27c2ebf"), + }, + } + s.infraMachine = &infrav1.IonosCloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-machine", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: s.capiCluster.Name, + clusterv1.MachineDeploymentNameLabel: "test-md", + }, + }, + Spec: infrav1.IonosCloudMachineSpec{ + ProviderID: ptr.To("ionos://dd426c63-cd1d-4c02-aca3-13b4a27c2ebf"), + DatacenterID: "ccf27092-34e8-499e-a2f5-2bdee9d34a12", + NumCores: 2, + AvailabilityZone: infrav1.AvailabilityZoneAuto, + MemoryMB: 4096, + CPUFamily: ptr.To("AMD_OPTERON"), + Disk: &infrav1.Volume{ + Name: "test-machine-hdd", + DiskType: infrav1.VolumeDiskTypeHDD, + SizeGB: 20, + AvailabilityZone: infrav1.AvailabilityZoneAuto, + Image: &infrav1.ImageSpec{ + ID: "3e3e3e3e-3e3e-3e3e-3e3e-3e3e3e3e3e3e", + }, + }, + Type: infrav1.ServerTypeEnterprise, + }, + Status: infrav1.IonosCloudMachineStatus{}, + } + + scheme := runtime.NewScheme() + 
s.NoError(clusterv1.AddToScheme(scheme), "failed to extend scheme with Cluster API types") + s.NoError(ipamv1.AddToScheme(scheme), "failed to extend scheme with Cluster API ipam types") + s.NoError(infrav1.AddToScheme(scheme), "failed to extend scheme with IonosCloud types") + s.NoError(clientgoscheme.AddToScheme(scheme)) + + initObjects := []client.Object{s.infraMachine, s.infraCluster, s.capiCluster, s.capiMachine} + s.k8sClient = fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(initObjects...). + WithStatusSubresource(initObjects...). + Build() + + s.ipamHelper = NewHelper(s.k8sClient, s.log) + s.clusterScope, err = scope.NewCluster(scope.ClusterParams{ + Client: s.k8sClient, + Cluster: s.capiCluster, + IonosCluster: s.infraCluster, + Locker: locker.New(), + }) + s.NoError(err, "failed to create cluster scope") + + s.machineScope, err = scope.NewMachine(scope.MachineParams{ + Client: s.k8sClient, + Machine: s.capiMachine, + ClusterScope: s.clusterScope, + IonosMachine: s.infraMachine, + Locker: locker.New(), + }) + s.NoError(err, "failed to create machine scope") + + s.service, err = cloud.NewService(s.ionosClient, s.log) + s.NoError(err, "failed to create service") +} + +func TestIpamTestSuite(t *testing.T) { + suite.Run(t, new(IpamTestSuite)) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesDontCreateClaim() { + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + s.False(requeue) + s.NoError(err) + + // No PoolRefs provided, so the Reconcile must not create a claim. + list := &ipamv1.IPAddressClaimList{} + err = s.k8sClient.List(s.ctx, list) + s.Empty(list.Items) + s.NoError(err) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesPrimaryIpv4CreateClaim() { + poolRef := defaultInClusterIPv4PoolRef() + + s.machineScope.IonosMachine.Spec.IPv4PoolRef = poolRef + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + // IPAddressClaim was created, so we need to wait for the IPAddress to be created externally. + s.True(requeue) + s.NoError(err) + + claim := defaultPrimaryIPv4Claim() + err = s.k8sClient.Get(s.ctx, client.ObjectKeyFromObject(claim), claim) + s.NoError(err) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesPrimaryIpv6CreateClaim() { + poolRef := defaultInClusterIPv6PoolRef() + + s.machineScope.IonosMachine.Spec.IPv6PoolRef = poolRef + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + // IPAddressClaim was created, so we need to wait for the IPAddress to be created externally. 
+ s.True(requeue) + s.NoError(err) + + claim := defaultPrimaryIPv6Claim() + err = s.k8sClient.Get(s.ctx, client.ObjectKeyFromObject(claim), claim) + s.NoError(err) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesPrimaryIpv4GetIPFromClaim() { + poolRef := defaultInClusterIPv4PoolRef() + + claim := defaultPrimaryIPv4Claim() + claim.Status.AddressRef.Name = "nic-test-machine-ipv4-10-0-0-2" + err := s.k8sClient.Create(s.ctx, claim) + s.NoError(err) + + ip := defaultIPv4Address(claim, poolRef) + err = s.k8sClient.Create(s.ctx, ip) + s.NoError(err) + + s.machineScope.IonosMachine.Spec.IPv4PoolRef = poolRef + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + s.False(requeue) + s.NoError(err) + s.Equal("10.0.0.2", s.machineScope.IonosMachine.Status.MachineNetworkInfo.NICInfo[0].IPv4Addresses[0]) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesPrimaryIpv6GetIPFromClaim() { + poolRef := defaultInClusterIPv6PoolRef() + + claim := defaultPrimaryIPv6Claim() + claim.Status.AddressRef.Name = "nic-test-machine-ipv6-2001-db8--" + err := s.k8sClient.Create(s.ctx, claim) + s.NoError(err) + + ip := defaultIPv6Address(claim, poolRef) + err = s.k8sClient.Create(s.ctx, ip) + s.NoError(err) + + s.machineScope.IonosMachine.Spec.IPv6PoolRef = poolRef + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + s.False(requeue) + s.NoError(err) + s.Equal("2001:db8::", s.machineScope.IonosMachine.Status.MachineNetworkInfo.NICInfo[0].IPv6Addresses[0]) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesAdditionalIpv4CreateClaim() { + poolRef := defaultInClusterIPv4PoolRef() + + s.machineScope.IonosMachine.Spec.AdditionalNetworks = defaultAdditionalNetworksIpv4(poolRef) + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + // IPAddressClaim was created, so we need to wait for the IPAddress to be created externally. + s.True(requeue) + s.NoError(err) + + claim := defaultAdditionalIPv4Claim() + err = s.k8sClient.Get(s.ctx, client.ObjectKeyFromObject(claim), claim) + s.NoError(err) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesAdditionalIpv6CreateClaim() { + poolRef := defaultInClusterIPv6PoolRef() + + s.machineScope.IonosMachine.Spec.AdditionalNetworks = defaultAdditionalNetworksIpv6(poolRef) + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + // IPAddressClaim was created, so we need to wait for the IPAddress to be created externally. 
+ s.True(requeue) + s.NoError(err) + + claim := defaultAdditionalIPv6Claim() + err = s.k8sClient.Get(s.ctx, client.ObjectKeyFromObject(claim), claim) + s.NoError(err) +} + +func (s *IpamTestSuite) TestReconcileIPAddressesAdditionalIpv6GetIPFromClaim() { + poolRef := defaultInClusterIPv6PoolRef() + + claim := defaultAdditionalIPv6Claim() + claim.Status.AddressRef.Name = "nic-test-machine-ipv6-2001-db8--" + err := s.k8sClient.Create(s.ctx, claim) + s.NoError(err) + + ip := defaultIPv6Address(claim, poolRef) + err = s.k8sClient.Create(s.ctx, ip) + s.NoError(err) + + s.machineScope.IonosMachine.Spec.AdditionalNetworks = defaultAdditionalNetworksIpv6(poolRef) + requeue, err := s.ipamHelper.ReconcileIPAddresses(s.ctx, s.machineScope) + s.False(requeue) + s.NoError(err) + s.Equal("2001:db8::", s.machineScope.IonosMachine.Status.MachineNetworkInfo.NICInfo[1].IPv6Addresses[0]) +} + +func defaultInClusterIPv4PoolRef() *corev1.TypedLocalObjectReference { + return &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: "InClusterIPPool", + Name: "incluster-ipv4-pool", + } +} + +func defaultInClusterIPv6PoolRef() *corev1.TypedLocalObjectReference { + return &corev1.TypedLocalObjectReference{ + APIGroup: ptr.To("ipam.cluster.x-k8s.io"), + Kind: "InClusterIPPool", + Name: "incluster-ipv6-pool", + } +} + +func defaultIPv4Address(claim *ipamv1.IPAddressClaim, poolRef *corev1.TypedLocalObjectReference) *ipamv1.IPAddress { + return &ipamv1.IPAddress{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-ipv4-10-0-0-2", + Namespace: "default", + }, + Spec: ipamv1.IPAddressSpec{ + ClaimRef: *localRef(claim), + PoolRef: *poolRef, + Address: "10.0.0.2", + Prefix: 16, + }, + } +} + +func defaultIPv6Address(claim *ipamv1.IPAddressClaim, poolRef *corev1.TypedLocalObjectReference) *ipamv1.IPAddress { + return &ipamv1.IPAddress{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-ipv6-2001-db8--", + Namespace: "default", + }, + Spec: ipamv1.IPAddressSpec{ + ClaimRef: *localRef(claim), + PoolRef: *poolRef, + Address: "2001:db8::", + Prefix: 42, + }, + } +} + +func defaultPrimaryIPv4Claim() *ipamv1.IPAddressClaim { + return &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-ipv4", + Namespace: "default", + }, + } +} + +func defaultAdditionalIPv4Claim() *ipamv1.IPAddressClaim { + return &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-1-ipv4", + Namespace: "default", + }, + } +} + +func defaultAdditionalIPv6Claim() *ipamv1.IPAddressClaim { + return &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-1-ipv6", + Namespace: "default", + }, + } +} + +func defaultAdditionalNetworksIpv6(poolRef *corev1.TypedLocalObjectReference) []infrav1.Network { + return []infrav1.Network{{ + NetworkID: 1, + IPAMConfig: infrav1.IPAMConfig{ + IPv6PoolRef: poolRef, + }, + }} +} + +func defaultAdditionalNetworksIpv4(poolRef *corev1.TypedLocalObjectReference) []infrav1.Network { + return []infrav1.Network{{ + NetworkID: 1, + IPAMConfig: infrav1.IPAMConfig{ + IPv4PoolRef: poolRef, + }, + }} +} + +func defaultPrimaryIPv6Claim() *ipamv1.IPAddressClaim { + return &ipamv1.IPAddressClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nic-test-machine-ipv6", + Namespace: "default", + }, + } +} + +func localRef(obj client.Object) *corev1.LocalObjectReference { + return &corev1.LocalObjectReference{ + Name: obj.GetName(), + } +} diff --git 
a/test/e2e/capic_test.go b/test/e2e/capic_test.go
index 5aced80c..dea8ccac 100644
--- a/test/e2e/capic_test.go
+++ b/test/e2e/capic_test.go
@@ -20,6 +20,8 @@ limitations under the License.
 package e2e
 
 import (
+	"os"
+
 	clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
 	capie2e "sigs.k8s.io/cluster-api/test/e2e"
 	"sigs.k8s.io/cluster-api/test/framework"
@@ -96,3 +98,26 @@ var _ = Describe("Should be able to create a cluster with 1 control-plane and 1
 		}
 	})
 })
+
+var _ = Describe("Should be able to create a cluster with 1 control-plane using an IP from the IPAddressPool", func() {
+	capie2e.QuickStartSpec(ctx, func() capie2e.QuickStartSpecInput {
+		return capie2e.QuickStartSpecInput{
+			E2EConfig:                e2eConfig,
+			ControlPlaneMachineCount: ptr.To(int64(1)),
+			WorkerMachineCount:       ptr.To(int64(0)),
+			ClusterctlConfigPath:     clusterctlConfigPath,
+			BootstrapClusterProxy:    bootstrapClusterProxy,
+			Flavor:                   ptr.To("ipam"),
+			ArtifactFolder:           artifactFolder,
+			SkipCleanup:              skipCleanup,
+			PostNamespaceCreated:     cloudEnv.createCredentialsSecretPNC,
+			PostMachinesProvisioned: func(managementClusterProxy framework.ClusterProxy, namespace, _ string) {
+				machines := &infrav1.IonosCloudMachineList{}
+				Expect(managementClusterProxy.GetClient().List(ctx, machines, runtimeclient.InNamespace(namespace))).NotTo(HaveOccurred())
+				nic := machines.Items[0].Status.MachineNetworkInfo.NICInfo[0]
+				desired := os.Getenv("ADDITIONAL_IPS")
+				Expect(nic.IPv4Addresses).To(ContainElement(desired))
+			},
+		}
+	})
+})
diff --git a/test/e2e/config/ionoscloud.yaml b/test/e2e/config/ionoscloud.yaml
index 3825e495..3f02a23f 100644
--- a/test/e2e/config/ionoscloud.yaml
+++ b/test/e2e/config/ionoscloud.yaml
@@ -38,6 +38,19 @@ providers:
       new: --metrics-addr=:8443
   files:
     - sourcePath: "../data/shared/v1.8/metadata.yaml"
+- name: in-cluster
+  type: IPAMProvider
+  versions:
+    - name: "{go://sigs.k8s.io/cluster-api-ipam-provider-in-cluster@v0.1}" # supported release in the v1alpha2 series
+      # Use the ipam-components manifest published with the GitHub release
+      value: "https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster/releases/download/{go://sigs.k8s.io/cluster-api-ipam-provider-in-cluster@v0.1}/ipam-components.yaml"
+      type: url
+      contract: v1beta1
+      files:
+        - sourcePath: "../data/shared/capi-ipam/v0.1/metadata.yaml"
+      replacements:
+        - old: "imagePullPolicy: Always"
+          new: "imagePullPolicy: IfNotPresent"
 - name: ionoscloud
   type: InfrastructureProvider
   versions:
@@ -51,6 +64,7 @@
   files:
     - sourcePath: "../../../metadata.yaml"
    - sourcePath: "../data/infrastructure-ionoscloud/cluster-template.yaml"
+    - sourcePath: "../data/infrastructure-ionoscloud/cluster-template-ipam.yaml"
 variables:
   # Default variables for the e2e test; those values could be overridden via env variables, thus
   # allowing the same e2e config file to be re-used in different Prow jobs e.g. each one with a K8s version permutation.
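The cluster-template-ipam.yaml flavor registered above follows next. It is long, but its IPAM-specific wiring reduces to an InClusterIPPool (fed from ADDITIONAL_IPS in the e2e run) plus a pool reference on each machine template. A minimal sketch of that pairing, with an illustrative pool name and address range that are not taken from this patch:

    ---
    apiVersion: ipam.cluster.x-k8s.io/v1alpha2
    kind: InClusterIPPool
    metadata:
      name: example-pool                 # illustrative name
    spec:
      prefix: 24
      addresses:
        - 203.0.113.10-203.0.113.20      # example range
    ---
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
    kind: IonosCloudMachineTemplate
    metadata:
      name: example-control-plane        # illustrative name
    spec:
      template:
        spec:
          # As the unit tests above exercise, a pool reference makes the
          # machine controller create an IPAddressClaim and requeue until the
          # IPAM provider binds an IPAddress to it.
          ipv4PoolRef:
            apiGroup: ipam.cluster.x-k8s.io
            kind: InClusterIPPool
            name: example-pool
          datacenterID: ${IONOSCLOUD_DATACENTER_ID}
          disk:
            image:
              id: ${IONOSCLOUD_MACHINE_IMAGE_ID}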
diff --git a/test/e2e/data/infrastructure-ionoscloud/cluster-template-ipam.yaml b/test/e2e/data/infrastructure-ionoscloud/cluster-template-ipam.yaml new file mode 100644 index 00000000..7d1f28cf --- /dev/null +++ b/test/e2e/data/infrastructure-ionoscloud/cluster-template-ipam.yaml @@ -0,0 +1,465 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + cni: "${CLUSTER_NAME}-crs-0" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: IonosCloudCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: KubeadmControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: IonosCloudCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_HOST:-${CONTROL_PLANE_ENDPOINT_IP}} + port: ${CONTROL_PLANE_ENDPOINT_PORT:-6443} + location: ${CONTROL_PLANE_ENDPOINT_LOCATION} + credentialsRef: + name: "ionoscloud-credentials" +--- +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + machineTemplate: + infrastructureRef: + kind: IonosCloudMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + users: + - name: root + sshAuthorizedKeys: [${IONOSCLOUD_MACHINE_SSH_KEYS}] + ntp: + enabled: true + servers: + - 0.de.pool.ntp.org + - 1.de.pool.ntp.org + - 2.de.pool.ntp.org + - 3.de.pool.ntp.org + files: + - path: /etc/ssh/sshd_config.d/ssh-audit_hardening.conf + owner: root:root + permissions: '0644' + content: | + # Restrict key exchange, cipher, and MAC algorithms, as per sshaudit.com + # hardening guide. 
+ KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256,curve25519-sha256@libssh.org,gss-curve25519-sha256-,diffie-hellman-group16-sha512,gss-group16-sha512-,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha256 + Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr + MACs hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,umac-128-etm@openssh.com + HostKeyAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-256-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + CASignatureAlgorithms sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + GSSAPIKexAlgorithms gss-curve25519-sha256-,gss-group16-sha512- + HostbasedAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + PubkeyAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + - path: /etc/sysctl.d/k8s.conf + content: | + fs.inotify.max_user_watches = 65536 + net.netfilter.nf_conntrack_max = 1000000 + - path: /etc/modules-load.d/k8s.conf + content: | + ip_vs + ip_vs_rr + ip_vs_wrr + ip_vs_sh + ip_vs_sed + # Crictl config + - path: /etc/crictl.yaml + content: | + runtime-endpoint: unix:///run/containerd/containerd.sock + timeout: 10 + - path: /etc/kubernetes/manifests/kube-vip.yaml + owner: root:root + content: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: cp_enable + value: "true" + - name: vip_interface + value: ${VIP_NETWORK_INTERFACE=""} + - name: address + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: port + value: "${CONTROL_PLANE_ENDPOINT_PORT:-6443}" + - name: vip_arp + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + image: ghcr.io/kube-vip/kube-vip:v0.7.1 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + - localhost + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + type: FileOrCreate + name: kubeconfig + status: {} + - path: /etc/kube-vip-prepare.sh + content: | + #!/bin/bash + + # Copyright 2020 The Kubernetes Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+
+          set -e
+
+          # Configure the workaround required for kubeadm init with kube-vip:
+          # xref: https://github.com/kube-vip/kube-vip/issues/684
+
+          # Nothing to do for kubernetes < v1.29
+          KUBEADM_MINOR="$(kubeadm version -o short | cut -d '.' -f 2)"
+          if [[ "$KUBEADM_MINOR" -lt "29" ]]; then
+            exit 0
+          fi
+
+          IS_KUBEADM_INIT="false"
+
+          # cloud-init kubeadm init
+          if [[ -f /run/kubeadm/kubeadm.yaml ]]; then
+            IS_KUBEADM_INIT="true"
+          fi
+
+          # ignition kubeadm init
+          if [[ -f /etc/kubeadm.sh ]] && grep -q -e "kubeadm init" /etc/kubeadm.sh; then
+            IS_KUBEADM_INIT="true"
+          fi
+
+          if [[ "$IS_KUBEADM_INIT" == "true" ]]; then
+            sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' \
+              /etc/kubernetes/manifests/kube-vip.yaml
+          fi
+        owner: root:root
+        permissions: "0700"
+
+      # CSI Metadata config
+      - content: |
+          {
+            "datacenter-id": "${IONOSCLOUD_DATACENTER_ID}"
+          }
+        owner: root:root
+        path: /etc/ie-csi/cfg.json
+        permissions: '0644'
+
+      - content: |
+          #!/bin/bash
+          set -e
+
+          # Nothing to do for kubernetes < v1.29
+          KUBEADM_MINOR="$(kubeadm version -o short | cut -d '.' -f 2)"
+          if [[ "$KUBEADM_MINOR" -lt "29" ]]; then
+            exit 0
+          fi
+
+          NODE_IPv4_ADDRESS=$(ip -j addr show dev ens6 | jq -r '.[].addr_info[] | select(.family == "inet") | select(.scope=="global") | select(.dynamic) | .local')
+          if [[ $NODE_IPv4_ADDRESS ]]; then
+            sed -i '$ s/$/ --node-ip '"$NODE_IPv4_ADDRESS"'/' /etc/default/kubelet
+          fi
+          # IPv6 is not handled yet: a global IPv6 address is usually not assigned
+          # by the time this script runs, so it would have to be waited for.
+          NODE_IPv6_ADDRESS=$(ip -j addr show dev ens6 | jq -r '.[].addr_info[] | select(.family == "inet6") | select(.scope=="global") | .local')
+          if [[ $NODE_IPv6_ADDRESS ]]; then
+            sed -i '$ s/$/ --node-ip '"$NODE_IPv6_ADDRESS"'/' /etc/default/kubelet
+          fi
+        owner: root:root
+        path: /etc/set-node-ip.sh
+        permissions: '0700'
+
+    preKubeadmCommands:
+      - systemctl restart systemd-networkd.service systemd-modules-load.service systemd-journald containerd
+      # disable swap
+      - swapoff -a
+      - sed -i '/ swap / s/^/#/' /etc/fstab
+      - sysctl --system
+      - /etc/kube-vip-prepare.sh
+      # workaround for the kubelet node IP issue on Kubernetes >= 1.29
+      - /etc/set-node-ip.sh
+    postKubeadmCommands:
+      - >
+        sed -i 's#path: /etc/kubernetes/super-admin.conf#path: /etc/kubernetes/admin.conf#' \
+        /etc/kubernetes/manifests/kube-vip.yaml
+      - >
+        systemctl disable --now udisks2 multipathd motd-news.timer fwupd-refresh.timer
+        packagekit ModemManager snapd snapd.socket snapd.apparmor snapd.seeded
+      # INFO(schegi-ionos): We decided to not remove this for now, since removing this would require the ccm to be installed for cluster-api
+      # to continue after the first node.
+ - export system_uuid=$(kubectl --kubeconfig /etc/kubernetes/kubelet.conf get node $(hostname) -ojsonpath='{..systemUUID }') + - > + kubectl --kubeconfig /etc/kubernetes/kubelet.conf + patch node $(hostname) + --type strategic -p '{"spec": {"providerID": "ionos://'$${system_uuid}'"}}' + - rm /etc/ssh/ssh_host_* + - ssh-keygen -t rsa -b 4096 -f /etc/ssh/ssh_host_rsa_key -N "" + - ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N "" + - sed -i 's/^\#HostKey \/etc\/ssh\/ssh_host_\(rsa\|ed25519\)_key$/HostKey \/etc\/ssh\/ssh_host_\1_key/g' /etc/ssh/sshd_config + - awk '$5 >= 3071' /etc/ssh/moduli > /etc/ssh/moduli.safe + - mv /etc/ssh/moduli.safe /etc/ssh/moduli + - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set + - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP + - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set + - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP + - apt-get update + - DEBIAN_FRONTEND=noninteractive apt-get install -q -y netfilter-persistent iptables-persistent + - service netfilter-persistent save + - systemctl restart sshd + initConfiguration: + localAPIEndpoint: + bindPort: ${CONTROL_PLANE_ENDPOINT_PORT:-6443} + nodeRegistration: + kubeletExtraArgs: + # use cloud-provider: external when using a CCM + cloud-provider: "" + joinConfiguration: + nodeRegistration: + criSocket: unix:///run/containerd/containerd.sock + kubeletExtraArgs: + # use cloud-provider: external when using a CCM + cloud-provider: "" + version: "${KUBERNETES_VERSION}" +--- +kind: IonosCloudMachineTemplate +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + ipv4PoolRef: + apiGroup: ipam.cluster.x-k8s.io + kind: InClusterIPPool + name: ${CLUSTER_NAME} + datacenterID: ${IONOSCLOUD_DATACENTER_ID} + numCores: ${IONOSCLOUD_MACHINE_NUM_CORES:-4} + memoryMB: ${IONOSCLOUD_MACHINE_MEMORY_MB:-8192} + disk: + image: + id: ${IONOSCLOUD_MACHINE_IMAGE_ID} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-workers" + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + node-role.kubernetes.io/node: "" + spec: + clusterName: "${CLUSTER_NAME}" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-worker" + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${CLUSTER_NAME}-worker" + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: IonosCloudMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: IonosCloudMachineTemplate +metadata: + name: "${CLUSTER_NAME}-worker" +spec: + template: + spec: + ipv4PoolRef: + apiGroup: ipam.cluster.x-k8s.io + kind: InClusterIPPool + name: ${CLUSTER_NAME} + datacenterID: ${IONOSCLOUD_DATACENTER_ID} + numCores: ${IONOSCLOUD_MACHINE_NUM_CORES:-2} + memoryMB: ${IONOSCLOUD_MACHINE_MEMORY_MB:-4096} + disk: + image: + id: ${IONOSCLOUD_MACHINE_IMAGE_ID} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-worker" +spec: + template: + spec: + users: + - name: root + sshAuthorizedKeys: 
[${IONOSCLOUD_MACHINE_SSH_KEYS}] + ntp: + enabled: true + servers: + - 0.de.pool.ntp.org + - 1.de.pool.ntp.org + - 2.de.pool.ntp.org + - 3.de.pool.ntp.org + files: + - path: /etc/ssh/sshd_config.d/ssh-audit_hardening.conf + owner: root:root + permissions: '0644' + content: | + # Restrict key exchange, cipher, and MAC algorithms, as per sshaudit.com + # hardening guide. + KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256,curve25519-sha256@libssh.org,gss-curve25519-sha256-,diffie-hellman-group16-sha512,gss-group16-sha512-,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha256 + Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr + MACs hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,umac-128-etm@openssh.com + HostKeyAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-256-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + CASignatureAlgorithms sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + GSSAPIKexAlgorithms gss-curve25519-sha256-,gss-group16-sha512- + HostbasedAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + PubkeyAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + - path: /etc/sysctl.d/k8s.conf + content: | + fs.inotify.max_user_watches = 65536 + net.netfilter.nf_conntrack_max = 1000000 + - path: /etc/modules-load.d/k8s.conf + content: | + ip_vs + ip_vs_rr + ip_vs_wrr + ip_vs_sh + ip_vs_sed + # Crictl config + - path: /etc/crictl.yaml + content: | + runtime-endpoint: unix:///run/containerd/containerd.sock + timeout: 10 + # CSI Metadata config + - content: | + { + "datacenter-id": "${IONOSCLOUD_DATACENTER_ID}" + } + owner: root:root + path: /etc/ie-csi/cfg.json + permissions: '0644' + preKubeadmCommands: + - systemctl restart systemd-networkd.service systemd-modules-load.service systemd-journald containerd + # disable swap + - swapoff -a + - sed -i '/ swap / s/^/#/' /etc/fstab + - sysctl --system + postKubeadmCommands: + - > + systemctl disable --now udisks2 multipathd motd-news.timer fwupd-refresh.timer + packagekit ModemManager snapd snapd.socket snapd.apparmor snapd.seeded + # INFO(schegi-ionos): We decided to not remove this for now, since removing this would require the ccm to be + # installed for cluster-api to continue after the first node. 
+      - export system_uuid=$(kubectl --kubeconfig /etc/kubernetes/kubelet.conf get node $(hostname) -ojsonpath='{..systemUUID }')
+      - >
+        kubectl --kubeconfig /etc/kubernetes/kubelet.conf
+        patch node $(hostname)
+        --type strategic -p '{"spec": {"providerID": "ionos://'$${system_uuid}'"}}'
+      - rm /etc/ssh/ssh_host_*
+      - ssh-keygen -t rsa -b 4096 -f /etc/ssh/ssh_host_rsa_key -N ""
+      - ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N ""
+      - sed -i 's/^\#HostKey \/etc\/ssh\/ssh_host_\(rsa\|ed25519\)_key$/HostKey \/etc\/ssh\/ssh_host_\1_key/g' /etc/ssh/sshd_config
+      - awk '$5 >= 3071' /etc/ssh/moduli > /etc/ssh/moduli.safe
+      - mv /etc/ssh/moduli.safe /etc/ssh/moduli
+      - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set
+      - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP
+      - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set
+      - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP
+      - apt-get update
+      - DEBIAN_FRONTEND=noninteractive apt-get install -q -y netfilter-persistent iptables-persistent
+      - service netfilter-persistent save
+      - systemctl restart sshd
+    joinConfiguration:
+      nodeRegistration:
+        kubeletExtraArgs:
+          # use cloud-provider: external when using a CCM
+          cloud-provider: ""
+        criSocket: unix:///run/containerd/containerd.sock
+---
+# ConfigMap object referenced by the ClusterResourceSet object, containing
+# the CNI resource defined in the test config file
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: "cni-${CLUSTER_NAME}-crs-0"
+data: ${CNI_RESOURCES}
+---
+# ClusterResourceSet object with
+# a selector that targets all Clusters with the label cni=${CLUSTER_NAME}-crs-0
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+  labels:
+    cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}'
+  name: "${CLUSTER_NAME}-crs-0"
+spec:
+  strategy: ApplyOnce
+  clusterSelector:
+    matchLabels:
+      cni: "${CLUSTER_NAME}-crs-0"
+  resources:
+    - name: "cni-${CLUSTER_NAME}-crs-0"
+      kind: ConfigMap
+---
+apiVersion: ipam.cluster.x-k8s.io/v1alpha2
+kind: InClusterIPPool
+metadata:
+  name: ${CLUSTER_NAME}
+spec:
+  prefix: ${IPAM_PREFIX:-24}
+  addresses:
+    - ${ADDITIONAL_IPS}
diff --git a/test/e2e/data/shared/capi-ipam/v0.1/metadata.yaml b/test/e2e/data/shared/capi-ipam/v0.1/metadata.yaml
new file mode 100644
index 00000000..56d61ac6
--- /dev/null
+++ b/test/e2e/data/shared/capi-ipam/v0.1/metadata.yaml
@@ -0,0 +1,12 @@
+# maps release series of major.minor to cluster-api contract version
+# the contract version may change between minor or major versions, but *not*
+# between patch versions.
+#
+# update this file only when a new major or minor version is released
+apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
+kind: Metadata
+releaseSeries:
+  - major: 0
+    minor: 1
+    contract: v1beta1
+
diff --git a/test/e2e/env_test.go b/test/e2e/env_test.go
index 740f2b45..69a86df6 100644
--- a/test/e2e/env_test.go
+++ b/test/e2e/env_test.go
@@ -24,6 +24,7 @@ import (
 	"fmt"
 	"os"
 	"strconv"
+	"strings"
 
 	"github.com/google/uuid"
 	sdk "github.com/ionos-cloud/sdk-go/v6"
@@ -71,7 +72,7 @@ func (e *ionosCloudEnv) setup() {
 	dcRequest := e.createDatacenter(ctx, location)
 
 	By("Requesting an IP block")
-	ipbRequest := e.reserveIPBlock(ctx, location, 1)
+	ipbRequest := e.reserveIPBlock(ctx, location, 2)
 
 	By("Waiting for requests to complete")
 	e.waitForCreationRequests(ctx, dcRequest, ipbRequest)
@@ -84,11 +85,14 @@ func (e *ionosCloudEnv) teardown() {
 	By("Requesting the deletion of the data center")
 	datacenterRequest := e.deleteDatacenter(ctx)
 
+	By("Waiting for the deletion request to complete")
+	e.waitForDataCenterDeletion(ctx, datacenterRequest)
+
 	By("Requesting the deletion of the IP Block")
 	ipBlockRequest := e.deleteIPBlock(ctx)
 
-	By("Waiting for deletion requests to complete")
-	e.waitForDeletionRequests(ctx, datacenterRequest, ipBlockRequest)
+	By("Waiting for deletion request to complete")
+	e.waitForIPBlockDeletion(ctx, ipBlockRequest)
 	}
 }
 
@@ -141,7 +145,12 @@ func (e *ionosCloudEnv) reserveIPBlock(ctx context.Context, location string, siz
 	if e.ciMode {
 		e.writeToGithubOutput("IP_BLOCK_ID", *e.ipBlock.Id)
 	}
-	Expect(os.Setenv("CONTROL_PLANE_ENDPOINT_IP", (*e.ipBlock.Properties.Ips)[0])).ToNot(HaveOccurred(), "Failed setting datacenter ID in environment variable")
+
+	ips := (*e.ipBlock.Properties.Ips)
+	Expect(os.Setenv("CONTROL_PLANE_ENDPOINT_IP", ips[0])).ToNot(HaveOccurred(), "Failed setting control plane endpoint IP in environment variable")
+	if len(ips) > 1 {
+		Expect(os.Setenv("ADDITIONAL_IPS", strings.Join(ips[1:], ","))).ToNot(HaveOccurred(), "Failed setting additional IPs in environment variable")
+	}
 
 	return res.Header.Get(apiLocationHeaderKey)
 }
@@ -164,17 +173,22 @@ func (e *ionosCloudEnv) waitForCreationRequests(ctx context.Context, datacenterR
 	Expect(err).ToNot(HaveOccurred(), "failed waiting for IP block reservation")
 }
 
-func (e *ionosCloudEnv) waitForDeletionRequests(ctx context.Context, datacenterRequest, ipBlockRequest string) {
-	GinkgoLogr.Info("Waiting for data center and IP block deletion requests to complete",
-		"datacenterRequest", datacenterRequest,
-		"datacenterID", e.datacenterID,
+func (e *ionosCloudEnv) waitForIPBlockDeletion(ctx context.Context, ipBlockRequest string) {
+	GinkgoLogr.Info("Waiting for IP block deletion request to complete",
 		"ipBlockRequest", ipBlockRequest,
 		"ipBlockID", *e.ipBlock.Id)
+	_, err := e.api.WaitForRequest(ctx, ipBlockRequest)
+	Expect(err).ToNot(HaveOccurred(), "failed waiting for IP block deletion")
+}
+
+func (e *ionosCloudEnv) waitForDataCenterDeletion(ctx context.Context, datacenterRequest string) {
+	GinkgoLogr.Info("Waiting for data center deletion request to complete",
+		"datacenterRequest", datacenterRequest,
+		"datacenterID", e.datacenterID)
+
 	_, err := e.api.WaitForRequest(ctx, datacenterRequest)
 	Expect(err).ToNot(HaveOccurred(), "failed waiting for data center deletion")
-	_, err = e.api.WaitForRequest(ctx, ipBlockRequest)
-	Expect(err).ToNot(HaveOccurred(), "failed waiting for IP block deletion")
 }
 
 // createCredentialsSecretPNC creates a secret with the IONOS Cloud credentials.
This secret should be used as the diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index 69e87830..7c024523 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/bootstrap" "sigs.k8s.io/cluster-api/test/framework/clusterctl" @@ -199,6 +200,7 @@ var _ = SynchronizedAfterSuite(func() { func initScheme() *runtime.Scheme { s := runtime.NewScheme() framework.TryAddDefaultSchemes(s) + Expect(ipamv1.AddToScheme(s)).To(Succeed()) Expect(infrav1.AddToScheme(s)).To(Succeed()) return s } @@ -254,6 +256,7 @@ func initBootstrapCluster() { clusterctl.InitManagementClusterAndWatchControllerLogs(watchesCtx, clusterctl.InitManagementClusterAndWatchControllerLogsInput{ ClusterProxy: bootstrapClusterProxy, ClusterctlConfigPath: clusterctlConfigPath, + IPAMProviders: e2eConfig.IPAMProviders(), InfrastructureProviders: e2eConfig.InfrastructureProviders(), LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), }, e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...)
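The suite wiring above registers the ipamv1 scheme so the e2e framework can read IPAddressClaim and IPAddress objects from the workload namespace. For orientation, the claim-to-address handshake that the unit tests stage by hand can be sketched in a few lines of controller-runtime code. This is a hedged sketch under stated assumptions: addressForClaim is a hypothetical helper named for illustration, not the actual API of internal/service/k8s/ipam.go.

    package ipamsketch

    import (
    	"context"

    	ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1"
    	"sigs.k8s.io/controller-runtime/pkg/client"
    )

    // addressForClaim resolves the IPAddress bound to a claim. The external
    // IPAM provider sets claim.Status.AddressRef once it has created an
    // IPAddress for the claim; until then the caller requeues, mirroring the
    // requeue assertions in the tests above.
    func addressForClaim(ctx context.Context, c client.Client, claim *ipamv1.IPAddressClaim) (address string, requeue bool, err error) {
    	if claim.Status.AddressRef.Name == "" {
    		// No IPAddress bound yet: wait for the external IPAM controller.
    		return "", true, nil
    	}
    	addr := &ipamv1.IPAddress{}
    	key := client.ObjectKey{Namespace: claim.Namespace, Name: claim.Status.AddressRef.Name}
    	if err := c.Get(ctx, key, addr); err != nil {
    		return "", false, err
    	}
    	// addr.Spec.Address is the plain IP, e.g. "10.0.0.2" or "2001:db8::"
    	// in the test fixtures, which ends up in MachineNetworkInfo.
    	return addr.Spec.Address, false, nil
    }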