From f53a477868477ec06b3d4990c2393c9d9d74e6ac Mon Sep 17 00:00:00 2001 From: Richard Case Date: Wed, 19 Jan 2022 09:03:38 +0000 Subject: [PATCH 1/7] feat: add controller for external load balancer --- .../externalloadbalancer_controller.go | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 controllers/externalloadbalancer_controller.go diff --git a/controllers/externalloadbalancer_controller.go b/controllers/externalloadbalancer_controller.go new file mode 100644 index 0000000..3e7889d --- /dev/null +++ b/controllers/externalloadbalancer_controller.go @@ -0,0 +1,51 @@ +// Copyright 2022 Weaveworks or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MPL-2.0. + +package controllers + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + infrav1 "github.com/weaveworks/cluster-api-provider-microvm/api/v1alpha1" +) + +type ExternalLoadBalancerReconciler struct { + client.Client + Scheme *runtime.Scheme + Recorder record.EventRecorder + WatchFilterValue string +} + +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=externalloadbalancers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=externalloadbalancers/status,verbs=get;update;patch +// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.8.3/pkg/reconcile +func (r *ExternalLoadBalancerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := log.FromContext(ctx) + + loadbalancer := &infrav1.ExternalLoadBalancer{} + if err := r.Get(ctx, req.NamespacedName, loadbalancer); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + log.Error(err, "error getting externalloadbalancer", "id", req.NamespacedName) + + return ctrl.Result{}, err + } + +} From 050ad6c17a2ece23381a0184607b7f226630ba60 Mon Sep 17 00:00:00 2001 From: Josh Michielsen Date: Tue, 25 Jan 2022 10:17:56 +0000 Subject: [PATCH 2/7] feat: add external loadbalancer endpoint types and controller - The controller does a simple HTTP get on the endpoint. - Any non-5xx response is acceptable. - The MvmCluster checks the endpointRef for it's ready status - If it is not ready, it requeues the reconciliation. 
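For illustration only, the availability check described above boils down to roughly the following sketch. This is not the controller code added by this patch (that follows below); the scheme and path are assumptions at this point in the series — later patches switch the real probe to https://<endpoint>/livez — and the endpoint value is made up.

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// isEndpointAvailable sketches the check this commit describes: issue a plain
// HTTP GET against the load balancer endpoint and treat any response below
// 500 as "available". The real controller layers timeouts, logging, status
// patching and requeueing on top of this.
func isEndpointAvailable(ctx context.Context, endpoint string) (bool, error) {
	client := &http.Client{Timeout: 5 * time.Second}

	// Assumption: a plain http:// scheme for the sketch; the series later
	// probes the API server's /livez endpoint over https.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://"+endpoint, nil)
	if err != nil {
		return false, fmt.Errorf("creating request: %w", err)
	}

	resp, err := client.Do(req)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	// Any non-5xx status code means the endpoint answered and counts as available.
	return resp.StatusCode < http.StatusInternalServerError, nil
}

func main() {
	ok, err := isEndpointAvailable(context.Background(), "192.168.8.15:6443")
	fmt.Println(ok, err)
}
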
--- api/v1alpha1/externalloadbalancer_types.go | 23 ++++- api/v1alpha1/microvmcluster_types.go | 10 +- api/v1alpha1/zz_generated.deepcopy.go | 22 ++++- ...luster.x-k8s.io_externalloadbalancers.yaml | 4 +- ...ture.cluster.x-k8s.io_microvmclusters.yaml | 68 ++++++++++--- config/rbac/role.yaml | 20 ++++ controllers/errors.go | 12 +-- .../externalloadbalancer_controller.go | 59 +++++++++++ controllers/microvmcluster_controller.go | 28 +++--- controllers/microvmcluster_controller_test.go | 99 +++++++------------ templates/cluster-template.yaml | 17 +++- 11 files changed, 248 insertions(+), 114 deletions(-) diff --git a/api/v1alpha1/externalloadbalancer_types.go b/api/v1alpha1/externalloadbalancer_types.go index bffe45d..2621f47 100644 --- a/api/v1alpha1/externalloadbalancer_types.go +++ b/api/v1alpha1/externalloadbalancer_types.go @@ -4,15 +4,34 @@ package v1alpha1 import ( + "strconv" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) +type ExternalLoadBalancerEndpoint struct { + // The hostname on which the API server is serving. + // +required + Host string `json:"host"` + + // The port on which the API server is serving. + // +optional + // +kubebuilder:default=6443 + Port int32 `json:"port"` +} + +func (ep *ExternalLoadBalancerEndpoint) String() string { + port := strconv.Itoa(int(ep.Port)) + + return ep.Host + ":" + port +} + // ExternalLoadBalancerSpec defines the desired state for a ExternalLoadBalancer. type ExternalLoadBalancerSpec struct { // Endpoint represents the endpoint for the load balancer. This endpoint will - // best tested to see if its available. - Endpoint clusterv1.APIEndpoint `json:"endpoint"` + // be tested to see if its available. + Endpoint ExternalLoadBalancerEndpoint `json:"endpoint"` } type ExternalLoadBalancerStatus struct { diff --git a/api/v1alpha1/microvmcluster_types.go b/api/v1alpha1/microvmcluster_types.go index 4586bce..200139d 100644 --- a/api/v1alpha1/microvmcluster_types.go +++ b/api/v1alpha1/microvmcluster_types.go @@ -4,19 +4,13 @@ package v1alpha1 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) // MicrovmClusterSpec defines the desired state of MicrovmCluster. type MicrovmClusterSpec struct { - // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. - // - // See https://cluster-api.sigs.k8s.io/developer/architecture/controllers/cluster.html - // for more details. - // - // +optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` // SSHPublicKey is an SSH public key that will be used with the default user. If specified // this will apply to all machine created unless you specify a different key at the // machine level. @@ -25,6 +19,8 @@ type MicrovmClusterSpec struct { // Placement specifies how machines for the cluster should be placed onto hosts (i.e. where the microvms are created). // +kubebuilder:validation:Required Placement Placement `json:"placement"` + // EndpointRef + EndpointRef *corev1.ObjectReference `json:"endpointRef,omitempty"` } // MicrovmClusterStatus defines the observed state of MicrovmCluster. 
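
As a rough sketch of how the new types are wired together at this point in the series (names and addresses are illustrative, and a later patch renames EndpointRef to LoadBalancerRef): a MicrovmCluster no longer carries a ControlPlaneEndpoint itself, it references an ExternalLoadBalancer object by name, mirroring what the controller tests below construct.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	infrav1 "github.com/weaveworks/cluster-api-provider-microvm/api/v1alpha1"
)

func main() {
	// An ExternalLoadBalancer describing the API server VIP (example values).
	elb := &infrav1.ExternalLoadBalancer{
		ObjectMeta: metav1.ObjectMeta{Name: "tenant1-elb-endpoint", Namespace: "ns1"},
		Spec: infrav1.ExternalLoadBalancerSpec{
			Endpoint: infrav1.ExternalLoadBalancerEndpoint{
				Host: "192.168.8.15",
				Port: 6443, // the CRD defaults this to 6443 when omitted from a manifest
			},
		},
	}

	// A MicrovmCluster that delegates its control-plane endpoint to the
	// ExternalLoadBalancer above via the new EndpointRef field.
	cluster := &infrav1.MicrovmCluster{
		ObjectMeta: metav1.ObjectMeta{Name: "tenant1", Namespace: "ns1"},
		Spec: infrav1.MicrovmClusterSpec{
			EndpointRef: &corev1.ObjectReference{
				Kind: "ExternalLoadBalancer",
				Name: elb.Name,
			},
		},
	}

	// Endpoint.String() yields "host:port", e.g. "192.168.8.15:6443".
	fmt.Println(cluster.Name, "->", elb.Spec.Endpoint.String())
}
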
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 5818eb1..39441fd 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -9,6 +9,7 @@ package v1alpha1 import ( + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/errors" @@ -56,6 +57,21 @@ func (in *ExternalLoadBalancer) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalLoadBalancerEndpoint) DeepCopyInto(out *ExternalLoadBalancerEndpoint) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalLoadBalancerEndpoint. +func (in *ExternalLoadBalancerEndpoint) DeepCopy() *ExternalLoadBalancerEndpoint { + if in == nil { + return nil + } + out := new(ExternalLoadBalancerEndpoint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalLoadBalancerList) DeepCopyInto(out *ExternalLoadBalancerList) { *out = *in @@ -188,8 +204,12 @@ func (in *MicrovmClusterList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MicrovmClusterSpec) DeepCopyInto(out *MicrovmClusterSpec) { *out = *in - out.ControlPlaneEndpoint = in.ControlPlaneEndpoint in.Placement.DeepCopyInto(&out.Placement) + if in.EndpointRef != nil { + in, out := &in.EndpointRef, &out.EndpointRef + *out = new(v1.ObjectReference) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrovmClusterSpec. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_externalloadbalancers.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_externalloadbalancers.yaml index b61c13f..ff47496 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_externalloadbalancers.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_externalloadbalancers.yaml @@ -52,18 +52,18 @@ spec: properties: endpoint: description: Endpoint represents the endpoint for the load balancer. - This endpoint will best tested to see if its available. + This endpoint will be tested to see if its available. properties: host: description: The hostname on which the API server is serving. type: string port: + default: 6443 description: The port on which the API server is serving. format: int32 type: integer required: - host - - port type: object required: - endpoint diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_microvmclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_microvmclusters.yaml index c75fbed..dee75b3 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_microvmclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_microvmclusters.yaml @@ -53,21 +53,63 @@ spec: spec: description: MicrovmClusterSpec defines the desired state of MicrovmCluster. properties: - controlPlaneEndpoint: - description: "ControlPlaneEndpoint represents the endpoint used to - communicate with the control plane. \n See https://cluster-api.sigs.k8s.io/developer/architecture/controllers/cluster.html - for more details." + endpointRef: + description: 'ObjectReference contains enough information to let you + inspect or modify the referred object. 
--- New uses of this type + are discouraged because of difficulty describing its usage when + embedded in APIs. 1. Ignored fields. It includes many fields which + are not generally honored. For instance, ResourceVersion and FieldPath + are both very rarely valid in actual usage. 2. Invalid usage help. It + is impossible to add specific help for individual usage. In most + embedded usages, there are particular restrictions like, "must refer + only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. 3. Inconsistent validation. Because + the usages are different, the validation rules are different by + usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not + a precise mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency is on + the group,resource tuple and the version of the actual struct is + irrelevant. 5. We cannot easily change it. Because this type is + embedded in many locations, updates to this type will affect numerous + schemas. Don''t make new APIs embed an underspecified API type + they do not control. Instead of using this type, create a locally + provided and used type that is well-focused on your reference. For + example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + .' properties: - host: - description: The hostname on which the API server is serving. + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' type: string - port: - description: The port on which the API server is serving. 
- format: int32 - type: integer - required: - - host - - port type: object placement: description: Placement specifies how machines for the cluster should diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 72efa86..6b1282b 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -50,6 +50,26 @@ rules: - get - list - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - externalloadbalancers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - externalloadbalancers/status + verbs: + - get + - patch + - update - apiGroups: - infrastructure.cluster.x-k8s.io resources: diff --git a/controllers/errors.go b/controllers/errors.go index be408b4..e8d9ca2 100644 --- a/controllers/errors.go +++ b/controllers/errors.go @@ -6,10 +6,10 @@ package controllers import "errors" var ( - errControlplaneEndpointRequired = errors.New("controlplane endpoint is required on cluster or mvmcluster") - errClientFactoryFuncRequired = errors.New("factory function required to create grpc client") - errMicrovmFailed = errors.New("microvm is in a failed state") - errMicrovmUnknownState = errors.New("microvm is in an unknown/unsupported state") - errExpectedMicrovmCluster = errors.New("expected microvm cluster") - errNoPlacement = errors.New("no placement specified") + errExternalLoadBalancerEndpointRefRequired = errors.New("endpointRef is required on mvmcluster") + errClientFactoryFuncRequired = errors.New("factory function required to create grpc client") + errMicrovmFailed = errors.New("microvm is in a failed state") + errMicrovmUnknownState = errors.New("microvm is in an unknown/unsupported state") + errExpectedMicrovmCluster = errors.New("expected microvm cluster") + errNoPlacement = errors.New("no placement specified") ) diff --git a/controllers/externalloadbalancer_controller.go b/controllers/externalloadbalancer_controller.go index 3e7889d..ca0e028 100644 --- a/controllers/externalloadbalancer_controller.go +++ b/controllers/externalloadbalancer_controller.go @@ -5,11 +5,15 @@ package controllers import ( "context" + "net/http" + "os" + "time" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" + "sigs.k8s.io/cluster-api/util/patch" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -48,4 +52,59 @@ func (r *ExternalLoadBalancerReconciler) Reconcile(ctx context.Context, req ctrl return ctrl.Result{}, err } + if ownerRef := loadbalancer.GetOwnerReferences(); len(ownerRef) == 0 { + // What should we do here if the OwnerReference is empty, simply requeue?? + return ctrl.Result{RequeueAfter: requeuePeriod}, nil + } + + if !loadbalancer.ObjectMeta.DeletionTimestamp.IsZero() { + log.Info("loadbalancer being deleted, nothing to do") + + return ctrl.Result{}, nil + } + + client := &http.Client{ + Timeout: 5 * time.Second, + } + + resp, err := client.Get(loadbalancer.Spec.Endpoint.String()) + if err != nil { + if os.IsTimeout(err) { + log.Error(err, "request timed out attempting to contact endpoint", "endpoint", loadbalancer.Spec.Endpoint.String()) + + return ctrl.Result{}, err + } + log.Error(err, "attempting to contact specified endpoint", "endpoint", loadbalancer.Spec.Endpoint.String()) + + return ctrl.Result{}, err + } + defer resp.Body.Close() + if resp.StatusCode >= 500 { + // Do we requeue here? 
How do we track retries, or will this be handled automatically (CrashLoopBackoff) + log.V(2).Info("endpoint returned a 5XX status code", "endpoint", loadbalancer.Spec.Endpoint.String()) + + return ctrl.Result{}, nil + } + + loadbalancer.Status.Ready = true + + defer func() { + if err := r.Patch(loadbalancer); err != nil { + log.Error(err, "attempting to patch loadbalancer object") + } + }() + + return ctrl.Result{}, nil +} + +func (r *ExternalLoadBalancerReconciler) Patch(lb *infrav1.ExternalLoadBalancer) error { + patchHelper, err := patch.NewHelper(lb, r.Client) + if err != nil { + return err + } + if patchErr := patchHelper.Patch(context.TODO(), lb); patchErr != nil { + return err + } + + return nil } diff --git a/controllers/microvmcluster_controller.go b/controllers/microvmcluster_controller.go index ef702c6..968fa44 100644 --- a/controllers/microvmcluster_controller.go +++ b/controllers/microvmcluster_controller.go @@ -8,9 +8,9 @@ import ( "fmt" "time" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -51,6 +51,7 @@ type MicrovmClusterReconciler struct { // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=microvmclusters,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=microvmclusters/status,verbs=get;update;patch // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=microvmclusters/finalizers,verbs=update +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=externalloadbalancerendpoint,verbs=get;list;watch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch // Reconcile is part of the main kubernetes reconciliation loop which aims to @@ -127,8 +128,8 @@ func (r *MicrovmClusterReconciler) reconcileDelete(_ context.Context, clusterSco func (r *MicrovmClusterReconciler) reconcileNormal(ctx context.Context, clusterScope *scope.ClusterScope) (reconcile.Result, error) { clusterScope.Info("Reconciling MicrovmCluster") - if clusterScope.Cluster.Spec.ControlPlaneEndpoint.IsZero() && clusterScope.MvmCluster.Spec.ControlPlaneEndpoint.IsZero() { - return reconcile.Result{}, errControlplaneEndpointRequired + if clusterScope.MvmCluster.Spec.EndpointRef == nil { + return reconcile.Result{}, errExternalLoadBalancerEndpointRefRequired } clusterScope.MvmCluster.Status.Ready = true @@ -149,25 +150,18 @@ func (r *MicrovmClusterReconciler) reconcileNormal(ctx context.Context, clusterS } func (r *MicrovmClusterReconciler) isAPIServerAvailable(ctx context.Context, clusterScope *scope.ClusterScope) bool { - clusterScope.V(defaults.LogLevelDebug).Info("checking if api server is available", "cluster", clusterScope.ClusterName()) - - clusterKey := client.ObjectKey{ - Name: clusterScope.Cluster.Name, - Namespace: clusterScope.Cluster.Namespace, + var endpoint = &infrav1.ExternalLoadBalancer{} + eprnn := types.NamespacedName{ + Namespace: clusterScope.MvmCluster.ObjectMeta.Namespace, + Name: clusterScope.MvmCluster.Spec.EndpointRef.Name, } - - remoteClient, err := r.RemoteClientGetter(ctx, clusterScope.ClusterName(), r.Client, clusterKey) - if err != nil { - clusterScope.Error(err, "creating remote cluster client") + if err := r.Get(ctx, eprnn, endpoint); err != nil { + clusterScope.Error(err, "get referenced ExternalLoadBalancerEndpoint") return false } - nodes := 
&corev1.NodeList{} - if err = remoteClient.List(ctx, nodes); err != nil { - return false - } - if len(nodes.Items) == 0 { + if !endpoint.Status.Ready { return false } diff --git a/controllers/microvmcluster_controller_test.go b/controllers/microvmcluster_controller_test.go index 76abb9b..4be73de 100644 --- a/controllers/microvmcluster_controller_test.go +++ b/controllers/microvmcluster_controller_test.go @@ -44,75 +44,35 @@ func TestClusterReconciliationNoEndpoint(t *testing.T) { g.Expect(c).To(BeNil()) } -func TestClusterReconciliationWithClusterEndpoint(t *testing.T) { - g := NewWithT(t) - - cluster := createCluster(testClusterName, testClusterNamespace) - cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ - Host: "192.168.8.15", - Port: 6443, - } - - tenantClusterNodes := &corev1.NodeList{ - Items: []corev1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - }, - }, - } - - objects := []runtime.Object{ - cluster, - createMicrovmCluster(testClusterName, testClusterNamespace), - tenantClusterNodes, - } - - client := createFakeClient(g, objects) - result, err := reconcileCluster(client) - - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(result.Requeue).To(BeFalse()) - g.Expect(result.RequeueAfter).To(Equal(time.Duration(0))) - - reconciled, err := getMicrovmCluster(context.TODO(), client, testClusterName, testClusterNamespace) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(reconciled.Status.Ready).To(BeTrue()) - g.Expect(reconciled.Status.FailureDomains).To(HaveLen(1)) - - c := conditions.Get(reconciled, infrav1.LoadBalancerAvailableCondition) - g.Expect(c).ToNot(BeNil()) - g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) - - c = conditions.Get(reconciled, clusterv1.ReadyCondition) - g.Expect(c).ToNot(BeNil()) - g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) -} - func TestClusterReconciliationWithMvmClusterEndpoint(t *testing.T) { g := NewWithT(t) mvmCluster := createMicrovmCluster(testClusterName, testClusterNamespace) - mvmCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ - Host: "192.168.8.15", - Port: 6443, + mvmCluster.Spec.EndpointRef = &corev1.ObjectReference{ + Kind: "ExternalLoadBalancerEndpoint", + Name: "tenant1-elb-endpoint", } - tenantClusterNodes := &corev1.NodeList{ - Items: []corev1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, + endpoint := &infrav1.ExternalLoadBalancer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tenant1-elb-endpoint", + Namespace: "ns1", + }, + Spec: infrav1.ExternalLoadBalancerSpec{ + Endpoint: infrav1.ExternalLoadBalancerEndpoint{ + Host: "localhost", + Port: 6443, }, }, + Status: infrav1.ExternalLoadBalancerStatus{ + Ready: true, + }, } objects := []runtime.Object{ createCluster(testClusterName, testClusterNamespace), mvmCluster, - tenantClusterNodes, + endpoint, } client := createFakeClient(g, objects) @@ -140,19 +100,32 @@ func TestClusterReconciliationWithClusterEndpointAPIServerNotReady(t *testing.T) g := NewWithT(t) cluster := createCluster(testClusterName, testClusterNamespace) - cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ - Host: "192.168.8.15", - Port: 6443, + mvmCluster := createMicrovmCluster(testClusterName, testClusterNamespace) + mvmCluster.Spec.EndpointRef = &corev1.ObjectReference{ + Kind: "ExternalLoadBalancerEndpoint", + Name: "tenant1-elb-endpoint", } - tenantClusterNodes := &corev1.NodeList{ - Items: []corev1.Node{}, + endpoint := &infrav1.ExternalLoadBalancer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tenant1-elb-endpoint", + Namespace: "ns1", + }, + Spec: 
infrav1.ExternalLoadBalancerSpec{ + Endpoint: infrav1.ExternalLoadBalancerEndpoint{ + Host: "localhost", + Port: 6443, + }, + }, + Status: infrav1.ExternalLoadBalancerStatus{ + Ready: false, + }, } objects := []runtime.Object{ cluster, - createMicrovmCluster(testClusterName, testClusterNamespace), - tenantClusterNodes, + mvmCluster, + endpoint, } client := createFakeClient(g, objects) diff --git a/templates/cluster-template.yaml b/templates/cluster-template.yaml index 86138ba..eb1b6c5 100644 --- a/templates/cluster-template.yaml +++ b/templates/cluster-template.yaml @@ -20,19 +20,30 @@ spec: apiVersion: controlplane.cluster.x-k8s.io/v1beta1 name: "${CLUSTER_NAME}-control-plane" --- +# ExternalLoadBalancer Definition +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ExternalLoadBalancer +metadata: + name: "${CLUSTER_NAME}-api-server" +spec: + endpoint: + host: ${CONTROL_PLANE_VIP} + port: 6443 +--- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 kind: MicrovmCluster metadata: name: "${CLUSTER_NAME}" spec: - controlPlaneEndpoint: - host: "${CONTROL_PLANE_VIP}" - port: 6443 placement: staticPool: hosts: - endpoint: "${HOST_ENDPOINT:=127.0.0.1:9090}" controlplaneAllowed: true + loadBalancerRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ExternalLoadBalancer + name: "${CLUSTER_NAME}-api-server" --- kind: KubeadmControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta1 From 4f59f7c722b46a84235276579f865f9cfe27a3c8 Mon Sep 17 00:00:00 2001 From: Josh Michielsen Date: Tue, 25 Jan 2022 10:36:55 +0000 Subject: [PATCH 3/7] fix: lint errors --- .../externalloadbalancer_controller.go | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/controllers/externalloadbalancer_controller.go b/controllers/externalloadbalancer_controller.go index ca0e028..0e5bbda 100644 --- a/controllers/externalloadbalancer_controller.go +++ b/controllers/externalloadbalancer_controller.go @@ -28,6 +28,12 @@ type ExternalLoadBalancerReconciler struct { WatchFilterValue string } +const ( + httpErrorStatusCode = 500 + warningLogVerbosity = 2 + defaultHTTPTimeout = 5 * time.Second +) + // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=externalloadbalancers,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=externalloadbalancers/status,verbs=get;update;patch // +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch @@ -64,10 +70,15 @@ func (r *ExternalLoadBalancerReconciler) Reconcile(ctx context.Context, req ctrl } client := &http.Client{ - Timeout: 5 * time.Second, + Timeout: defaultHTTPTimeout, + } + + epReq, err := http.NewRequestWithContext(ctx, http.MethodGet, loadbalancer.Spec.Endpoint.String(), nil) + if err != nil { + log.Error(err, "creating endpoint request", "id", req.NamespacedName) } - resp, err := client.Get(loadbalancer.Spec.Endpoint.String()) + resp, err := client.Do(epReq) if err != nil { if os.IsTimeout(err) { log.Error(err, "request timed out attempting to contact endpoint", "endpoint", loadbalancer.Spec.Endpoint.String()) @@ -79,9 +90,9 @@ func (r *ExternalLoadBalancerReconciler) Reconcile(ctx context.Context, req ctrl return ctrl.Result{}, err } defer resp.Body.Close() - if resp.StatusCode >= 500 { + if resp.StatusCode >= httpErrorStatusCode { // Do we requeue here? 
How do we track retries, or will this be handled automatically (CrashLoopBackoff) - log.V(2).Info("endpoint returned a 5XX status code", "endpoint", loadbalancer.Spec.Endpoint.String()) + log.V(warningLogVerbosity).Info("endpoint returned a 5XX status code", "endpoint", loadbalancer.Spec.Endpoint.String()) return ctrl.Result{}, nil } From 1173a2bde36d6e53c22fb8c392a6f7e1ec05caa4 Mon Sep 17 00:00:00 2001 From: Josh Michielsen Date: Tue, 25 Jan 2022 10:57:51 +0000 Subject: [PATCH 4/7] fix: gofumpt not gofmt --- controllers/microvmcluster_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/microvmcluster_controller.go b/controllers/microvmcluster_controller.go index 968fa44..b86f87f 100644 --- a/controllers/microvmcluster_controller.go +++ b/controllers/microvmcluster_controller.go @@ -150,7 +150,7 @@ func (r *MicrovmClusterReconciler) reconcileNormal(ctx context.Context, clusterS } func (r *MicrovmClusterReconciler) isAPIServerAvailable(ctx context.Context, clusterScope *scope.ClusterScope) bool { - var endpoint = &infrav1.ExternalLoadBalancer{} + endpoint := &infrav1.ExternalLoadBalancer{} eprnn := types.NamespacedName{ Namespace: clusterScope.MvmCluster.ObjectMeta.Namespace, Name: clusterScope.MvmCluster.Spec.EndpointRef.Name, From 66b8c53cc1ee71e285f3944fb6a5fc2506851475 Mon Sep 17 00:00:00 2001 From: Josh Michielsen Date: Wed, 9 Feb 2022 12:07:03 +0000 Subject: [PATCH 5/7] refactor: update test request logic and improve patch Among a variety of other changes. - Patch is called earlier and we now have a ExternalLoadBalancerEndpointAvailableCondition that we can use to convey failure information. - Rather than simply checking the OwnerReferences the ClusterName is used to ensure the owner reference exists. - Added SetupWithManager method which is called from main. - Moved test request logic to separate function. The test request now calls the {ENDPOINT}/livez which should reach the Kubernetes API server --- api/v1alpha1/condition_consts.go | 12 ++ api/v1alpha1/externalloadbalancer_types.go | 3 +- api/v1alpha1/microvmcluster_types.go | 4 +- api/v1alpha1/zz_generated.deepcopy.go | 4 +- ...luster.x-k8s.io_externalloadbalancers.yaml | 3 + ...ture.cluster.x-k8s.io_microvmclusters.yaml | 26 +-- controllers/errors.go | 1 + .../externalloadbalancer_controller.go | 183 ++++++++++++++---- controllers/microvmcluster_controller.go | 6 +- controllers/microvmcluster_controller_test.go | 4 +- main.go | 9 + templates/cluster-template-cilium.yaml | 17 +- templates/cluster-template.yaml | 1 + 13 files changed, 203 insertions(+), 70 deletions(-) diff --git a/api/v1alpha1/condition_consts.go b/api/v1alpha1/condition_consts.go index 1540802..78c8511 100644 --- a/api/v1alpha1/condition_consts.go +++ b/api/v1alpha1/condition_consts.go @@ -48,3 +48,15 @@ const ( // to be available before proceeding. WaitingForBootstrapDataReason = "WaitingForBoostrapData" ) + +const ( + // ExternalLoadBalancerEndpointAvailableCondition is a condition that indicates that the API server Load Balancer is available. + ExternalLoadBalancerEndpointAvailableCondition clusterv1.ConditionType = "ExternalLoadBalancerEndpointAvailable" + + // ExternalLoadBalancerEndpointNotAvailableReason is used to indicate any error with the + // availability of the load balancer. + ExternalLoadBalancerEndpointFailedReason = "ExternalLoadBalancerEndpointFailed" + + // ExternalLoadBalancerEndpointNotAvailableReason is used to indicate that the load balancer isn't available. 
+ ExternalLoadBalancerEndpointNotAvailableReason = "ExternalLoadBalancerEndpointNotAvailable" +) diff --git a/api/v1alpha1/externalloadbalancer_types.go b/api/v1alpha1/externalloadbalancer_types.go index 2621f47..16c0855 100644 --- a/api/v1alpha1/externalloadbalancer_types.go +++ b/api/v1alpha1/externalloadbalancer_types.go @@ -31,7 +31,8 @@ func (ep *ExternalLoadBalancerEndpoint) String() string { type ExternalLoadBalancerSpec struct { // Endpoint represents the endpoint for the load balancer. This endpoint will // be tested to see if its available. - Endpoint ExternalLoadBalancerEndpoint `json:"endpoint"` + Endpoint ExternalLoadBalancerEndpoint `json:"endpoint"` + ClusterName string `json:"clusterName"` } type ExternalLoadBalancerStatus struct { diff --git a/api/v1alpha1/microvmcluster_types.go b/api/v1alpha1/microvmcluster_types.go index 200139d..787197b 100644 --- a/api/v1alpha1/microvmcluster_types.go +++ b/api/v1alpha1/microvmcluster_types.go @@ -19,8 +19,8 @@ type MicrovmClusterSpec struct { // Placement specifies how machines for the cluster should be placed onto hosts (i.e. where the microvms are created). // +kubebuilder:validation:Required Placement Placement `json:"placement"` - // EndpointRef - EndpointRef *corev1.ObjectReference `json:"endpointRef,omitempty"` + // LoadBalancerRef + LoadBalancerRef *corev1.ObjectReference `json:"loadBalancerRef,omitempty"` } // MicrovmClusterStatus defines the observed state of MicrovmCluster. diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 39441fd..5b18023 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -205,8 +205,8 @@ func (in *MicrovmClusterList) DeepCopyObject() runtime.Object { func (in *MicrovmClusterSpec) DeepCopyInto(out *MicrovmClusterSpec) { *out = *in in.Placement.DeepCopyInto(&out.Placement) - if in.EndpointRef != nil { - in, out := &in.EndpointRef, &out.EndpointRef + if in.LoadBalancerRef != nil { + in, out := &in.LoadBalancerRef, &out.LoadBalancerRef *out = new(v1.ObjectReference) **out = **in } diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_externalloadbalancers.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_externalloadbalancers.yaml index ff47496..a0b3bb3 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_externalloadbalancers.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_externalloadbalancers.yaml @@ -50,6 +50,8 @@ spec: description: ExternalLoadBalancerSpec defines the desired state for a ExternalLoadBalancer. properties: + clusterName: + type: string endpoint: description: Endpoint represents the endpoint for the load balancer. This endpoint will be tested to see if its available. @@ -66,6 +68,7 @@ spec: - host type: object required: + - clusterName - endpoint type: object status: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_microvmclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_microvmclusters.yaml index dee75b3..a8c0391 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_microvmclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_microvmclusters.yaml @@ -53,30 +53,8 @@ spec: spec: description: MicrovmClusterSpec defines the desired state of MicrovmCluster. properties: - endpointRef: - description: 'ObjectReference contains enough information to let you - inspect or modify the referred object. --- New uses of this type - are discouraged because of difficulty describing its usage when - embedded in APIs. 1. 
Ignored fields. It includes many fields which - are not generally honored. For instance, ResourceVersion and FieldPath - are both very rarely valid in actual usage. 2. Invalid usage help. It - is impossible to add specific help for individual usage. In most - embedded usages, there are particular restrictions like, "must refer - only to types A and B" or "UID not honored" or "name must be restricted". - Those cannot be well described when embedded. 3. Inconsistent validation. Because - the usages are different, the validation rules are different by - usage, which makes it hard for users to predict what will happen. - 4. The fields are both imprecise and overly precise. Kind is not - a precise mapping to a URL. This can produce ambiguity during interpretation - and require a REST mapping. In most cases, the dependency is on - the group,resource tuple and the version of the actual struct is - irrelevant. 5. We cannot easily change it. Because this type is - embedded in many locations, updates to this type will affect numerous - schemas. Don''t make new APIs embed an underspecified API type - they do not control. Instead of using this type, create a locally - provided and used type that is well-focused on your reference. For - example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 - .' + loadBalancerRef: + description: LoadBalancerRef properties: apiVersion: description: API version of the referent. diff --git a/controllers/errors.go b/controllers/errors.go index e8d9ca2..b908ce4 100644 --- a/controllers/errors.go +++ b/controllers/errors.go @@ -12,4 +12,5 @@ var ( errMicrovmUnknownState = errors.New("microvm is in an unknown/unsupported state") errExpectedMicrovmCluster = errors.New("expected microvm cluster") errNoPlacement = errors.New("no placement specified") + errInvalidLoadBalancerResponseStatusCode = errors.New("endpoint returned a 5XX status code") ) diff --git a/controllers/externalloadbalancer_controller.go b/controllers/externalloadbalancer_controller.go index 0e5bbda..221befa 100644 --- a/controllers/externalloadbalancer_controller.go +++ b/controllers/externalloadbalancer_controller.go @@ -5,20 +5,33 @@ package controllers import ( "context" + "errors" + "fmt" "net/http" "os" "time" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/source" infrav1 "github.com/weaveworks/cluster-api-provider-microvm/api/v1alpha1" + "github.com/weaveworks/cluster-api-provider-microvm/internal/defaults" ) type ExternalLoadBalancerReconciler struct { @@ -26,11 +39,11 @@ type ExternalLoadBalancerReconciler struct { Scheme *runtime.Scheme Recorder record.EventRecorder WatchFilterValue string + HTTPClient *http.Client } const ( - httpErrorStatusCode = 500 - warningLogVerbosity = 2 + httpErrorStatusCode = 50 defaultHTTPTimeout = 5 * time.Second ) @@ -58,9 +71,19 @@ 
func (r *ExternalLoadBalancerReconciler) Reconcile(ctx context.Context, req ctrl return ctrl.Result{}, err } - if ownerRef := loadbalancer.GetOwnerReferences(); len(ownerRef) == 0 { - // What should we do here if the OwnerReference is empty, simply requeue?? - return ctrl.Result{RequeueAfter: requeuePeriod}, nil + defer func() { + if err := r.Patch(ctx, loadbalancer); err != nil { + log.Error(err, "attempting to patch loadbalancer object") + } + }() + + if err := r.ensureClusterOwnerRef(ctx, req, loadbalancer); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + log.Error(err, "retrieving cluster from clusterName", "clusterName", loadbalancer.ClusterName) + + return ctrl.Result{}, err } if !loadbalancer.ObjectMeta.DeletionTimestamp.IsZero() { @@ -69,52 +92,146 @@ func (r *ExternalLoadBalancerReconciler) Reconcile(ctx context.Context, req ctrl return ctrl.Result{}, nil } - client := &http.Client{ - Timeout: defaultHTTPTimeout, + if err := r.sendTestRequest(ctx, loadbalancer); err != nil { + if os.IsTimeout(err) { + log.Error(err, "request timed out attempting to contact endpoint", "endpoint", loadbalancer.Spec.Endpoint.String()) + conditions.MarkFalse( + loadbalancer, + infrav1.ExternalLoadBalancerEndpointAvailableCondition, + infrav1.ExternalLoadBalancerEndpointNotAvailableReason, + clusterv1.ConditionSeverityInfo, "request to loadbalancer endpoint timed out", + ) + + return ctrl.Result{}, fmt.Errorf("request timed out attempting to contact endpoint: %s: %w", loadbalancer.Spec.Endpoint.String(), err) + } + + if errors.Is(err, errInvalidLoadBalancerResponseStatusCode) { + log.Error(err, "request to endpoint", "endpoint", loadbalancer.Spec.Endpoint.String()) + conditions.MarkFalse( + loadbalancer, + infrav1.ExternalLoadBalancerEndpointAvailableCondition, + infrav1.ExternalLoadBalancerEndpointNotAvailableReason, + clusterv1.ConditionSeverityInfo, "loadbalancer endpoint responded with error", + ) + + return ctrl.Result{}, nil + } + + log.Error(err, "attempting to contact specified endpoint", "endpoint", loadbalancer.Spec.Endpoint.String()) + conditions.MarkFalse( + loadbalancer, + infrav1.ExternalLoadBalancerEndpointAvailableCondition, + infrav1.ExternalLoadBalancerEndpointFailedReason, + clusterv1.ConditionSeverityInfo, "request to loadbalancer endpoint failed: %s", + err.Error(), + ) + + return ctrl.Result{}, fmt.Errorf("attempting to contact specified endpoint: %s: %w", loadbalancer.Spec.Endpoint.String(), err) } - epReq, err := http.NewRequestWithContext(ctx, http.MethodGet, loadbalancer.Spec.Endpoint.String(), nil) - if err != nil { - log.Error(err, "creating endpoint request", "id", req.NamespacedName) + loadbalancer.Status.Ready = true + conditions.MarkTrue(loadbalancer, infrav1.ExternalLoadBalancerEndpointAvailableCondition) + + return ctrl.Result{}, nil +} + +// Patch persists the resource and status. 
+func (r *ExternalLoadBalancerReconciler) Patch(ctx context.Context, lb *infrav1.ExternalLoadBalancer) error { + applicableConditions := []clusterv1.ConditionType{ + infrav1.ExternalLoadBalancerEndpointAvailableCondition, } - resp, err := client.Do(epReq) + conditions.SetSummary(lb, + conditions.WithConditions(applicableConditions...), + conditions.WithStepCounterIf(lb.DeletionTimestamp.IsZero()), + conditions.WithStepCounter(), + ) + + patchHelper, err := patch.NewHelper(lb, r.Client) if err != nil { - if os.IsTimeout(err) { - log.Error(err, "request timed out attempting to contact endpoint", "endpoint", loadbalancer.Spec.Endpoint.String()) + return err + } + if patchErr := patchHelper.Patch( + ctx, + lb, + patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + clusterv1.ReadyCondition, + infrav1.LoadBalancerAvailableCondition, + }}); patchErr != nil { + return err + } - return ctrl.Result{}, err - } - log.Error(err, "attempting to contact specified endpoint", "endpoint", loadbalancer.Spec.Endpoint.String()) + return nil +} - return ctrl.Result{}, err +// sendTestRequest makes an HTTP call to ${KUBE_VIP_HOST}:${KUBE_VIP_PORT}/livez, which, if the loadbalancer is live, +// should reach the /livez endpoint on the Kubernetes API server. +func (r *ExternalLoadBalancerReconciler) sendTestRequest(ctx context.Context, lb *infrav1.ExternalLoadBalancer) error { + endpoint := lb.Spec.Endpoint.String() + "/livez" + epReq, err := http.NewRequestWithContext(ctx, http.MethodGet, lb.Spec.Endpoint.String()+"/livez", nil) // use livez endpoint + if err != nil { + return fmt.Errorf("creating endpoint request: %w", err) + } + + log.Log.V(defaults.LogLevelDebug).Info("attempting request to API server livez endpoint via loadbalancer", "endpoint_address", endpoint) + resp, err := r.HTTPClient.Do(epReq) + if err != nil { + return err } defer resp.Body.Close() + if resp.StatusCode >= httpErrorStatusCode { - // Do we requeue here? 
How do we track retries, or will this be handled automatically (CrashLoopBackoff) - log.V(warningLogVerbosity).Info("endpoint returned a 5XX status code", "endpoint", loadbalancer.Spec.Endpoint.String()) + return errInvalidLoadBalancerResponseStatusCode + } - return ctrl.Result{}, nil + return nil +} + +func (r *ExternalLoadBalancerReconciler) ensureClusterOwnerRef(ctx context.Context, req ctrl.Request, lb *infrav1.ExternalLoadBalancer) error { + clusterNamespaceName := types.NamespacedName{ + Namespace: req.NamespacedName.Namespace, + Name: lb.ClusterName, } - loadbalancer.Status.Ready = true + cluster := &clusterv1.Cluster{} + if err := r.Get(ctx, clusterNamespaceName, cluster); err != nil { + return err + } - defer func() { - if err := r.Patch(loadbalancer); err != nil { - log.Error(err, "attempting to patch loadbalancer object") - } - }() + lb.OwnerReferences = util.EnsureOwnerRef(lb.OwnerReferences, metav1.OwnerReference{ + APIVersion: cluster.APIVersion, + Kind: cluster.Kind, + Name: cluster.Name, + UID: cluster.UID, + }) - return ctrl.Result{}, nil + return nil } -func (r *ExternalLoadBalancerReconciler) Patch(lb *infrav1.ExternalLoadBalancer) error { - patchHelper, err := patch.NewHelper(lb, r.Client) - if err != nil { - return err +func (r *ExternalLoadBalancerReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + log := ctrl.LoggerFrom(ctx) + + if r.HTTPClient == nil { + r.HTTPClient = &http.Client{Timeout: defaultHTTPTimeout} } - if patchErr := patchHelper.Patch(context.TODO(), lb); patchErr != nil { - return err + + builder := ctrl.NewControllerManagedBy(mgr). + WithOptions(options). + For(&infrav1.ExternalLoadBalancer{}). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)). + WithEventFilter(predicates.ResourceIsNotExternallyManaged(log)). 
+ Watches( + &source.Kind{Type: &clusterv1.Cluster{}}, + handler.EnqueueRequestsFromMapFunc( + util.ClusterToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("ExternalLoadBalancer")), + ), + builder.WithPredicates( + predicates.ClusterUnpaused(log), + ), + ) + + if err := builder.Complete(r); err != nil { + return fmt.Errorf("creating external loadbalancer controller: %w", err) } return nil diff --git a/controllers/microvmcluster_controller.go b/controllers/microvmcluster_controller.go index b86f87f..d233fc6 100644 --- a/controllers/microvmcluster_controller.go +++ b/controllers/microvmcluster_controller.go @@ -51,7 +51,7 @@ type MicrovmClusterReconciler struct { // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=microvmclusters,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=microvmclusters/status,verbs=get;update;patch // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=microvmclusters/finalizers,verbs=update -// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=externalloadbalancerendpoint,verbs=get;list;watch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=externalloadbalancers,verbs=get;list;watch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch // Reconcile is part of the main kubernetes reconciliation loop which aims to @@ -128,7 +128,7 @@ func (r *MicrovmClusterReconciler) reconcileDelete(_ context.Context, clusterSco func (r *MicrovmClusterReconciler) reconcileNormal(ctx context.Context, clusterScope *scope.ClusterScope) (reconcile.Result, error) { clusterScope.Info("Reconciling MicrovmCluster") - if clusterScope.MvmCluster.Spec.EndpointRef == nil { + if clusterScope.MvmCluster.Spec.LoadBalancerRef == nil { return reconcile.Result{}, errExternalLoadBalancerEndpointRefRequired } @@ -153,7 +153,7 @@ func (r *MicrovmClusterReconciler) isAPIServerAvailable(ctx context.Context, clu endpoint := &infrav1.ExternalLoadBalancer{} eprnn := types.NamespacedName{ Namespace: clusterScope.MvmCluster.ObjectMeta.Namespace, - Name: clusterScope.MvmCluster.Spec.EndpointRef.Name, + Name: clusterScope.MvmCluster.Spec.LoadBalancerRef.Name, } if err := r.Get(ctx, eprnn, endpoint); err != nil { clusterScope.Error(err, "get referenced ExternalLoadBalancerEndpoint") diff --git a/controllers/microvmcluster_controller_test.go b/controllers/microvmcluster_controller_test.go index 4be73de..32eedc7 100644 --- a/controllers/microvmcluster_controller_test.go +++ b/controllers/microvmcluster_controller_test.go @@ -48,7 +48,7 @@ func TestClusterReconciliationWithMvmClusterEndpoint(t *testing.T) { g := NewWithT(t) mvmCluster := createMicrovmCluster(testClusterName, testClusterNamespace) - mvmCluster.Spec.EndpointRef = &corev1.ObjectReference{ + mvmCluster.Spec.LoadBalancerRef = &corev1.ObjectReference{ Kind: "ExternalLoadBalancerEndpoint", Name: "tenant1-elb-endpoint", } @@ -101,7 +101,7 @@ func TestClusterReconciliationWithClusterEndpointAPIServerNotReady(t *testing.T) cluster := createCluster(testClusterName, testClusterNamespace) mvmCluster := createMicrovmCluster(testClusterName, testClusterNamespace) - mvmCluster.Spec.EndpointRef = &corev1.ObjectReference{ + mvmCluster.Spec.LoadBalancerRef = &corev1.ObjectReference{ Kind: "ExternalLoadBalancerEndpoint", Name: "tenant1-elb-endpoint", } diff --git a/main.go b/main.go index 2a213d6..f0b701c 100644 --- a/main.go +++ b/main.go @@ -286,6 +286,15 @@ func 
setupReconcilers(ctx context.Context, mgr ctrl.Manager) error { return fmt.Errorf("unable to create microvm machine controller: %w", err) } + if err := (&controllers.ExternalLoadBalancerReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("externalloadbalancer-controller"), + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: microvmMachineConcurrency, RecoverPanic: true}); err != nil { + return fmt.Errorf("unable to create external loadbalancer controller: %w", err) + } + return nil } diff --git a/templates/cluster-template-cilium.yaml b/templates/cluster-template-cilium.yaml index 55549ef..57ccc1c 100644 --- a/templates/cluster-template-cilium.yaml +++ b/templates/cluster-template-cilium.yaml @@ -25,18 +25,29 @@ spec: name: "${CLUSTER_NAME}-control-plane" --- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ExternalLoadBalancer +metadata: + name: "${CLUSTER_NAME}-api-server" +spec: + clusterName: ${CLUSTER_NAME} + endpoint: + host: ${CONTROL_PLANE_VIP} + port: 6443 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 kind: MicrovmCluster metadata: name: "${CLUSTER_NAME}" spec: - controlPlaneEndpoint: - host: "${CONTROL_PLANE_VIP}" - port: 6443 placement: staticPool: hosts: - endpoint: "${HOST_ENDPOINT:=127.0.0.1:9090}" controlplaneAllowed: true + loadBalancerRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ExternalLoadBalancer + name: "${CLUSTER_NAME}-api-server" --- kind: KubeadmControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1beta1 diff --git a/templates/cluster-template.yaml b/templates/cluster-template.yaml index eb1b6c5..b0c8c97 100644 --- a/templates/cluster-template.yaml +++ b/templates/cluster-template.yaml @@ -26,6 +26,7 @@ kind: ExternalLoadBalancer metadata: name: "${CLUSTER_NAME}-api-server" spec: + clusterName: ${CLUSTER_NAME} endpoint: host: ${CONTROL_PLANE_VIP} port: 6443 From 1b9fab3f2c4834beda47c0d88e0b9524ad8706b2 Mon Sep 17 00:00:00 2001 From: Richard Case Date: Wed, 9 Feb 2022 15:08:12 +0000 Subject: [PATCH 6/7] wip: changes as part of testing Signed-off-by: Richard Case --- config/crd/kustomization.yaml | 1 + controllers/externalloadbalancer_controller.go | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index c9471d7..3a64cdb 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -8,6 +8,7 @@ resources: - bases/infrastructure.cluster.x-k8s.io_microvmclusters.yaml - bases/infrastructure.cluster.x-k8s.io_microvmmachines.yaml - bases/infrastructure.cluster.x-k8s.io_microvmmachinetemplates.yaml +- bases/infrastructure.cluster.x-k8s.io_externalloadbalancers.yaml #+kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: diff --git a/controllers/externalloadbalancer_controller.go b/controllers/externalloadbalancer_controller.go index 221befa..8bddcc6 100644 --- a/controllers/externalloadbalancer_controller.go +++ b/controllers/externalloadbalancer_controller.go @@ -167,7 +167,7 @@ func (r *ExternalLoadBalancerReconciler) Patch(ctx context.Context, lb *infrav1. // sendTestRequest makes an HTTP call to ${KUBE_VIP_HOST}:${KUBE_VIP_PORT}/livez, which, if the loadbalancer is live, // should reach the /livez endpoint on the Kubernetes API server. 
func (r *ExternalLoadBalancerReconciler) sendTestRequest(ctx context.Context, lb *infrav1.ExternalLoadBalancer) error { - endpoint := lb.Spec.Endpoint.String() + "/livez" + endpoint := fmt.Sprintf("https://%s/livez", lb.Spec.Endpoint.String()) epReq, err := http.NewRequestWithContext(ctx, http.MethodGet, lb.Spec.Endpoint.String()+"/livez", nil) // use livez endpoint if err != nil { return fmt.Errorf("creating endpoint request: %w", err) @@ -190,7 +190,7 @@ func (r *ExternalLoadBalancerReconciler) sendTestRequest(ctx context.Context, lb func (r *ExternalLoadBalancerReconciler) ensureClusterOwnerRef(ctx context.Context, req ctrl.Request, lb *infrav1.ExternalLoadBalancer) error { clusterNamespaceName := types.NamespacedName{ Namespace: req.NamespacedName.Namespace, - Name: lb.ClusterName, + Name: lb.Spec.ClusterName, } cluster := &clusterv1.Cluster{} From 675c2836c596d1e0874fce72a11f5ef00f5946b0 Mon Sep 17 00:00:00 2001 From: Richard Case Date: Thu, 10 Feb 2022 15:52:00 +0000 Subject: [PATCH 7/7] wip Signed-off-by: Richard Case --- .../externalloadbalancer_controller.go | 25 ++++++++----------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/controllers/externalloadbalancer_controller.go b/controllers/externalloadbalancer_controller.go index 8bddcc6..e7fcb6b 100644 --- a/controllers/externalloadbalancer_controller.go +++ b/controllers/externalloadbalancer_controller.go @@ -23,12 +23,9 @@ import ( "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/source" infrav1 "github.com/weaveworks/cluster-api-provider-microvm/api/v1alpha1" "github.com/weaveworks/cluster-api-provider-microvm/internal/defaults" @@ -168,7 +165,7 @@ func (r *ExternalLoadBalancerReconciler) Patch(ctx context.Context, lb *infrav1. // should reach the /livez endpoint on the Kubernetes API server. func (r *ExternalLoadBalancerReconciler) sendTestRequest(ctx context.Context, lb *infrav1.ExternalLoadBalancer) error { endpoint := fmt.Sprintf("https://%s/livez", lb.Spec.Endpoint.String()) - epReq, err := http.NewRequestWithContext(ctx, http.MethodGet, lb.Spec.Endpoint.String()+"/livez", nil) // use livez endpoint + epReq, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil) // use livez endpoint if err != nil { return fmt.Errorf("creating endpoint request: %w", err) } @@ -219,16 +216,16 @@ func (r *ExternalLoadBalancerReconciler) SetupWithManager(ctx context.Context, m WithOptions(options). For(&infrav1.ExternalLoadBalancer{}). WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)). - WithEventFilter(predicates.ResourceIsNotExternallyManaged(log)). - Watches( - &source.Kind{Type: &clusterv1.Cluster{}}, - handler.EnqueueRequestsFromMapFunc( - util.ClusterToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("ExternalLoadBalancer")), - ), - builder.WithPredicates( - predicates.ClusterUnpaused(log), - ), - ) + WithEventFilter(predicates.ResourceIsNotExternallyManaged(log)) //. 
+ // Watches( + // &source.Kind{Type: &clusterv1.Cluster{}}, + // handler.EnqueueRequestsFromMapFunc( + // util.ClusterToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("ExternalLoadBalancer")), + // ), + // builder.WithPredicates( + // predicates.ClusterUnpaused(log), + // ), + // ) if err := builder.Complete(r); err != nil { return fmt.Errorf("creating external loadbalancer controller: %w", err)