diff --git a/.golangci.yml b/.golangci.yml index cc9eff3..bd60057 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -64,6 +64,7 @@ linters-settings: locale: US ignore-words: - "Flavour" + - "Flavours" nolintlint: allow-leading-space: true # don't require machine-readable nolint directives (i.e. with no leading space) allow-unused: false # report any unused nolint directives diff --git a/README.md b/README.md index 7867405..ac8961e 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ -# - +

WP3 - FLUIDOS Node

diff --git a/apis/nodecore/v1alpha1/allocation_status.go b/apis/nodecore/v1alpha1/allocation_status.go new file mode 100644 index 0000000..051acc3 --- /dev/null +++ b/apis/nodecore/v1alpha1/allocation_status.go @@ -0,0 +1,50 @@ +// Copyright 2022-2023 FLUIDOS Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import "github.com/fluidos-project/node/pkg/utils/tools" + +// SetStatus sets the status of the allocation. +func (allocation *Allocation) SetStatus(status Status, msg string) { + allocation.Status.Status = status + allocation.Status.LastUpdateTime = tools.GetTimeNow() + allocation.Status.Message = msg +} + +/* +// SetPurchasePhase sets the ReserveAndBuy phase of the solver +func (allocation *Allocation) SetReserveAndBuyStatus(phase Phase) { + solver.Status.ReserveAndBuy = phase + solver.Status.SolverPhase.LastChangeTime = tools.GetTimeNow() +} + +// SetFindCandidateStatus sets the FindCandidate phase of the solver +func (allocation *Allocation) SetFindCandidateStatus(phase Phase) { + solver.Status.FindCandidate = phase + solver.Status.SolverPhase.LastChangeTime = tools.GetTimeNow() +} + +// SetDiscoveryStatus sets the discovery phase of the solver +func (allocation *Allocation) SetDiscoveryStatus(phase Phase) { + solver.Status.DiscoveryPhase = phase + solver.Status.SolverPhase.LastChangeTime = tools.GetTimeNow() +} + +// SetReservationStatus sets the reservation phase of the solver +func (allocation *Allocation) 
SetReservationStatus(phase Phase) { + solver.Status.ReservationPhase = phase + solver.Status.SolverPhase.LastChangeTime = tools.GetTimeNow() +} +*/ diff --git a/apis/nodecore/v1alpha1/allocation_types.go b/apis/nodecore/v1alpha1/allocation_types.go index 69b76ed..5e7a0ad 100644 --- a/apis/nodecore/v1alpha1/allocation_types.go +++ b/apis/nodecore/v1alpha1/allocation_types.go @@ -18,62 +18,86 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +//nolint:revive // Do not need to repeat the same comment type NodeType string + +//nolint:revive // Do not need to repeat the same comment type Status string +//nolint:revive // Do not need to repeat the same comment +type Destination string + +// NodeType is the type of the node: Node (Physical node of the cluster) or VirtualNode (Remote node owned by a different cluster). const ( Node NodeType = "Node" VirtualNode NodeType = "VirtualNode" ) +// Status is the status of the allocation. const ( Active Status = "Active" Reserved Status = "Reserved" Released Status = "Released" Inactive Status = "Inactive" + Error Status = "Error" +) + +// Destination is the destination of the allocation: Local (the allocation will be used locally) +// or Remote (the allocation will be used from a remote cluster). +const ( + Remote Destination = "Remote" + Local Destination = "Local" ) // AllocationSpec defines the desired state of Allocation type AllocationSpec struct { + // This is the ID of the cluster that owns the allocation. + RemoteClusterID string `json:"remoteClusterID,omitempty"` - // CustomerID - - // This is the ID of the intent for which the allocation was created. It is used by the Node Orchestrator to identify the correct allocation for a given intent + // This is the ID of the intent for which the allocation was created. 
+ // It is used by the Node Orchestrator to identify the correct allocation for a given intent IntentID string `json:"intentID"` - // This is the corresponding Node or VirtualNode name + // This is the corresponding Node or VirtualNode local name NodeName string `json:"nodeName"` // This specifies the type of the node: Node (Physical node of the cluster) or VirtualNode (Remote node owned by a different cluster) Type NodeType `json:"type"` - // This flag indicates if the allocation is a forwarding allocation, if true it represents only a placeholder to undertand that the cluster is just a proxy to another cluster + // This specifies if the destination of the allocation is local or remote so if the allocation will be used locally or from a remote cluster + Destination Destination `json:"destination"` + + // This flag indicates if the allocation is a forwarding allocation + // if true it represents only a placeholder to undertand that the cluster is just a proxy to another cluster Forwarding bool `json:"forwarding,omitempty"` // This Flavour describes the characteristics of the allocation, it is based on the Flavour CRD from which it was created Flavour Flavour `json:"flavour"` - // This is the dimension of the allocation, it is based on the Flavour CRD from which it was created - Partition *Partition `json:"partition,omitempty"` + // This flags indicates if the Flavour from which the allocation was created was partitioned or not + Partitioned bool `json:"partitioned"` + + // This is the dimension of the allocation + Resources Characteristics `json:"resources,omitempty"` } -// AllocationStatus defines the observed state of Allocation +// AllocationStatus defines the observed state of Allocation. 
type AllocationStatus struct { // This allow to know the current status of the allocation - Status Status `json:"status"` - - // The creation time of the allocation object - CreationTime metav1.Time `json:"creationTime"` + Status Status `json:"status,omitempty"` // The last time the allocation was updated - LastUpdateTime metav1.Time `json:"lastUpdateTime"` + LastUpdateTime string `json:"lastUpdateTime,omitempty"` + + // Message contains the last message of the allocation + Message string `json:"message,omitempty"` } //+kubebuilder:object:root=true //+kubebuilder:subresource:status -// Allocation is the Schema for the allocations API +// Allocation is the Schema for the allocations API. type Allocation struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/apis/nodecore/v1alpha1/common.go b/apis/nodecore/v1alpha1/common.go index 11320fb..abfa044 100644 --- a/apis/nodecore/v1alpha1/common.go +++ b/apis/nodecore/v1alpha1/common.go @@ -16,17 +16,17 @@ package v1alpha1 import "k8s.io/apimachinery/pkg/api/resource" +// Set of constants for the phases of the FLUIDOS Node modules. 
const ( - //PhaseReady Phase = "Ready" - PhaseSolved Phase = "Solved" - PhaseFailed Phase = "Failed" - PhaseRunning Phase = "Running" - PhaseIdle Phase = "Idle" - PhaseTimeout Phase = "Timed Out" - PhaseBackoff Phase = "Backoff" - PhaseActive Phase = "Active" - PhasePending Phase = "Pending" - PhaseInactive Phase = "Inactive" + PhaseSolved Phase = "Solved" + PhaseFailed Phase = "Failed" + PhaseRunning Phase = "Running" + PhaseAllocating Phase = "Allocating" + PhaseIdle Phase = "Idle" + PhaseTimeout Phase = "Timed Out" + PhaseActive Phase = "Active" + PhasePending Phase = "Pending" + PhaseInactive Phase = "Inactive" ) // GenericRef represents a reference to a generic Kubernetes resource, @@ -57,6 +57,14 @@ type Partition struct { Storage resource.Quantity `json:"storage,omitempty"` } +// LiqoCredentials contains the credentials of a Liqo cluster to enstablish a peering. +type LiqoCredentials struct { + ClusterID string `json:"clusterID"` + ClusterName string `json:"clusterName"` + Token string `json:"token"` + Endpoint string `json:"endpoint"` +} + // toString() returns a string representation of the GenericRef. /* func (r GenericRef) toString() string { if r.Namespace != "" { diff --git a/apis/nodecore/v1alpha1/solver_status.go b/apis/nodecore/v1alpha1/solver_status.go index 7ed0878..39e2beb 100644 --- a/apis/nodecore/v1alpha1/solver_status.go +++ b/apis/nodecore/v1alpha1/solver_status.go @@ -26,6 +26,12 @@ func (solver *Solver) SetPhase(phase Phase, msg string) { solver.Status.SolverPhase.EndTime = t } +// SetPeeringStatus sets the Peering phase of the solver. 
+func (solver *Solver) SetPeeringStatus(phase Phase) { + solver.Status.Peering = phase + solver.Status.SolverPhase.LastChangeTime = tools.GetTimeNow() +} + // SetPurchasePhase sets the ReserveAndBuy phase of the solver func (solver *Solver) SetReserveAndBuyStatus(phase Phase) { solver.Status.ReserveAndBuy = phase diff --git a/apis/nodecore/v1alpha1/solver_types.go b/apis/nodecore/v1alpha1/solver_types.go index 70fc7dc..e368c45 100644 --- a/apis/nodecore/v1alpha1/solver_types.go +++ b/apis/nodecore/v1alpha1/solver_types.go @@ -133,6 +133,12 @@ type SolverStatus struct { // It can correspond to a virtual node // The Node Orchestrator will use this allocation to fullfill the intent. Allocation GenericRef `json:"allocation,omitempty"` + + // Contract contains the Contract that the Contract Manager has eventually created with the candidate. + Contract GenericRef `json:"contract,omitempty"` + + // Credentials contains the LiqoCredentials found in the Contract. + Credentials LiqoCredentials `json:"credentials,omitempty"` } //+kubebuilder:object:root=true diff --git a/apis/nodecore/v1alpha1/zz_generated.deepcopy.go b/apis/nodecore/v1alpha1/zz_generated.deepcopy.go index 16cded4..9cb9408 100644 --- a/apis/nodecore/v1alpha1/zz_generated.deepcopy.go +++ b/apis/nodecore/v1alpha1/zz_generated.deepcopy.go @@ -43,7 +43,7 @@ func (in *Allocation) DeepCopyInto(out *Allocation) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + out.Status = in.Status } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Allocation. 
@@ -100,11 +100,7 @@ func (in *AllocationList) DeepCopyObject() runtime.Object { func (in *AllocationSpec) DeepCopyInto(out *AllocationSpec) { *out = *in in.Flavour.DeepCopyInto(&out.Flavour) - if in.Partition != nil { - in, out := &in.Partition, &out.Partition - *out = new(Partition) - (*in).DeepCopyInto(*out) - } + in.Resources.DeepCopyInto(&out.Resources) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationSpec. @@ -120,8 +116,6 @@ func (in *AllocationSpec) DeepCopy() *AllocationSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AllocationStatus) DeepCopyInto(out *AllocationStatus) { *out = *in - in.CreationTime.DeepCopyInto(&out.CreationTime) - in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationStatus. @@ -288,6 +282,21 @@ func (in *GenericRef) DeepCopy() *GenericRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LiqoCredentials) DeepCopyInto(out *LiqoCredentials) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LiqoCredentials. +func (in *LiqoCredentials) DeepCopy() *LiqoCredentials { + if in == nil { + return nil + } + out := new(LiqoCredentials) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MatchSelector) DeepCopyInto(out *MatchSelector) { *out = *in @@ -542,6 +551,8 @@ func (in *SolverStatus) DeepCopyInto(out *SolverStatus) { out.SolverPhase = in.SolverPhase out.PeeringCandidate = in.PeeringCandidate out.Allocation = in.Allocation + out.Contract = in.Contract + out.Credentials = in.Credentials } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SolverStatus. diff --git a/apis/reservation/v1alpha1/contract_types.go b/apis/reservation/v1alpha1/contract_types.go index 0b9aa04..826ad59 100644 --- a/apis/reservation/v1alpha1/contract_types.go +++ b/apis/reservation/v1alpha1/contract_types.go @@ -20,14 +20,6 @@ import ( nodecorev1alpha1 "github.com/fluidos-project/node/apis/nodecore/v1alpha1" ) -// LiqoCredentials contains the credentials of a Liqo cluster to enstablish a peering. -type LiqoCredentials struct { - ClusterID string `json:"clusterID"` - ClusterName string `json:"clusterName"` - Token string `json:"token"` - Endpoint string `json:"endpoint"` -} - // ContractSpec defines the desired state of Contract. type ContractSpec struct { // This is the flavour on which the contract is based. It is used to lifetime maintain the critical characteristics of the contract. @@ -50,7 +42,7 @@ type ContractSpec struct { Seller nodecorev1alpha1.NodeIdentity `json:"seller"` // This credentials will be used by the customer to connect and enstablish a peering with the seller FLUIDOS Node through Liqo. - SellerCredentials LiqoCredentials `json:"sellerCredentials"` + SellerCredentials nodecorev1alpha1.LiqoCredentials `json:"sellerCredentials"` // This is the expiration time of the contract. It can be empty if the contract is not time limited. 
ExpirationTime string `json:"expirationTime,omitempty"` diff --git a/apis/reservation/v1alpha1/zz_generated.deepcopy.go b/apis/reservation/v1alpha1/zz_generated.deepcopy.go index 1cb8e25..341ac43 100644 --- a/apis/reservation/v1alpha1/zz_generated.deepcopy.go +++ b/apis/reservation/v1alpha1/zz_generated.deepcopy.go @@ -129,21 +129,6 @@ func (in *ContractStatus) DeepCopy() *ContractStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LiqoCredentials) DeepCopyInto(out *LiqoCredentials) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LiqoCredentials. -func (in *LiqoCredentials) DeepCopy() *LiqoCredentials { - if in == nil { - return nil - } - out := new(LiqoCredentials) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Reservation) DeepCopyInto(out *Reservation) { *out = *in diff --git a/pkg/utils/doc.go b/cmd/local-resource-manager/doc.go similarity index 86% rename from pkg/utils/doc.go rename to cmd/local-resource-manager/doc.go index 42eeac0..f433465 100644 --- a/pkg/utils/doc.go +++ b/cmd/local-resource-manager/doc.go @@ -12,5 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package utils contains shared utility methods for the FLUIDOS environment. 
-package utils +// Package main is the entrypoint for the local resource manager +package main diff --git a/cmd/local-resource-manager/main.go b/cmd/local-resource-manager/main.go index a601d71..624583f 100644 --- a/cmd/local-resource-manager/main.go +++ b/cmd/local-resource-manager/main.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" nodecorev1alpha1 "github.com/fluidos-project/node/apis/nodecore/v1alpha1" - localResourceManager "github.com/fluidos-project/node/pkg/local-resource-manager" + localresourcemanager "github.com/fluidos-project/node/pkg/local-resource-manager" "github.com/fluidos-project/node/pkg/utils/flags" ) @@ -50,14 +50,15 @@ func main() { flag.StringVar(&flags.AMOUNT, "amount", "", "Amount of money set for the flavours of this node") flag.StringVar(&flags.CURRENCY, "currency", "", "Currency of the money set for the flavours of this node") flag.StringVar(&flags.PERIOD, "period", "", "Period set for the flavours of this node") - flag.StringVar(&flags.RESOURCE_TYPE, "resources-types", "k8s-fluidos", "Type of the Flavour related to k8s resources") - flag.StringVar(&flags.CPU_MIN, "cpu-min", "0", "Minimum CPU value") - flag.StringVar(&flags.MEMORY_MIN, "memory-min", "0", "Minimum memory value") - flag.StringVar(&flags.CPU_STEP, "cpu-step", "0", "CPU step value") - flag.StringVar(&flags.MEMORY_STEP, "memory-step", "0", "Memory step value") - flag.Int64Var(&flags.MIN_COUNT, "min-count", 0, "Minimum number of flavours") - flag.Int64Var(&flags.MAX_COUNT, "max-count", 0, "Maximum number of flavours") - flag.StringVar(&flags.RESOURCE_NODE_LABEL, "node-resource-label", "node-role.fluidos.eu/resources", "Label used to filter the k8s nodes from which create flavours") + flag.StringVar(&flags.ResourceType, "resources-types", "k8s-fluidos", "Type of the Flavour related to k8s resources") + flag.StringVar(&flags.CPUMin, "cpu-min", "0", "Minimum CPU value") + flag.StringVar(&flags.MemoryMin, "memory-min", "0", "Minimum memory value") + 
flag.StringVar(&flags.CPUStep, "cpu-step", "0", "CPU step value") + flag.StringVar(&flags.MemoryStep, "memory-step", "0", "Memory step value") + flag.Int64Var(&flags.MinCount, "min-count", 0, "Minimum number of flavours") + flag.Int64Var(&flags.MaxCount, "max-count", 0, "Maximum number of flavours") + flag.StringVar(&flags.ResourceNodeLabel, "node-resource-label", "node-role.fluidos.eu/resources", + "Label used to filter the k8s nodes from which create flavours") flag.Parse() @@ -68,7 +69,7 @@ func main() { os.Exit(1) } - err = localResourceManager.Start(context.Background(), cl) + err = localresourcemanager.Start(context.Background(), cl) if err != nil { setupLog.Error(err, "Unable to start LocalResourceManager") os.Exit(1) @@ -78,6 +79,7 @@ func main() { mux.HandleFunc("/healthz", healthHandler) // health check endpoint mux.HandleFunc("/readyz", healthHandler) // readiness check endpoint + //nolint:gosec // We don't need this kind of security check server := &http.Server{ Addr: probeAddr, Handler: mux, @@ -90,7 +92,7 @@ func main() { } } -func healthHandler(w http.ResponseWriter, r *http.Request) { +func healthHandler(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte("OK")) + _, _ = w.Write([]byte("OK")) } diff --git a/cmd/rear-controller/doc.go b/cmd/rear-controller/doc.go new file mode 100644 index 0000000..37bbf23 --- /dev/null +++ b/cmd/rear-controller/doc.go @@ -0,0 +1,16 @@ +// Copyright 2022-2023 FLUIDOS Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package main is the entrypoint for the REAR Controller. +package main diff --git a/cmd/rear-controller/main.go b/cmd/rear-controller/main.go index 6f19bdc..d3dbcaa 100644 --- a/cmd/rear-controller/main.go +++ b/cmd/rear-controller/main.go @@ -1,18 +1,16 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2022-2023 FLUIDOS Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package main @@ -63,9 +61,10 @@ func main() { var probeAddr string flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") - flag.StringVar(&flags.GRPC_PORT, "grpc-port", "2710", "Port of the HTTP server") - flag.StringVar(&flags.HTTP_PORT, "http-port", "3004", "Port of the HTTP server") - flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") + flag.StringVar(&flags.GRPCPort, "grpc-port", "2710", "Port of the HTTP server") + flag.StringVar(&flags.HTTPPort, "http-port", "3004", "Port of the HTTP server") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") opts := zap.Options{ Development: true, } @@ -143,13 +142,13 @@ func main() { } // Periodically clear the transaction cache - if err := mgr.Add(manager.RunnableFunc(gw.CacheRefresher(flags.REFRESH_CACHE_INTERVAL))); err != nil { + if err := mgr.Add(manager.RunnableFunc(gw.CacheRefresher(flags.RefreshCacheInterval))); err != nil { klog.Errorf("Unable to set up transaction cache refresher: %s", err) os.Exit(1) } // Periodically check if Liqo is ready - if err := mgr.Add(manager.RunnableFunc(gw.LiqoChecker(flags.LIQO_CHECK_INTERVAL))); err != nil { + if err := mgr.Add(manager.RunnableFunc(gw.LiqoChecker(flags.LiqoCheckInterval))); err != nil { klog.Errorf("Unable to set up Liqo checker: %s", err) os.Exit(1) } @@ -166,19 +165,8 @@ func main() { os.Exit(1) } - // Start the REAR Gateway HTTP server - /* go func() { - gw.Start() - }() */ - - // Start the REAR GRPC server - /* go func() { - grpcServer.Start() - }() */ - - // TODO: Uncomment this when the webhook is ready. 
For now it does not work (Ale) + //nolint:gocritic // This code is needed to register the webhook // pcv := discoverymanager.NewPCValidator(mgr.GetClient()) - // mgr.GetWebhookServer().Register("/validate/peeringcandidate", &webhook.Admission{Handler: pcv}) setupLog.Info("starting manager") diff --git a/cmd/rear-manager/doc.go b/cmd/rear-manager/doc.go new file mode 100644 index 0000000..d93beee --- /dev/null +++ b/cmd/rear-manager/doc.go @@ -0,0 +1,16 @@ +// Copyright 2022-2023 FLUIDOS Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package main is the entrypoint for the REAR Manager. +package main diff --git a/cmd/rear-manager/main.go b/cmd/rear-manager/main.go index 30b4f81..b5b444e 100644 --- a/cmd/rear-manager/main.go +++ b/cmd/rear-manager/main.go @@ -1,32 +1,33 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ +// Copyright 2022-2023 FLUIDOS Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package main import ( + "context" "flag" "os" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. + liqodiscovery "github.com/liqotech/liqo/apis/discovery/v1alpha1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -46,6 +47,7 @@ func init() { utilruntime.Must(nodecorev1alpha1.AddToScheme(scheme)) utilruntime.Must(advertisementv1alpha1.AddToScheme(scheme)) utilruntime.Must(reservationv1alpha1.AddToScheme(scheme)) + utilruntime.Must(liqodiscovery.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } @@ -79,6 +81,19 @@ func main() { os.Exit(1) } + cache := mgr.GetCache() + + // Index the RemoteClusterID field of the Allocation CRD + indexFuncAllocation := func(obj client.Object) []string { + allocation := obj.(*nodecorev1alpha1.Allocation) + return []string{allocation.Spec.RemoteClusterID} + } + + if err := cache.IndexField(context.Background(), &nodecorev1alpha1.Allocation{}, "spec.remoteClusterID", indexFuncAllocation); err != nil { + setupLog.Error(err, "unable to create index 
for field", "field", "spec.remoteClusterID") + os.Exit(1) + } + if err = (&rearmanager.SolverReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), @@ -105,9 +120,9 @@ func main() { os.Exit(1) } - //av := rearmanager.NewValidator(mgr.GetClient()) - - //mgr.GetWebhookServer().Register("/validate/allocation", &webhook.Admission{Handler: av}) + //nolint:gocritic // This code is needed to register the webhook + // av := rearmanager.NewValidator(mgr.GetClient()) + // mgr.GetWebhookServer().Register("/validate/allocation", &webhook.Admission{Handler: av}) setupLog.Info("starting manager") if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { diff --git a/deployments/node/crds/advertisement.fluidos.eu_peeringcandidates.yaml b/deployments/node/crds/advertisement.fluidos.eu_peeringcandidates.yaml index e28e641..eae98db 100644 --- a/deployments/node/crds/advertisement.fluidos.eu_peeringcandidates.yaml +++ b/deployments/node/crds/advertisement.fluidos.eu_peeringcandidates.yaml @@ -35,7 +35,7 @@ spec: description: PeeringCandidateSpec defines the desired state of PeeringCandidate properties: flavour: - description: Flavour is the Schema for the flavours API + description: Flavour is the Schema for the flavours API. properties: apiVersion: description: 'APIVersion defines the versioned schema of this @@ -255,7 +255,7 @@ spec: - type type: object status: - description: FlavourStatus defines the observed state of Flavour + description: FlavourStatus defines the observed state of Flavour. 
properties: creationTime: description: This field represents the creation time of the diff --git a/deployments/node/crds/nodecore.fluidos.eu_allocations.yaml b/deployments/node/crds/nodecore.fluidos.eu_allocations.yaml index 0e11ca7..ee8b792 100644 --- a/deployments/node/crds/nodecore.fluidos.eu_allocations.yaml +++ b/deployments/node/crds/nodecore.fluidos.eu_allocations.yaml @@ -17,7 +17,7 @@ spec: - name: v1alpha1 schema: openAPIV3Schema: - description: Allocation is the Schema for the allocations API + description: Allocation is the Schema for the allocations API. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -34,6 +34,11 @@ spec: spec: description: AllocationSpec defines the desired state of Allocation properties: + destination: + description: This specifies if the destination of the allocation is + local or remote so if the allocation will be used locally or from + a remote cluster + type: string flavour: description: This Flavour describes the characteristics of the allocation, it is based on the Flavour CRD from which it was created @@ -256,7 +261,7 @@ spec: - type type: object status: - description: FlavourStatus defines the observed state of Flavour + description: FlavourStatus defines the observed state of Flavour. 
properties: creationTime: description: This field represents the creation time of the @@ -279,7 +284,7 @@ spec: type: object forwarding: description: This flag indicates if the allocation is a forwarding - allocation, if true it represents only a placeholder to undertand + allocation if true it represents only a placeholder to undertand that the cluster is just a proxy to another cluster type: boolean intentID: @@ -288,42 +293,56 @@ spec: correct allocation for a given intent type: string nodeName: - description: This is the corresponding Node or VirtualNode name + description: This is the corresponding Node or VirtualNode local name + type: string + partitioned: + description: This flags indicates if the Flavour from which the allocation + was created was partitioned or not + type: boolean + remoteClusterID: + description: This is the ID of the cluster that owns the allocation. type: string - partition: - description: This is the dimension of the allocation, it is based - on the Flavour CRD from which it was created + resources: + description: This is the dimension of the allocation properties: architecture: + description: Architecture is the architecture of the Flavour. type: string cpu: anyOf: - type: integer - type: string + description: CPU is the number of CPU cores of the Flavour. pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true ephemeral-storage: anyOf: - type: integer - type: string + description: EphemeralStorage is the amount of ephemeral storage + of the Flavour. pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true gpu: anyOf: - type: integer - type: string + description: GPU is the number of GPU cores of the Flavour. 
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true memory: anyOf: - type: integer - type: string + description: Memory is the amount of RAM of the Flavour. pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - storage: + persistent-storage: anyOf: - type: integer - type: string + description: PersistentStorage is the amount of persistent storage + of the Flavour. pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true required: @@ -337,29 +356,25 @@ spec: cluster)' type: string required: + - destination - flavour - intentID - nodeName + - partitioned - type type: object status: - description: AllocationStatus defines the observed state of Allocation + description: AllocationStatus defines the observed state of Allocation. 
properties: - creationTime: - description: The creation time of the allocation object - format: date-time - type: string lastUpdateTime: description: The last time the allocation was updated - format: date-time + type: string + message: + description: Message contains the last message of the allocation type: string status: description: This allow to know the current status of the allocation type: string - required: - - creationTime - - lastUpdateTime - - status type: object type: object served: true diff --git a/deployments/node/crds/nodecore.fluidos.eu_flavours.yaml b/deployments/node/crds/nodecore.fluidos.eu_flavours.yaml index e50823b..a8cc9da 100644 --- a/deployments/node/crds/nodecore.fluidos.eu_flavours.yaml +++ b/deployments/node/crds/nodecore.fluidos.eu_flavours.yaml @@ -46,7 +46,7 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: Flavour is the Schema for the flavours API + description: Flavour is the Schema for the flavours API. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -245,7 +245,7 @@ spec: - type type: object status: - description: FlavourStatus defines the observed state of Flavour + description: FlavourStatus defines the observed state of Flavour. properties: creationTime: description: This field represents the creation time of the Flavour. diff --git a/deployments/node/crds/nodecore.fluidos.eu_solvers.yaml b/deployments/node/crds/nodecore.fluidos.eu_solvers.yaml index d716922..6ff8368 100644 --- a/deployments/node/crds/nodecore.fluidos.eu_solvers.yaml +++ b/deployments/node/crds/nodecore.fluidos.eu_solvers.yaml @@ -224,6 +224,36 @@ spec: where the VFM (Liqo) is enstablishing a peering with the candidate node. type: string + contract: + description: Contract contains the Contract that the Contract Manager + has eventually created with the candidate. + properties: + name: + description: The name of the resource to be referenced. 
+ type: string + namespace: + description: The namespace containing the resource to be referenced. + It should be left empty in case of cluster-wide resources. + type: string + type: object + credentials: + description: Credentials contains the LiqoCredentials found in the + Contract. + properties: + clusterID: + type: string + clusterName: + type: string + endpoint: + type: string + token: + type: string + required: + - clusterID + - clusterName + - endpoint + - token + type: object discoveryPhase: description: DiscoveryPhase describes the status of the Discovery where the Discovery Manager is looking for matching flavours outside diff --git a/deployments/node/crds/reservation.fluidos.eu_contracts.yaml b/deployments/node/crds/reservation.fluidos.eu_contracts.yaml index a9e1102..f207890 100644 --- a/deployments/node/crds/reservation.fluidos.eu_contracts.yaml +++ b/deployments/node/crds/reservation.fluidos.eu_contracts.yaml @@ -316,7 +316,7 @@ spec: - type type: object status: - description: FlavourStatus defines the observed state of Flavour + description: FlavourStatus defines the observed state of Flavour. 
properties: creationTime: description: This field represents the creation time of the diff --git a/deployments/node/files/node-rear-controller-ClusterRole.yaml b/deployments/node/files/node-rear-controller-ClusterRole.yaml index 2a605ce..c774c97 100644 --- a/deployments/node/files/node-rear-controller-ClusterRole.yaml +++ b/deployments/node/files/node-rear-controller-ClusterRole.yaml @@ -67,6 +67,32 @@ rules: - get - list - watch +- apiGroups: + - nodecore.fluidos.eu + resources: + - allocations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - nodecore.fluidos.eu + resources: + - allocations/finalizers + verbs: + - update +- apiGroups: + - nodecore.fluidos.eu + resources: + - allocations/status + verbs: + - get + - patch + - update - apiGroups: - nodecore.fluidos.eu resources: diff --git a/deployments/node/files/node-rear-manager-ClusterRole.yaml b/deployments/node/files/node-rear-manager-ClusterRole.yaml index ec852e9..ab3e65f 100644 --- a/deployments/node/files/node-rear-manager-ClusterRole.yaml +++ b/deployments/node/files/node-rear-manager-ClusterRole.yaml @@ -54,10 +54,46 @@ rules: - apiGroups: - "" resources: - - configmaps + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - discovery.liqo.io + resources: + - foreignclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - discovery.liqo.io + resources: + - foreignclusters/finalizers verbs: - get + - patch + - update +- apiGroups: + - discovery.liqo.io + resources: + - foreignclusters/status + verbs: + - create + - delete + - get - list + - patch + - update - watch - apiGroups: - nodecore.fluidos.eu diff --git a/deployments/node/samples/solver.yaml b/deployments/node/samples/solver.yaml index 4808837..d7ed59b 100644 --- a/deployments/node/samples/solver.yaml +++ b/deployments/node/samples/solver.yaml @@ -4,13 +4,20 @@ metadata: name: solver-sample namespace: 
fluidos spec: + # This is the Selector used to find a Flavour (FLUIDOS node) that matches the requirements selector: + # ONLY k8s-fluidos is supported at the moment type: k8s-fluidos - architecture: amd64 + # REMEMBER: the architecture is the one of the node, not the one of the container. Change it accordingly + architecture: arm64 + # ONLY rangeSelector is supported at the moment rangeSelector: minCpu: 1 minMemory: 1 intentID: "intent-sample" + # This flag is used to indicate that the solver should find a candidate (FLUIDOS node) findCandidate: true + # This flag is used to indicate that the solver should reserve and buy the resources from the candidate (FLUIDOS node) reserveAndBuy: true - enstablishPeering: false \ No newline at end of file + # This flag is used to indicate that the solver should establish peering with the candidate (FLUIDOS node) + enstablishPeering: true \ No newline at end of file diff --git a/pkg/local-resource-manager/controller_manager.go b/pkg/local-resource-manager/controller_manager.go index 798e009..956790f 100644 --- a/pkg/local-resource-manager/controller_manager.go +++ b/pkg/local-resource-manager/controller_manager.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package localResourceManager +package localresourcemanager import ( "context" @@ -37,15 +37,14 @@ import ( // ensure to check and subtract the already allocated resources from the node // resources calculation. -// Start starts the controller +// Start starts the controller. 
func Start(ctx context.Context, cl client.Client) error { - klog.Info("Getting FLUIDOS Node identity...") nodeIdentity := getters.GetNodeIdentity(ctx, cl) if nodeIdentity == nil { klog.Info("Error getting FLUIDOS Node identity") - return fmt.Errorf("Error getting FLUIDOS Node identity") + return fmt.Errorf("error getting FLUIDOS Node identity") } klog.Info("Getting nodes resources...") @@ -55,11 +54,11 @@ func Start(ctx context.Context, cl client.Client) error { return err } - klog.Infof("Creating Flavours: found %d nodes", len(*nodes)) + klog.Infof("Creating Flavours: found %d nodes", len(nodes)) // For each node create a Flavour - for _, node := range *nodes { - flavour := resourceforge.ForgeFlavourFromMetrics(node, *nodeIdentity) + for i := range nodes { + flavour := resourceforge.ForgeFlavourFromMetrics(&nodes[i], *nodeIdentity) err := cl.Create(ctx, flavour) if err != nil { log.Printf("Error creating Flavour: %v", err) diff --git a/pkg/local-resource-manager/doc.go b/pkg/local-resource-manager/doc.go index 053d25e..53e24db 100644 --- a/pkg/local-resource-manager/doc.go +++ b/pkg/local-resource-manager/doc.go @@ -12,5 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// package localResourceManager implements the utility functions for the local resource manager controller -package localResourceManager +// Package localresourcemanager implements the utility functions for the local resource manager controller +package localresourcemanager diff --git a/pkg/local-resource-manager/node_services.go b/pkg/local-resource-manager/node_services.go index 9177c47..e219418 100644 --- a/pkg/local-resource-manager/node_services.go +++ b/pkg/local-resource-manager/node_services.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package localResourceManager +package localresourcemanager import ( "context" @@ -27,10 +27,10 @@ import ( "github.com/fluidos-project/node/pkg/utils/models" ) -// GetNodesResources retrieves the metrics from all the worker nodes in the cluster -func GetNodesResources(ctx context.Context, cl client.Client) (*[]models.NodeInfo, error) { +// GetNodesResources retrieves the metrics from all the worker nodes in the cluster. +func GetNodesResources(ctx context.Context, cl client.Client) ([]models.NodeInfo, error) { // Set a label selector to filter worker nodes - labelSelector := labels.Set{flags.RESOURCE_NODE_LABEL: "true"}.AsSelector() + labelSelector := labels.Set{flags.ResourceNodeLabel: "true"}.AsSelector() // Get a list of nodes nodes := &corev1.NodeList{} @@ -54,9 +54,11 @@ func GetNodesResources(ctx context.Context, cl client.Client) (*[]models.NodeInf var nodesInfo []models.NodeInfo // Print the name of each node - for _, node := range nodes.Items { - for _, metrics := range nodesMetrics.Items { - if node.Name != metrics.Name { + for n := range nodes.Items { + for m := range nodesMetrics.Items { + node := nodes.Items[n] + metrics := nodesMetrics.Items[m] + if nodes.Items[n].Name != nodesMetrics.Items[m].Name { // So that we can select just the nodes that we want continue } @@ -66,10 +68,10 @@ func GetNodesResources(ctx context.Context, cl client.Client) (*[]models.NodeInf } } - return &nodesInfo, nil + return nodesInfo, nil } -// forgeResourceMetrics creates from params a new ResourceMetrics Struct +// forgeResourceMetrics creates from params a new ResourceMetrics Struct. func forgeResourceMetrics(nodeMetrics *metricsv1beta1.NodeMetrics, node *corev1.Node) *models.ResourceMetrics { // Get the total and used resources cpuTotal := node.Status.Allocatable.Cpu() @@ -93,7 +95,7 @@ func forgeResourceMetrics(nodeMetrics *metricsv1beta1.NodeMetrics, node *corev1. 
} } -// forgeNodeInfo creates from params a new NodeInfo Struct +// forgeNodeInfo creates from params a new NodeInfo struct. func forgeNodeInfo(node *corev1.Node, metrics *models.ResourceMetrics) *models.NodeInfo { return &models.NodeInfo{ UID: string(node.UID), diff --git a/pkg/rear-controller/contract-manager/doc.go b/pkg/rear-controller/contract-manager/doc.go index 73e7cad..69b1c6d 100644 --- a/pkg/rear-controller/contract-manager/doc.go +++ b/pkg/rear-controller/contract-manager/doc.go @@ -12,5 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// package contractmanager implements the utility functions for the contract manager controller +// Package contractmanager implements the utility functions for the contract manager controller package contractmanager diff --git a/pkg/rear-controller/contract-manager/models.go b/pkg/rear-controller/contract-manager/models.go index de779d0..5733222 100644 --- a/pkg/rear-controller/contract-manager/models.go +++ b/pkg/rear-controller/contract-manager/models.go @@ -24,21 +24,21 @@ type Selector struct { EphemeralStorage int `json:"ephemeral-storage,omitempty"` } -// Transaction contains information regarding the transaction for a flavour +// Transaction contains information regarding the transaction for a flavour. type Transaction struct { TransactionID string `json:"transactionID"` FlavourID string `json:"flavourID"` StartTime time.Time `json:"startTime,omitempty"` } -// Purchase contains information regarding the purchase for a flavour +// Purchase contains information regarding the purchase for a flavour. type Purchase struct { TransactionID string `json:"transactionID"` FlavourID string `json:"flavourID"` BuyerID string `json:"buyerID"` } -// ResponsePurchase contain information after purchase a Flavour +// ResponsePurchase contains information after purchasing a Flavour.
type ResponsePurchase struct { FlavourID string `json:"flavourID"` BuyerID string `json:"buyerID"` diff --git a/pkg/rear-controller/contract-manager/reservation_controller.go b/pkg/rear-controller/contract-manager/reservation_controller.go index 28a4b45..c37b6d5 100644 --- a/pkg/rear-controller/contract-manager/reservation_controller.go +++ b/pkg/rear-controller/contract-manager/reservation_controller.go @@ -1,18 +1,16 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2022-2023 FLUIDOS Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package contractmanager @@ -34,7 +32,7 @@ import ( "github.com/fluidos-project/node/pkg/utils/tools" ) -// ReservationReconciler reconciles a Reservation object +// ReservationReconciler reconciles a Reservation object. 
type ReservationReconciler struct { client.Client Scheme *runtime.Scheme @@ -59,12 +57,9 @@ type ReservationReconciler struct { // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.2/pkg/reconcile func (r *ReservationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - klog.Info("Reconciling Reservation") log := ctrl.LoggerFrom(ctx, "reservation", req.NamespacedName) ctx = ctrl.LoggerInto(ctx, log) - // var contract *reservationv1alpha1.Contract - var reservation reservationv1alpha1.Reservation if err := r.Get(ctx, req.NamespacedName, &reservation); client.IgnoreNotFound(err) != nil { klog.Errorf("Error when getting Reservation %s before reconcile: %s", req.NamespacedName, err) @@ -88,18 +83,7 @@ func (r *ReservationReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, err } - if reservation.Status.Phase.Phase != nodecorev1alpha1.PhaseSolved && - reservation.Status.Phase.Phase != nodecorev1alpha1.PhaseTimeout && - reservation.Status.Phase.Phase != nodecorev1alpha1.PhaseFailed && - reservation.Status.Phase.Phase != nodecorev1alpha1.PhaseRunning && - reservation.Status.Phase.Phase != nodecorev1alpha1.PhaseIdle { - - klog.Infof("Reservation %s started", reservation.Name) - reservation.Status.Phase.StartTime = tools.GetTimeNow() - reservation.SetPhase(nodecorev1alpha1.PhaseRunning, "Reservation started") - reservation.SetReserveStatus(nodecorev1alpha1.PhaseIdle) - reservation.SetPurchaseStatus(nodecorev1alpha1.PhaseIdle) - + if checkInitialStatus(&reservation) { if err := r.updateReservationStatus(ctx, &reservation); err != nil { klog.Errorf("Error when updating Reservation %s status before reconcile: %s", req.NamespacedName, err) return ctrl.Result{}, err @@ -107,174 +91,207 @@ func (r *ReservationReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } + if reservation.Status.Phase.Phase == 
nodecorev1alpha1.PhaseSolved || + reservation.Status.Phase.Phase == nodecorev1alpha1.PhaseTimeout || + reservation.Status.Phase.Phase == nodecorev1alpha1.PhaseFailed { + return ctrl.Result{}, nil + } + if reservation.Spec.Reserve { - reservePhase := reservation.Status.ReservePhase - switch reservePhase { - case nodecorev1alpha1.PhaseRunning: - klog.Infof("Reservation %s: Reserve phase running", reservation.Name) - flavourID := namings.RetrieveFlavourNameFromPC(reservation.Spec.PeeringCandidate.Name) - res, err := r.Gateway.ReserveFlavour(ctx, &reservation, flavourID) - if err != nil { - if res != nil { - klog.Infof("Transaction is non correctly set, Retrying...") - return ctrl.Result{Requeue: true}, nil - } - klog.Errorf("Error when reserving flavour for Reservation %s: %s", req.NamespacedName, err) - reservation.SetReserveStatus(nodecorev1alpha1.PhaseFailed) - reservation.SetPhase(nodecorev1alpha1.PhaseFailed, "Reservation failed: error when reserving flavour") - if err := r.updateReservationStatus(ctx, &reservation); err != nil { - klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err - } - return ctrl.Result{}, err - } + if reservation.Status.ReservePhase != nodecorev1alpha1.PhaseSolved { + return r.handleReserve(ctx, req, &reservation) + } + klog.Infof("Reservation %s: Reserve phase solved", reservation.Name) + } - klog.Infof("Transaction: %v", res) + if reservation.Spec.Purchase && reservation.Status.ReservePhase == nodecorev1alpha1.PhaseSolved { + if reservation.Status.PurchasePhase != nodecorev1alpha1.PhaseSolved { + return r.handlePurchase(ctx, req, &reservation) + } + klog.Infof("Reservation %s: Purchase phase solved", reservation.Name) + } - // Create a Transaction CR starting from the transaction object - transaction := resourceforge.ForgeTransactionFromObj(res) + return ctrl.Result{}, nil +} - if err := r.Create(ctx, transaction); err != nil { - klog.Errorf("Error when creating Transaction %s: 
%s", transaction.Name, err) - return ctrl.Result{}, err - } +// updateReservationStatus updates the status of the Reservation. +func (r *ReservationReconciler) updateReservationStatus(ctx context.Context, reservation *reservationv1alpha1.Reservation) error { + return r.Status().Update(ctx, reservation) +} - klog.Infof("Transaction %s created", transaction.Name) - reservation.Status.TransactionID = res.TransactionID - reservation.SetReserveStatus(nodecorev1alpha1.PhaseSolved) +// SetupWithManager sets up the controller with the Manager. +func (r *ReservationReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&reservationv1alpha1.Reservation{}). + Complete(r) +} - // Update the status for reconcile - if err := r.updateReservationStatus(ctx, &reservation); err != nil { - klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err - } +func checkInitialStatus(reservation *reservationv1alpha1.Reservation) bool { + if reservation.Status.Phase.Phase != nodecorev1alpha1.PhaseSolved && + reservation.Status.Phase.Phase != nodecorev1alpha1.PhaseTimeout && + reservation.Status.Phase.Phase != nodecorev1alpha1.PhaseFailed && + reservation.Status.Phase.Phase != nodecorev1alpha1.PhaseRunning && + reservation.Status.Phase.Phase != nodecorev1alpha1.PhaseIdle { + klog.Infof("Reservation %s started", reservation.Name) + reservation.Status.Phase.StartTime = tools.GetTimeNow() + reservation.SetPhase(nodecorev1alpha1.PhaseRunning, "Reservation started") + reservation.SetReserveStatus(nodecorev1alpha1.PhaseIdle) + reservation.SetPurchaseStatus(nodecorev1alpha1.PhaseIdle) - return ctrl.Result{}, nil + return true + } + return false +} - case nodecorev1alpha1.PhaseSolved: - klog.Infof("Reserve %s solved", reservation.Name) - case nodecorev1alpha1.PhaseFailed: - klog.Infof("Reserve %s failed", reservation.Name) - return ctrl.Result{}, nil - case nodecorev1alpha1.PhaseIdle: - klog.Infof("Reserve
%s idle", reservation.Name) - reservation.SetReserveStatus(nodecorev1alpha1.PhaseRunning) - if err := r.updateReservationStatus(ctx, &reservation); err != nil { - klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err +func (r *ReservationReconciler) handleReserve(ctx context.Context, + req ctrl.Request, reservation *reservationv1alpha1.Reservation) (ctrl.Result, error) { + reservePhase := reservation.Status.ReservePhase + switch reservePhase { + case nodecorev1alpha1.PhaseRunning: + klog.Infof("Reservation %s: Reserve phase running", reservation.Name) + flavourID := namings.RetrieveFlavourNameFromPC(reservation.Spec.PeeringCandidate.Name) + res, err := r.Gateway.ReserveFlavour(ctx, reservation, flavourID) + if err != nil { + if res != nil { + klog.Infof("Transaction is non correctly set, Retrying...") + return ctrl.Result{Requeue: true}, nil } - return ctrl.Result{}, nil - default: - klog.Infof("Reserve %s unknown phase", reservation.Name) - reservation.SetReserveStatus(nodecorev1alpha1.PhaseIdle) - if err := r.updateReservationStatus(ctx, &reservation); err != nil { + klog.Errorf("Error when reserving flavour for Reservation %s: %s", req.NamespacedName, err) + reservation.SetReserveStatus(nodecorev1alpha1.PhaseFailed) + reservation.SetPhase(nodecorev1alpha1.PhaseFailed, "Reservation failed: error when reserving flavour") + if err := r.updateReservationStatus(ctx, reservation); err != nil { klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) return ctrl.Result{}, err } - return ctrl.Result{}, nil + return ctrl.Result{}, err } - } - if reservation.Spec.Purchase && reservation.Status.ReservePhase == nodecorev1alpha1.PhaseSolved { - purchasePhase := reservation.Status.PurchasePhase - switch purchasePhase { - case nodecorev1alpha1.PhaseIdle: - klog.Infof("Purchase phase for the reservation %s idle, starting...", reservation.Name) - 
reservation.SetPurchaseStatus(nodecorev1alpha1.PhaseRunning) - if err := r.updateReservationStatus(ctx, &reservation); err != nil { - klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err - } - return ctrl.Result{}, nil - case nodecorev1alpha1.PhaseRunning: - if reservation.Status.TransactionID == "" { - klog.Infof("TransactionID not set for Reservation %s", reservation.Name) - reservation.SetPurchaseStatus(nodecorev1alpha1.PhaseFailed) - reservation.SetPhase(nodecorev1alpha1.PhaseFailed, "Reservation failed: TransactionID not set") - if err := r.updateReservationStatus(ctx, &reservation); err != nil { - klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err - } - return ctrl.Result{}, nil - } + klog.Infof("Transaction: %v", res) - transactionID := reservation.Status.TransactionID - resPurchase, err := r.Gateway.PurchaseFlavour(ctx, transactionID, reservation.Spec.Seller) - if err != nil { - klog.Errorf("Error when purchasing flavour for Reservation %s: %s", req.NamespacedName, err) - reservation.SetPurchaseStatus(nodecorev1alpha1.PhaseFailed) - reservation.SetPhase(nodecorev1alpha1.PhaseFailed, "Reservation failed: error when purchasing flavour") - if err := r.updateReservationStatus(ctx, &reservation); err != nil { - klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err - } - return ctrl.Result{}, err - } - - klog.Infof("Purchase completed with status %s", resPurchase.Status) + // Create a Transaction CR starting from the transaction object + transaction := resourceforge.ForgeTransactionFromObj(res) - reservation.SetPurchaseStatus(nodecorev1alpha1.PhaseRunning) - - if err := r.Update(ctx, &reservation); err != nil { - klog.Errorf("Error when updating Reservation %s: %s", reservation.Name, err) - return ctrl.Result{}, err - } + if err := r.Create(ctx, transaction); err != nil { + 
klog.Errorf("Error when creating Transaction %s: %s", transaction.Name, err) + return ctrl.Result{}, err + } - klog.Infof("Reservation %s updated", reservation.Name) + klog.Infof("Transaction %s created", transaction.Name) + reservation.Status.TransactionID = res.TransactionID + reservation.SetReserveStatus(nodecorev1alpha1.PhaseSolved) - // Create a contract CR now that the reservation is solved - contract := resourceforge.ForgeContractFromObj(resPurchase.Contract) - err = r.Create(ctx, contract) - if errors.IsAlreadyExists(err) { - klog.Errorf("Error when creating Contract %s: %s", contract.Name, err) - } else if err != nil { - klog.Errorf("Error when creating Contract %s: %s", contract.Name, err) - return ctrl.Result{}, err - } - klog.Infof("Contract %s created", contract.Name) + // Update the status for reconcile + if err := r.updateReservationStatus(ctx, reservation); err != nil { + klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } - reservation.SetPurchaseStatus(nodecorev1alpha1.PhaseSolved) - reservation.Status.Contract = nodecorev1alpha1.GenericRef{ - Name: contract.Name, - Namespace: contract.Namespace, - } - reservation.SetPhase(nodecorev1alpha1.PhaseSolved, "Reservation solved") + return ctrl.Result{}, nil + case nodecorev1alpha1.PhaseFailed: + klog.Infof("Reserve %s failed", reservation.Name) + reservation.SetPhase(nodecorev1alpha1.PhaseFailed, "Reservation failed during the 'Reserve' phase") + if err := r.updateReservationStatus(ctx, reservation); err != nil { + klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + case nodecorev1alpha1.PhaseIdle: + klog.Infof("Reserve %s idle", reservation.Name) + reservation.SetReserveStatus(nodecorev1alpha1.PhaseRunning) + if err := r.updateReservationStatus(ctx, reservation); err != nil { + klog.Errorf("Error when updating Reservation %s status: %s", 
req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + default: + klog.Infof("Reserve %s unknown phase", reservation.Name) + reservation.SetReserveStatus(nodecorev1alpha1.PhaseIdle) + if err := r.updateReservationStatus(ctx, reservation); err != nil { + klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } +} - if err := r.updateReservationStatus(ctx, &reservation); err != nil { +func (r *ReservationReconciler) handlePurchase(ctx context.Context, + req ctrl.Request, reservation *reservationv1alpha1.Reservation) (ctrl.Result, error) { + purchasePhase := reservation.Status.PurchasePhase + switch purchasePhase { + case nodecorev1alpha1.PhaseIdle: + klog.Infof("Purchase phase for the reservation %s idle, starting...", reservation.Name) + reservation.SetPurchaseStatus(nodecorev1alpha1.PhaseRunning) + if err := r.updateReservationStatus(ctx, reservation); err != nil { + klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + case nodecorev1alpha1.PhaseRunning: + if reservation.Status.TransactionID == "" { + klog.Infof("TransactionID not set for Reservation %s", reservation.Name) + reservation.SetPurchaseStatus(nodecorev1alpha1.PhaseFailed) + reservation.SetPhase(nodecorev1alpha1.PhaseFailed, "Reservation failed: TransactionID not set") + if err := r.updateReservationStatus(ctx, reservation); err != nil { klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) return ctrl.Result{}, err } - - return ctrl.Result{}, nil - - case nodecorev1alpha1.PhaseFailed: - klog.Infof("Purchase phase for the reservation %s failed", reservation.Name) return ctrl.Result{}, nil + } - case nodecorev1alpha1.PhaseSolved: - klog.Infof("Purchase phase for the reservation %s solved", reservation.Name) - default: - klog.Infof("Purchase phase for the 
reservation %s unknown", reservation.Name) - reservation.SetPurchaseStatus(nodecorev1alpha1.PhaseIdle) - if err := r.updateReservationStatus(ctx, &reservation); err != nil { + transactionID := reservation.Status.TransactionID + resPurchase, err := r.Gateway.PurchaseFlavour(ctx, transactionID, reservation.Spec.Seller) + if err != nil { + klog.Errorf("Error when purchasing flavour for Reservation %s: %s", req.NamespacedName, err) + reservation.SetPurchaseStatus(nodecorev1alpha1.PhaseFailed) + reservation.SetPhase(nodecorev1alpha1.PhaseFailed, "Reservation failed: error when purchasing flavour") + if err := r.updateReservationStatus(ctx, reservation); err != nil { klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) return ctrl.Result{}, err } - return ctrl.Result{}, nil + return ctrl.Result{}, err } - } - return ctrl.Result{}, nil -} + klog.Infof("Purchase completed with status %s", resPurchase.Status) -// updateSolverStatus updates the status of the discovery -func (r *ReservationReconciler) updateReservationStatus(ctx context.Context, reservation *reservationv1alpha1.Reservation) error { - return r.Status().Update(ctx, reservation) -} + // Create a contract CR now that the reservation is solved + contract := resourceforge.ForgeContractFromObj(&resPurchase.Contract) + err = r.Create(ctx, contract) + if errors.IsAlreadyExists(err) { + klog.Errorf("Error when creating Contract %s: %s", contract.Name, err) + } else if err != nil { + klog.Errorf("Error when creating Contract %s: %s", contract.Name, err) + return ctrl.Result{}, err + } + klog.Infof("Contract %s created", contract.Name) -// SetupWithManager sets up the controller with the Manager. -func (r *ReservationReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&reservationv1alpha1.Reservation{}). 
- Complete(r) + reservation.Status.Contract = nodecorev1alpha1.GenericRef{ + Name: contract.Name, + Namespace: contract.Namespace, + } + reservation.SetPurchaseStatus(nodecorev1alpha1.PhaseSolved) + reservation.SetPhase(nodecorev1alpha1.PhaseSolved, "Reservation solved") + + if err := r.updateReservationStatus(ctx, reservation); err != nil { + klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil + case nodecorev1alpha1.PhaseFailed: + klog.Infof("Purchase phase for the reservation %s failed", reservation.Name) + reservation.SetPhase(nodecorev1alpha1.PhaseFailed, "Reservation failed during the 'Purchase' phase") + if err := r.updateReservationStatus(ctx, reservation); err != nil { + klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + default: + klog.Infof("Purchase phase for the reservation %s unknown", reservation.Name) + reservation.SetPurchaseStatus(nodecorev1alpha1.PhaseIdle) + if err := r.updateReservationStatus(ctx, reservation); err != nil { + klog.Errorf("Error when updating Reservation %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } } diff --git a/pkg/rear-controller/discovery-manager/discovery_controller.go b/pkg/rear-controller/discovery-manager/discovery_controller.go index 39d7c86..ef4d872 100644 --- a/pkg/rear-controller/discovery-manager/discovery_controller.go +++ b/pkg/rear-controller/discovery-manager/discovery_controller.go @@ -1,18 +1,16 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2022-2023 FLUIDOS Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package discoverymanager @@ -31,7 +29,7 @@ import ( "github.com/fluidos-project/node/pkg/utils/tools" ) -// DiscoveryReconciler reconciles a Discovery object +// DiscoveryReconciler reconciles a Discovery object. type DiscoveryReconciler struct { client.Client Scheme *runtime.Scheme @@ -49,10 +47,6 @@ type DiscoveryReconciler struct { // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the Discovery object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. 
// // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.2/pkg/reconcile @@ -79,7 +73,6 @@ func (r *DiscoveryReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( discovery.Status.Phase.Phase != nodecorev1alpha1.PhaseFailed && discovery.Status.Phase.Phase != nodecorev1alpha1.PhaseRunning && discovery.Status.Phase.Phase != nodecorev1alpha1.PhaseIdle { - discovery.Status.Phase.StartTime = tools.GetTimeNow() discovery.SetPhase(nodecorev1alpha1.PhaseRunning, "Discovery started") @@ -90,9 +83,10 @@ func (r *DiscoveryReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, nil } + //nolint:exhaustive // We don't need to handle all the cases switch discovery.Status.Phase.Phase { case nodecorev1alpha1.PhaseRunning: - flavours, err := r.Gateway.DiscoverFlavours(discovery.Spec.Selector) + flavours, err := r.Gateway.DiscoverFlavours(ctx, discovery.Spec.Selector) if err != nil { klog.Errorf("Error when getting Flavour: %s", err) discovery.SetPhase(nodecorev1alpha1.PhaseFailed, "Error when getting Flavour") @@ -116,7 +110,7 @@ func (r *DiscoveryReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( klog.Infof("Flavours found: %d", len(flavours)) // TODO: check if a corresponding PeeringCandidate already exists!! - var first bool = true + first := true for _, flavour := range flavours { if first { // We refer to the first peering candidate as the one that is reserved @@ -159,7 +153,7 @@ func (r *DiscoveryReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, nil } -// updateDiscoveryStatus updates the status of the discovery +// updateDiscoveryStatus updates the status of the discovery. 
func (r *DiscoveryReconciler) updateDiscoveryStatus(ctx context.Context, discovery *advertisementv1alpha1.Discovery) error { return r.Status().Update(ctx, discovery) } diff --git a/pkg/rear-controller/discovery-manager/doc.go b/pkg/rear-controller/discovery-manager/doc.go index 9b32566..b19a2a4 100644 --- a/pkg/rear-controller/discovery-manager/doc.go +++ b/pkg/rear-controller/discovery-manager/doc.go @@ -12,5 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// package discoverymanager implements the utility functions for the discovery manager controller +// Package discoverymanager implements the utility functions for the discovery manager controller package discoverymanager diff --git a/pkg/rear-controller/discovery-manager/peeringcandidate_wh.go b/pkg/rear-controller/discovery-manager/peeringcandidate_wh.go index 94c93c8..4fa34c4 100644 --- a/pkg/rear-controller/discovery-manager/peeringcandidate_wh.go +++ b/pkg/rear-controller/discovery-manager/peeringcandidate_wh.go @@ -28,19 +28,25 @@ import ( advertisementv1alpha1 "github.com/fluidos-project/node/apis/advertisement/v1alpha1" ) +//nolint:lll // This is a long line +// clusterRole //+kubebuilder:webhook:path=/validate/peeringcandidate,mutating=false,failurePolicy=ignore,groups=advertisement.node.fluidos.io,resources=peeringcandidates,verbs=create;update;delete,versions=v1alpha1,name=pc.validate.fluidos.eu,sideEffects=None,admissionReviewVersions={v1,v1beta1} +// PCValidator is the PeeringCandidate validator. type PCValidator struct { client client.Client decoder *admission.Decoder } -func NewPCValidator(client client.Client) *PCValidator { - return &PCValidator{client: client, decoder: admission.NewDecoder(runtime.NewScheme())} +// NewPCValidator creates a new PCValidator. 
+func NewPCValidator(c client.Client) *PCValidator { + return &PCValidator{client: c, decoder: admission.NewDecoder(runtime.NewScheme())} } +// Handle manages the validation of the PeeringCandidate. +// +//nolint:gocritic // This function cannot be changed func (v *PCValidator) Handle(ctx context.Context, req admission.Request) admission.Response { - switch req.Operation { case admissionv1.Create: return v.HandleCreate(ctx, req) @@ -53,11 +59,14 @@ func (v *PCValidator) Handle(ctx context.Context, req admission.Request) admissi } } -func (v *PCValidator) HandleCreate(ctx context.Context, req admission.Request) admission.Response { +// HandleCreate manages the validation of the PeeringCandidate creation. +// +//nolint:gocritic // This function cannot be changed +func (v *PCValidator) HandleCreate(_ context.Context, req admission.Request) admission.Response { pc, err := v.DecodePeeringCandidate(req.Object) if err != nil { klog.Errorf("Failed to decode peering candidate: %v", err) - return admission.Errored(http.StatusBadRequest, fmt.Errorf("failed to decode peering candidate: %v", err)) + return admission.Errored(http.StatusBadRequest, fmt.Errorf("failed to decode peering candidate: %w", err)) } if pc.Spec.Reserved && pc.Spec.SolverID == "" { @@ -71,24 +80,32 @@ func (v *PCValidator) HandleCreate(ctx context.Context, req admission.Request) a return admission.Allowed("") } -func (v *PCValidator) HandleDelete(ctx context.Context, req admission.Request) admission.Response { - // Here we could check if the peering candidate is reserved and if so, we need to check if the solver ID matches the one of the solver that is deleting the peering candidate +// HandleDelete manages the validation of the PeeringCandidate deletion. 
+// +//nolint:gocritic // This function cannot be changed +func (v *PCValidator) HandleDelete(_ context.Context, req admission.Request) admission.Response { + // Here we could check if the peering candidate is reserved and if so, we need to check if the solver ID + // matches the one of the solver that is deleting the peering candidate // or if the solver ID is empty, we need to check if there is a Contract that is using this peering candidate // Maybe this is not the right logic but it need to be discussed and implemented + _ = req return admission.Allowed("") } -func (v *PCValidator) HandleUpdate(ctx context.Context, req admission.Request) admission.Response { +// HandleUpdate manages the validation of the PeeringCandidate update. +// +//nolint:gocritic // This function cannot be changed +func (v *PCValidator) HandleUpdate(_ context.Context, req admission.Request) admission.Response { pc, err := v.DecodePeeringCandidate(req.Object) if err != nil { klog.Errorf("Failed to decode peering candidate: %v", err) - return admission.Errored(http.StatusBadRequest, fmt.Errorf("failed to decode peering candidate: %v", err)) + return admission.Errored(http.StatusBadRequest, fmt.Errorf("failed to decode peering candidate: %w", err)) } pcOld, err := v.DecodePeeringCandidate(req.OldObject) if err != nil { klog.Errorf("Failed to decode peering old candidate: %v", err) - return admission.Errored(http.StatusBadRequest, fmt.Errorf("failed to decode peering old candidate: %v", err)) + return admission.Errored(http.StatusBadRequest, fmt.Errorf("failed to decode peering old candidate: %w", err)) } // PC can be updated only if: @@ -103,7 +120,8 @@ func (v *PCValidator) HandleUpdate(ctx context.Context, req admission.Request) a return admission.Allowed("") } - return admission.Denied("Peering candidate can be updated only if it is not reserved or if both Reserved flag and SolverID are set and you want to clear both in the same time") + //nolint:lll // This is a long line + return 
admission.Denied("peering candidate can be updated only if it is not reserved or if both Reserved flag and SolverID are set and you want to clear both in the same time") } /* func (v *PCValidator) InjectDecoder(d *admission.Decoder) error { @@ -111,6 +129,7 @@ func (v *PCValidator) HandleUpdate(ctx context.Context, req admission.Request) a return nil } */ +// DecodePeeringCandidate decodes the PeeringCandidate. func (v *PCValidator) DecodePeeringCandidate(obj runtime.RawExtension) (pc *advertisementv1alpha1.PeeringCandidate, err error) { pc = &advertisementv1alpha1.PeeringCandidate{} err = v.decoder.DecodeRaw(obj, pc) diff --git a/pkg/rear-controller/gateway/client.go b/pkg/rear-controller/gateway/client.go index 735a3e8..be837f7 100644 --- a/pkg/rear-controller/gateway/client.go +++ b/pkg/rear-controller/gateway/client.go @@ -31,7 +31,8 @@ import ( ) // TODO: move this function into the REAR Gateway package -// reserveFlavour reserves a flavour with the given flavourID + +// ReserveFlavour reserves a flavour with the given flavourID. 
func (g *Gateway) ReserveFlavour(ctx context.Context, reservation *reservationv1alpha1.Reservation, flavourID string) (*models.Transaction, error) { err := checkLiqoReadiness(g.LiqoReady) if err != nil { @@ -75,11 +76,11 @@ func (g *Gateway) ReserveFlavour(ctx context.Context, reservation *reservationv1 // TODO: this url should be taken from the nodeIdentity of the flavour bodyBytes := bytes.NewBuffer(selectorBytes) - url := fmt.Sprintf("http://%s%s%s", reservation.Spec.Seller.IP, RESERVE_FLAVOUR_PATH, flavourID) + url := fmt.Sprintf("http://%s%s%s", reservation.Spec.Seller.IP, ReserveFlavourPath, flavourID) klog.Infof("Sending request to %s", url) - resp, err := makeRequest("POST", url, bodyBytes) + resp, err := makeRequest(ctx, "POST", url, bodyBytes) if err != nil { return nil, err } @@ -105,12 +106,12 @@ func (g *Gateway) ReserveFlavour(ctx context.Context, reservation *reservationv1 klog.Infof("Flavour %s reserved: transaction ID %s", flavourID, transaction.TransactionID) - g.addNewTransacion(transaction) + g.addNewTransacion(&transaction) return &transaction, nil } -// PurchaseFlavour purchases a flavour with the given flavourID +// PurchaseFlavour purchases a flavour with the given flavourID. 
func (g *Gateway) PurchaseFlavour(ctx context.Context, transactionID string, seller nodecorev1alpha1.NodeIdentity) (*models.ResponsePurchase, error) { err := checkLiqoReadiness(g.LiqoReady) if err != nil { @@ -136,9 +137,9 @@ func (g *Gateway) PurchaseFlavour(ctx context.Context, transactionID string, sel bodyBytes := bytes.NewBuffer(selectorBytes) // TODO: this url should be taken from the nodeIdentity of the flavour - url := fmt.Sprintf("http://%s%s%s", seller.IP, PURCHASE_FLAVOUR_PATH, transactionID) + url := fmt.Sprintf("http://%s%s%s", seller.IP, PurchaseFlavourPath, transactionID) - resp, err := makeRequest("POST", url, bodyBytes) + resp, err := makeRequest(ctx, "POST", url, bodyBytes) if err != nil { return nil, err } @@ -156,8 +157,8 @@ func (g *Gateway) PurchaseFlavour(ctx context.Context, transactionID string, sel return &purchase, nil } -// SearchFlavour is a function that returns an array of Flavour that fit the Selector by performing a get request to an http server -func (g *Gateway) DiscoverFlavours(selector *nodecorev1alpha1.FlavourSelector) ([]*nodecorev1alpha1.Flavour, error) { +// DiscoverFlavours is a function that returns an array of Flavour that fit the Selector by performing a get request to an http server. 
+func (g *Gateway) DiscoverFlavours(ctx context.Context, selector *nodecorev1alpha1.FlavourSelector) ([]*nodecorev1alpha1.Flavour, error) { err := checkLiqoReadiness(g.LiqoReady) if err != nil { return nil, err @@ -166,7 +167,7 @@ func (g *Gateway) DiscoverFlavours(selector *nodecorev1alpha1.FlavourSelector) ( var s *models.Selector var flavoursCR []*nodecorev1alpha1.Flavour - if selector == nil { + if selector != nil { s = parseutil.ParseFlavourSelector(selector) } @@ -174,7 +175,7 @@ func (g *Gateway) DiscoverFlavours(selector *nodecorev1alpha1.FlavourSelector) ( // Send the POST request to all the servers in the list for _, provider := range providers { - flavour, err := discover(s, provider) + flavour, err := discover(ctx, s, provider) if err != nil { klog.Errorf("Error when searching Flavour: %s", err) return nil, err @@ -186,17 +187,19 @@ func (g *Gateway) DiscoverFlavours(selector *nodecorev1alpha1.FlavourSelector) ( return flavoursCR, nil } -func discover(s *models.Selector, provider string) (*nodecorev1alpha1.Flavour, error) { +func discover(ctx context.Context, s *models.Selector, provider string) (*nodecorev1alpha1.Flavour, error) { if s != nil { - return searchFlavourWithSelector(s, provider) + klog.Infof("Searching Flavour with selector %v", s) + return searchFlavourWithSelector(ctx, s, provider) } - return searchFlavour(provider) + klog.Infof("Searching Flavour with no selector") + return searchFlavour(ctx, provider) } func checkLiqoReadiness(b bool) error { if !b { klog.Errorf("Liqo is not ready, please check or wait for the Liqo installation") - return fmt.Errorf("Liqo is not ready, please check or wait for the Liqo installation") + return fmt.Errorf("liqo is not ready, please check or wait for the Liqo installation") } return nil } diff --git a/pkg/rear-controller/gateway/doc.go b/pkg/rear-controller/gateway/doc.go index e695cad..fd5dce0 100644 --- a/pkg/rear-controller/gateway/doc.go +++ b/pkg/rear-controller/gateway/doc.go @@ -12,5 +12,5 @@ // 
See the License for the specific language governing permissions and // limitations under the License. -// package gateway contains the HTTP Server and the utility functions for the REAR Gateway +// Package gateway contains the HTTP Server and the utility functions for the REAR Gateway package gateway diff --git a/pkg/rear-controller/gateway/gateway.go b/pkg/rear-controller/gateway/gateway.go index 541c294..17b6dad 100644 --- a/pkg/rear-controller/gateway/gateway.go +++ b/pkg/rear-controller/gateway/gateway.go @@ -36,20 +36,29 @@ import ( ) // clusterRole -//+kubebuilder:rbac:groups=reservation.fluidos.eu,resources=contracts,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=flavours,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=flavours/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=flavours/finalizers,verbs=update -//+kubebuilder:rbac:groups=core,resources=*,verbs=get;list;watch +// +kubebuilder:rbac:groups=reservation.fluidos.eu,resources=contracts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=flavours,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=flavours/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=flavours/finalizers,verbs=update +// +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=allocations,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=allocations/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=allocations/finalizers,verbs=update +// +kubebuilder:rbac:groups=core,resources=*,verbs=get;list;watch const ( - LIST_FLAVOURS_PATH = "/api/listflavours" - LIST_FLAVOUR_BY_ID_PATH = "/api/listflavours/" - RESERVE_FLAVOUR_PATH = 
"/api/reserveflavour/" - PURCHASE_FLAVOUR_PATH = "/api/purchaseflavour/" - LIST_FLAVOURS_BY_SELECTOR_PATH = "/api/listflavours/selector" + // ListFlavoursPath is the path to get the list of flavours. + ListFlavoursPath = "/api/listflavours" + // ListFlavourByIDPath is the path to get a flavour by ID. + ListFlavourByIDPath = "/api/listflavours/" + // ReserveFlavourPath is the path to reserve a flavour. + ReserveFlavourPath = "/api/reserveflavour/" + // PurchaseFlavourPath is the path to purchase a flavour. + PurchaseFlavourPath = "/api/purchaseflavour/" + // ListFlavoursBySelectorPath is the path to get the list of flavours by selector. + ListFlavoursBySelectorPath = "/api/listflavours/selector" ) +// Gateway is the object that contains all the logical data structures of the REAR Gateway. type Gateway struct { // NodeIdentity is the identity of the FLUIDOS Node ID *nodecorev1alpha1.NodeIdentity @@ -67,6 +76,7 @@ type Gateway struct { ClusterID string } +// NewGateway creates a new Gateway object. func NewGateway(c client.Client) *Gateway { return &Gateway{ client: c, @@ -76,14 +86,14 @@ } } -// Start starts a new HTTP server +// Start starts a new HTTP server. 
func (g *Gateway) Start(ctx context.Context) error { klog.Info("Getting FLUIDOS Node identity...") nodeIdentity := getters.GetNodeIdentity(ctx, g.client) if nodeIdentity == nil { klog.Info("Error getting FLUIDOS Node identity") - return fmt.Errorf("Error getting FLUIDOS Node identity") + return fmt.Errorf("error getting FLUIDOS Node identity") } g.RegisterNodeIdentity(nodeIdentity) @@ -97,23 +107,26 @@ func (g *Gateway) Start(ctx context.Context) error { router.Use(g.readinessMiddleware) // Gateway endpoints - router.HandleFunc(LIST_FLAVOURS_PATH, g.getFlavours).Methods("GET") - //router.HandleFunc(LIST_FLAVOUR_BY_ID_PATH+"{flavourID}", g.getFlavourByID).Methods("GET") - router.HandleFunc(LIST_FLAVOURS_BY_SELECTOR_PATH, g.getFlavoursBySelector).Methods("POST") - router.HandleFunc(RESERVE_FLAVOUR_PATH+"{flavourID}", g.reserveFlavour).Methods("POST") - router.HandleFunc(PURCHASE_FLAVOUR_PATH+"{transactionID}", g.purchaseFlavour).Methods("POST") + router.HandleFunc(ListFlavoursPath, g.getFlavours).Methods("GET") + //nolint:gocritic // For the moment we are not using this endpoint + // router.HandleFunc(LIST_FLAVOUR_BY_ID_PATH+"{flavourID}", g.getFlavourByID).Methods("GET") + router.HandleFunc(ListFlavoursBySelectorPath, g.getFlavoursBySelector).Methods("POST") + router.HandleFunc(ReserveFlavourPath+"{flavourID}", g.reserveFlavour).Methods("POST") + router.HandleFunc(PurchaseFlavourPath+"{transactionID}", g.purchaseFlavour).Methods("POST") // Configure the HTTP server + //nolint:gosec // we are not using a TLS certificate srv := &http.Server{ Handler: router, - Addr: ":" + flags.HTTP_PORT, + Addr: ":" + flags.HTTPPort, } // Start server HTTP - klog.Infof("Starting HTTP server on port %s", flags.HTTP_PORT) + klog.Infof("Starting HTTP server on port %s", flags.HTTPPort) return srv.ListenAndServe() } +// RegisterNodeIdentity registers the FLUIDOS Node identity into the Gateway. 
func (g *Gateway) RegisterNodeIdentity(nodeIdentity *nodecorev1alpha1.NodeIdentity) { g.ID = nodeIdentity } @@ -137,17 +150,20 @@ func (g *Gateway) readinessMiddleware(next http.Handler) http.Handler { }) } +// CacheRefresher is a function that periodically checks the cache and removes expired transactions. func (g *Gateway) CacheRefresher(interval time.Duration) func(ctx context.Context) error { return func(ctx context.Context) error { return wait.PollUntilContextCancel(ctx, interval, false, g.refreshCache) } } -// check expired transactions and remove them from the cache +// check expired transactions and remove them from the cache. +// +//nolint:revive // we need to pass ctx as parameter to be compliant with the Poller interface func (g *Gateway) refreshCache(ctx context.Context) (bool, error) { klog.Infof("Refreshing cache") for transactionID, transaction := range g.Transactions { - if tools.CheckExpiration(transaction.StartTime, flags.EXPIRATION_TRANSACTION) { + if tools.CheckExpiration(transaction.StartTime, flags.ExpirationTransaction) { klog.Infof("Transaction %s expired, removing it from cache...", transactionID) g.removeTransaction(transactionID) return false, nil @@ -156,12 +172,14 @@ func (g *Gateway) refreshCache(ctx context.Context) (bool, error) { return false, nil } +// LiqoChecker is a function that periodically checks if Liqo is ready. func (g *Gateway) LiqoChecker(interval time.Duration) func(ctx context.Context) error { return func(ctx context.Context) error { return wait.PollUntilContextCancel(ctx, interval, false, g.checkLiqoReadiness) } } +// check if Liqo is ready and set the LiqoReady flag to true. 
func (g *Gateway) checkLiqoReadiness(ctx context.Context) (bool, error) { klog.Infof("Checking Liqo readiness") if g.LiqoReady && g.ClusterID != "" { diff --git a/pkg/rear-controller/gateway/provider.go b/pkg/rear-controller/gateway/provider.go index 30c661e..6acbfae 100644 --- a/pkg/rear-controller/gateway/provider.go +++ b/pkg/rear-controller/gateway/provider.go @@ -38,10 +38,8 @@ import ( "github.com/fluidos-project/node/pkg/utils/tools" ) -// TODO: all these functions should be moved into the REAR Gateway package - -// getFlavours gets all the flavours CRs from the cluster -func (g *Gateway) getFlavours(w http.ResponseWriter, r *http.Request) { +// getFlavours gets all the flavours CRs from the cluster. +func (g *Gateway) getFlavours(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "application/json") klog.Infof("Processing request for getting all Flavours...") @@ -56,8 +54,8 @@ func (g *Gateway) getFlavours(w http.ResponseWriter, r *http.Request) { klog.Infof("Found %d Flavours in the cluster", len(flavours)) // Filtering only the available flavours - for i, f := range flavours { - if !f.Spec.OptionalFields.Availability { + for i := range flavours { + if !flavours[i].Spec.OptionalFields.Availability { flavours = append(flavours[:i], flavours[i+1:]...) 
} } @@ -71,16 +69,18 @@ func (g *Gateway) getFlavours(w http.ResponseWriter, r *http.Request) { // Select the flavour with the max CPU max := resource.MustParse("0") - var selected nodecorev1alpha1.Flavour - for _, f := range flavours { - if f.Spec.Characteristics.Cpu.Cmp(max) == 1 { - max = f.Spec.Characteristics.Cpu - selected = f + index := 0 + for i := range flavours { + if flavours[i].Spec.Characteristics.Cpu.Cmp(max) == 1 { + max = flavours[i].Spec.Characteristics.Cpu + index = i } } + selected := *flavours[index].DeepCopy() + klog.Infof("Flavour %s selected - Parsing...", selected.Name) - parsed := parseutil.ParseFlavour(selected) + parsed := parseutil.ParseFlavour(&selected) klog.Infof("Flavour parsed: %v", parsed) @@ -117,7 +117,7 @@ func (g *Gateway) getFlavours(w http.ResponseWriter, r *http.Request) { } */ -// getFlavourBySelectorHandler gets the flavour CRs from the cluster that match the selector +// getFlavourBySelectorHandler gets the flavour CRs from the cluster that match the selector. func (g *Gateway) getFlavoursBySelector(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") @@ -148,8 +148,8 @@ func (g *Gateway) getFlavoursBySelector(w http.ResponseWriter, r *http.Request) klog.Infof("Found %d Flavours in the cluster", len(flavours)) // Filtering only the available flavours - for i, f := range flavours { - if !f.Spec.OptionalFields.Availability { + for i := range flavours { + if !flavours[i].Spec.OptionalFields.Availability { flavours = append(flavours[:i], flavours[i+1:]...) 
} } @@ -185,16 +185,19 @@ func (g *Gateway) getFlavoursBySelector(w http.ResponseWriter, r *http.Request) // Select the flavour with the max CPU max := resource.MustParse("0") - var selected nodecorev1alpha1.Flavour - for _, f := range flavoursSelected { - if f.Spec.Characteristics.Cpu.Cmp(max) == 1 { - max = f.Spec.Characteristics.Cpu - selected = f + index := 0 + + for i := range flavoursSelected { + if flavoursSelected[i].Spec.Characteristics.Cpu.Cmp(max) == 1 { + max = flavoursSelected[i].Spec.Characteristics.Cpu + index = i } } + selected := *flavoursSelected[index].DeepCopy() + klog.Infof("Flavour %s selected - Parsing...", selected.Name) - parsed := parseutil.ParseFlavour(selected) + parsed := parseutil.ParseFlavour(&selected) klog.Infof("Flavour parsed: %v", parsed) @@ -202,12 +205,12 @@ func (g *Gateway) getFlavoursBySelector(w http.ResponseWriter, r *http.Request) encodeResponse(w, parsed) } -// reserveFlavour reserves a Flavour by its flavourID +// reserveFlavour reserves a Flavour by its flavourID. 
func (g *Gateway) reserveFlavour(w http.ResponseWriter, r *http.Request) { // Get the flavourID value from the URL parameters params := mux.Vars(r) flavourID := params["flavourID"] - var transaction models.Transaction + var transaction *models.Transaction var request models.ReserveRequest if err := json.NewDecoder(r.Body).Decode(&request); err != nil { @@ -216,6 +219,8 @@ return } + klog.Infof("Partition: %v", *request.Partition) + if flavourID != request.FlavourID { klog.Infof("Mismatch body & param: %s != %s", flavourID, request.FlavourID) http.Error(w, "Mismatch body & param", http.StatusConflict) @@ -247,7 +252,7 @@ } // Create a new transaction - transaction := resourceforge.ForgeTransactionObj(transactionID, request) + transaction = resourceforge.ForgeTransactionObj(transactionID, &request) // Add the transaction to the transactions map g.addNewTransacion(transaction) @@ -258,7 +263,7 @@ encodeResponse(w, transaction) } -// purchaseFlavour is an handler for purchasing a Flavour +// purchaseFlavour is a handler for purchasing a Flavour. 
func (g *Gateway) purchaseFlavour(w http.ResponseWriter, r *http.Request) { // Get the flavourID value from the URL parameters params := mux.Vars(r) @@ -288,7 +293,7 @@ func (g *Gateway) purchaseFlavour(w http.ResponseWriter, r *http.Request) { klog.Infof("Flavour requested: %s", transaction.FlavourID) - if tools.CheckExpiration(transaction.StartTime, flags.EXPIRATION_TRANSACTION) { + if tools.CheckExpiration(transaction.StartTime, flags.ExpirationTransaction) { klog.Infof("Transaction %s expired", transaction.TransactionID) http.Error(w, "Error: transaction Timeout", http.StatusRequestTimeout) g.removeTransaction(transaction.TransactionID) @@ -343,7 +348,7 @@ func (g *Gateway) purchaseFlavour(w http.ResponseWriter, r *http.Request) { // Create a new contract klog.Infof("Creating a new contract...") - contract = *resourceforge.ForgeContract(*flavourSold, transaction, liqoCredentials) + contract = *resourceforge.ForgeContract(flavourSold, &transaction, liqoCredentials) err = g.client.Create(context.Background(), &contract) if err != nil { klog.Errorf("Error creating the Contract: %s", err) @@ -358,6 +363,21 @@ func (g *Gateway) purchaseFlavour(w http.ResponseWriter, r *http.Request) { // create a response purchase responsePurchase := resourceforge.ForgeResponsePurchaseObj(contractObject) + klog.Infof("Contract %v", *contractObject.Partition) + + // Create allocation + klog.Infof("Creating allocation...") + workerName := contract.Spec.Flavour.Spec.OptionalFields.WorkerID + allocation := *resourceforge.ForgeAllocation(&contract, "", workerName, nodecorev1alpha1.Remote, nodecorev1alpha1.Node) + err = g.client.Create(context.Background(), &allocation) + if err != nil { + klog.Errorf("Error creating the Allocation: %s", err) + http.Error(w, "Contract created but we ran into an error while allocating the resources", http.StatusInternalServerError) + return + } + + klog.Infof("Response purchase %v", *responsePurchase.Contract.Partition) + // Respond with the response 
purchase as JSON encodeResponse(w, responsePurchase) } diff --git a/pkg/rear-controller/gateway/services.go b/pkg/rear-controller/gateway/services.go index 4dbe5a0..e80aee2 100644 --- a/pkg/rear-controller/gateway/services.go +++ b/pkg/rear-controller/gateway/services.go @@ -16,6 +16,7 @@ package gateway import ( "bytes" + "context" "encoding/json" "fmt" "net/http" @@ -27,7 +28,7 @@ import ( "github.com/fluidos-project/node/pkg/utils/resourceforge" ) -func searchFlavourWithSelector(selector *models.Selector, addr string) (*nodecorev1alpha1.Flavour, error) { +func searchFlavourWithSelector(ctx context.Context, selector *models.Selector, addr string) (*nodecorev1alpha1.Flavour, error) { var flavour models.Flavour // Marshal the selector into JSON bytes @@ -37,13 +38,15 @@ func searchFlavourWithSelector(selector *models.Selector, addr string) (*nodecor } body := bytes.NewBuffer(selectorBytes) - url := fmt.Sprintf("http://%s%s", addr, LIST_FLAVOURS_BY_SELECTOR_PATH) + url := fmt.Sprintf("http://%s%s", addr, ListFlavoursBySelectorPath) - resp, err := makeRequest("POST", url, body) + resp, err := makeRequest(ctx, "POST", url, body) if err != nil { return nil, err } + defer resp.Body.Close() + // Check if the response status code is 200 (OK) if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("received non-OK response status code: %d", resp.StatusCode) @@ -54,20 +57,21 @@ func searchFlavourWithSelector(selector *models.Selector, addr string) (*nodecor return nil, err } - flavourCR := resourceforge.ForgeFlavourFromObj(flavour) + flavourCR := resourceforge.ForgeFlavourFromObj(&flavour) return flavourCR, nil } -func searchFlavour(addr string) (*nodecorev1alpha1.Flavour, error) { +func searchFlavour(ctx context.Context, addr string) (*nodecorev1alpha1.Flavour, error) { var flavour models.Flavour - url := fmt.Sprintf("http://%s%s", addr, LIST_FLAVOURS_PATH) + url := fmt.Sprintf("http://%s%s", addr, ListFlavoursPath) - resp, err := makeRequest("GET", url, nil) + resp, 
err := makeRequest(ctx, "GET", url, nil) if err != nil { return nil, err } + defer resp.Body.Close() // Check if the response status code is 200 (OK) if resp.StatusCode != http.StatusOK { @@ -79,20 +83,19 @@ func searchFlavour(addr string) (*nodecorev1alpha1.Flavour, error) { return nil, err } - flavourCR := resourceforge.ForgeFlavourFromObj(flavour) + flavourCR := resourceforge.ForgeFlavourFromObj(&flavour) return flavourCR, nil } -func makeRequest(method, url string, body *bytes.Buffer) (*http.Response, error) { - +func makeRequest(ctx context.Context, method, url string, body *bytes.Buffer) (*http.Response, error) { httpClient := &http.Client{} if body == nil { body = bytes.NewBuffer([]byte{}) } - req, err := http.NewRequest(method, url, body) + req, err := http.NewRequestWithContext(ctx, method, url, body) if err != nil { klog.Errorf("Error creating the request: %s", err) return nil, err diff --git a/pkg/rear-controller/gateway/utils.go b/pkg/rear-controller/gateway/utils.go index d468ca4..1f80e28 100644 --- a/pkg/rear-controller/gateway/utils.go +++ b/pkg/rear-controller/gateway/utils.go @@ -22,7 +22,7 @@ import ( "github.com/fluidos-project/node/pkg/utils/models" ) -// buildSelector builds a selector from a request body +// buildSelector builds a selector from a request body. func buildSelector(body []byte) (*models.Selector, error) { // Parse the request body into the APIRequest struct var selector models.Selector @@ -33,43 +33,42 @@ func buildSelector(body []byte) (*models.Selector, error) { return &selector, nil } -// getTransaction returns a transaction from the transactions map +// GetTransaction returns a transaction from the transactions map. 
func (g *Gateway) GetTransaction(transactionID string) (models.Transaction, error) { transaction, exists := g.Transactions[transactionID] if !exists { - return models.Transaction{}, fmt.Errorf("Transaction not found") + return models.Transaction{}, fmt.Errorf("transaction not found") } return transaction, nil } -// SearchTransaction returns a transaction from the transactions map -func (g *Gateway) SearchTransaction(buyerID string, flavourID string) (models.Transaction, bool) { +// SearchTransaction returns a transaction from the transactions map. +func (g *Gateway) SearchTransaction(buyerID, flavourID string) (*models.Transaction, bool) { for _, t := range g.Transactions { if t.Buyer.NodeID == buyerID && t.FlavourID == flavourID { - return t, true + return &t, true } } - return models.Transaction{}, false + return &models.Transaction{}, false } -// addNewTransacion add a new transaction to the transactions map -func (g *Gateway) addNewTransacion(transaction models.Transaction) { - g.Transactions[transaction.TransactionID] = transaction +// addNewTransacion add a new transaction to the transactions map. +func (g *Gateway) addNewTransacion(transaction *models.Transaction) { + g.Transactions[transaction.TransactionID] = *transaction } -// removeTransaction removes a transaction from the transactions map +// removeTransaction removes a transaction from the transactions map. func (g *Gateway) removeTransaction(transactionID string) { delete(g.Transactions, transactionID) } -// handleError handles errors by sending an error response +// handleError handles errors by sending an error response. func handleError(w http.ResponseWriter, err error, statusCode int) { http.Error(w, err.Error(), statusCode) } -// encodeResponse encodes the response as JSON and writes it to the response writer +// encodeResponse encodes the response as JSON and writes it to the response writer. 
func encodeResponse(w http.ResponseWriter, data interface{}) { - resp, err := json.Marshal(data) if err != nil { handleError(w, err, http.StatusInternalServerError) @@ -77,5 +76,5 @@ func encodeResponse(w http.ResponseWriter, data interface{}) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - w.Write(resp) + _, _ = w.Write(resp) } diff --git a/pkg/rear-controller/grpc/doc.go b/pkg/rear-controller/grpc/doc.go new file mode 100644 index 0000000..dd922c6 --- /dev/null +++ b/pkg/rear-controller/grpc/doc.go @@ -0,0 +1,16 @@ +// Copyright 2022-2023 FLUIDOS Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package grpc provides the gRPC server for Liqo Controller Manager. +package grpc diff --git a/pkg/rear-controller/grpc/liqo-resource-manager.go b/pkg/rear-controller/grpc/liqo-resource-manager.go index 4ae93f7..8e0dd35 100644 --- a/pkg/rear-controller/grpc/liqo-resource-manager.go +++ b/pkg/rear-controller/grpc/liqo-resource-manager.go @@ -30,33 +30,36 @@ import ( "github.com/fluidos-project/node/pkg/utils/flags" ) -type grpcServer struct { +// Server is the object that contains all the logical data stractures of the REAR gRPC Server. 
+type Server struct { Server *grpc.Server client client.Client - //contractHandler connector.ContractHandler stream resourcemonitors.ResourceReader_SubscribeServer resourcemonitors.ResourceReaderServer } -func NewGrpcServer(cl client.Client) *grpcServer { - return &grpcServer{ +// NewGrpcServer creates a new gRPC server. +func NewGrpcServer(cl client.Client) *Server { + return &Server{ Server: grpc.NewServer(), client: cl, } } -func (s *grpcServer) Start(ctx context.Context) error { - grpcUrl := ":" + flags.GRPC_PORT +// Start starts the gRPC server. +func (s *Server) Start(ctx context.Context) error { + _ = ctx + grpcURL := ":" + flags.GRPCPort // gRPC Configuration klog.Info("Configuring gRPC Server") - lis, err := net.Listen("tcp", grpcUrl) + lis, err := net.Listen("tcp", grpcURL) if err != nil { klog.Infof("gRPC failed to listen: %v", err) - return fmt.Errorf("gRPC failed to listen: %v", err) + return fmt.Errorf("gRPC failed to listen: %w", err) } - klog.Infof("gRPC Server Listening on %s", grpcUrl) + klog.Infof("gRPC Server Listening on %s", grpcURL) // gRPC Server start listener return s.Server.Serve(lis) } @@ -65,7 +68,9 @@ func (s *grpcServer) Start(ctx context.Context) error { s.contractHandler = ch } */ -func (s *grpcServer) ReadResources(ctx context.Context, req *resourcemonitors.ClusterIdentity) (*resourcemonitors.ResourceList, error) { +// ReadResources is the method that returns the resources assigned to a specific ClusterID. 
+func (s *Server) ReadResources(ctx context.Context, req *resourcemonitors.ClusterIdentity) (*resourcemonitors.ResourceList, error) { + _ = ctx readResponse := &resourcemonitors.ResourceList{Resources: map[string]*resource.Quantity{}} log.Printf("ReadResource for clusterID %s", req.ClusterID) @@ -77,14 +82,17 @@ func (s *grpcServer) ReadResources(ctx context.Context, req *resourcemonitors.Cl log.Printf("Retrieved resources for clusterID %s: %v", req.ClusterID, resources) for key, value := range *resources { + //nolint:gosec,exportloopref // No risk of injection readResponse.Resources[key.String()] = &value } return readResponse, nil } -func (s *grpcServer) Subscribe(req *resourcemonitors.Empty, srv resourcemonitors.ResourceReader_SubscribeServer) error { +// Subscribe is the method that subscribes a the Liqo controller manager to the gRPC server. +func (s *Server) Subscribe(req *resourcemonitors.Empty, srv resourcemonitors.ResourceReader_SubscribeServer) error { // Implement here your logic + _ = req s.stream = srv ctx := srv.Context() @@ -97,27 +105,31 @@ func (s *grpcServer) Subscribe(req *resourcemonitors.Empty, srv resourcemonitors case <-ctx.Done(): klog.Info("liqo controller manager disconnected") return nil + default: + klog.Info("liqo controller manager connected") } } - } -func (s *grpcServer) NotifyChange(ctx context.Context, req *resourcemonitors.ClusterIdentity) error { +// NotifyChange is the method that notifies a change to the Liqo controller manager. 
+func (s *Server) NotifyChange(ctx context.Context, req *resourcemonitors.ClusterIdentity) error { + _ = ctx // Implement here your logic if s.stream == nil { return fmt.Errorf("you must first subscribe a controller manager to notify a change") - } else { - _ = s.stream.Send(req) } + _ = s.stream.Send(req) return nil } -func (s *grpcServer) RemoveCluster(ctx context.Context, req *resourcemonitors.ClusterIdentity) (*resourcemonitors.Empty, error) { +// RemoveCluster is the method that removes a cluster from the gRPC server. +/* func (s *Server) RemoveCluster(ctx context.Context, req *resourcemonitors.ClusterIdentity) (*resourcemonitors.Empty, error) { // Implement here your logic return nil, fmt.Errorf("Not implemented") -} +} */ -func (s *grpcServer) GetOfferResourcesByClusterID(clusterID string) (*corev1.ResourceList, error) { +// GetOfferResourcesByClusterID is the method that returns the resources assigned to a specific ClusterID. +func (s *Server) GetOfferResourcesByClusterID(clusterID string) (*corev1.ResourceList, error) { log.Printf("Getting resources for cluster ID: %s", clusterID) resources, err := getContractResourcesByClusterID(s.client, clusterID) if err != nil { @@ -126,6 +138,7 @@ func (s *grpcServer) GetOfferResourcesByClusterID(clusterID string) (*corev1.Res return resources, nil } -func (s *grpcServer) UpdatePeeringOffer(clusterID string) { +// UpdatePeeringOffer is the method that updates the peering offer. 
+func (s *Server) UpdatePeeringOffer(clusterID string) { _ = s.NotifyChange(context.Background(), &resourcemonitors.ClusterIdentity{ClusterID: clusterID}) } diff --git a/pkg/rear-controller/grpc/service.go b/pkg/rear-controller/grpc/service.go index 72bfb60..73c28df 100644 --- a/pkg/rear-controller/grpc/service.go +++ b/pkg/rear-controller/grpc/service.go @@ -38,7 +38,7 @@ func getContractResourcesByClusterID(cl client.Client, clusterID string) (*corev if len(contracts.Items) == 0 { klog.Errorf("No contracts found for cluster %s", clusterID) - return nil, fmt.Errorf("No contracts found for cluster %s", clusterID) + return nil, fmt.Errorf("no contracts found for cluster %s", clusterID) } if len(contracts.Items) > 1 { @@ -53,13 +53,13 @@ func getContractResourcesByClusterID(cl client.Client, clusterID string) (*corev func multipleContractLogic(contracts []reservationv1alpha1.Contract) *corev1.ResourceList { resources := &corev1.ResourceList{} - for _, contract := range contracts { - resources = addResources(*resources, contract.Spec.Partition) + for i := range contracts { + resources = addResources(*resources, contracts[i].Spec.Partition) } return resources } -// This function adds the resources of a contract to the existing resourceList +// This function adds the resources of a contract to the existing resourceList. 
func addResources(resources corev1.ResourceList, partition *nodecorev1alpha1.Partition) *corev1.ResourceList { for key, value := range *mapQuantityToResourceList(partition) { if prevRes, ok := resources[key]; !ok { diff --git a/pkg/rear-manager/allocation_controller.go b/pkg/rear-manager/allocation_controller.go index 17fed20..bcfa1a8 100644 --- a/pkg/rear-manager/allocation_controller.go +++ b/pkg/rear-manager/allocation_controller.go @@ -17,24 +17,47 @@ package rearmanager import ( "context" + liqodiscovery "github.com/liqotech/liqo/apis/discovery/v1alpha1" + discovery "github.com/liqotech/liqo/pkg/discovery" + fcutils "github.com/liqotech/liqo/pkg/utils/foreignCluster" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" nodecorev1alpha1 "github.com/fluidos-project/node/apis/nodecore/v1alpha1" + "github.com/fluidos-project/node/pkg/utils/flags" + "github.com/fluidos-project/node/pkg/utils/getters" + "github.com/fluidos-project/node/pkg/utils/resourceforge" + "github.com/fluidos-project/node/pkg/utils/services" ) +// clusterRole // +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=allocations,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=allocations/status,verbs=get;update;patch // +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=allocations/finalizers,verbs=update +// +kubebuilder:rbac:groups=discovery.liqo.io,resources=foreignclusters,verbs=get;list;watch;create;update;patch;delete +// 
+kubebuilder:rbac:groups=discovery.liqo.io,resources=foreignclusters/status,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=discovery.liqo.io,resources=foreignclusters/finalizers,verbs=get;update;patch -// AllocationReconciler reconciles a Allocation object +// AllocationReconciler reconciles a Allocation object. type AllocationReconciler struct { client.Client Scheme *runtime.Scheme } +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.2/pkg/reconcile func (r *AllocationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx, "allocation", req.NamespacedName) ctx = ctrl.LoggerInto(ctx, log) @@ -48,11 +71,274 @@ func (r *AllocationReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } + if r.checkInitialStatus(&allocation) { + if err := r.updateAllocationStatus(ctx, &allocation); err != nil { + klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + + klog.Infof("Reconciling Allocation %s", req.NamespacedName) + + if allocation.Status.Status == nodecorev1alpha1.Error { + klog.Infof("Allocation %s is in error state", req.NamespacedName) + return ctrl.Result{}, nil + } + + if allocation.Spec.Type == nodecorev1alpha1.Node { + return r.handleNodeAllocation(ctx, req, &allocation) + } + + if allocation.Spec.Type == nodecorev1alpha1.VirtualNode && allocation.Spec.Destination == nodecorev1alpha1.Local { + return r.handleVirtualNodeAllocation(ctx, req, &allocation) + } + return ctrl.Result{}, nil } +func (r *AllocationReconciler) checkInitialStatus(allocation *nodecorev1alpha1.Allocation) bool { + if allocation.Status.Status != 
nodecorev1alpha1.Active && + allocation.Status.Status != nodecorev1alpha1.Reserved && + allocation.Status.Status != nodecorev1alpha1.Released && + allocation.Status.Status != nodecorev1alpha1.Inactive { + allocation.SetStatus(nodecorev1alpha1.Inactive, "Allocation has been set to Inactive") + return true + } + return false +} + +func (r *AllocationReconciler) handleNodeAllocation(ctx context.Context, + req ctrl.Request, allocation *nodecorev1alpha1.Allocation) (ctrl.Result, error) { + allocStatus := allocation.Status.Status + switch allocStatus { + case nodecorev1alpha1.Active: + // We need to check if the ForeignCluster is still ready + // If the ForeignCluster is not ready we need to set the Allocation to Released + klog.Infof("Allocation %s is active", req.NamespacedName) + return ctrl.Result{}, nil + case nodecorev1alpha1.Reserved: + if allocation.Spec.Destination == nodecorev1alpha1.Remote { + // We need to check the status of the ForeignCluster + // If the ForeignCluster is Ready the Allocation can be set to Active + // else we need to wait for the ForeignCluster to be Ready + klog.Infof("Allocation %s is reserved", req.NamespacedName) + fc, err := fcutils.GetForeignClusterByID(ctx, r.Client, allocation.Spec.RemoteClusterID) + // check if not found + if err != nil { + if apierrors.IsNotFound(err) { + klog.Infof("ForeignCluster %s not found", allocation.Spec.RemoteClusterID) + allocation.SetStatus(nodecorev1alpha1.Reserved, "ForeignCluster not found, peering not yet started") + } else { + klog.Errorf("Error when getting ForeignCluster %s: %v", allocation.Spec.RemoteClusterID, err) + allocation.SetStatus(nodecorev1alpha1.Error, "Error when getting ForeignCluster") + } + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + if fcutils.IsIncomingJoined(fc) && + 
fcutils.IsNetworkingEstablishedOrExternal(fc) && + fcutils.IsAuthenticated(fc) && + !fcutils.IsUnpeered(fc) { + klog.Infof("ForeignCluster %s is ready, incoming peering enstablished", allocation.Spec.RemoteClusterID) + allocation.SetStatus(nodecorev1alpha1.Active, "Incoming peering ready, Allocation is now Active") + } else { + klog.Infof("ForeignCluster %s is not ready yet", allocation.Spec.RemoteClusterID) + allocation.SetStatus(nodecorev1alpha1.Reserved, "Incoming peering not yet ready, Allocation is still Reserved") + } + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + // We can set the Allocation to Active + klog.Infof("Allocation will be used locally, we can put it in 'Active' State", req.NamespacedName) + allocation.SetStatus(nodecorev1alpha1.Active, "Allocation ready, will be used locally") + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + case nodecorev1alpha1.Released: + // The Allocation is released, + klog.Infof("Allocation %s is released", req.NamespacedName) + // We need to check if the ForeignCluster is again ready + return ctrl.Result{}, nil + case nodecorev1alpha1.Inactive: + // Alloction Type is Node, so we need to invalidate the Flavour + // and eventually create a new one detaching the right Partition from the old one + klog.Infof("Allocation %s is inactive", req.NamespacedName) + + flavour, err := services.GetFlavourByID(allocation.Spec.Flavour.Name, r.Client) + if err != nil { + klog.Errorf("Error when getting Flavour %s: %v", allocation.Spec.Flavour.Name, err) + allocation.SetStatus(nodecorev1alpha1.Error, "Error when getting Flavour") + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + 
klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + + flavour.Spec.OptionalFields.Availability = false + if err := r.Client.Update(ctx, flavour); err != nil { + klog.Errorf("Error when updating Flavour %s: %v", flavour.Name, err) + allocation.SetStatus(nodecorev1alpha1.Error, "Error when updating Flavour") + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + + if allocation.Spec.Partitioned { + // We need to create a new Flavour with the right Partition + flavourRes := allocation.Spec.Flavour.Spec.Characteristics + allocationRes := allocation.Spec.Resources + + newCharacteristics := computeCharacteristics(&flavourRes, &allocationRes) + newFlavour := resourceforge.ForgeFlavourFromRef(flavour, newCharacteristics) + + if err := r.Client.Create(ctx, newFlavour); err != nil { + klog.Errorf("Error when creating Flavour %s: %v", newFlavour.Name, err) + allocation.SetStatus(nodecorev1alpha1.Error, "Error when creating Flavour") + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + } + + allocation.SetStatus(nodecorev1alpha1.Reserved, "Resources reserved") + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + default: + klog.Infof("Allocation %s is in an unknown state", req.NamespacedName) + return ctrl.Result{}, nil + } +} + +func (r *AllocationReconciler) handleVirtualNodeAllocation(ctx context.Context, + req ctrl.Request, allocation *nodecorev1alpha1.Allocation) (ctrl.Result, error) { 
+ allocStatus := allocation.Status.Status + switch allocStatus { + case nodecorev1alpha1.Active: + // We need to check if the ForeignCluster is still ready + return ctrl.Result{}, nil + case nodecorev1alpha1.Reserved: + // We need to check the status of the ForeignCluster + // If the ForeignCluster is Ready the Allocation can be set to Active + // else we need to wait for the ForeignCluster to be Ready + klog.Infof("Allocation %s is reserved", req.NamespacedName) + fc, err := fcutils.GetForeignClusterByID(ctx, r.Client, allocation.Spec.RemoteClusterID) + // check if not found + if err != nil { + if apierrors.IsNotFound(err) { + klog.Infof("ForeignCluster %s not found", allocation.Spec.RemoteClusterID) + allocation.SetStatus(nodecorev1alpha1.Reserved, "ForeignCluster not found, peering not yet started") + } else { + klog.Errorf("Error when getting ForeignCluster %s: %v", allocation.Spec.RemoteClusterID, err) + allocation.SetStatus(nodecorev1alpha1.Error, "Error when getting ForeignCluster") + } + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + if fcutils.IsOutgoingJoined(fc) { + klog.Infof("ForeignCluster %s is ready, outgoing peering enstablished", allocation.Spec.RemoteClusterID) + allocation.SetStatus(nodecorev1alpha1.Active, "Outgoing peering ready, Allocation is now Active") + } else { + klog.Infof("ForeignCluster %s is not ready yet", allocation.Spec.RemoteClusterID) + allocation.SetStatus(nodecorev1alpha1.Reserved, "Outgoing peering not yet ready, Allocation is still Reserved") + } + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + case nodecorev1alpha1.Released: + // The Allocation is released, + // We need to check if the 
ForeignCluster is again ready + return ctrl.Result{}, nil + case nodecorev1alpha1.Inactive: + klog.Infof("Allocation %s is inactive", req.NamespacedName) + allocation.SetStatus(nodecorev1alpha1.Reserved, "Allocation is now Reserved") + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + default: + klog.Infof("Allocation %s is in an unknown state", req.NamespacedName) + return ctrl.Result{}, nil + } +} + +func computeCharacteristics(origin, part *nodecorev1alpha1.Characteristics) *nodecorev1alpha1.Characteristics { + newCPU := origin.Cpu.DeepCopy() + newMemory := origin.Memory.DeepCopy() + newStorage := origin.PersistentStorage.DeepCopy() + newGpu := origin.Gpu.DeepCopy() + newEphemeralStorage := origin.EphemeralStorage.DeepCopy() + newCPU.Sub(part.Cpu) + newMemory.Sub(part.Memory) + newStorage.Sub(part.PersistentStorage) + newGpu.Sub(part.Gpu) + newEphemeralStorage.Sub(part.EphemeralStorage) + return &nodecorev1alpha1.Characteristics{ + Architecture: origin.Architecture, + Cpu: newCPU, + Memory: newMemory, + Gpu: newGpu, + PersistentStorage: newStorage, + EphemeralStorage: newEphemeralStorage, + } +} + +func (r *AllocationReconciler) updateAllocationStatus(ctx context.Context, allocation *nodecorev1alpha1.Allocation) error { + return r.Status().Update(ctx, allocation) +} + +// SetupWithManager sets up the controller with the Manager. func (r *AllocationReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&nodecorev1alpha1.Allocation{}). + Watches(&liqodiscovery.ForeignCluster{}, handler.EnqueueRequestsFromMapFunc(r.fcToAllocation), builder.WithPredicates(foreignClusterPredicate())). 
Complete(r) } + +func foreignClusterPredicate() predicate.Predicate { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + return fcutils.IsOutgoingJoined(e.ObjectNew.(*liqodiscovery.ForeignCluster)) || + fcutils.IsIncomingJoined(e.ObjectNew.(*liqodiscovery.ForeignCluster)) + }, + } +} + +func (r *AllocationReconciler) fcToAllocation(_ context.Context, o client.Object) []reconcile.Request { + clusterID := o.GetLabels()[discovery.ClusterIDLabel] + allocationName := getters.GetAllocationNameByClusterIDSpec(context.Background(), r.Client, clusterID) + if allocationName == nil { + klog.Infof("Allocation with clusterID %s not found", clusterID) + return nil + } + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Name: *allocationName, + Namespace: flags.FluidoNamespace, + }, + }, + } +} diff --git a/pkg/rear-manager/allocation_wh.go b/pkg/rear-manager/allocation_wh.go index 50d35c8..6d8d385 100644 --- a/pkg/rear-manager/allocation_wh.go +++ b/pkg/rear-manager/allocation_wh.go @@ -17,7 +17,7 @@ package rearmanager import ( "context" - //admissionv1 "k8s.io/api/admission/v1" + // admissionv1 "k8s.io/api/admission/v1". "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -25,29 +25,50 @@ import ( nodecorev1alpha1 "github.com/fluidos-project/node/apis/nodecore/v1alpha1" ) +// Validator is the allocation webhook validator. type Validator struct { client client.Client decoder *admission.Decoder } -func NewValidator(client client.Client) *Validator { - return &Validator{client: client, decoder: admission.NewDecoder(runtime.NewScheme())} +// NewValidator creates a new allocation webhook validator. +func NewValidator(c client.Client) *Validator { + return &Validator{client: c, decoder: admission.NewDecoder(runtime.NewScheme())} } +// Handle manages the validation of the Allocation. 
+// +//nolint:gocritic // This function cannot be changed func (v *Validator) Handle(ctx context.Context, req admission.Request) admission.Response { - + _ = ctx + _ = req return admission.Allowed("allowed") } +// HandleCreate manages the validation of the Allocation creation. +// +//nolint:gocritic // This function cannot be changed func (v *Validator) HandleCreate(ctx context.Context, req admission.Request) admission.Response { + _ = ctx + _ = req return admission.Allowed("allowed") } +// HandleDelete manages the validation of the Allocation deletion. +// +//nolint:gocritic // This function cannot be changed func (v *Validator) HandleDelete(ctx context.Context, req admission.Request) admission.Response { + _ = ctx + _ = req return admission.Allowed("allowed") } +// HandleUpdate manages the validation of the Allocation update. +// +//nolint:gocritic // This function cannot be changed func (v *Validator) HandleUpdate(ctx context.Context, req admission.Request) admission.Response { + _ = ctx + _ = req return admission.Allowed("allowed") } @@ -56,6 +77,7 @@ func (v *Validator) HandleUpdate(ctx context.Context, req admission.Request) adm return nil } */ +// DecodeAllocation decodes the Allocation from the raw extension. func (v *Validator) DecodeAllocation(obj runtime.RawExtension) (pc *nodecorev1alpha1.Allocation, err error) { pc = &nodecorev1alpha1.Allocation{} err = v.decoder.DecodeRaw(obj, pc) diff --git a/pkg/rear-manager/doc.go b/pkg/rear-manager/doc.go index e68026a..0012d95 100644 --- a/pkg/rear-manager/doc.go +++ b/pkg/rear-manager/doc.go @@ -12,5 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// package rearmanager implements the utility functions for the rear manager controller +// Package rearmanager implements the utility functions for the rear manager controller package rearmanager diff --git a/pkg/rear-manager/solver_controller.go b/pkg/rear-manager/solver_controller.go index 1e4cfac..f966815 100644 --- a/pkg/rear-manager/solver_controller.go +++ b/pkg/rear-manager/solver_controller.go @@ -1,24 +1,23 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2022-2023 FLUIDOS Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package rearmanager import ( "context" + fcutils "github.com/liqotech/liqo/pkg/utils/foreignCluster" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -41,39 +40,50 @@ import ( "github.com/fluidos-project/node/pkg/utils/namings" "github.com/fluidos-project/node/pkg/utils/resourceforge" "github.com/fluidos-project/node/pkg/utils/tools" + virtualfabricmanager "github.com/fluidos-project/node/pkg/virtual-fabric-manager" ) -// SolverReconciler reconciles a Solver object +// SolverReconciler reconciles a Solver object. type SolverReconciler struct { client.Client Scheme *runtime.Scheme } // clusterRole -//+kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=solvers,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=solvers/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=solvers/finalizers,verbs=update -//+kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=flavours,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=flavours/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=flavours/finalizers,verbs=update -//+kubebuilder:rbac:groups=advertisement.fluidos.eu,resources=peeringcandidates,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=advertisement.fluidos.eu,resources=peeringcandidates/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=advertisement.fluidos.eu,resources=peeringcandidates/finalizers,verbs=update -//+kubebuilder:rbac:groups=advertisement.fluidos.eu,resources=discoveries,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=advertisement.fluidos.eu,resources=discoveries/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=advertisement.fluidos.eu,resources=discoveries/finalizers,verbs=update 
-//+kubebuilder:rbac:groups=reservation.fluidos.eu,resources=reservations,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=reservation.fluidos.eu,resources=reservations/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=reservation.fluidos.eu,resources=reservations/finalizers,verbs=update -//+kubebuilder:rbac:groups=reservation.fluidos.eu,resources=contracts,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=reservation.fluidos.eu,resources=contracts/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=reservation.fluidos.eu,resources=contracts/finalizers,verbs=update -//+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch - +// +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=solvers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=solvers/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=solvers/finalizers,verbs=update +// +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=flavours,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=flavours/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=flavours/finalizers,verbs=update +// +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=allocations,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=allocations/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=allocations/finalizers,verbs=update +// +kubebuilder:rbac:groups=advertisement.fluidos.eu,resources=peeringcandidates,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=advertisement.fluidos.eu,resources=peeringcandidates/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=advertisement.fluidos.eu,resources=peeringcandidates/finalizers,verbs=update +// 
+kubebuilder:rbac:groups=advertisement.fluidos.eu,resources=discoveries,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=advertisement.fluidos.eu,resources=discoveries/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=advertisement.fluidos.eu,resources=discoveries/finalizers,verbs=update +// +kubebuilder:rbac:groups=reservation.fluidos.eu,resources=reservations,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=reservation.fluidos.eu,resources=reservations/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=reservation.fluidos.eu,resources=reservations/finalizers,verbs=update +// +kubebuilder:rbac:groups=reservation.fluidos.eu,resources=contracts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=reservation.fluidos.eu,resources=contracts/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=reservation.fluidos.eu,resources=contracts/finalizers,verbs=update +// +kubebuilder:rbac:groups=discovery.liqo.io,resources=foreignclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=discovery.liqo.io,resources=foreignclusters/status,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=discovery.liqo.io,resources=foreignclusters/finalizers,verbs=get;update;patch +// +kubebuilder:rbac:groups=core,resources=*,verbs=get;list;watch;create;update;patch;delete + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.2/pkg/reconcile func (r *SolverReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx, "solver", req.NamespacedName) ctx = ctrl.LoggerInto(ctx, log) - //_ = log.FromContext(ctx) var solver nodecorev1alpha1.Solver if err := r.Get(ctx, req.NamespacedName, &solver); client.IgnoreNotFound(err) != nil { @@ -84,14 +94,7 @@ func (r *SolverReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{}, nil } - if solver.Status.SolverPhase.Phase != nodecorev1alpha1.PhaseSolved && - solver.Status.SolverPhase.Phase != nodecorev1alpha1.PhaseTimeout && - solver.Status.SolverPhase.Phase != nodecorev1alpha1.PhaseFailed && - solver.Status.SolverPhase.Phase != nodecorev1alpha1.PhaseRunning && - solver.Status.SolverPhase.Phase != nodecorev1alpha1.PhaseIdle { - - solver.SetPhase(nodecorev1alpha1.PhaseIdle, "Solver initialized") - + if checkInitialStatus(&solver) { if err := r.updateSolverStatus(ctx, &solver); err != nil { klog.Errorf("Error when updating Solver %s status before reconcile: %s", req.NamespacedName, err) return ctrl.Result{}, err @@ -112,226 +115,356 @@ func (r *SolverReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr } if solver.Spec.FindCandidate { - switch findCandidateStatus { - case nodecorev1alpha1.PhaseIdle: - // Search a matching PeeringCandidate if available - pc, err := r.searchPeeringCandidates(ctx, &solver) - if client.IgnoreNotFound(err) != nil { - klog.Errorf("Error when searching and booking a candidate for Solver %s: %s", req.NamespacedName.Name, err) - return ctrl.Result{}, err - } + if findCandidateStatus != nodecorev1alpha1.PhaseSolved { + return r.handleFindCandidate(ctx, req, &solver) + } + klog.Infof("Solver %s has reserved and purchased the resources", req.NamespacedName.Name) + } else { + klog.Infof("Solver %s Solved : No need to 
find a candidate", req.NamespacedName.Name) + solver.SetPhase(nodecorev1alpha1.PhaseSolved, "No need to find a candidate") + err := r.updateSolverStatus(ctx, &solver) + if err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } - // If some PeeringCandidates are available, select one and book it - if len(pc) > 0 { - // If some PeeringCandidates are available, select one and book it - selectedPc, err := r.selectAndBookPeeringCandidate(ctx, &solver, pc) - if err != nil { - klog.Errorf("Error when selecting and booking a candidate for Solver %s: %s", req.NamespacedName.Name, err) - return ctrl.Result{}, err - } - klog.Infof("Solver %s has selected and booked candidate %s", req.NamespacedName.Name, selectedPc.Name) - solver.SetFindCandidateStatus(nodecorev1alpha1.PhaseSolved) - solver.SetPhase(nodecorev1alpha1.PhaseRunning, "Solver has found a candidate") - if err := r.updateSolverStatus(ctx, &solver); err != nil { - klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err - } - return ctrl.Result{}, nil - } + if solver.Spec.ReserveAndBuy { + if findCandidateStatus == nodecorev1alpha1.PhaseSolved && reserveAndBuyStatus != nodecorev1alpha1.PhaseSolved { + return r.handleReserveAndBuy(ctx, req, &solver) + } + } else { + klog.Infof("Solver %s Solved : No need to reserve and buy the resources", req.NamespacedName.Name) + solver.SetPhase(nodecorev1alpha1.PhaseSolved, "No need to reserve and buy the resources") + err := r.updateSolverStatus(ctx, &solver) + if err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } - // If no PeeringCandidate is available, Create a Discovery - klog.Infof("Solver %s has not found any candidate. 
Trying a Discovery", req.NamespacedName.Name) - solver.SetFindCandidateStatus(nodecorev1alpha1.PhaseRunning) - solver.SetPhase(nodecorev1alpha1.PhaseRunning, "Solver is trying a Discovery") + if solver.Spec.EnstablishPeering { + if reserveAndBuyStatus == nodecorev1alpha1.PhaseSolved && findCandidateStatus == nodecorev1alpha1.PhaseSolved { + return r.handlePeering(ctx, req, &solver) + } + } else { + klog.Infof("Solver %s Solved : No need to enstablish a peering", req.NamespacedName.Name) + solver.SetPhase(nodecorev1alpha1.PhaseSolved, "No need to enstablish a peering") + err := r.updateSolverStatus(ctx, &solver) + if err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } - // Update the Solver status - if err := r.updateSolverStatus(ctx, &solver); err != nil { - klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err - } - return ctrl.Result{}, nil - case nodecorev1alpha1.PhaseRunning: - // Check solver expiration - if tools.CheckExpiration(solver.Status.SolverPhase.LastChangeTime, flags.EXPIRATION_PHASE_RUNNING) { - klog.Infof("Solver %s has expired", req.NamespacedName.Name) - - solver.SetPhase(nodecorev1alpha1.PhaseTimeout, "Solver has expired before finding a candidate") - - if err := r.updateSolverStatus(ctx, &solver); err != nil { - klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err - } - return ctrl.Result{}, nil - } + return ctrl.Result{}, nil +} - klog.Infof("Getting or creating Discovery for Solver %s", req.NamespacedName.Name) - discovery, err := r.createOrGetDiscovery(ctx, &solver) - if err != nil { - klog.Errorf("Error when creating or getting Discovery for Solver %s: %s", req.NamespacedName.Name, err) - return ctrl.Result{}, err - } +func checkInitialStatus(solver *nodecorev1alpha1.Solver) bool { + if solver.Status.SolverPhase.Phase != 
nodecorev1alpha1.PhaseSolved && + solver.Status.SolverPhase.Phase != nodecorev1alpha1.PhaseTimeout && + solver.Status.SolverPhase.Phase != nodecorev1alpha1.PhaseFailed && + solver.Status.SolverPhase.Phase != nodecorev1alpha1.PhaseRunning && + solver.Status.SolverPhase.Phase != nodecorev1alpha1.PhaseIdle { + solver.SetPhase(nodecorev1alpha1.PhaseIdle, "Solver initialized") + return true + } + return false +} - common.DiscoveryStatusCheck(&solver, discovery) +func (r *SolverReconciler) handleFindCandidate(ctx context.Context, req ctrl.Request, solver *nodecorev1alpha1.Solver) (ctrl.Result, error) { + findCandidateStatus := solver.Status.FindCandidate + switch findCandidateStatus { + case nodecorev1alpha1.PhaseIdle: + // Search a matching PeeringCandidate if available + pc, err := r.searchPeeringCandidates(ctx, solver) + if client.IgnoreNotFound(err) != nil { + klog.Errorf("Error when searching and booking a candidate for Solver %s: %s", req.NamespacedName.Name, err) + return ctrl.Result{}, err + } - if err := r.updateSolverStatus(ctx, &solver); err != nil { - klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + // If some PeeringCandidates are available, select one and book it + if len(pc) > 0 { + // If some PeeringCandidates are available, select one and book it + selectedPc, err := r.selectAndBookPeeringCandidate(ctx, solver, pc) + if err != nil { + klog.Errorf("Error when selecting and booking a candidate for Solver %s: %s", req.NamespacedName.Name, err) return ctrl.Result{}, err } - return ctrl.Result{}, nil - case nodecorev1alpha1.PhaseFailed: - klog.Infof("Solver %s has not found any candidate", req.NamespacedName.Name) - solver.SetPhase(nodecorev1alpha1.PhaseFailed, "Solver has not found any candidate") - if err := r.updateSolverStatus(ctx, &solver); err != nil { + klog.Infof("Solver %s has selected and booked candidate %s", req.NamespacedName.Name, selectedPc.Name) + solver.SetFindCandidateStatus(nodecorev1alpha1.PhaseSolved) + 
solver.SetPhase(nodecorev1alpha1.PhaseRunning, "Solver has found a candidate") + if err := r.updateSolverStatus(ctx, solver); err != nil { klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) return ctrl.Result{}, err } return ctrl.Result{}, nil - case nodecorev1alpha1.PhaseSolved: - klog.Infof("Solver %s has found a candidate", req.NamespacedName.Name) - default: - solver.SetFindCandidateStatus(nodecorev1alpha1.PhaseIdle) - solver.SetPhase(nodecorev1alpha1.PhaseRunning, "Solver is running") - // Update the Solver status - if err := r.updateSolverStatus(ctx, &solver); err != nil { + } + + // If no PeeringCandidate is available, Create a Discovery + klog.Infof("Solver %s has not found any candidate. Trying a Discovery", req.NamespacedName.Name) + solver.SetFindCandidateStatus(nodecorev1alpha1.PhaseRunning) + solver.SetPhase(nodecorev1alpha1.PhaseRunning, "Solver is trying a Discovery") + + // Update the Solver status + if err := r.updateSolverStatus(ctx, solver); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + case nodecorev1alpha1.PhaseRunning: + // Check solver expiration + if tools.CheckExpiration(solver.Status.SolverPhase.LastChangeTime, flags.ExpirationPhaseRunning) { + klog.Infof("Solver %s has expired", req.NamespacedName.Name) + + solver.SetPhase(nodecorev1alpha1.PhaseTimeout, "Solver has expired before finding a candidate") + + if err := r.updateSolverStatus(ctx, solver); err != nil { klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) return ctrl.Result{}, err } return ctrl.Result{}, nil } - } else { - klog.Infof("Solver %s Solved : No need to find a candidate", req.NamespacedName.Name) - solver.SetPhase(nodecorev1alpha1.PhaseSolved, "No need to find a candidate") - err := r.updateSolverStatus(ctx, &solver) + + klog.Infof("Getting or creating Discovery for Solver %s", req.NamespacedName.Name) 
+ discovery, err := r.createOrGetDiscovery(ctx, solver) if err != nil { + klog.Errorf("Error when creating or getting Discovery for Solver %s: %s", req.NamespacedName.Name, err) + return ctrl.Result{}, err + } + + common.DiscoveryStatusCheck(solver, discovery) + + if err := r.updateSolverStatus(ctx, solver); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + case nodecorev1alpha1.PhaseFailed: + klog.Infof("Solver %s has not found any candidate", req.NamespacedName.Name) + solver.SetPhase(nodecorev1alpha1.PhaseFailed, "Solver has not found any candidate") + if err := r.updateSolverStatus(ctx, solver); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + default: + solver.SetFindCandidateStatus(nodecorev1alpha1.PhaseIdle) + solver.SetPhase(nodecorev1alpha1.PhaseRunning, "Solver is running") + // Update the Solver status + if err := r.updateSolverStatus(ctx, solver); err != nil { klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) return ctrl.Result{}, err } return ctrl.Result{}, nil } +} - if solver.Spec.ReserveAndBuy { - if findCandidateStatus == nodecorev1alpha1.PhaseSolved { - klog.Infof("ReserveAndBuy %s", reserveAndBuyStatus) - switch reserveAndBuyStatus { - case nodecorev1alpha1.PhaseIdle: - var partition *nodecorev1alpha1.Partition - klog.Infof("Creating the Reservation %s", req.NamespacedName.Name) - // Create the Reservation - var pc advertisementv1alpha1.PeeringCandidate - pcNamespaceName := types.NamespacedName{Name: solver.Status.PeeringCandidate.Name, Namespace: solver.Status.PeeringCandidate.Namespace} - - // Get the PeeringCandidate from the Solver - if err := r.Get(ctx, pcNamespaceName, &pc); err != nil { - klog.Errorf("Error when getting PeeringCandidate %s: %s", solver.Status.PeeringCandidate.Name, err) - return 
ctrl.Result{}, err - } - - if solver.Spec.Selector == nil { - // Forge the Partition - partition = resourceforge.ForgePartition(solver.Spec.Selector) - } - - // Get the NodeIdentity - nodeIdentity := getters.GetNodeIdentity(ctx, r.Client) - - // Forge the Reservation - reservation := resourceforge.ForgeReservation(&pc, partition, *nodeIdentity) - if err := r.Client.Create(ctx, reservation); err != nil { - klog.Errorf("Error when creating Reservation for Solver %s: %s", solver.Name, err) - return ctrl.Result{}, err - } - - klog.Infof("Reservation %s created", reservation.Name) - - solver.SetReserveAndBuyStatus(nodecorev1alpha1.PhaseRunning) - solver.SetPhase(nodecorev1alpha1.PhaseRunning, "Reservation created") - if err := r.updateSolverStatus(ctx, &solver); err != nil { - klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err - } - return ctrl.Result{}, nil - case nodecorev1alpha1.PhaseRunning: - // Check solver expiration - if tools.CheckExpiration(solver.Status.SolverPhase.LastChangeTime, flags.EXPIRATION_PHASE_RUNNING) { - klog.Infof("Solver %s has expired", req.NamespacedName.Name) - solver.SetPhase(nodecorev1alpha1.PhaseTimeout, "Solver has expired before reserving the resources") - - if err := r.updateSolverStatus(ctx, &solver); err != nil { - klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err - } - return ctrl.Result{}, nil - } - - reservation := &reservationv1alpha1.Reservation{} - resNamespaceName := types.NamespacedName{Name: namings.ForgeReservationName(solver.Name), Namespace: flags.FLUIDOS_NAMESPACE} - - // Get the Reservation - err := r.Get(ctx, resNamespaceName, reservation) - if client.IgnoreNotFound(err) != nil { - klog.Errorf("Error when getting Reservation for Solver %s: %s", solver.Name, err) - return ctrl.Result{}, err - } - - common.ReservationStatusCheck(&solver, reservation) - - if err := r.updateSolverStatus(ctx, &solver); err != 
nil { - klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err - } - return ctrl.Result{}, nil - - case nodecorev1alpha1.PhaseFailed: - klog.Infof("Solver %s has failed to reserve and buy the resources", req.NamespacedName.Name) - solver.SetPhase(nodecorev1alpha1.PhaseFailed, "Solver has failed to reserve the resources") - if err := r.updateSolverStatus(ctx, &solver); err != nil { - klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err - } - return ctrl.Result{}, nil - case nodecorev1alpha1.PhaseSolved: - klog.Infof("Solver %s has reserved and purchased the resources", req.NamespacedName.Name) - default: - solver.SetReserveAndBuyStatus(nodecorev1alpha1.PhaseIdle) - // Update the Solver status - if err := r.updateSolverStatus(ctx, &solver); err != nil { - klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err - } - return ctrl.Result{}, nil +func (r *SolverReconciler) handleReserveAndBuy(ctx context.Context, req ctrl.Request, solver *nodecorev1alpha1.Solver) (ctrl.Result, error) { + reserveAndBuyStatus := solver.Status.ReserveAndBuy + switch reserveAndBuyStatus { + case nodecorev1alpha1.PhaseIdle: + var partition *nodecorev1alpha1.Partition + klog.Infof("Creating the Reservation %s", req.NamespacedName.Name) + // Create the Reservation + var pc advertisementv1alpha1.PeeringCandidate + pcNamespaceName := types.NamespacedName{Name: solver.Status.PeeringCandidate.Name, Namespace: solver.Status.PeeringCandidate.Namespace} + + // Get the PeeringCandidate from the Solver + if err := r.Get(ctx, pcNamespaceName, &pc); err != nil { + klog.Errorf("Error when getting PeeringCandidate %s: %s", solver.Status.PeeringCandidate.Name, err) + return ctrl.Result{}, err + } + + if solver.Spec.Selector != nil { + // Forge the Partition + partition = resourceforge.ForgePartition(solver.Spec.Selector) + } + + // Get the 
NodeIdentity + nodeIdentity := getters.GetNodeIdentity(ctx, r.Client) + + // Forge the Reservation + reservation := resourceforge.ForgeReservation(&pc, partition, *nodeIdentity) + if err := r.Client.Create(ctx, reservation); err != nil { + klog.Errorf("Error when creating Reservation for Solver %s: %s", solver.Name, err) + return ctrl.Result{}, err + } + + klog.Infof("Reservation %s created", reservation.Name) + + solver.SetReserveAndBuyStatus(nodecorev1alpha1.PhaseRunning) + solver.SetPhase(nodecorev1alpha1.PhaseRunning, "Reservation created") + if err := r.updateSolverStatus(ctx, solver); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + case nodecorev1alpha1.PhaseRunning: + // Check solver expiration + if tools.CheckExpiration(solver.Status.SolverPhase.LastChangeTime, flags.ExpirationPhaseRunning) { + klog.Infof("Solver %s has expired", req.NamespacedName.Name) + solver.SetPhase(nodecorev1alpha1.PhaseTimeout, "Solver has expired before reserving the resources") + + if err := r.updateSolverStatus(ctx, solver); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err } + return ctrl.Result{}, nil } - } else { - klog.Infof("Solver %s Solved : No need to reserve and buy the resources", req.NamespacedName.Name) - solver.SetPhase(nodecorev1alpha1.PhaseSolved, "No need to reserve and buy the resources") - err := r.updateSolverStatus(ctx, &solver) + + reservation := &reservationv1alpha1.Reservation{} + resNamespaceName := types.NamespacedName{Name: namings.ForgeReservationName(solver.Name), Namespace: flags.FluidoNamespace} + + // Get the Reservation + err := r.Get(ctx, resNamespaceName, reservation) + if client.IgnoreNotFound(err) != nil { + klog.Errorf("Error when getting Reservation for Solver %s: %s", solver.Name, err) + return ctrl.Result{}, err + } + + common.ReservationStatusCheck(solver, 
reservation) + + if err := r.updateSolverStatus(ctx, solver); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + case nodecorev1alpha1.PhaseAllocating: + klog.Infof("Solver %s has reserved and purchased the resources, creating the Allocation", req.NamespacedName.Name) + // Create the Allocation + contractNamespaceName := types.NamespacedName{Name: solver.Status.Contract.Name, Namespace: solver.Status.Contract.Namespace} + contract := reservationv1alpha1.Contract{} + err := r.Client.Get(ctx, contractNamespaceName, &contract) if err != nil { + klog.Errorf("Error when getting Contract for Solver %s: %s", solver.Name, err) + return ctrl.Result{}, err + } + vnName := namings.ForgeVirtualNodeName(contract.Spec.SellerCredentials.ClusterName) + allocation := resourceforge.ForgeAllocation(&contract, solver.Name, vnName, nodecorev1alpha1.Local, nodecorev1alpha1.VirtualNode) + if err := r.Client.Create(ctx, allocation); err != nil { + klog.Errorf("Error when creating Allocation for Solver %s: %s", solver.Name, err) + return ctrl.Result{}, err + } + klog.Infof("Allocation %s created", allocation.Name) + solver.Status.Allocation = nodecorev1alpha1.GenericRef{ + Name: allocation.Name, + Namespace: allocation.Namespace, + } + solver.SetReserveAndBuyStatus(nodecorev1alpha1.PhaseSolved) + solver.SetPhase(nodecorev1alpha1.PhaseRunning, "Allocation created") + solver.Status.Credentials = contract.Spec.SellerCredentials + if err := r.updateSolverStatus(ctx, solver); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + case nodecorev1alpha1.PhaseFailed: + klog.Infof("Solver %s has failed to reserve and buy the resources", req.NamespacedName.Name) + solver.SetPhase(nodecorev1alpha1.PhaseFailed, "Solver has failed to reserve the resources") + if err := r.updateSolverStatus(ctx, 
solver); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + default: + solver.SetReserveAndBuyStatus(nodecorev1alpha1.PhaseIdle) + // Update the Solver status + if err := r.updateSolverStatus(ctx, solver); err != nil { klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) return ctrl.Result{}, err } return ctrl.Result{}, nil } +} - if solver.Spec.EnstablishPeering { - if reserveAndBuyStatus == nodecorev1alpha1.PhaseSolved { - // Peering phase to be implemented - klog.Infof("Solver %s Solved : Peering phase to be implemented", req.NamespacedName.Name) +func (r *SolverReconciler) handlePeering(ctx context.Context, req ctrl.Request, solver *nodecorev1alpha1.Solver) (ctrl.Result, error) { + peeringStatus := solver.Status.Peering + switch peeringStatus { + case nodecorev1alpha1.PhaseIdle: + klog.Infof("Solver %s is trying to enstablish a peering", req.NamespacedName.Name) + credentials := solver.Status.Credentials + _, err := virtualfabricmanager.PeerWithCluster(ctx, r.Client, credentials.ClusterID, + credentials.ClusterName, credentials.Endpoint, credentials.Token) + if err != nil { + klog.Errorf("Error when peering with cluster %s: %s", credentials.ClusterName, err) + solver.SetPeeringStatus(nodecorev1alpha1.PhaseFailed) + if err := r.updateSolverStatus(ctx, solver); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, err } - } else { - klog.Infof("Solver %s Solved : No need to enstablish a peering", req.NamespacedName.Name) - solver.SetPhase(nodecorev1alpha1.PhaseSolved, "No need to enstablish a peering") - err := r.updateSolverStatus(ctx, &solver) + klog.Infof("Solver %s has started the peering with cluster %s", req.NamespacedName.Name, credentials.ClusterName) + solver.SetPeeringStatus(nodecorev1alpha1.PhaseRunning) + 
solver.SetPhase(nodecorev1alpha1.PhaseRunning, "Solver is peering with cluster "+credentials.ClusterName) + if err := r.updateSolverStatus(ctx, solver); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + case nodecorev1alpha1.PhaseRunning: + klog.Info("Checking peering status") + fc, err := fcutils.GetForeignClusterByID(ctx, r.Client, solver.Status.Credentials.ClusterID) if err != nil { + klog.Errorf("Error when getting ForeignCluster for Solver %s: %s", solver.Name, err) + solver.SetPeeringStatus(nodecorev1alpha1.PhaseFailed) + if err := r.updateSolverStatus(ctx, solver); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, err + } + if fcutils.IsOutgoingJoined(fc) && + fcutils.IsAuthenticated(fc) && + fcutils.IsNetworkingEstablishedOrExternal(fc) && + !fcutils.IsUnpeered(fc) { + klog.Infof("Solver %s has enstablished a peering", req.NamespacedName.Name) + solver.SetPeeringStatus(nodecorev1alpha1.PhaseSolved) + solver.SetPhase(nodecorev1alpha1.PhaseSolved, "Solver has enstablished a peering") + if err := r.updateSolverStatus(ctx, solver); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + klog.Infof("Solver %s is still peering", req.NamespacedName.Name) + // Check solver expiration + if tools.CheckExpiration(solver.Status.SolverPhase.LastChangeTime, flags.ExpirationPhaseRunning) { + klog.Infof("Solver %s has expired", req.NamespacedName.Name) + solver.SetPhase(nodecorev1alpha1.PhaseTimeout, "Solver has expired before reserving the resources") + solver.SetPeeringStatus(nodecorev1alpha1.PhaseFailed) + if err := r.updateSolverStatus(ctx, solver); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return 
ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + return ctrl.Result{}, nil + case nodecorev1alpha1.PhaseFailed: + klog.Infof("Solver %s has failed to enstablish a peering", req.NamespacedName.Name) + solver.SetPhase(nodecorev1alpha1.PhaseFailed, "Solver has failed to enstablish a peering") + if err := r.updateSolverStatus(ctx, solver); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + default: + solver.SetPeeringStatus(nodecorev1alpha1.PhaseIdle) + // Update the Solver status + if err := r.updateSolverStatus(ctx, solver); err != nil { klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) return ctrl.Result{}, err } return ctrl.Result{}, nil } - - return ctrl.Result{}, nil } -func (r *SolverReconciler) searchPeeringCandidates(ctx context.Context, solver *nodecorev1alpha1.Solver) ([]advertisementv1alpha1.PeeringCandidate, error) { +func (r *SolverReconciler) searchPeeringCandidates(ctx context.Context, + solver *nodecorev1alpha1.Solver) ([]advertisementv1alpha1.PeeringCandidate, error) { pc := advertisementv1alpha1.PeeringCandidateList{} result := []advertisementv1alpha1.PeeringCandidate{} @@ -352,14 +485,16 @@ func (r *SolverReconciler) searchPeeringCandidates(ctx context.Context, solver * // Filter the reserved PeeringCandidates filtered := []advertisementv1alpha1.PeeringCandidate{} - for _, p := range pc.Items { + for i := range pc.Items { + p := pc.Items[i] if !p.Spec.Reserved && p.Spec.SolverID == "" { filtered = append(filtered, p) } } // Filter the list of PeeringCandidates based on the Flavour Selector - for _, p := range filtered { + for i := range filtered { + p := filtered[i] res := common.FilterPeeringCandidate(selector, &p) if res { result = append(result, p) @@ -369,13 +504,15 @@ func (r *SolverReconciler) searchPeeringCandidates(ctx context.Context, solver * return result, nil } -// TODO: unify this logic with 
the one of the discovery controller -func (r *SolverReconciler) selectAndBookPeeringCandidate(ctx context.Context, solver *nodecorev1alpha1.Solver, pcList []advertisementv1alpha1.PeeringCandidate) (*advertisementv1alpha1.PeeringCandidate, error) { +// TODO: unify this logic with the one of the discovery controller. +func (r *SolverReconciler) selectAndBookPeeringCandidate(ctx context.Context, + solver *nodecorev1alpha1.Solver, pcList []advertisementv1alpha1.PeeringCandidate) (*advertisementv1alpha1.PeeringCandidate, error) { // Select the first PeeringCandidate var selected *advertisementv1alpha1.PeeringCandidate - for _, pc := range pcList { + for i := range pcList { + pc := pcList[i] // Select the first PeeringCandidate that is not reserved if !pc.Spec.Reserved && pc.Spec.SolverID == "" { // Book the PeeringCandidate @@ -417,12 +554,13 @@ func (r *SolverReconciler) createOrGetDiscovery(ctx context.Context, solver *nod discovery := &advertisementv1alpha1.Discovery{} // Get the Discovery - if err := r.Get(ctx, types.NamespacedName{Name: namings.ForgeDiscoveryName(solver.Name), Namespace: flags.FLUIDOS_NAMESPACE}, discovery); client.IgnoreNotFound(err) != nil { + if err := r.Get(ctx, types.NamespacedName{Name: namings.ForgeDiscoveryName(solver.Name), + Namespace: flags.FluidoNamespace}, discovery); client.IgnoreNotFound(err) != nil { klog.Errorf("Error when getting Discovery for Solver %s: %s", solver.Name, err) return nil, err } else if err != nil { // Create the Discovery - discovery := resourceforge.ForgeDiscovery(solver.Spec.Selector, solver.Name) + discovery = resourceforge.ForgeDiscovery(solver.Spec.Selector, solver.Name) if err := r.Client.Create(ctx, discovery); err != nil { klog.Errorf("Error when creating Discovery for Solver %s: %s", solver.Name, err) return nil, err @@ -445,6 +583,9 @@ func (r *SolverReconciler) SetupWithManager(mgr ctrl.Manager) error { Watches(&reservationv1alpha1.Reservation{}, handler.EnqueueRequestsFromMapFunc( 
r.reservationToSolver, ), builder.WithPredicates(reservationPredicate())). + Watches(&nodecorev1alpha1.Allocation{}, handler.EnqueueRequestsFromMapFunc( + r.allocationToSolver, + ), builder.WithPredicates(allocationPredicate())). Complete(r) } @@ -466,25 +607,48 @@ func reservationPredicate() predicate.Predicate { } } -func (r *SolverReconciler) discoveryToSolver(ctx context.Context, o client.Object) []reconcile.Request { +func allocationPredicate() predicate.Predicate { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + return (e.ObjectNew.(*nodecorev1alpha1.Allocation).Status.Status == nodecorev1alpha1.Active || + e.ObjectNew.(*nodecorev1alpha1.Allocation).Status.Status == nodecorev1alpha1.Released) && + e.ObjectNew.(*nodecorev1alpha1.Allocation).Spec.Type == nodecorev1alpha1.VirtualNode && + e.ObjectNew.(*nodecorev1alpha1.Allocation).Spec.IntentID != "" + }, + } +} + +func (r *SolverReconciler) discoveryToSolver(_ context.Context, o client.Object) []reconcile.Request { solverName := namings.RetrieveSolverNameFromDiscovery(o.GetName()) return []reconcile.Request{ { NamespacedName: types.NamespacedName{ Name: solverName, - Namespace: flags.FLUIDOS_NAMESPACE, + Namespace: flags.FluidoNamespace, }, }, } } -func (r *SolverReconciler) reservationToSolver(ctx context.Context, o client.Object) []reconcile.Request { +func (r *SolverReconciler) reservationToSolver(_ context.Context, o client.Object) []reconcile.Request { solverName := namings.RetrieveSolverNameFromReservation(o.GetName()) return []reconcile.Request{ { NamespacedName: types.NamespacedName{ Name: solverName, - Namespace: flags.FLUIDOS_NAMESPACE, + Namespace: flags.FluidoNamespace, + }, + }, + } +} + +func (r *SolverReconciler) allocationToSolver(_ context.Context, o client.Object) []reconcile.Request { + solverName := o.(*nodecorev1alpha1.Allocation).Spec.IntentID + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Name: solverName, + Namespace: 
flags.FluidoNamespace, }, }, } diff --git a/pkg/utils/common/common.go b/pkg/utils/common/common.go index d10f540..58e3439 100644 --- a/pkg/utils/common/common.go +++ b/pkg/utils/common/common.go @@ -27,119 +27,132 @@ import ( "github.com/fluidos-project/node/pkg/utils/parseutil" ) -// FilterFlavoursBySelector returns the Flavour CRs in the cluster that match the selector +// FilterFlavoursBySelector returns the Flavour CRs in the cluster that match the selector. func FilterFlavoursBySelector(flavours []nodecorev1alpha1.Flavour, selector *models.Selector) ([]nodecorev1alpha1.Flavour, error) { var flavoursSelected []nodecorev1alpha1.Flavour // Get the Flavours that match the selector - for _, f := range flavours { + for i := range flavours { + f := flavours[i] if string(f.Spec.Type) == selector.FlavourType { // filter function - if FilterFlavour(selector, f) { + if FilterFlavour(selector, &f) { flavoursSelected = append(flavoursSelected, f) } - } } return flavoursSelected, nil } -// filterFlavour filters the Flavour CRs in the cluster that match the selector -func FilterFlavour(selector *models.Selector, f nodecorev1alpha1.Flavour) bool { - +// FilterFlavour filters the Flavour CRs in the cluster that match the selector. 
+func FilterFlavour(selector *models.Selector, f *nodecorev1alpha1.Flavour) bool { if f.Spec.Characteristics.Architecture != selector.Architecture { klog.Infof("Flavour %s has different architecture: %s - Selector: %s", f.Name, f.Spec.Characteristics.Architecture, selector.Architecture) return false } if selector.MatchSelector != nil { - if selector.MatchSelector.Cpu.CmpInt64(0) == 0 && f.Spec.Characteristics.Cpu.Cmp(selector.MatchSelector.Cpu) != 0 { - klog.Infof("MatchSelector Cpu: %d - Flavour Cpu: %d", selector.MatchSelector.Cpu, f.Spec.Characteristics.Cpu) + if !filterByMatchSelector(selector, f) { return false } + } - if selector.MatchSelector.Memory.CmpInt64(0) == 0 && f.Spec.Characteristics.Memory.Cmp(selector.MatchSelector.Memory) != 0 { - klog.Infof("MatchSelector Memory: %d - Flavour Memory: %d", selector.MatchSelector.Memory, f.Spec.Characteristics.Memory) + if selector.RangeSelector != nil && selector.MatchSelector == nil { + if !filterByRangeSelector(selector, f) { return false } + } - if selector.MatchSelector.EphemeralStorage.CmpInt64(0) == 0 && f.Spec.Characteristics.EphemeralStorage.Cmp(selector.MatchSelector.EphemeralStorage) != 0 { - klog.Infof("MatchSelector EphemeralStorage: %d - Flavour EphemeralStorage: %d", selector.MatchSelector.EphemeralStorage, f.Spec.Characteristics.EphemeralStorage) - return false - } + return true +} - if selector.MatchSelector.Storage.CmpInt64(0) == 0 && f.Spec.Characteristics.PersistentStorage.Cmp(selector.MatchSelector.Storage) != 0 { - klog.Infof("MatchSelector Storage: %d - Flavour Storage: %d", selector.MatchSelector.Storage, f.Spec.Characteristics.PersistentStorage) - return false - } +func filterByMatchSelector(selector *models.Selector, f *nodecorev1alpha1.Flavour) bool { + if selector.MatchSelector.CPU.CmpInt64(0) == 0 && f.Spec.Characteristics.Cpu.Cmp(selector.MatchSelector.CPU) != 0 { + klog.Infof("MatchSelector Cpu: %d - Flavour Cpu: %d", selector.MatchSelector.CPU, f.Spec.Characteristics.Cpu) + return 
false + } - if selector.MatchSelector.Gpu.CmpInt64(0) == 0 && f.Spec.Characteristics.Gpu.Cmp(selector.MatchSelector.Gpu) != 0 { - klog.Infof("MatchSelector GPU: %d - Flavour GPU: %d", selector.MatchSelector.Gpu, f.Spec.Characteristics.Gpu) - return false - } + if selector.MatchSelector.Memory.CmpInt64(0) == 0 && f.Spec.Characteristics.Memory.Cmp(selector.MatchSelector.Memory) != 0 { + klog.Infof("MatchSelector Memory: %d - Flavour Memory: %d", selector.MatchSelector.Memory, f.Spec.Characteristics.Memory) + return false } - if selector.RangeSelector != nil && selector.MatchSelector == nil { + if selector.MatchSelector.EphemeralStorage.CmpInt64(0) == 0 && + f.Spec.Characteristics.EphemeralStorage.Cmp(selector.MatchSelector.EphemeralStorage) != 0 { + klog.Infof("MatchSelector EphemeralStorage: %d - Flavour EphemeralStorage: %d", + selector.MatchSelector.EphemeralStorage, f.Spec.Characteristics.EphemeralStorage) + return false + } - if selector.RangeSelector.MinCpu.CmpInt64(0) != 0 && f.Spec.Characteristics.Cpu.Cmp(selector.RangeSelector.MinCpu) < 0 { - klog.Infof("RangeSelector MinCpu: %d - Flavour Cpu: %d", selector.RangeSelector.MinCpu, f.Spec.Characteristics.Cpu) - return false - } + if selector.MatchSelector.Storage.CmpInt64(0) == 0 && f.Spec.Characteristics.PersistentStorage.Cmp(selector.MatchSelector.Storage) != 0 { + klog.Infof("MatchSelector Storage: %d - Flavour Storage: %d", selector.MatchSelector.Storage, f.Spec.Characteristics.PersistentStorage) + return false + } - if selector.RangeSelector.MinMemory.CmpInt64(0) != 0 && f.Spec.Characteristics.Memory.Cmp(selector.RangeSelector.MinMemory) < 0 { - klog.Infof("RangeSelector MinMemory: %d - Flavour Memory: %d", selector.RangeSelector.MinMemory, f.Spec.Characteristics.Memory) - return false - } + if selector.MatchSelector.Gpu.CmpInt64(0) == 0 && f.Spec.Characteristics.Gpu.Cmp(selector.MatchSelector.Gpu) != 0 { + klog.Infof("MatchSelector GPU: %d - Flavour GPU: %d", selector.MatchSelector.Gpu, 
f.Spec.Characteristics.Gpu) + return false + } + return true +} - if selector.RangeSelector.MinEph.CmpInt64(0) != 0 && f.Spec.Characteristics.EphemeralStorage.Cmp(selector.RangeSelector.MinEph) < 0 { - klog.Infof("RangeSelector MinEph: %d - Flavour EphemeralStorage: %d", selector.RangeSelector.MinEph, f.Spec.Characteristics.EphemeralStorage) - return false - } +func filterByRangeSelector(selector *models.Selector, f *nodecorev1alpha1.Flavour) bool { + if selector.RangeSelector.MinCPU.CmpInt64(0) != 0 && f.Spec.Characteristics.Cpu.Cmp(selector.RangeSelector.MinCPU) < 0 { + klog.Infof("RangeSelector MinCpu: %d - Flavour Cpu: %d", selector.RangeSelector.MinCPU, f.Spec.Characteristics.Cpu) + return false + } - if selector.RangeSelector.MinStorage.CmpInt64(0) != 0 && f.Spec.Characteristics.PersistentStorage.Cmp(selector.RangeSelector.MinStorage) < 0 { - klog.Infof("RangeSelector MinStorage: %d - Flavour Storage: %d", selector.RangeSelector.MinStorage, f.Spec.Characteristics.PersistentStorage) - return false - } + if selector.RangeSelector.MinMemory.CmpInt64(0) != 0 && f.Spec.Characteristics.Memory.Cmp(selector.RangeSelector.MinMemory) < 0 { + klog.Infof("RangeSelector MinMemory: %d - Flavour Memory: %d", selector.RangeSelector.MinMemory, f.Spec.Characteristics.Memory) + return false + } - if selector.RangeSelector.MinGpu.CmpInt64(0) != 0 && f.Spec.Characteristics.Gpu.Cmp(selector.RangeSelector.MinGpu) < 0 { - return false - } + if selector.RangeSelector.MinEph.CmpInt64(0) != 0 && f.Spec.Characteristics.EphemeralStorage.Cmp(selector.RangeSelector.MinEph) < 0 { + klog.Infof("RangeSelector MinEph: %d - Flavour EphemeralStorage: %d", selector.RangeSelector.MinEph, f.Spec.Characteristics.EphemeralStorage) + return false + } - if selector.RangeSelector.MaxCpu.CmpInt64(0) != 0 && f.Spec.Characteristics.Cpu.Cmp(selector.RangeSelector.MaxCpu) > 0 { - return false - } + if selector.RangeSelector.MinStorage.CmpInt64(0) != 0 && 
f.Spec.Characteristics.PersistentStorage.Cmp(selector.RangeSelector.MinStorage) < 0 { + klog.Infof("RangeSelector MinStorage: %d - Flavour Storage: %d", selector.RangeSelector.MinStorage, f.Spec.Characteristics.PersistentStorage) + return false + } - if selector.RangeSelector.MaxMemory.CmpInt64(0) != 0 && f.Spec.Characteristics.Memory.Cmp(selector.RangeSelector.MaxMemory) > 0 { - return false - } + if selector.RangeSelector.MinGpu.CmpInt64(0) != 0 && f.Spec.Characteristics.Gpu.Cmp(selector.RangeSelector.MinGpu) < 0 { + return false + } - if selector.RangeSelector.MaxEph.CmpInt64(0) != 0 && f.Spec.Characteristics.EphemeralStorage.Cmp(selector.RangeSelector.MaxEph) > 0 { - return false - } + if selector.RangeSelector.MaxCPU.CmpInt64(0) != 0 && f.Spec.Characteristics.Cpu.Cmp(selector.RangeSelector.MaxCPU) > 0 { + return false + } - if selector.RangeSelector.MaxStorage.CmpInt64(0) != 0 && f.Spec.Characteristics.PersistentStorage.Cmp(selector.RangeSelector.MaxStorage) > 0 { - return false - } + if selector.RangeSelector.MaxMemory.CmpInt64(0) != 0 && f.Spec.Characteristics.Memory.Cmp(selector.RangeSelector.MaxMemory) > 0 { + return false + } - if selector.RangeSelector.MaxGpu.CmpInt64(0) != 0 && f.Spec.Characteristics.Gpu.Cmp(selector.RangeSelector.MaxGpu) > 0 { - return false - } + if selector.RangeSelector.MaxEph.CmpInt64(0) != 0 && f.Spec.Characteristics.EphemeralStorage.Cmp(selector.RangeSelector.MaxEph) > 0 { + return false + } + + if selector.RangeSelector.MaxStorage.CmpInt64(0) != 0 && f.Spec.Characteristics.PersistentStorage.Cmp(selector.RangeSelector.MaxStorage) > 0 { + return false } + if selector.RangeSelector.MaxGpu.CmpInt64(0) != 0 && f.Spec.Characteristics.Gpu.Cmp(selector.RangeSelector.MaxGpu) > 0 { + return false + } return true } -// FilterPeeringCandidate filters the peering candidate based on the solver's flavour selector +// FilterPeeringCandidate filters the peering candidate based on the solver's flavour selector. 
func FilterPeeringCandidate(selector *nodecorev1alpha1.FlavourSelector, pc *advertisementv1alpha1.PeeringCandidate) bool { s := parseutil.ParseFlavourSelector(selector) - return FilterFlavour(s, pc.Spec.Flavour) + return FilterFlavour(s, &pc.Spec.Flavour) } // CheckSelector ia a func to check if the syntax of the Selector is right. -// Strict and range syntax cannot be used together +// Strict and range syntax cannot be used together. func CheckSelector(selector *models.Selector) error { - if selector.MatchSelector != nil && selector.RangeSelector != nil { return fmt.Errorf("selector syntax error: strict and range syntax cannot be used together") } @@ -148,7 +161,7 @@ func CheckSelector(selector *models.Selector) error { // SOLVER PHASE SETTERS -// DiscoveryStatusCheck checks the status of the discovery +// DiscoveryStatusCheck checks the status of the discovery. func DiscoveryStatusCheck(solver *nodecorev1alpha1.Solver, discovery *advertisementv1alpha1.Discovery) { if discovery.Status.Phase.Phase == nodecorev1alpha1.PhaseSolved { klog.Infof("Discovery %s has found a candidate: %s", discovery.Name, discovery.Status.PeeringCandidate) @@ -179,13 +192,15 @@ func DiscoveryStatusCheck(solver *nodecorev1alpha1.Solver, discovery *advertisem } } +// ReservationStatusCheck checks the status of the reservation. 
func ReservationStatusCheck(solver *nodecorev1alpha1.Solver, reservation *reservationv1alpha1.Reservation) { klog.Infof("Reservation %s is in phase %s", reservation.Name, reservation.Status.Phase.Phase) flavourName := namings.RetrieveFlavourNameFromPC(reservation.Spec.PeeringCandidate.Name) if reservation.Status.Phase.Phase == nodecorev1alpha1.PhaseSolved { klog.Infof("Reservation %s has reserved and purchase the flavour %s", reservation.Name, flavourName) solver.Status.ReservationPhase = nodecorev1alpha1.PhaseSolved - solver.Status.ReserveAndBuy = nodecorev1alpha1.PhaseSolved + solver.Status.ReserveAndBuy = nodecorev1alpha1.PhaseAllocating + solver.Status.Contract = reservation.Status.Contract solver.SetPhase(nodecorev1alpha1.PhaseRunning, "Reservation: Flavour reserved and purchased") } if reservation.Status.Phase.Phase == nodecorev1alpha1.PhaseFailed { diff --git a/pkg/utils/common/doc.go b/pkg/utils/common/doc.go index c2ad6de..243015a 100644 --- a/pkg/utils/common/doc.go +++ b/pkg/utils/common/doc.go @@ -12,5 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// package common contains common functions used by other packagess +// Package common contains common functions used by other packagess package common diff --git a/pkg/utils/consts/doc.go b/pkg/utils/consts/doc.go index e8a786b..588750f 100644 --- a/pkg/utils/consts/doc.go +++ b/pkg/utils/consts/doc.go @@ -12,5 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// package consts contains the constants used in the FLUIDOS and some miscellaneous ones. +// Package consts contains the constants used in the FLUIDOS and some miscellaneous ones. 
package consts diff --git a/pkg/utils/flags/flags.go b/pkg/utils/flags/flags.go index 32eddd6..54bb7aa 100644 --- a/pkg/utils/flags/flags.go +++ b/pkg/utils/flags/flags.go @@ -16,36 +16,38 @@ package flags import "time" -// NAMESPACES flags +// Namespace flags. var ( - FLUIDOS_NAMESPACE string = "fluidos" + FluidoNamespace = "fluidos" ) -// EXPIRATION flags +// Expiration/Time flags. var ( - EXPIRATION_PHASE_RUNNING = 2 * time.Minute - EXPIRATION_SOLVER = 5 * time.Minute - EXPIRATION_TRANSACTION = 20 * time.Second - EXPIRATION_CONTRACT = 365 * 24 * time.Hour - REFRESH_CACHE_INTERVAL = 20 * time.Second - LIQO_CHECK_INTERVAL = 20 * time.Second + ExpirationPhaseRunning = 2 * time.Minute + ExpirationSolver = 5 * time.Minute + ExpirationTransaction = 20 * time.Second + ExpirationContract = 365 * 24 * time.Hour + RefreshCacheInterval = 20 * time.Second + LiqoCheckInterval = 20 * time.Second ) +// Configs flags. var ( - HTTP_PORT string - GRPC_PORT string - RESOURCE_NODE_LABEL string + HTTPPort string + GRPCPort string + ResourceNodeLabel string ) +// Customization flags. var ( - RESOURCE_TYPE string - AMOUNT string - CURRENCY string - PERIOD string - CPU_MIN string - MEMORY_MIN string - CPU_STEP string - MEMORY_STEP string - MIN_COUNT int64 - MAX_COUNT int64 + ResourceType string + AMOUNT string + CURRENCY string + PERIOD string + CPUMin string + MemoryMin string + CPUStep string + MemoryStep string + MinCount int64 + MaxCount int64 ) diff --git a/pkg/utils/getters/doc.go b/pkg/utils/getters/doc.go new file mode 100644 index 0000000..98b9988 --- /dev/null +++ b/pkg/utils/getters/doc.go @@ -0,0 +1,16 @@ +// Copyright 2022-2023 FLUIDOS Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package getters contains functions to get data from the system. +package getters diff --git a/pkg/utils/getters/getters.go b/pkg/utils/getters/getters.go index 76abe53..28d8660 100644 --- a/pkg/utils/getters/getters.go +++ b/pkg/utils/getters/getters.go @@ -27,20 +27,18 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" nodecorev1alpha1 "github.com/fluidos-project/node/apis/nodecore/v1alpha1" - reservationv1alpha1 "github.com/fluidos-project/node/apis/reservation/v1alpha1" "github.com/fluidos-project/node/pkg/utils/consts" "github.com/fluidos-project/node/pkg/utils/flags" ) // GetNodeIdentity retrieves the list of local providers ip addresses from the Network Manager configMap. 
func GetNodeIdentity(ctx context.Context, cl client.Client) *nodecorev1alpha1.NodeIdentity { - cm := &corev1.ConfigMap{} // Get the node identity err := cl.Get(ctx, types.NamespacedName{ Name: consts.NodeIdentityConfigMapName, - Namespace: flags.FLUIDOS_NAMESPACE, + Namespace: flags.FluidoNamespace, }, cm) if err != nil { klog.Errorf("Error getting the configmap: %s", err) @@ -61,7 +59,7 @@ func GetLocalProviders(ctx context.Context, cl client.Client) []string { // Get the configmap err := cl.Get(ctx, types.NamespacedName{ Name: consts.NetworkConfigMapName, - Namespace: flags.FLUIDOS_NAMESPACE, + Namespace: flags.FluidoNamespace, }, cm) if err != nil { klog.Errorf("Error getting the configmap: %s", err) @@ -71,7 +69,7 @@ func GetLocalProviders(ctx context.Context, cl client.Client) []string { } // GetLiqoCredentials retrieves the Liqo credentials from the local cluster. -func GetLiqoCredentials(ctx context.Context, cl client.Client) (*reservationv1alpha1.LiqoCredentials, error) { +func GetLiqoCredentials(ctx context.Context, cl client.Client) (*nodecorev1alpha1.LiqoCredentials, error) { localToken, err := auth.GetToken(ctx, cl, consts.LiqoNamespace) if err != nil { return nil, err @@ -93,10 +91,24 @@ func GetLiqoCredentials(ctx context.Context, cl client.Client) (*reservationv1al clusterIdentity.ClusterName = clusterIdentity.ClusterID } - return &reservationv1alpha1.LiqoCredentials{ + return &nodecorev1alpha1.LiqoCredentials{ ClusterName: clusterIdentity.ClusterName, ClusterID: clusterIdentity.ClusterID, Endpoint: authEP, Token: localToken, }, nil } + +// GetAllocationNameByClusterIDSpec retrieves the name of the allocation with the given in its Specs. 
+func GetAllocationNameByClusterIDSpec(ctx context.Context, c client.Client, clusterID string) *string { + allocationList := nodecorev1alpha1.AllocationList{} + if err := c.List(ctx, &allocationList, client.MatchingFields{"spec.remoteClusterID": clusterID}); err != nil { + klog.Infof("Error when getting the allocation list: %s", err) + return nil + } + if len(allocationList.Items) == 0 { + klog.Infof("No Allocations found with the ClusterID %s", clusterID) + return nil + } + return &allocationList.Items[0].Name +} diff --git a/pkg/utils/models/doc.go b/pkg/utils/models/doc.go index 3c61a24..45fc5cf 100644 --- a/pkg/utils/models/doc.go +++ b/pkg/utils/models/doc.go @@ -12,5 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// package models contains shared models for the FLUIDOS environment. +// Package models contains shared models for the FLUIDOS environment. package models diff --git a/pkg/utils/models/gateway.go b/pkg/utils/models/gateway.go index 364a03b..15ffcb1 100644 --- a/pkg/utils/models/gateway.go +++ b/pkg/utils/models/gateway.go @@ -14,18 +14,18 @@ package models -// PurchaseRequest is the request model for purchasing a Flavour +// PurchaseRequest is the request model for purchasing a Flavour. type PurchaseRequest struct { TransactionID string `json:"transactionID"` } -// ResponsePurchase contain information after purchase a Flavour +// ResponsePurchase contain information after purchase a Flavour. type ResponsePurchase struct { Contract Contract `json:"contract"` Status string `json:"status"` } -// ReserveRequest is the request model for reserving a Flavour +// ReserveRequest is the request model for reserving a Flavour. 
type ReserveRequest struct { FlavourID string `json:"flavourID"` Buyer NodeIdentity `json:"buyerID"` diff --git a/pkg/utils/models/local-resource-manager.go b/pkg/utils/models/local-resource-manager.go index ac78cc2..809064d 100644 --- a/pkg/utils/models/local-resource-manager.go +++ b/pkg/utils/models/local-resource-manager.go @@ -18,7 +18,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" ) -// NodeInfo represents a node and its resources +// NodeInfo represents a node and its resources. type NodeInfo struct { UID string `json:"uid"` Name string `json:"name"` @@ -27,7 +27,7 @@ type NodeInfo struct { ResourceMetrics ResourceMetrics `json:"resources"` } -// ResourceMetrics represents resources of a certain node +// ResourceMetrics represents resources of a certain node. type ResourceMetrics struct { CPUTotal resource.Quantity `json:"totalCPU"` CPUAvailable resource.Quantity `json:"availableCPU"` diff --git a/pkg/utils/models/models.go b/pkg/utils/models/models.go index a741faf..e48c254 100644 --- a/pkg/utils/models/models.go +++ b/pkg/utils/models/models.go @@ -83,6 +83,7 @@ type OptionalFields struct { WorkerID string `json:"workerID"` } +// Selector represents the criteria for selecting Flavours. type Selector struct { FlavourType string `json:"type,omitempty"` Architecture string `json:"architecture,omitempty"` @@ -92,7 +93,7 @@ type Selector struct { // MatchSelector represents the criteria for selecting Flavours through a strict match. type MatchSelector struct { - Cpu resource.Quantity `json:"cpu,omitempty"` + CPU resource.Quantity `json:"cpu,omitempty"` Memory resource.Quantity `json:"memory,omitempty"` Storage resource.Quantity `json:"storage,omitempty"` EphemeralStorage resource.Quantity `json:"ephemeralStorage,omitempty"` @@ -101,12 +102,12 @@ type MatchSelector struct { // RangeSelector represents the criteria for selecting Flavours through a range. 
type RangeSelector struct { - MinCpu resource.Quantity `json:"minCpu,omitempty"` + MinCPU resource.Quantity `json:"minCpu,omitempty"` MinMemory resource.Quantity `json:"minMemory,omitempty"` MinStorage resource.Quantity `json:"minStorage,omitempty"` MinEph resource.Quantity `json:"minEph,omitempty"` MinGpu resource.Quantity `json:"minGpu,omitempty"` - MaxCpu resource.Quantity `json:"maxCpu,omitempty"` + MaxCPU resource.Quantity `json:"maxCpu,omitempty"` MaxMemory resource.Quantity `json:"maxMemory,omitempty"` MaxStorage resource.Quantity `json:"maxStorage,omitempty"` MaxEph resource.Quantity `json:"maxEph,omitempty"` diff --git a/pkg/utils/models/reservation.go b/pkg/utils/models/reservation.go index 7b7e5af..192e1fb 100644 --- a/pkg/utils/models/reservation.go +++ b/pkg/utils/models/reservation.go @@ -16,17 +16,17 @@ package models import "k8s.io/apimachinery/pkg/api/resource" -// Partition represents the partitioning properties of a Flavour +// Partition represents the partitioning properties of a Flavour. type Partition struct { Architecture string `json:"architecture"` - Cpu resource.Quantity `json:"cpu"` + CPU resource.Quantity `json:"cpu"` Memory resource.Quantity `json:"memory"` EphemeralStorage resource.Quantity `json:"ephemeral-storage,omitempty"` Gpu resource.Quantity `json:"gpu,omitempty"` Storage resource.Quantity `json:"storage,omitempty"` } -// Transaction contains information regarding the transaction for a flavour +// Transaction contains information regarding the transaction for a flavour. type Transaction struct { TransactionID string `json:"transactionID"` FlavourID string `json:"flavourID"` @@ -36,7 +36,7 @@ type Transaction struct { StartTime string `json:"startTime"` } -// Contract represents a Contract object with its characteristics +// Contract represents a Contract object with its characteristics. 
type Contract struct { ContractID string `json:"contractID"` TransactionID string `json:"transactionID"` diff --git a/pkg/utils/namings/doc.go b/pkg/utils/namings/doc.go index c53c480..cb185f3 100644 --- a/pkg/utils/namings/doc.go +++ b/pkg/utils/namings/doc.go @@ -12,6 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// package namings contains different naming functions -// used into the FLUIDOS environment. +// Package namings contains different naming functions used into the FLUIDOS environment. package namings diff --git a/pkg/utils/namings/namings.go b/pkg/utils/namings/namings.go index effafcc..36818e7 100644 --- a/pkg/utils/namings/namings.go +++ b/pkg/utils/namings/namings.go @@ -27,46 +27,59 @@ import ( "github.com/fluidos-project/node/pkg/utils/flags" ) -// ForgeContractName creates a name for the Contract CR +// ForgeVirtualNodeName creates a name for the VirtualNode starting from the cluster name of the remote cluster. +func ForgeVirtualNodeName(clusterName string) string { + return fmt.Sprintf("liqo-%s", clusterName) +} + +// ForgeContractName creates a name for the Contract. func ForgeContractName(flavourID string) string { hash := ForgeHashString(flavourID, 4) return fmt.Sprintf("contract-%s-%s", flavourID, hash) } -// ForgePeeringCandidateName generates a name for the PeeringCandidate +// ForgeAllocationName generates a name for the Allocation. +func ForgeAllocationName(flavourID string) string { + hash := ForgeHashString(flavourID, 4) + return fmt.Sprintf("allocation-%s-%s", flavourID, hash) +} + +// ForgePeeringCandidateName generates a name for the PeeringCandidate. func ForgePeeringCandidateName(flavourID string) string { return fmt.Sprintf("peeringcandidate-%s", flavourID) } -// ForgeReservationName generates a name for the Reservation +// ForgeReservationName generates a name for the Reservation. 
func ForgeReservationName(solverID string) string { return fmt.Sprintf("reservation-%s", solverID) } -// ForgeFlavourName returns the name of the flavour following the pattern Domain-Type-rand(4) -func ForgeFlavourName(WorkerID, domain string) string { - rand, err := ForgeRandomString() +// ForgeFlavourName returns the name of the flavour following the pattern Domain-Type-rand(4). +func ForgeFlavourName(workerID, domain string) string { + r, err := ForgeRandomString() if err != nil { klog.Errorf("Error when generating random string: %s", err) } - return domain + "-" + flags.RESOURCE_TYPE + "-" + ForgeHashString(WorkerID+rand, 8) + return domain + "-" + flags.ResourceType + "-" + ForgeHashString(workerID+r, 8) } -// ForgeDiscoveryName returns the name of the discovery following the pattern solverID-discovery +// ForgeDiscoveryName returns the name of the discovery following the pattern solverID-discovery. func ForgeDiscoveryName(solverID string) string { return fmt.Sprintf("discovery-%s", solverID) } +// RetrieveSolverNameFromDiscovery retrieves the solver name from the discovery name. func RetrieveSolverNameFromDiscovery(discoveryName string) string { return strings.TrimPrefix(discoveryName, "discovery-") } +// RetrieveSolverNameFromReservation retrieves the solver name from the reservation name. func RetrieveSolverNameFromReservation(reservationName string) string { return strings.TrimPrefix(reservationName, "reservation-") } -// ForgeTransactionID Generates a unique transaction ID using the current timestamp +// ForgeTransactionID Generates a unique transaction ID using the current timestamp. 
func ForgeTransactionID() (string, error) { // Convert the random bytes to a hexadecimal string transactionID, err := ForgeRandomString() @@ -80,12 +93,12 @@ func ForgeTransactionID() (string, error) { return transactionID, nil } -// RetrieveFlavourNameFromPC generates a name for the Flavour from the PeeringCandidate +// RetrieveFlavourNameFromPC generates a name for the Flavour from the PeeringCandidate. func RetrieveFlavourNameFromPC(pcName string) string { return strings.TrimPrefix(pcName, "peeringcandidate-") } -// ForgePrefixClientID generates a prefix for the client ID +// ForgeRandomString generates a random string of 16 bytes. func ForgeRandomString() (string, error) { randomBytes := make([]byte, 16) _, err := rand.Read(randomBytes) @@ -97,11 +110,11 @@ func ForgeRandomString() (string, error) { return randomString, nil } -// ForgeHashString computes SHA-256 Hash of the NodeUID -func ForgeHashString(input string, lenght int) string { +// ForgeHashString computes SHA-256 Hash of the NodeUID. +func ForgeHashString(input string, length int) string { hash := sha256.Sum256([]byte(input)) hashString := hex.EncodeToString(hash[:]) - uniqueString := hashString[:lenght] + uniqueString := hashString[:length] return uniqueString } diff --git a/pkg/utils/parseutil/parseutil.go b/pkg/utils/parseutil/parseutil.go index bbd7a2d..18ccae4 100644 --- a/pkg/utils/parseutil/parseutil.go +++ b/pkg/utils/parseutil/parseutil.go @@ -23,14 +23,15 @@ import ( ) // ParseFlavourSelector parses FlavourSelector into a Selector. 
-func ParseFlavourSelector(selector *nodecorev1alpha1.FlavourSelector) (s *models.Selector) { - - s.Architecture = selector.Architecture - s.FlavourType = string(selector.FlavourType) +func ParseFlavourSelector(selector *nodecorev1alpha1.FlavourSelector) *models.Selector { + s := &models.Selector{ + Architecture: selector.Architecture, + FlavourType: selector.FlavourType, + } if selector.MatchSelector != nil { s.MatchSelector = &models.MatchSelector{ - Cpu: selector.MatchSelector.Cpu, + CPU: selector.MatchSelector.Cpu, Memory: selector.MatchSelector.Memory, EphemeralStorage: selector.MatchSelector.EphemeralStorage, Storage: selector.MatchSelector.Storage, @@ -40,12 +41,12 @@ func ParseFlavourSelector(selector *nodecorev1alpha1.FlavourSelector) (s *models if selector.RangeSelector != nil { s.RangeSelector = &models.RangeSelector{ - MinCpu: selector.RangeSelector.MinCpu, + MinCPU: selector.RangeSelector.MinCpu, MinMemory: selector.RangeSelector.MinMemory, MinEph: selector.RangeSelector.MinEph, MinStorage: selector.RangeSelector.MinStorage, MinGpu: selector.RangeSelector.MinGpu, - MaxCpu: selector.RangeSelector.MaxCpu, + MaxCPU: selector.RangeSelector.MaxCpu, MaxMemory: selector.RangeSelector.MaxMemory, MaxEph: selector.RangeSelector.MaxEph, MaxStorage: selector.RangeSelector.MaxStorage, @@ -53,13 +54,13 @@ func ParseFlavourSelector(selector *nodecorev1alpha1.FlavourSelector) (s *models } } - return + return s } // ParsePartition creates a Partition Object from a Partition CR. 
func ParsePartition(partition *nodecorev1alpha1.Partition) *models.Partition { return &models.Partition{ - Cpu: partition.CPU, + CPU: partition.CPU, Memory: partition.Memory, EphemeralStorage: partition.EphemeralStorage, Storage: partition.Storage, @@ -71,7 +72,7 @@ func ParsePartition(partition *nodecorev1alpha1.Partition) *models.Partition { func ParsePartitionFromObj(partition *models.Partition) *nodecorev1alpha1.Partition { return &nodecorev1alpha1.Partition{ Architecture: partition.Architecture, - CPU: partition.Cpu, + CPU: partition.CPU, Memory: partition.Memory, Gpu: partition.Gpu, Storage: partition.Storage, @@ -89,8 +90,8 @@ func ParseNodeIdentity(node nodecorev1alpha1.NodeIdentity) models.NodeIdentity { } // ParseFlavour creates a Flavour Object from a Flavour CR. -func ParseFlavour(flavour nodecorev1alpha1.Flavour) models.Flavour { - return models.Flavour{ +func ParseFlavour(flavour *nodecorev1alpha1.Flavour) *models.Flavour { + return &models.Flavour{ FlavourID: flavour.Name, Type: string(flavour.Spec.Type), ProviderID: flavour.Spec.ProviderID, @@ -137,10 +138,10 @@ func ParseFlavour(flavour nodecorev1alpha1.Flavour) models.Flavour { } // ParseContract creates a Contract Object. -func ParseContract(contract *reservationv1alpha1.Contract) models.Contract { - return models.Contract{ +func ParseContract(contract *reservationv1alpha1.Contract) *models.Contract { + return &models.Contract{ ContractID: contract.Name, - Flavour: ParseFlavour(contract.Spec.Flavour), + Flavour: *ParseFlavour(&contract.Spec.Flavour), Buyer: ParseNodeIdentity(contract.Spec.Buyer), BuyerClusterID: contract.Spec.BuyerClusterID, TransactionID: contract.Spec.TransactionID, diff --git a/pkg/utils/resourceforge/doc.go b/pkg/utils/resourceforge/doc.go index c66df1d..b442f50 100644 --- a/pkg/utils/resourceforge/doc.go +++ b/pkg/utils/resourceforge/doc.go @@ -12,6 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// package resourceforge contains different functions used -// to forge the different resources used into the FLUIDOS environment. +// Package resourceforge contains different functions used to forge the different resources used into the FLUIDOS environment. package resourceforge diff --git a/pkg/utils/resourceforge/forge.go b/pkg/utils/resourceforge/forge.go index 6ed3292..53a30b0 100644 --- a/pkg/utils/resourceforge/forge.go +++ b/pkg/utils/resourceforge/forge.go @@ -34,7 +34,7 @@ func ForgeDiscovery(selector *nodecorev1alpha1.FlavourSelector, solverID string) return &advertisementv1alpha1.Discovery{ ObjectMeta: metav1.ObjectMeta{ Name: namings.ForgeDiscoveryName(solverID), - Namespace: flags.FLUIDOS_NAMESPACE, + Namespace: flags.FluidoNamespace, }, Spec: advertisementv1alpha1.DiscoverySpec{ Selector: func() *nodecorev1alpha1.FlavourSelector { @@ -50,11 +50,12 @@ func ForgeDiscovery(selector *nodecorev1alpha1.FlavourSelector, solverID string) } // ForgePeeringCandidate creates a PeeringCandidate CR from a Flavour and a Discovery. 
-func ForgePeeringCandidate(flavourPeeringCandidate *nodecorev1alpha1.Flavour, solverID string, reserved bool) (pc *advertisementv1alpha1.PeeringCandidate) { +func ForgePeeringCandidate(flavourPeeringCandidate *nodecorev1alpha1.Flavour, + solverID string, reserved bool) (pc *advertisementv1alpha1.PeeringCandidate) { pc = &advertisementv1alpha1.PeeringCandidate{ ObjectMeta: metav1.ObjectMeta{ Name: namings.ForgePeeringCandidateName(flavourPeeringCandidate.Name), - Namespace: flags.FLUIDOS_NAMESPACE, + Namespace: flags.FluidoNamespace, }, Spec: advertisementv1alpha1.PeeringCandidateSpec{ Flavour: nodecorev1alpha1.Flavour{ @@ -83,7 +84,7 @@ func ForgeReservation(pc *advertisementv1alpha1.PeeringCandidate, reservation := &reservationv1alpha1.Reservation{ ObjectMeta: metav1.ObjectMeta{ Name: namings.ForgeReservationName(solverID), - Namespace: flags.FLUIDOS_NAMESPACE, + Namespace: flags.FluidoNamespace, }, Spec: reservationv1alpha1.ReservationSpec{ SolverID: solverID, @@ -114,14 +115,15 @@ func ForgeReservation(pc *advertisementv1alpha1.PeeringCandidate, } // ForgeContract creates a Contract CR. 
-func ForgeContract(flavour nodecorev1alpha1.Flavour, transaction models.Transaction, lc *reservationv1alpha1.LiqoCredentials) *reservationv1alpha1.Contract { +func ForgeContract(flavour *nodecorev1alpha1.Flavour, transaction *models.Transaction, + lc *nodecorev1alpha1.LiqoCredentials) *reservationv1alpha1.Contract { return &reservationv1alpha1.Contract{ ObjectMeta: metav1.ObjectMeta{ Name: namings.ForgeContractName(flavour.Name), - Namespace: flags.FLUIDOS_NAMESPACE, + Namespace: flags.FluidoNamespace, }, Spec: reservationv1alpha1.ContractSpec{ - Flavour: flavour, + Flavour: *flavour, Buyer: nodecorev1alpha1.NodeIdentity{ Domain: transaction.Buyer.Domain, IP: transaction.Buyer.IP, @@ -137,7 +139,7 @@ func ForgeContract(flavour nodecorev1alpha1.Flavour, transaction models.Transact } return nil }(), - ExpirationTime: time.Now().Add(flags.EXPIRATION_CONTRACT).Format(time.RFC3339), + ExpirationTime: time.Now().Add(flags.ExpirationContract).Format(time.RFC3339), ExtraInformation: nil, }, Status: reservationv1alpha1.ContractStatus{ @@ -150,11 +152,11 @@ func ForgeContract(flavour nodecorev1alpha1.Flavour, transaction models.Transact } // ForgeFlavourFromMetrics creates a new flavour custom resource from the metrics of the node. 
-func ForgeFlavourFromMetrics(node models.NodeInfo, ni nodecorev1alpha1.NodeIdentity) (flavour *nodecorev1alpha1.Flavour) { +func ForgeFlavourFromMetrics(node *models.NodeInfo, ni nodecorev1alpha1.NodeIdentity) (flavour *nodecorev1alpha1.Flavour) { return &nodecorev1alpha1.Flavour{ ObjectMeta: metav1.ObjectMeta{ Name: namings.ForgeFlavourName(node.UID, ni.Domain), - Namespace: flags.FLUIDOS_NAMESPACE, + Namespace: flags.FluidoNamespace, }, Spec: nodecorev1alpha1.FlavourSpec{ ProviderID: ni.NodeID, @@ -169,14 +171,14 @@ func ForgeFlavourFromMetrics(node models.NodeInfo, ni nodecorev1alpha1.NodeIdent }, Policy: nodecorev1alpha1.Policy{ Partitionable: &nodecorev1alpha1.Partitionable{ - CpuMin: parseutil.ParseQuantityFromString(flags.CPU_MIN), - MemoryMin: parseutil.ParseQuantityFromString(flags.MEMORY_MIN), - CpuStep: parseutil.ParseQuantityFromString(flags.CPU_STEP), - MemoryStep: parseutil.ParseQuantityFromString(flags.MEMORY_STEP), + CpuMin: parseutil.ParseQuantityFromString(flags.CPUMin), + MemoryMin: parseutil.ParseQuantityFromString(flags.MemoryMin), + CpuStep: parseutil.ParseQuantityFromString(flags.CPUStep), + MemoryStep: parseutil.ParseQuantityFromString(flags.MemoryStep), }, Aggregatable: &nodecorev1alpha1.Aggregatable{ - MinCount: int(flags.MIN_COUNT), - MaxCount: int(flags.MAX_COUNT), + MinCount: int(flags.MinCount), + MaxCount: int(flags.MaxCount), }, }, Owner: ni, @@ -187,7 +189,30 @@ func ForgeFlavourFromMetrics(node models.NodeInfo, ni nodecorev1alpha1.NodeIdent }, OptionalFields: nodecorev1alpha1.OptionalFields{ Availability: true, - WorkerID: node.UID, + // This previously was the node UID that maybe is not the best choice to manage the scheduling + WorkerID: node.Name, + }, + }, + } +} + +// ForgeFlavourFromRef creates a new flavour starting from a Reference Flavour and the new Characteristics. 
+func ForgeFlavourFromRef(f *nodecorev1alpha1.Flavour, char *nodecorev1alpha1.Characteristics) (flavour *nodecorev1alpha1.Flavour) { + return &nodecorev1alpha1.Flavour{ + ObjectMeta: metav1.ObjectMeta{ + Name: namings.ForgeFlavourName(f.Spec.OptionalFields.WorkerID, f.Spec.Owner.Domain), + Namespace: flags.FluidoNamespace, + }, + Spec: nodecorev1alpha1.FlavourSpec{ + ProviderID: f.Spec.ProviderID, + Type: f.Spec.Type, + Characteristics: *char, + Policy: f.Spec.Policy, + Owner: f.Spec.Owner, + Price: f.Spec.Price, + OptionalFields: nodecorev1alpha1.OptionalFields{ + Availability: true, + WorkerID: f.Spec.OptionalFields.WorkerID, }, }, } @@ -196,9 +221,9 @@ func ForgeFlavourFromMetrics(node models.NodeInfo, ni nodecorev1alpha1.NodeIdent // FORGER FUNCTIONS FROM OBJECTS // ForgeTransactionObj creates a new Transaction object. -func ForgeTransactionObj(ID string, req models.ReserveRequest) models.Transaction { - return models.Transaction{ - TransactionID: ID, +func ForgeTransactionObj(id string, req *models.ReserveRequest) *models.Transaction { + return &models.Transaction{ + TransactionID: id, Buyer: req.Buyer, ClusterID: req.ClusterID, FlavourID: req.FlavourID, @@ -216,7 +241,7 @@ func ForgeTransactionObj(ID string, req models.ReserveRequest) models.Transactio func ForgeContractObj(contract *reservationv1alpha1.Contract) models.Contract { return models.Contract{ ContractID: contract.Name, - Flavour: parseutil.ParseFlavour(contract.Spec.Flavour), + Flavour: *parseutil.ParseFlavour(&contract.Spec.Flavour), Buyer: parseutil.ParseNodeIdentity(contract.Spec.Buyer), BuyerClusterID: contract.Spec.BuyerClusterID, Seller: parseutil.ParseNodeIdentity(contract.Spec.Seller), @@ -244,22 +269,22 @@ func ForgeContractObj(contract *reservationv1alpha1.Contract) models.Contract { } // ForgeResponsePurchaseObj creates a new response purchase. 
-func ForgeResponsePurchaseObj(contract models.Contract) models.ResponsePurchase { - return models.ResponsePurchase{ - Contract: contract, +func ForgeResponsePurchaseObj(contract *models.Contract) *models.ResponsePurchase { + return &models.ResponsePurchase{ + Contract: *contract, Status: "Completed", } } // ForgeContractFromObj creates a Contract from a reservation. -func ForgeContractFromObj(contract models.Contract) *reservationv1alpha1.Contract { +func ForgeContractFromObj(contract *models.Contract) *reservationv1alpha1.Contract { return &reservationv1alpha1.Contract{ ObjectMeta: metav1.ObjectMeta{ Name: contract.ContractID, - Namespace: flags.FLUIDOS_NAMESPACE, + Namespace: flags.FluidoNamespace, }, Spec: reservationv1alpha1.ContractSpec{ - Flavour: *ForgeFlavourFromObj(contract.Flavour), + Flavour: *ForgeFlavourFromObj(&contract.Flavour), Buyer: nodecorev1alpha1.NodeIdentity{ Domain: contract.Buyer.Domain, IP: contract.Buyer.IP, @@ -271,7 +296,7 @@ func ForgeContractFromObj(contract models.Contract) *reservationv1alpha1.Contrac IP: contract.Seller.IP, Domain: contract.Seller.Domain, }, - SellerCredentials: reservationv1alpha1.LiqoCredentials{ + SellerCredentials: nodecorev1alpha1.LiqoCredentials{ ClusterID: contract.SellerCredentials.ClusterID, ClusterName: contract.SellerCredentials.ClusterName, Token: contract.SellerCredentials.Token, @@ -301,12 +326,12 @@ func ForgeContractFromObj(contract models.Contract) *reservationv1alpha1.Contrac } } -// ForgeTransactionFromObj creates a transaction from a Transaction object +// ForgeTransactionFromObj creates a transaction from a Transaction object. 
func ForgeTransactionFromObj(transaction *models.Transaction) *reservationv1alpha1.Transaction { return &reservationv1alpha1.Transaction{ ObjectMeta: metav1.ObjectMeta{ Name: transaction.TransactionID, - Namespace: flags.FLUIDOS_NAMESPACE, + Namespace: flags.FluidoNamespace, }, Spec: reservationv1alpha1.TransactionSpec{ FlavourID: transaction.FlavourID, @@ -328,11 +353,11 @@ func ForgeTransactionFromObj(transaction *models.Transaction) *reservationv1alph } // ForgeFlavourFromObj creates a Flavour CR from a Flavour Object (REAR). -func ForgeFlavourFromObj(flavour models.Flavour) *nodecorev1alpha1.Flavour { +func ForgeFlavourFromObj(flavour *models.Flavour) *nodecorev1alpha1.Flavour { f := &nodecorev1alpha1.Flavour{ ObjectMeta: metav1.ObjectMeta{ Name: flavour.FlavourID, - Namespace: flags.FLUIDOS_NAMESPACE, + Namespace: flags.FluidoNamespace, }, Spec: nodecorev1alpha1.FlavourSpec{ ProviderID: flavour.Owner.NodeID, @@ -398,3 +423,42 @@ func ForgePartition(selector *nodecorev1alpha1.FlavourSelector) *nodecorev1alpha Gpu: selector.RangeSelector.MinGpu, } } + +// ForgeAllocation creates an Allocation from a Contract. 
+func ForgeAllocation(contract *reservationv1alpha1.Contract, intentID, nodeName string, + destination nodecorev1alpha1.Destination, nodeType nodecorev1alpha1.NodeType) *nodecorev1alpha1.Allocation { + return &nodecorev1alpha1.Allocation{ + ObjectMeta: metav1.ObjectMeta{ + Name: namings.ForgeAllocationName(contract.Spec.Flavour.Name), + Namespace: flags.FluidoNamespace, + }, + Spec: nodecorev1alpha1.AllocationSpec{ + RemoteClusterID: func() string { + if nodeType == nodecorev1alpha1.Node { + return contract.Spec.BuyerClusterID + } + return contract.Spec.SellerCredentials.ClusterID + }(), + IntentID: intentID, + NodeName: nodeName, + Type: nodeType, + Destination: destination, + Forwarding: false, + Flavour: *contract.Spec.Flavour.DeepCopy(), + Partitioned: func() bool { return contract.Spec.Partition != nil }(), + Resources: func() nodecorev1alpha1.Characteristics { + if contract.Spec.Partition != nil { + return nodecorev1alpha1.Characteristics{ + Architecture: contract.Spec.Partition.Architecture, + Cpu: contract.Spec.Partition.CPU, + Memory: contract.Spec.Partition.Memory, + EphemeralStorage: contract.Spec.Partition.EphemeralStorage, + Gpu: contract.Spec.Partition.Gpu, + PersistentStorage: contract.Spec.Partition.Storage, + } + } + return *contract.Spec.Flavour.Spec.Characteristics.DeepCopy() + }(), + }, + } +} diff --git a/pkg/utils/services/doc.go b/pkg/utils/services/doc.go index eb33da9..4146496 100644 --- a/pkg/utils/services/doc.go +++ b/pkg/utils/services/doc.go @@ -12,6 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// package services contains different functions and -// services used into the FLUIDOS environment. +// Package services contains different functions and services used into the FLUIDOS environment. 
package services diff --git a/pkg/utils/services/flavours_services.go b/pkg/utils/services/flavours_services.go index 1346336..6eefe3f 100644 --- a/pkg/utils/services/flavours_services.go +++ b/pkg/utils/services/flavours_services.go @@ -16,6 +16,7 @@ package services import ( "context" + "sync" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" @@ -24,9 +25,15 @@ import ( "github.com/fluidos-project/node/pkg/utils/flags" ) -// GetAllFlavours returns all the Flavours in the cluster -func GetAllFlavours(cl client.Client) ([]nodecorev1alpha1.Flavour, error) { +// FlavourService is the interface that wraps the basic Flavour methods and allows to manage the concurrent access to the Flavour CRs. +type FlavourService interface { + sync.Mutex + GetAllFlavours() ([]nodecorev1alpha1.Flavour, error) + GetFlavourByID(flavourID string) (*nodecorev1alpha1.Flavour, error) +} +// GetAllFlavours returns all the Flavours in the cluster. +func GetAllFlavours(cl client.Client) ([]nodecorev1alpha1.Flavour, error) { var flavourList nodecorev1alpha1.FlavourList // List all Flavour CRs @@ -39,13 +46,12 @@ func GetAllFlavours(cl client.Client) ([]nodecorev1alpha1.Flavour, error) { return flavourList.Items, nil } -// GetFlavourByID returns the entire Flavour CR (not only spec) in the cluster that matches the flavourID +// GetFlavourByID returns the entire Flavour CR (not only spec) in the cluster that matches the flavourID. 
func GetFlavourByID(flavourID string, cl client.Client) (*nodecorev1alpha1.Flavour, error) { - // Get the flavour with the given ID (that is the name of the CR) flavour := &nodecorev1alpha1.Flavour{} err := cl.Get(context.Background(), client.ObjectKey{ - Namespace: flags.FLUIDOS_NAMESPACE, + Namespace: flags.FluidoNamespace, Name: flavourID, }, flavour) if err != nil { diff --git a/pkg/utils/tools/doc.go b/pkg/utils/tools/doc.go new file mode 100644 index 0000000..0231463 --- /dev/null +++ b/pkg/utils/tools/doc.go @@ -0,0 +1,16 @@ +// Copyright 2022-2023 FLUIDOS Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tools contains different tools used into the FLUIDOS environment. +package tools diff --git a/pkg/utils/tools/tools.go b/pkg/utils/tools/tools.go index b2a18c8..b7324fd 100644 --- a/pkg/utils/tools/tools.go +++ b/pkg/utils/tools/tools.go @@ -20,12 +20,12 @@ import ( "k8s.io/klog/v2" ) -// GetTimeNow returns the current time in RFC3339 format +// GetTimeNow returns the current time in RFC3339 format. func GetTimeNow() string { return time.Now().Format(time.RFC3339) } -// CheckExpiration checks if the timestamp has expired +// CheckExpiration checks if the timestamp has expired. 
func CheckExpiration(timestamp string, expTime time.Duration) bool { t, err := time.Parse(time.RFC3339, timestamp) if err != nil { diff --git a/pkg/virtual-fabric-manager/services.go b/pkg/virtual-fabric-manager/services.go index 9adbacd..a199b4d 100644 --- a/pkg/virtual-fabric-manager/services.go +++ b/pkg/virtual-fabric-manager/services.go @@ -33,12 +33,13 @@ import ( "github.com/fluidos-project/node/pkg/utils/consts" ) +// clusterRole //+kubebuilder:rbac:groups=discovery.liqo.io,resources=foreignclusters,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=core,resources=*,verbs=get;list;watch // PeerWithCluster creates a ForeignCluster resource to peer with a remote cluster. -func PeerWithCluster(ctx context.Context, cl client.Client, clusterID, clusterName, clusterAuthURL, clusterToken string) (*discoveryv1alpha1.ForeignCluster, error) { - +func PeerWithCluster(ctx context.Context, cl client.Client, clusterID, + clusterName, clusterAuthURL, clusterToken string) (*discoveryv1alpha1.ForeignCluster, error) { // Retrieve the cluster identity associated with the current cluster. clusterIdentity, err := utils.GetClusterIdentityWithControllerClient(ctx, cl, consts.LiqoNamespace) if err != nil { @@ -87,6 +88,7 @@ func enforceForeignCluster(ctx context.Context, cl client.Client, fc.Spec.IncomingPeeringEnabled = discoveryv1alpha1.PeeringEnabledAuto } if fc.Spec.InsecureSkipTLSVerify == nil { + //nolint:staticcheck // referring to the Liqo implementation fc.Spec.InsecureSkipTLSVerify = pointer.BoolPtr(true) } return nil diff --git a/tools/development/README.md b/tools/development/README.md new file mode 100644 index 0000000..751adfd --- /dev/null +++ b/tools/development/README.md @@ -0,0 +1,104 @@ +# ๐Ÿ› ๏ธ ๐Ÿ’ป Development Toolkit and Guide + +## Introduction + +This document describes the development toolkit and guide for the FLUIDOS Node. 
Here we offer a set of instructions and scripts to build and run your version of the FLUIDOS Node, test it and contribute to the project. + +**REMEMBER**: This toolkit is just a way to help you to develop your own FLUIDOS Node version. There are several options to do it better and more efficiently i.e. running the FLUIDOS Node components in your host machine instead of using containers (`go run ...`). + +## Prerequisites + +1. [Docker](https://docs.docker.com/get-docker/) installed and running. +2. [KIND](https://kind.sigs.k8s.io/docs/user/quick-start/) installed. +3. [Helm](https://helm.sh/docs/intro/install/) installed. +4. [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) installed. + +## Setup + +1. Clone this repository. +2. Install the [Testbed for KIND](../../testbed/kind/README.md). + +## Development + +### Prepare the environment + +In this project we use *Makefile* to automate the most common tasks. You can find the list of all available commands in the [Makefile](../../Makefile). + +Here are some useful commands: + +- **APIs** If you have done some changes in the `/api` folder, you need to update the CRDs, generating again the manifests: + +```bash +make manifests +``` + +- **RBACs** If you have done some changes in the code that require new RBAC rules, you need to ***put the correct kubebuilder annotation*** in the code and then run the following command to update the RBAC rules: + +```bash +make rbac +``` + +**NOTE:** you can find more information about the kubebuilder annotation [here](https://book.kubebuilder.io/reference/markers/crd.html). 
+ +- **SUGGESTED:** in general, if you have done some changes in the code, our suggestion is to run the following command to update the manifests and the RBAC rules, and to format the code: + +```bash +make generate +``` + +### Build and Run your own FLUIDOS Node version + +:warning: **FIRST TIME ONLY:** you need to follow ALL 3 steps below only the first time you build your own FLUIDOS Node version. + +**OTHERWISE:** you can skip step 2 [Install your own FLUIDOS Node Helm chart](#2-install-your-own-fluidos-node-helm-chart-๐Ÿ› ๏ธ) and run only steps 1 and 3. + +Before starting, move to the `tools/development` folder and run the following command: + +```bash +cd tools/development +``` + +#### 1. Build and load in KIND the FLUIDOS Node images ๐Ÿ“ฆ + +Build your code and load the images in KIND. Run the following command: + +```bash +. ./build.sh [ ...] +``` + +where: + +- `` is your docker username (e.g., `/`) +- `` is the version of the component you want to build (e.g. `0.0.1`) +- `` is the component you want to build (e.g., `local-resource-manager`). If you do not specify any component, all the components will be built. + +**VALID COMPONENTS:** `local-resource-manager`, `rear-manager`, `rear-controller` + +#### 2. Install your own FLUIDOS Node Helm chart ๐Ÿ› ๏ธ + +Now upgrade the Helm chart with your FLUIDOS Node images. +Run the following command: + +```bash +. ./helm.sh [ ...] +``` + +where: + +- `` is your docker username (e.g., `/`) +- `` is the version of the component you want to build (e.g. `0.0.1`) +- `[ ...]` the list of components you want to override (e.g., `local-resource-manager`). + You need to specify :warning: **at least one component**, and they should be the same components you specified in the previous step [Build and load in KIND the FLUIDOS Node images](#1-build-and-load-in-kind-the-fluidos-node-images-๐Ÿ“ฆ). + If in the previous step you did not specify any component, you need to specify **all the components**. For example: + + ```bash + . 
.helm.sh local-resource-manager rear-manager rear-controller + ``` + +**VALID COMPONENTS:** `local-resource-manager`, `rear-manager`, `rear-controller` + +#### 3. Purge the FLUIDOS resources and CRs ๐Ÿงน + +```bash +. ./purge.sh +``` diff --git a/tools/development/build.sh b/tools/development/build.sh new file mode 100644 index 0000000..7cdceae --- /dev/null +++ b/tools/development/build.sh @@ -0,0 +1,33 @@ +#!/usr/bin/bash + +# Build and load the docker image +build_and_load() { + local COMPONENT="$1" + docker build -f ../../build/common/Dockerfile --build-arg COMPONENT=$COMPONENT -t $NAMESPACE/$COMPONENT:$VERSION ../../ + kind load docker-image $NAMESPACE/$COMPONENT:$VERSION --name=fluidos-provider + kind load docker-image $NAMESPACE/$COMPONENT:$VERSION --name=fluidos-consumer +} + +# Get the Docker namespace, version, and component from the command line +NAMESPACE="$1" +VERSION="$2" +COMPONENT="$3" +VALID_COMPONENTS=("rear-controller" "rear-manager" "local-resource-manager") + +# Validate input arguments +if [[ -z "$NAMESPACE" || -z "$VERSION" ]]; then + echo "Syntax error: ./build.sh [ ...]" + exit 1 +fi + +# Build for a specific component or for all components if not specified +if [[ -z "$COMPONENT" ]]; then + for item in "${VALID_COMPONENTS[@]}"; do + build_and_load "$item" + done +elif [[ " ${VALID_COMPONENTS[@]} " =~ " ${COMPONENT} " ]]; then + build_and_load "$COMPONENT" +else + echo "Error: Invalid component '$COMPONENT'. Valid components are: ${VALID_COMPONENTS[@]}" + exit 1 +fi diff --git a/tools/development/helm.sh b/tools/development/helm.sh new file mode 100644 index 0000000..917a159 --- /dev/null +++ b/tools/development/helm.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +# Check that there are at least 3 arguments (username, version, at least one component) +if [[ "$#" -lt 3 ]]; then + echo "Syntaxt error: insufficient arguments." 
+ echo "Use: $0 DOCKER_USERNAME VERSIONE component [component2 ...]" + exit 1 +fi + +DOCKER_USERNAME="$1" +VERSION="$2" + +# Remove the first two arguments (username and version) to handle only components and images +shift 2 + +# Associative array to associate components to their corresponding Helm values +declare -A COMPONENT_MAP +COMPONENT_MAP["rear-controller"]="rearController.imageName" +COMPONENT_MAP["rear-manager"]="rearManager.imageName" +COMPONENT_MAP["local-resource-manager"]="localResourceManager.imageName" + +# Initialize a variable to store the --set options +IMAGE_SET_STRING="" + +# Iterates over the arguments passed to the script +for component in "$@"; do + # Check that the component is valid + if [[ -z "${COMPONENT_MAP[$component]}" ]]; then + echo "Error: component '$component' not recognized." + continue + fi + + helm_key="${COMPONENT_MAP[$component]}" + # Build the --set string using the map + IMAGE_SET_STRING="$IMAGE_SET_STRING --set $helm_key=$DOCKER_USERNAME/$component" +done + +export KUBECONFIG=../../testbed/kind/consumer/config + +kubectl apply -f ../../deployments/node/crds + +# Run the helm upgrade command +helm upgrade node fluidos/node -n fluidos --reuse-values \ + --set tag="$VERSION" \ + $IMAGE_SET_STRING + +export KUBECONFIG=../../testbed/kind/provider/config + +kubectl apply -f ../../deployments/node/crds + +# Run the helm upgrade command +helm upgrade node fluidos/node -n fluidos --reuse-values \ + --set tag="$VERSION" \ + $IMAGE_SET_STRING + + diff --git a/tools/development/purge.sh b/tools/development/purge.sh new file mode 100644 index 0000000..fff009c --- /dev/null +++ b/tools/development/purge.sh @@ -0,0 +1,10 @@ + +kubectl delete solvers.nodecore.fluidos.eu -n fluidos --all +kubectl delete discoveries.advertisement.fluidos.eu -n fluidos --all +kubectl delete reservations.reservation.fluidos.eu -n fluidos --all +kubectl delete contracts.reservation.fluidos.eu -n fluidos --all +kubectl delete 
peeringcandidates.advertisement.fluidos.eu -n fluidos --all +kubectl delete transactions.reservation.fluidos.eu -n fluidos --all +kubectl delete allocations.nodecore.fluidos.eu -n fluidos --all +kubectl delete flavours.nodecore.fluidos.eu -n fluidos --all +kubectl delete pod -n fluidos --all \ No newline at end of file