From cf1adf51d6869e724e9cf37d9ca710dd6bb7d0e1 Mon Sep 17 00:00:00 2001
From: Andrea Colli-Vignarelli <48754766+andreacv98@users.noreply.github.com>
Date: Tue, 26 Mar 2024 16:54:13 +0100
Subject: [PATCH] Prototype of user-friendly installation script and its documentation (#26)

---
 docs/installation/installation.md           |  28 +++
 quickstart/kind/configs/consumer.yaml       |  14 ++
 quickstart/kind/configs/provider.yaml       |  14 ++
 quickstart/kind/configs/standard.yaml       |  14 ++
 quickstart/utils/consumer-values-nolrm.yaml | 151 ++++++++++++
 quickstart/utils/consumer-values.yaml       | 151 ++++++++++++
 quickstart/utils/metrics-server.yaml        | 197 ++++++++++++++++
 quickstart/utils/provider-values-nolrm.yaml | 151 ++++++++++++
 quickstart/utils/provider-values.yaml       | 151 ++++++++++++
 tools/scripts/environment.sh                | 184 +++++++++++++++
 tools/scripts/installation.sh               | 245 ++++++++++++++++++++
 tools/scripts/requirements.sh               | 203 ++++++++++++++++
 tools/scripts/setup.sh                      | 166 +++++++++++++
 tools/scripts/utils.sh                      |  14 ++
 14 files changed, 1683 insertions(+)
 create mode 100644 quickstart/kind/configs/consumer.yaml
 create mode 100644 quickstart/kind/configs/provider.yaml
 create mode 100644 quickstart/kind/configs/standard.yaml
 create mode 100644 quickstart/utils/consumer-values-nolrm.yaml
 create mode 100644 quickstart/utils/consumer-values.yaml
 create mode 100644 quickstart/utils/metrics-server.yaml
 create mode 100644 quickstart/utils/provider-values-nolrm.yaml
 create mode 100644 quickstart/utils/provider-values.yaml
 create mode 100644 tools/scripts/environment.sh
 create mode 100644 tools/scripts/installation.sh
 create mode 100644 tools/scripts/requirements.sh
 create mode 100644 tools/scripts/setup.sh
 create mode 100644 tools/scripts/utils.sh

diff --git a/docs/installation/installation.md b/docs/installation/installation.md
index e69de29..23a1d10 100644
--- a/docs/installation/installation.md
+++ b/docs/installation/installation.md
@@ -0,0 +1,28 @@
+# Installation
+
+A quick script for installing the FLUIDOS Node is available. Currently, the script supports installation on KIND clusters, with plans to extend support to generic Kubernetes clusters in the near future.
+
+---
+
+**⚠️ ATTENTION:** The script is currently in an experimental phase, so it may not work as expected. If an issue arises, the script may be tricky to diagnose and terminate, since many sub-tasks run in the background. We are aware of these issues and are actively working to resolve them.
+
+If you want to use a **working and tested script** to test the FLUIDOS Node, please refer to the [**Testbed**](../testbed/README.md) section.
+
+---
+
+To execute the script, use the following command:
+
+```bash
+cd tools/scripts
+. ./setup.sh
+```
+
+No options are available through the CLI; instead, you choose the installation mode interactively while the script runs.
+The supported options are:
+
+- `1` to install the FLUIDOS Node as the demo testbed through KIND
+- `2` to install the FLUIDOS Node in n consumer clusters and m provider clusters through KIND
+- `3` to install the FLUIDOS Node in n clusters through their KUBECONFIG files
+  **DISCLAIMER:** in this case, every Kubernetes cluster you pass to the script must have at least one node labeled with `node-role.fluidos.eu/worker: "true"`; in addition, on the provider clusters you can choose which nodes expose their Kubernetes resources by labeling them with `node-role.fluidos.eu/resources: "true"` (see the example below).
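+
+  For example (assuming a hypothetical node named `my-node`), the labels can be applied with `kubectl`:
+
+  ```bash
+  # Mark a node as a FLUIDOS worker (required in every cluster)
+  kubectl label node my-node node-role.fluidos.eu/worker="true"
+  # On provider clusters, optionally mark the nodes whose resources should be exposed
+  kubectl label node my-node node-role.fluidos.eu/resources="true"
+  ```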
+ +For each option, you can choose to install from either the official remote FLUIDOS repository or the local repository, by building it at the moment. diff --git a/quickstart/kind/configs/consumer.yaml b/quickstart/kind/configs/consumer.yaml new file mode 100644 index 0000000..17517ce --- /dev/null +++ b/quickstart/kind/configs/consumer.yaml @@ -0,0 +1,14 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: + - role: control-plane + image: kindest/node:v1.28.0 + - role: worker + labels: + node-role.fluidos.eu/resources: "true" + node-role.fluidos.eu/worker: "true" + image: kindest/node:v1.28.0 + - role: worker + image: kindest/node:v1.28.0 + labels: + node-role.fluidos.eu/resources: "true" \ No newline at end of file diff --git a/quickstart/kind/configs/provider.yaml b/quickstart/kind/configs/provider.yaml new file mode 100644 index 0000000..17517ce --- /dev/null +++ b/quickstart/kind/configs/provider.yaml @@ -0,0 +1,14 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: + - role: control-plane + image: kindest/node:v1.28.0 + - role: worker + labels: + node-role.fluidos.eu/resources: "true" + node-role.fluidos.eu/worker: "true" + image: kindest/node:v1.28.0 + - role: worker + image: kindest/node:v1.28.0 + labels: + node-role.fluidos.eu/resources: "true" \ No newline at end of file diff --git a/quickstart/kind/configs/standard.yaml b/quickstart/kind/configs/standard.yaml new file mode 100644 index 0000000..43a4d7c --- /dev/null +++ b/quickstart/kind/configs/standard.yaml @@ -0,0 +1,14 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: + - role: control-plane + image: kindest/node:v1.28.0 + - role: worker + labels: + node-role.fluidos.eu/resources: "true" + node-role.fluidos.eu/worker: "true" + image: kindest/node:v1.28.0 + - role: worker + image: kindest/node:v1.28.0 + labels: + node-role.fluidos.eu/resources: "true" diff --git a/quickstart/utils/consumer-values-nolrm.yaml b/quickstart/utils/consumer-values-nolrm.yaml new file mode 100644 index 0000000..74f7b97 --- /dev/null +++ b/quickstart/utils/consumer-values-nolrm.yaml @@ -0,0 +1,151 @@ +# Default values for fluidos-node. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Images' tag to select a development version of fluidos-node instead of a release +tag: "" +# -- The pullPolicy for fluidos-node pods. +pullPolicy: "IfNotPresent" + +common: + # -- NodeSelector for all fluidos-node pods + nodeSelector: { + node-role.fluidos.eu/worker: "true" + } + # -- Tolerations for all fluidos-node pods + tolerations: [] + # -- Affinity for all fluidos-node pods + affinity: {} + # -- Extra arguments for all fluidos-node pods + extraArgs: [] + +localResourceManager: + # -- The number of REAR Controller, which can be increased for active/passive high availability. + replicas: 0 + pod: + # -- Annotations for the local-resource-manager pod. + annotations: {} + # -- Labels for the local-resource-manager pod. + labels: {} + # -- Extra arguments for the local-resource-manager pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the local-resource-manager pod. + resources: + limits: {} + requests: {} + imageName: "ghcr.io/fluidos-project/local-resource-manager" + config: + # -- Label used to identify the nodes from which resources are collected. + nodeResourceLabel: "node-role.fluidos.eu/resources" + # -- This flag defines the resource type of the generated flavours. 
+ resourceType: "k8s-fluidos" + flavour: + # -- The minimum number of CPUs that can be requested to purchase a flavour. + cpuMin: "0" + # -- The minimum amount of memory that can be requested to purchase a flavour. + memoryMin: "0" + # -- The CPU step that must be respected when requesting a flavour through a Flavour Selector. + cpuStep: "1000m" + # -- The memory step that must be respected when requesting a flavour through a Flavour Selector. + memoryStep: "100Mi" + +rearManager: + # -- The number of REAR Manager, which can be increased for active/passive high availability. + replicas: 1 + pod: + # -- Annotations for the rear-manager pod. + annotations: {} + # -- Labels for the rear-manager pod. + labels: {} + # -- Extra arguments for the rear-manager pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the rear-manager pod. + resources: + limits: {} + requests: {} + imageName: "ghcr.io/fluidos-project/rear-manager" + +rearController: + # -- The number of REAR Controller, which can be increased for active/passive high availability. + replicas: 1 + pod: + # -- Annotations for the rear-controller pod. + annotations: {} + # -- Labels for the rear-controller pod. + labels: {} + # -- Extra arguments for the rear-controller pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the rear-controller pod. + resources: + limits: {} + requests: {} + imageName: "ghcr.io/fluidos-project/rear-controller" + service: + grpc: + name: "grpc" + # -- Kubernetes service used to expose the gRPC Server to liqo. + type: "ClusterIP" + # -- Annotations for the gRPC service. + annotations: {} + # -- Labels for the gRPC service. + labels: {} + # -- The gRPC port used by Liqo to connect with the Gateway of the rear-controller to obtain the Contract resources for a given consumer ClusterID. + port: 2710 + # -- The target port used by the gRPC service. + targetPort: 2710 + gateway: + name: "gateway" + # -- Kubernetes service to be used to expose the REAR gateway. + type: "NodePort" + # -- Annotations for the REAR gateway service. + annotations: {} + # -- Labels for the REAR gateway service. + labels: {} + # -- Options valid if service type is NodePort. + nodePort: + # -- Force the port used by the NodePort service. + port: 30000 + # -- Options valid if service type is LoadBalancer. + loadBalancer: + # -- Override the IP here if service type is LoadBalancer and you want to use a specific IP address, e.g., because you want a static LB. + ip: "" + # -- The port used by the rear-controller to expose the REAR Gateway. + port: 3004 + # -- The target port used by the REAR Gateway service. + targetPort: 3004 + +networkManager: + # -- The number of Network Manager, which can be increased for active/passive high availability. + replicas: 1 + pod: + # -- Annotations for the network-manager pod. + annotations: {} + # -- Labels for the network-manager pod. + labels: {} + # -- Extra arguments for the network-manager pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the network-manager pod. + resources: + limits: {} + requests: {} + # -- The resource image to be used by the network-manager pod. + imageName: "ghcr.io/fluidos/network-manager" + configMaps: + providers: + # -- The name of the ConfigMap containing the list of the FLUIDOS Providers and the default FLUIDOS Provider (SuperNode or Catalogue). 
+ name: "fluidos-network-manager-config" + # -- The IP List of Local knwon FLUIDOS Nodes separated by commas. + local: + # -- The IP List of Remote known FLUIDOS Nodes separated by commas. + remote: + # -- The IP List of SuperNodes separated by commas. + default: + nodeIdentity: + # -- The name of the ConfigMap containing the FLUIDOS Node identity info. + name: "fluidos-network-manager-identity" + # -- The domain name of the FLUIDOS closed domani: It represents for instance the Enterprise and it is used to generate the FQDN of the owned FLUIDOS Nodes + domain: "fluidos.eu" + # -- The IP address of the FLUIDOS Node. It can be public or private, depending on the network configuration and it corresponds to the IP address to reach the Network Manager from the outside of the cluster. + ip: + # -- The NodeID is a UUID that identifies the FLUIDOS Node. It is used to generate the FQDN of the owned FLUIDOS Nodes and it is unique in the FLUIDOS closed domain + nodeID: diff --git a/quickstart/utils/consumer-values.yaml b/quickstart/utils/consumer-values.yaml new file mode 100644 index 0000000..f00beec --- /dev/null +++ b/quickstart/utils/consumer-values.yaml @@ -0,0 +1,151 @@ +# Default values for fluidos-node. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Images' tag to select a development version of fluidos-node instead of a release +tag: "" +# -- The pullPolicy for fluidos-node pods. +pullPolicy: "IfNotPresent" + +common: + # -- NodeSelector for all fluidos-node pods + nodeSelector: { + node-role.fluidos.eu/worker: "true" + } + # -- Tolerations for all fluidos-node pods + tolerations: [] + # -- Affinity for all fluidos-node pods + affinity: {} + # -- Extra arguments for all fluidos-node pods + extraArgs: [] + +localResourceManager: + # -- The number of REAR Controller, which can be increased for active/passive high availability. + replicas: 1 + pod: + # -- Annotations for the local-resource-manager pod. + annotations: {} + # -- Labels for the local-resource-manager pod. + labels: {} + # -- Extra arguments for the local-resource-manager pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the local-resource-manager pod. + resources: + limits: {} + requests: {} + imageName: "ghcr.io/fluidos-project/local-resource-manager" + config: + # -- Label used to identify the nodes from which resources are collected. + nodeResourceLabel: "node-role.fluidos.eu/resources" + # -- This flag defines the resource type of the generated flavours. + resourceType: "k8s-fluidos" + flavour: + # -- The minimum number of CPUs that can be requested to purchase a flavour. + cpuMin: "0" + # -- The minimum amount of memory that can be requested to purchase a flavour. + memoryMin: "0" + # -- The CPU step that must be respected when requesting a flavour through a Flavour Selector. + cpuStep: "1000m" + # -- The memory step that must be respected when requesting a flavour through a Flavour Selector. + memoryStep: "100Mi" + +rearManager: + # -- The number of REAR Manager, which can be increased for active/passive high availability. + replicas: 1 + pod: + # -- Annotations for the rear-manager pod. + annotations: {} + # -- Labels for the rear-manager pod. + labels: {} + # -- Extra arguments for the rear-manager pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the rear-manager pod. 
+ resources: + limits: {} + requests: {} + imageName: "ghcr.io/fluidos-project/rear-manager" + +rearController: + # -- The number of REAR Controller, which can be increased for active/passive high availability. + replicas: 1 + pod: + # -- Annotations for the rear-controller pod. + annotations: {} + # -- Labels for the rear-controller pod. + labels: {} + # -- Extra arguments for the rear-controller pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the rear-controller pod. + resources: + limits: {} + requests: {} + imageName: "ghcr.io/fluidos-project/rear-controller" + service: + grpc: + name: "grpc" + # -- Kubernetes service used to expose the gRPC Server to liqo. + type: "ClusterIP" + # -- Annotations for the gRPC service. + annotations: {} + # -- Labels for the gRPC service. + labels: {} + # -- The gRPC port used by Liqo to connect with the Gateway of the rear-controller to obtain the Contract resources for a given consumer ClusterID. + port: 2710 + # -- The target port used by the gRPC service. + targetPort: 2710 + gateway: + name: "gateway" + # -- Kubernetes service to be used to expose the REAR gateway. + type: "NodePort" + # -- Annotations for the REAR gateway service. + annotations: {} + # -- Labels for the REAR gateway service. + labels: {} + # -- Options valid if service type is NodePort. + nodePort: + # -- Force the port used by the NodePort service. + port: 30000 + # -- Options valid if service type is LoadBalancer. + loadBalancer: + # -- Override the IP here if service type is LoadBalancer and you want to use a specific IP address, e.g., because you want a static LB. + ip: "" + # -- The port used by the rear-controller to expose the REAR Gateway. + port: 3004 + # -- The target port used by the REAR Gateway service. + targetPort: 3004 + +networkManager: + # -- The number of Network Manager, which can be increased for active/passive high availability. + replicas: 1 + pod: + # -- Annotations for the network-manager pod. + annotations: {} + # -- Labels for the network-manager pod. + labels: {} + # -- Extra arguments for the network-manager pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the network-manager pod. + resources: + limits: {} + requests: {} + # -- The resource image to be used by the network-manager pod. + imageName: "ghcr.io/fluidos/network-manager" + configMaps: + providers: + # -- The name of the ConfigMap containing the list of the FLUIDOS Providers and the default FLUIDOS Provider (SuperNode or Catalogue). + name: "fluidos-network-manager-config" + # -- The IP List of Local knwon FLUIDOS Nodes separated by commas. + local: + # -- The IP List of Remote known FLUIDOS Nodes separated by commas. + remote: + # -- The IP List of SuperNodes separated by commas. + default: + nodeIdentity: + # -- The name of the ConfigMap containing the FLUIDOS Node identity info. + name: "fluidos-network-manager-identity" + # -- The domain name of the FLUIDOS closed domani: It represents for instance the Enterprise and it is used to generate the FQDN of the owned FLUIDOS Nodes + domain: "fluidos.eu" + # -- The IP address of the FLUIDOS Node. It can be public or private, depending on the network configuration and it corresponds to the IP address to reach the Network Manager from the outside of the cluster. + ip: + # -- The NodeID is a UUID that identifies the FLUIDOS Node. 
It is used to generate the FQDN of the owned FLUIDOS Nodes and it is unique in the FLUIDOS closed domain + nodeID: diff --git a/quickstart/utils/metrics-server.yaml b/quickstart/utils/metrics-server.yaml new file mode 100644 index 0000000..d84b7e2 --- /dev/null +++ b/quickstart/utils/metrics-server.yaml @@ -0,0 +1,197 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:aggregated-metrics-reader +rules: +- apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +rules: +- apiGroups: + - "" + resources: + - nodes/metrics + verbs: + - get +- apiGroups: + - "" + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:metrics-server +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + ports: + - name: https + port: 443 + protocol: TCP + targetPort: https + selector: + k8s-app: metrics-server +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: metrics-server + strategy: + rollingUpdate: + maxUnavailable: 0 + template: + metadata: + labels: + k8s-app: metrics-server + spec: + containers: + - args: + - --cert-dir=/tmp + - --secure-port=4443 + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + - --kubelet-insecure-tls + image: registry.k8s.io/metrics-server/metrics-server:v0.6.4 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /livez + port: https + scheme: HTTPS + periodSeconds: 10 + name: metrics-server + ports: + - containerPort: 4443 + name: https + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readyz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + periodSeconds: 10 + resources: + requests: + cpu: 100m + memory: 200Mi + securityContext: + 
allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /tmp + name: tmp-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: metrics-server + volumes: + - emptyDir: {} + name: tmp-dir +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + labels: + k8s-app: metrics-server + name: v1beta1.metrics.k8s.io +spec: + group: metrics.k8s.io + groupPriorityMinimum: 100 + insecureSkipTLSVerify: true + service: + name: metrics-server + namespace: kube-system + version: v1beta1 + versionPriority: 100 diff --git a/quickstart/utils/provider-values-nolrm.yaml b/quickstart/utils/provider-values-nolrm.yaml new file mode 100644 index 0000000..0402450 --- /dev/null +++ b/quickstart/utils/provider-values-nolrm.yaml @@ -0,0 +1,151 @@ +# Default values for fluidos-node. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Images' tag to select a development version of fluidos-node instead of a release +tag: "" +# -- The pullPolicy for fluidos-node pods. +pullPolicy: "IfNotPresent" + +common: + # -- NodeSelector for all fluidos-node pods + nodeSelector: { + node-role.fluidos.eu/worker: "true" + } + # -- Tolerations for all fluidos-node pods + tolerations: [] + # -- Affinity for all fluidos-node pods + affinity: {} + # -- Extra arguments for all fluidos-node pods + extraArgs: [] + +localResourceManager: + # -- The number of REAR Controller, which can be increased for active/passive high availability. + replicas: 0 + pod: + # -- Annotations for the local-resource-manager pod. + annotations: {} + # -- Labels for the local-resource-manager pod. + labels: {} + # -- Extra arguments for the local-resource-manager pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the local-resource-manager pod. + resources: + limits: {} + requests: {} + imageName: "ghcr.io/fluidos-project/local-resource-manager" + config: + # -- Label used to identify the nodes from which resources are collected. + nodeResourceLabel: "node-role.fluidos.eu/resources" + # -- This flag defines the resource type of the generated flavours. + resourceType: "k8s-fluidos" + flavour: + # -- The minimum number of CPUs that can be requested to purchase a flavour. + cpuMin: "0" + # -- The minimum amount of memory that can be requested to purchase a flavour. + memoryMin: "0" + # -- The CPU step that must be respected when requesting a flavour through a Flavour Selector. + cpuStep: "1000m" + # -- The memory step that must be respected when requesting a flavour through a Flavour Selector. + memoryStep: "100Mi" + +rearManager: + # -- The number of REAR Manager, which can be increased for active/passive high availability. + replicas: 1 + pod: + # -- Annotations for the rear-manager pod. + annotations: {} + # -- Labels for the rear-manager pod. + labels: {} + # -- Extra arguments for the rear-manager pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the rear-manager pod. + resources: + limits: {} + requests: {} + imageName: "ghcr.io/fluidos-project/rear-manager" + +rearController: + # -- The number of REAR Controller, which can be increased for active/passive high availability. + replicas: 1 + pod: + # -- Annotations for the rear-controller pod. + annotations: {} + # -- Labels for the rear-controller pod. 
+ labels: {} + # -- Extra arguments for the rear-controller pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the rear-controller pod. + resources: + limits: {} + requests: {} + imageName: "ghcr.io/fluidos-project/rear-controller" + service: + grpc: + name: "grpc" + # -- Kubernetes service used to expose the gRPC Server to liqo. + type: "ClusterIP" + # -- Annotations for the gRPC service. + annotations: {} + # -- Labels for the gRPC service. + labels: {} + # -- The gRPC port used by Liqo to connect with the Gateway of the rear-controller to obtain the Contract resources for a given consumer ClusterID. + port: 2710 + # -- The target port used by the gRPC service. + targetPort: 2710 + gateway: + name: "gateway" + # -- Kubernetes service to be used to expose the REAR gateway. + type: "NodePort" + # -- Annotations for the REAR gateway service. + annotations: {} + # -- Labels for the REAR gateway service. + labels: {} + # -- Options valid if service type is NodePort. + nodePort: + # -- Force the port used by the NodePort service. + port: 30001 + # -- Options valid if service type is LoadBalancer. + loadBalancer: + # -- Override the IP here if service type is LoadBalancer and you want to use a specific IP address, e.g., because you want a static LB. + ip: "" + # -- The port used by the rear-controller to expose the REAR Gateway. + port: 3004 + # -- The target port used by the REAR Gateway service. + targetPort: 3004 + +networkManager: + # -- The number of Network Manager, which can be increased for active/passive high availability. + replicas: 1 + pod: + # -- Annotations for the network-manager pod. + annotations: {} + # -- Labels for the network-manager pod. + labels: {} + # -- Extra arguments for the network-manager pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the network-manager pod. + resources: + limits: {} + requests: {} + # -- The resource image to be used by the network-manager pod. + imageName: "ghcr.io/fluidos/network-manager" + configMaps: + providers: + # -- The name of the ConfigMap containing the list of the FLUIDOS Providers and the default FLUIDOS Provider (SuperNode or Catalogue). + name: "fluidos-network-manager-config" + # -- The IP List of Local knwon FLUIDOS Nodes separated by commas. + local: "host.docker.internal:9000" + # -- The IP List of Remote known FLUIDOS Nodes separated by commas. + remote: + # -- The IP List of SuperNodes separated by commas. + default: + nodeIdentity: + # -- The name of the ConfigMap containing the FLUIDOS Node identity info. + name: "fluidos-network-manager-identity" + # -- The domain name of the FLUIDOS closed domani: It represents for instance the Enterprise and it is used to generate the FQDN of the owned FLUIDOS Nodes + domain: "fluidos.eu" + # -- The IP address of the FLUIDOS Node. It can be public or private, depending on the network configuration and it corresponds to the IP address to reach the Network Manager from the outside of the cluster. + ip: + # -- The NodeID is a UUID that identifies the FLUIDOS Node. It is used to generate the FQDN of the owned FLUIDOS Nodes and it is unique in the FLUIDOS closed domain + nodeID: diff --git a/quickstart/utils/provider-values.yaml b/quickstart/utils/provider-values.yaml new file mode 100644 index 0000000..37346f9 --- /dev/null +++ b/quickstart/utils/provider-values.yaml @@ -0,0 +1,151 @@ +# Default values for fluidos-node. 
+# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Images' tag to select a development version of fluidos-node instead of a release +tag: "" +# -- The pullPolicy for fluidos-node pods. +pullPolicy: "IfNotPresent" + +common: + # -- NodeSelector for all fluidos-node pods + nodeSelector: { + node-role.fluidos.eu/worker: "true" + } + # -- Tolerations for all fluidos-node pods + tolerations: [] + # -- Affinity for all fluidos-node pods + affinity: {} + # -- Extra arguments for all fluidos-node pods + extraArgs: [] + +localResourceManager: + # -- The number of REAR Controller, which can be increased for active/passive high availability. + replicas: 1 + pod: + # -- Annotations for the local-resource-manager pod. + annotations: {} + # -- Labels for the local-resource-manager pod. + labels: {} + # -- Extra arguments for the local-resource-manager pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the local-resource-manager pod. + resources: + limits: {} + requests: {} + imageName: "ghcr.io/fluidos-project/local-resource-manager" + config: + # -- Label used to identify the nodes from which resources are collected. + nodeResourceLabel: "node-role.fluidos.eu/resources" + # -- This flag defines the resource type of the generated flavours. + resourceType: "k8s-fluidos" + flavour: + # -- The minimum number of CPUs that can be requested to purchase a flavour. + cpuMin: "0" + # -- The minimum amount of memory that can be requested to purchase a flavour. + memoryMin: "0" + # -- The CPU step that must be respected when requesting a flavour through a Flavour Selector. + cpuStep: "1000m" + # -- The memory step that must be respected when requesting a flavour through a Flavour Selector. + memoryStep: "100Mi" + +rearManager: + # -- The number of REAR Manager, which can be increased for active/passive high availability. + replicas: 1 + pod: + # -- Annotations for the rear-manager pod. + annotations: {} + # -- Labels for the rear-manager pod. + labels: {} + # -- Extra arguments for the rear-manager pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the rear-manager pod. + resources: + limits: {} + requests: {} + imageName: "ghcr.io/fluidos-project/rear-manager" + +rearController: + # -- The number of REAR Controller, which can be increased for active/passive high availability. + replicas: 1 + pod: + # -- Annotations for the rear-controller pod. + annotations: {} + # -- Labels for the rear-controller pod. + labels: {} + # -- Extra arguments for the rear-controller pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the rear-controller pod. + resources: + limits: {} + requests: {} + imageName: "ghcr.io/fluidos-project/rear-controller" + service: + grpc: + name: "grpc" + # -- Kubernetes service used to expose the gRPC Server to liqo. + type: "ClusterIP" + # -- Annotations for the gRPC service. + annotations: {} + # -- Labels for the gRPC service. + labels: {} + # -- The gRPC port used by Liqo to connect with the Gateway of the rear-controller to obtain the Contract resources for a given consumer ClusterID. + port: 2710 + # -- The target port used by the gRPC service. + targetPort: 2710 + gateway: + name: "gateway" + # -- Kubernetes service to be used to expose the REAR gateway. + type: "NodePort" + # -- Annotations for the REAR gateway service. 
+ annotations: {} + # -- Labels for the REAR gateway service. + labels: {} + # -- Options valid if service type is NodePort. + nodePort: + # -- Force the port used by the NodePort service. + port: 30001 + # -- Options valid if service type is LoadBalancer. + loadBalancer: + # -- Override the IP here if service type is LoadBalancer and you want to use a specific IP address, e.g., because you want a static LB. + ip: "" + # -- The port used by the rear-controller to expose the REAR Gateway. + port: 3004 + # -- The target port used by the REAR Gateway service. + targetPort: 3004 + +networkManager: + # -- The number of Network Manager, which can be increased for active/passive high availability. + replicas: 1 + pod: + # -- Annotations for the network-manager pod. + annotations: {} + # -- Labels for the network-manager pod. + labels: {} + # -- Extra arguments for the network-manager pod. + extraArgs: [] + # -- Resource requests and limits (https://kubernetes.io/docs/user-guide/compute-resources/) for the network-manager pod. + resources: + limits: {} + requests: {} + # -- The resource image to be used by the network-manager pod. + imageName: "ghcr.io/fluidos/network-manager" + configMaps: + providers: + # -- The name of the ConfigMap containing the list of the FLUIDOS Providers and the default FLUIDOS Provider (SuperNode or Catalogue). + name: "fluidos-network-manager-config" + # -- The IP List of Local knwon FLUIDOS Nodes separated by commas. + local: "host.docker.internal:9000" + # -- The IP List of Remote known FLUIDOS Nodes separated by commas. + remote: + # -- The IP List of SuperNodes separated by commas. + default: + nodeIdentity: + # -- The name of the ConfigMap containing the FLUIDOS Node identity info. + name: "fluidos-network-manager-identity" + # -- The domain name of the FLUIDOS closed domani: It represents for instance the Enterprise and it is used to generate the FQDN of the owned FLUIDOS Nodes + domain: "fluidos.eu" + # -- The IP address of the FLUIDOS Node. It can be public or private, depending on the network configuration and it corresponds to the IP address to reach the Network Manager from the outside of the cluster. + ip: + # -- The NodeID is a UUID that identifies the FLUIDOS Node. It is used to generate the FQDN of the owned FLUIDOS Nodes and it is unique in the FLUIDOS closed domain + nodeID: diff --git a/tools/scripts/environment.sh b/tools/scripts/environment.sh new file mode 100644 index 0000000..899b45f --- /dev/null +++ b/tools/scripts/environment.sh @@ -0,0 +1,184 @@ +#!/usr/bin/bash + +# Enable job control +set -m + + +SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" +SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" + +# shellcheck disable=SC1091 +source "$SCRIPT_DIR"/utils.sh + +# PIDs of the processes in background +pids=() + +# Function to handle errors +handle_error() { + echo "An error occurred. Exiting..." + for pid in "${pids[@]}"; do + # Kill all the processes in background + kill "$pid" 2>/dev/null + done + return 1 +} + +# Function to handle exit +handle_exit() { + echo "Exiting..." + for pid in "${pids[@]}"; do + # Kill all the processes in background + kill "$pid" 2>/dev/null + done + # Ask the user if really wants to exit + read -r -p "Do you really want to exit? 
[y/N] " answer + if [ "$answer" == "y" ]; then + return 0 + fi +} + +# Create KIND clusters +# Parameters: +# $1: consumer JSON tmp file +# $2: provider JSON tmp file +create_kind_clusters() { + + # Get consumer JSON tmp file from parameter + consumer_json=$1 + + # Get provider JSON tmp file from parameter + provider_json=$2 + + print_title "Create KIND clusters..." + + # Map of clusters: + # key: cluster name + # Value: Dictionary with IP of control plane and kubeconfig file + unset clusters + declare -A clusters + + # Get parameter to know wich clusters configuration follow + # customkind: n consumers and m providers + if [ "$3" == "customkind" ]; then + # Create n consumer cluster and m provider clusters + # Iterate over consumer cluster creation + for i in $(seq 1 "$4"); do + ( + # Cluster name + name="fluidos-consumer-$i" + # Print cluster creation information + echo "Creating cluster $name..." + # Set the role of the cluster + role="consumer" + # Create the cluster + kind create cluster --name "$name" --config "$SCRIPT_DIR"/../../quickstart/kind/configs/standard.yaml --kubeconfig "$SCRIPT_DIR"/"$name"-config -q + # Get the IP of the control plane of the cluster + controlplane_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$name"-control-plane) + # Write the cluster info to a file + echo "$name: {\"ip\":\"$controlplane_ip\", \"kubeconfig\":\"$SCRIPT_DIR/$name-config\", \"role\":\"$role\"}" >> "$consumer_json" + ) & + # Save the PID of the process + pids+=($!) + done + # Iterate over provider clusters creation + for i in $(seq 1 "$5"); do + ( + # Cluster name + name="fluidos-provider-$i" + # Print cluster creation information + echo "Creating cluster $name..." + # Set the role of the cluster + role="provider" + # Create the cluster + kind create cluster --name "$name" --config "$SCRIPT_DIR"/../../quickstart/kind/configs/standard.yaml --kubeconfig "$SCRIPT_DIR"/"$name"-config -q + # Get the IP of the control plane of the cluster + controlplane_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$name"-control-plane) + # Write the cluster info to a file + echo "$name: {\"ip\":\"$controlplane_ip\", \"kubeconfig\":\"$SCRIPT_DIR/$name-config\", \"role\":\"$role\"}" >> "$provider_json" + ) & + # Save the PID of the process + pids+=($!) + done + + # Wait for all the processes to finish + for pid in "${pids[@]}"; do + wait "$pid" + done + + else + echo "Invalid parameter." + exit 1 + fi + + print_title "KIND clusters created successfully." + + # Return the clusters + echo "${clusters[@]}" +} + +# Get clusters from KUBECONFIG files +# Parameters: +# $1: consumer JSON tmp file +# $2: provider JSON tmp file +get_clusters() { + + # Get consumer JSON tmp file from parameter + consumer_json=$1 + + # Get provider JSON tmp file from parameter + provider_json=$2 + + print_title "CONSUMER CLUSTERS" + + insert_clusters "$consumer_json" "consumer" + + print_title "PROVIDER CLUSTERS" + + insert_clusters "$provider_json" "provider" +} + +# Insert clusters into proper file +# Parameters: +# $1: JSON file +# $2: role +insert_clusters() { + + # Get JSON file from parameter + json_file=$1 + + # Get role from parameter + role=$2 + + while true; do + # Ask the user for the KUBECONFIG file path + echo "Insert the KUBECONFIG file path of a $role cluster or press Enter to exit:" + read -r kubeconfig_path + + # Exit the cycle if the user press Enter + if [ -z "$kubeconfig_path" ]; then + break + fi + + # Check if the file exists + if [ ! 
-f "$kubeconfig_path" ]; then + echo "Errore: il file $kubeconfig_path non esiste." + continue + fi + + # Validate KUBECONFIG + if ! kubectl config view --kubeconfig="$kubeconfig_path" &> /dev/null; then + echo "Error: file $kubeconfig_path is not a valid KUBECONFIG file." + continue + fi + + # Get name of the cluster + cluster_name=$(kubectl config view --kubeconfig="$kubeconfig_path" -o=jsonpath='{.current-context}') + + # Get the IP of a random node in the cluster + node_ip=$(kubectl get nodes --kubeconfig="$kubeconfig_path" -o jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}') + + # Save the cluster info into json_file file + echo "$cluster_name: {\"ip\":\"$node_ip\", \"kubeconfig\":\"$kubeconfig_path\", \"role\":\"$role\"}" >> "$json_file" + done + +} \ No newline at end of file diff --git a/tools/scripts/installation.sh b/tools/scripts/installation.sh new file mode 100644 index 0000000..ab0e726 --- /dev/null +++ b/tools/scripts/installation.sh @@ -0,0 +1,245 @@ +#!/usr/bin/bash + +SCRIPT_PATH=$(realpath "${BASH_SOURCE[0]}") +SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" + +# shellcheck disable=SC1091 +source "$SCRIPT_DIR"/utils.sh + +declare -A providers_ips + +# PIDs of the processes in background +pids=() + +# Function to handle errors +handle_error() { + echo "An error occurred. Exiting..." + for pid in "${pids[@]}"; do + # Kill all the processes in background + kill "$pid" 2>/dev/null + done + read -r -p "All the processes in background have been killed. Press enter to exit." + return 1 +} + +# Function to handle exit +handle_exit() { + echo "Exiting..." + for pid in "${pids[@]}"; do + # Kill all the processes in background + kill "$pid" 2>/dev/null + done + # Ask the user if really wants to exit + read -r -p "Do you really want to exit? 
[y/N] " answer + if [ "$answer" == "y" ]; then + return 0 + fi +} + +# Build and load the docker image +build_and_load() { + local COMPONENT="$1" + local NAMESPACE="$2" + local VERSION="$3" + # Build the docker image + docker build -q -f "$SCRIPT_DIR"/../../build/common/Dockerfile --build-arg COMPONENT="$COMPONENT" -t "$NAMESPACE"/"$COMPONENT":"$VERSION" "$SCRIPT_DIR"/../../ + + echo "Docker image $NAMESPACE/$COMPONENT:$VERSION built" + # For each cluster, load the docker image + for cluster in "${!clusters[@]}"; do + kind load docker-image "$NAMESPACE"/"$COMPONENT":"$VERSION" --name="$cluster" + done +} + +# Install remote components function +# Parameters: +# $1: consumer JSON tmp file +# $2: provider JSON tmp file +# $3: local repositories boolean +# $4: local resource manager boolean +# Return: none +function install_components() { + + unset clusters + declare -A clusters + + # Get consumer JSON tmp file from parameter + consumers_json=$1 + + # Get provider JSON tmp file from parameter + providers_json=$2 + + # Get the remote boolean from parameters + local_repositories=$3 + + # Get the local resource manager installation boolean from parameters + local_resource_manager=$4 + + helm repo add fluidos https://fluidos-project.github.io/node/ + + consumer_node_port=30000 + provider_node_port=30001 + + # Read the results from the files + while IFS= read -r line; do + echo + name=$(echo "$line" | cut -d: -f1) + info=$(echo "$line" | cut -d: -f2-) + clusters["$name"]=$info + done < "$consumers_json" + + while IFS= read -r line; do + name=$(echo "$line" | cut -d: -f1) + info=$(echo "$line" | cut -d: -f2-) + clusters["$name"]=$info + done < "$providers_json" + + # Print the clusters + for cluster in "${!clusters[@]}"; do + echo "Cluster: $cluster" + echo "Value: ${clusters[$cluster]}" + done + + if [ "$local_repositories" == "true" ]; then + unset COMPONENT_MAP + declare -A COMPONENT_MAP + COMPONENT_MAP["rear-controller"]="rearController.imageName" + COMPONENT_MAP["rear-manager"]="rearManager.imageName" + COMPONENT_MAP["local-resource-manager"]="localResourceManager.imageName" + # Build the image name using the username + IMAGE_SET_STRING="" + DOCKER_USERNAME="fluidoscustom" + VERSION="0.0.1" + for component in rear-controller rear-manager local-resource-manager; do + helm_key="${COMPONENT_MAP[$component]}" + IMAGE_SET_STRING="$IMAGE_SET_STRING --set $helm_key=$DOCKER_USERNAME/$component" + # Build and load the docker image + ( + build_and_load $component $DOCKER_USERNAME $VERSION + ) & + # Save the PID of the process + pids+=($!) 
+ done + + # Wait for each process and if any of them fails, generates a trap to be captured, which kills all the processes and exits + for pid in "${pids[@]}"; do + wait "$pid" || handle_error + echo "Process $pid finished" + done + + # Reset the pids array + pids=() + fi + + # Iterate over the clusters + for cluster in "${!clusters[@]}"; do + + ( + echo "Cluster is: $cluster" + echo "Cluster value is: ${clusters[$cluster]}" + + # Create list of providers ip taking all the clusters controlplane IPs from the map and put it ina string separated by commas + for provider in "${!clusters[@]}"; do + # Check if the cluster is not the current one + # Check if the cluster is a provider + cluster_role=$(jq -r '.role' <<< "${clusters[$provider]}") + # Print cluster role + echo "Cluster role is: $cluster_role" + if [ "$provider" != "$cluster" ] && [ "$cluster_role" == "provider" ]; then + # Print the specific cluster informations + echo "Cluster: $provider" + echo "Value: ${clusters[$provider]}" + ip_value="${clusters[$provider]}" + ip=$(jq -r '.ip' <<< "$ip_value") + # Add the provider port to the IP + ip="$ip:$provider_node_port" + + if [ -z "${providers_ips[$provider]}" ]; then + providers_ips[$cluster]="$ip" + else + providers_ips[$cluster]="${providers_ips[$cluster]},$ip" + fi + fi + done + + # Set the KUBECONFIG environment variable taking the value + export KUBECONFIG + KUBECONFIG=$(echo "${clusters[$cluster]}" | jq -r '.kubeconfig') + + echo "The KUBECONFIG is $KUBECONFIG" + + # Apply the metrics-server + kubectl apply -f "$SCRIPT_DIR"/../../quickstart/utils/metrics-server.yaml --kubeconfig "$KUBECONFIG" + + # Wait for the metrics-server to be ready + echo "Waiting for metrics-server to be ready" + kubectl wait --for=condition=ready pod -l k8s-app=metrics-server -n kube-system --timeout=300s --kubeconfig "$KUBECONFIG" + + # Decide value file to use based on the role of the cluster + if [ "$(jq -r '.role' <<< "${clusters[$cluster]}")" == "consumer" ]; then + # Check if local resouce manager is enabled + if [ "$local_resource_manager" == "true" ]; then + value_file="$SCRIPT_DIR/../../quickstart/utils/consumer-values.yaml" + else + value_file="$SCRIPT_DIR/../../quickstart/utils/consumer-values-nolrm.yaml" + fi + # Get cluster IP and port + ip_value="${clusters[$cluster]}" + ip=$(jq -r '.ip' <<< "$ip_value") + port=$consumer_node_port + else + # Check if local resouce manager is enabled + if [ "$local_resource_manager" == "true" ]; then + value_file="$SCRIPT_DIR/../../quickstart/utils/provider-values.yaml" + else + value_file="$SCRIPT_DIR/../../quickstart/utils/provider-values-nolrm.yaml" + fi + # Get cluster IP and port + ip_value="${clusters[$cluster]}" + ip=$(jq -r '.ip' <<< "$ip_value") + port=$provider_node_port + fi + + # Install the node Helm chart + # The installation set statically all the other nodes as providers and the current node as the consumer + echo "Installing node Helm chart in cluster $cluster" + # If the installation does not use remote repository, the image is used the one built locally + if [ "$local_repositories" == "true" ]; then + # If the installation does not use remote repository, the CRDs are applied + kubectl apply -f "$SCRIPT_DIR"/../../deployments/node/crds --kubeconfig "$KUBECONFIG" + echo "Installing local repositories in cluster $cluster with local resource manager" + # Execute command + # shellcheck disable=SC2086 + helm upgrade --install node $SCRIPT_DIR/../../deployments/node \ + -n fluidos --create-namespace -f $value_file $IMAGE_SET_STRING \ + --set 
tag=$VERSION \ + --set "networkManager.configMaps.nodeIdentity.ip=$ip:$port" \ + --set "networkManager.configMaps.providers.local=${providers_ips[$cluster]}" \ + --kubeconfig $KUBECONFIG + else + echo "Installing remote repositories in cluster $cluster with local resource manager" + helm install node fluidos/node -n fluidos --create-namespace -f "$value_file" \ + --set "networkManager.configMaps.nodeIdentity.ip=$ip:$port" \ + --set "networkManager.configMaps.providers.local=${providers_ips[$cluster]}" \ + --kubeconfig "$KUBECONFIG" + fi + + echo "Installing LIQO in cluster $cluster" + liqoctl install kind \ + --cluster-name "$cluster" \ + --set controllerManager.config.resourcePluginAddress=node-rear-controller-grpc.fluidos:2710 \ + --set controllerManager.config.enableResourceEnforcement=true \ + --kubeconfig "$KUBECONFIG" + ) & + + # Save the PID of the process + pids+=($!) + + done + + # Wait for each process and if any of them fails, generates a trap to be captured, which kills all the processes and exits + for pid in "${pids[@]}"; do + wait "$pid" || handle_error + echo "Process $pid finished" + done +} \ No newline at end of file diff --git a/tools/scripts/requirements.sh b/tools/scripts/requirements.sh new file mode 100644 index 0000000..dad80bf --- /dev/null +++ b/tools/scripts/requirements.sh @@ -0,0 +1,203 @@ +#!/usr/bin/bash + +SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" +SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" + +# shellcheck disable=SC1091 +source "$SCRIPT_DIR"/utils.sh + +# Install KIND function +function install_kind() { + print_title "Install kind..." + # Check AMD64 or ARM64 + ARCH=$(uname -m) + if [ "$ARCH" == "x86_64" ]; then + ARCH="amd64" + elif [ "$ARCH" == "aarch64" ]; then + ARCH="arm64" + else + echo "Unsupported architecture." + exit 1 + fi + # Install kind if AMD64 + if [ "$ARCH" == "amd64" ]; then + echo "Install kind AMD64..." + [ "$(uname -m)" = x86_64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.21.0/kind-linux-amd64 + chmod +x kind + sudo mv kind /usr/local/bin/kind + elif [ "$ARCH" == "arm64" ]; then + echo "Install kind ARM64..." + [ "$(uname -m)" = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.21.0/kind-linux-arm64 + chmod +x kind + sudo mv kind /usr/local/bin/kind + fi + print_title "Kind installed successfully." +} + +# Install docker function +function install_docker() { + print_title "Install docker..." + # Add Docker's official GPG key: + sudo apt-get update + sudo apt-get install ca-certificates curl + sudo install -m 0755 -d /etc/apt/keyrings + sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc + sudo chmod a+r /etc/apt/keyrings/docker.asc + # Add the repository to Apt sources: + # shellcheck disable=SC1091 + echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + sudo apt-get update + sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin + print_title "Docker installed successfully." +} + +# Check docker function +function check_docker() { + print_title "Check docker..." + if ! docker -v; then + echo "Please install docker first." + return 1 + fi +} + +# Install Kubectl function +function install_kubectl() { + print_title "Install kubectl..." 
+ # Check AMD64 or ARM64 + ARCH=$(uname -m) + if [ "$ARCH" == "x86_64" ]; then + ARCH="amd64" + elif [ "$ARCH" == "aarch64" ]; then + ARCH="arm64" + else + echo "Unsupported architecture." + return 1 + fi + # Install kubectl if AMD64 + if [ "$ARCH" == "amd64" ]; then + echo "Install kubectl AMD64..." + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + elif [ "$ARCH" == "arm64" ]; then + echo "Install kubectl ARM64..." + curl -LO "https://dl.k8s.io/release/v1.21.0/bin/linux/arm64/kubectl" + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + fi + print_title "Kubectl installed successfully." +} + +# Check Kubectl function +function check_kubectl() { + print_title "Check kubectl..." + if ! kubectl version --client; then + # Ask the user if they want to install kubectl + read -r -p "Do you want to install kubectl? (y/n): " install_kubectl + if [ "$install_kubectl" == "y" ]; then + install_kubectl + else + echo "Please install kubectl first. Exiting..." + return 1 + fi + fi +} + +# Install Helm function +function install_helm() { + print_title "Install helm..." + curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 + chmod 700 get_helm.sh + ./get_helm.sh + print_title "Helm installed successfully." +} + +# Check Helm function +function check_helm() { + print_title "Check helm..." + helm version + if ! helm version; then + # Ask the user if they want to install helm + read -r -p "Do you want to install helm? (y/n): " install_helm + if [ "$install_helm" == "y" ]; then + install_helm + else + echo "Please install helm first. Exiting..." + exit 1 + fi + fi +} + +# Install liqoctl function +function install_liqoctl() { + print_title "Install liqo..." + # Check AMD64 or ARM64 + ARCH=$(uname -m) + if [ "$ARCH" == "x86_64" ]; then + ARCH="amd64" + elif [ "$ARCH" == "aarch64" ]; then + ARCH="arm64" + else + echo "Unsupported architecture." + exit 1 + fi + # Install liqoctl if AMD64 + if [ "$ARCH" == "amd64" ]; then + echo "Install liqoctl AMD64..." + curl --fail -LS "https://github.com/liqotech/liqo/releases/download/v0.10.1/liqoctl-linux-amd64.tar.gz" | tar -xz + sudo install -o root -g root -m 0755 liqoctl /usr/local/bin/liqoctl + elif [ "$ARCH" == "arm64" ]; then + echo "Install liqoctl ARM64..." + curl --fail -LS "https://github.com/liqotech/liqo/releases/download/v0.10.1/liqoctl-linux-arm64.tar.gz" | tar -xz + sudo install -o root -g root -m 0755 liqoctl /usr/local/bin/liqoctl + fi + print_title "Liqo installed successfully." +} + +# Check liqoctl function +function check_liqoctl() { + print_title "Check liqoctl..." + if ! liqoctl version --client; then + echo "Please install liqoctl first." + # Ask the user if they want to install liqoctl + read -r -p "Do you want to install liqoctl? (y/n): " install_liqoctl + if [ "$install_liqoctl" == "y" ]; then + install_liqo + else + echo "LIQO is required to continue. Exiting..." + exit 1 + fi + fi +} + +# Install jq function +function install_jq() { + print_title "Install jq..." + sudo apt-get install jq + print_title "jq installed successfully." +} + +# Check jq function +function check_jq() { + if ! jq --version; then + # Ask the user if they want to install jq + read -r -p "Do you want to install jq? (y/n): " install_jq + if [ "$install_jq" == "y" ]; then + install_jq + else + echo "Please install jq first. Exiting..." 
+ exit 1 + fi + fi +} + +# Check all the tools +function check_tools() { + print_title "Check all the tools..." + check_jq + check_docker + check_kubectl + check_helm + check_liqoctl +} \ No newline at end of file diff --git a/tools/scripts/setup.sh b/tools/scripts/setup.sh new file mode 100644 index 0000000..5882ca1 --- /dev/null +++ b/tools/scripts/setup.sh @@ -0,0 +1,166 @@ +#!/usr/bin/bash + +# Enable job control +set -m + +# Set traps to handle errors +trap 'handle_error' ERR +# Set trap to handle exit +trap 'handle_exit' INT + + +SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" +SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" + +# shellcheck disable=SC1091 +source "$SCRIPT_DIR"/requirements.sh +# shellcheck disable=SC1091 +source "$SCRIPT_DIR"/utils.sh +# shellcheck disable=SC1091 +source "$SCRIPT_DIR"/environment.sh +# shellcheck disable=SC1091 +source "$SCRIPT_DIR"/installation.sh + +# Tmp consumer JSON file +consumers_json="$SCRIPT_DIR/fluidos-consumers-clusters.json" + +# Tmp provider JSON file +providers_json="$SCRIPT_DIR/fluidos-providers-clusters.json" + + +# FLUIDOS node installer greetings into the terminal +print_title "Welcome to the FLUIDOS node installer" + +echo "We'll now run the installation process for the FLUIDOS node." + +# Ask the user what type of enviroment they want to use/create +# Options are: +# 1. Use demo KIND enviroment (one consumer and one provider) +# 2. Use a custom KIND enviroment with n clusters (half consumer, half provider) +# 3. Use personal Kubernetes clusters through KUBECONFIG files (not supported yet) +read -r -p "What type of environment do you want to use? / +1. Use demo KIND environment (one consumer and one provider) / +2. Use a custom KIND environment with n consumer and m provides / +3. Use personal Kubernetes clusters through KUBECONFIG files / +Please enter the number of the option you want to use: + " environment_type + +# Check if the input is a number +if ! [[ $environment_type =~ ^[0-9]+$ ]]; then + echo "Please enter a number." + return 1 +fi + +# Ask the user if they want to use local repositories or the public ones +read -r -p "Do you want to use local repositories? [y/n] " local_repositories + +# Check if the input is y or n +if [ "$local_repositories" == "y" ]; then + # If the enviroment is Kubernetes cluster, the user can't use local repositories + if [ "$environment_type" -eq 3 ]; then + # Option not available at the moment + echo "Option not available at the moment." + echo "You can't use local repositories with a personal Kubernetes cluster." + read -r -p "Press any key to continue..." + return 0 + fi + local_repositories=true +elif [ "$local_repositories" == "n" ]; then + local_repositories=false +else + echo "Invalid option." + return 1 +fi + +# Ask the user if they want to use a local resource manager +read -r -p "Do you want to use a local resource manager? [y/n] " local_resource_manager + +# Check if the input is y or n +if [ "$local_resource_manager" == "y" ]; then + local_resource_manager=true +elif [ "$local_resource_manager" == "n" ]; then + local_resource_manager=false +else + echo "Invalid option." + return 1 +fi + +# Check requirements with function check_tools from requirements.sh +check_tools + +echo "All the tools are installed." 
+ +# Check if the input is 1, 2 or 3 +if [ "$environment_type" -eq 1 ]; then + environment_type="customkind" + # Call create_kind clusters with parameters and save return value into clusters variable + create_kind_clusters "$consumers_json" "$providers_json" $environment_type 1 1 +elif [ "$environment_type" -eq 2 ]; then + environment_type="customkind" + # Ask the user how many consumer and provider clusters they want + read -r -p "How many consumer clusters do you want? " consumer_clusters + read -r -p "How many provider clusters do you want? " provider_clusters + + # Check if the input is a number + if ! [[ $consumer_clusters =~ ^[0-9]+$ ]] || ! [[ $provider_clusters =~ ^[0-9]+$ ]]; then + echo "Please enter a number." + return 1 + fi + + # Call create_kind clusters with parameters and save return value into clusters variable + create_kind_clusters "$consumers_json" "$providers_json" $environment_type "$consumer_clusters" "$provider_clusters" +elif [ "$environment_type" -eq 3 ]; then + get_clusters "$consumers_json" "$providers_json" +else + echo "Invalid option." + return 1 +fi + +# FLUIDOS node installation +install_components "$consumers_json" "$providers_json" $local_repositories $local_resource_manager + +print_title "Installation completed successfully" + +# Print KUBECONFIG files for each cluster +echo "KUBECONFIG files for each cluster:" + +# Create cluster variable +unset clusters +declare -A clusters + +print_title "Consumer Clusters" +# Read consumers +while IFS= read -r line; do + echo + name=$(echo "$line" | cut -d: -f1) + info=$(echo "$line" | cut -d: -f2-) + clusters["$name"]=$info +done < "$consumers_json" + +# Print KUBECONFIG files for each consumer cluster +for cluster in "${!clusters[@]}"; do + KUBECONFIG=$(echo "${clusters[$cluster]}" | jq -r '.kubeconfig') + echo "$cluster: $KUBECONFIG" +done + +unset clusters +declare -A clusters + +print_title "Provider Clusters" + +# Read consumers +while IFS= read -r line; do + echo + name=$(echo "$line" | cut -d: -f1) + info=$(echo "$line" | cut -d: -f2-) + clusters["$name"]=$info +done < "$providers_json" + +# Print KUBECONFIG files for each consumer cluster +for cluster in "${!clusters[@]}"; do + KUBECONFIG=$(echo "${clusters[$cluster]}" | jq -r '.kubeconfig') + echo "$cluster: $KUBECONFIG" +done + +rm "$consumers_json" +rm "$providers_json" \ No newline at end of file diff --git a/tools/scripts/utils.sh b/tools/scripts/utils.sh new file mode 100644 index 0000000..933cbf9 --- /dev/null +++ b/tools/scripts/utils.sh @@ -0,0 +1,14 @@ +#!/usr/bin/bash + +# Function to print variable title +function print_title() { + local title="$1" + local line="--------------------------------------------------" + local padding=$(( ( ${#line} - ${#title} ) / 2 )) + local padded_title + padded_title=$(printf "%${padding}s%s%${padding}s" "" "${title^^}" "") + + echo "$line" + echo "$padded_title" + echo "$line" +} \ No newline at end of file