diff --git a/Makefile b/Makefile index 2cef8ca..a9d043f 100644 --- a/Makefile +++ b/Makefile @@ -48,6 +48,7 @@ help: ## Display this help. .PHONY: manifests manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. $(CONTROLLER_GEN) rbac:roleName=non-admin-controller-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + sed -i 's/Velero backup/NonAdminBackup/' ./config/crd/bases/nac.oadp.openshift.io_nonadminrestores.yaml .PHONY: generate generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. diff --git a/README.md b/README.md index e98f34d..5c5fe0d 100644 --- a/README.md +++ b/README.md @@ -38,6 +38,8 @@ To use NAC functionality: ``` Check the application was successful deployed by accessing its route. + + Create and update items in application UI, to later check if application was successfully restored. - create NonAdminBackup For example, use one of the sample NonAdminBackup available in `hack/samples/backups/` folder, by running @@ -47,7 +49,28 @@ To use NAC functionality: | oc create -f - ``` - - TODO NonAdminRestore + - delete sample application + + For example, delete one of the sample applications available in `hack/samples/apps/` folder, by running + ```sh + oc process -f ./hack/samples/apps/ \ + -p NAMESPACE= \ + | oc delete -f - + ``` + + Check that application was successfully deleted by accessing its route. + - create NonAdminRestore + + For example, use one of the sample NonAdminRestore available in `hack/samples/restores/` folder, by running + ```sh + oc process -f ./hack/samples/restores/ \ + -p NAMESPACE= \ + -p NAME= \ + | oc create -f - + ``` + + + After NonAdminRestore completes, check if the application was successfully restored by accessing its route and seeing its items in application UI. 
## Contributing diff --git a/api/v1alpha1/nonadminrestore_types.go b/api/v1alpha1/nonadminrestore_types.go index a9935dc..73662b0 100644 --- a/api/v1alpha1/nonadminrestore_types.go +++ b/api/v1alpha1/nonadminrestore_types.go @@ -17,23 +17,30 @@ limitations under the License. package v1alpha1 import ( + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - // NonAdminRestoreSpec defines the desired state of NonAdminRestore type NonAdminRestoreSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file + // Specification for a Velero restore. + // +kubebuilder:validation:Required + RestoreSpec *velerov1api.RestoreSpec `json:"restoreSpec,omitempty"` + // TODO add test that NAR can not be created without restoreSpec or restoreSpec.backupName + // TODO need to investigate restoreSpec.namespaceMapping, depends on how NAC tracks the namespace access per user - // Foo is an example field of NonAdminRestore. Edit nonadminrestore_types.go to remove/update - Foo string `json:"foo,omitempty"` + // TODO NonAdminRestore log level, by default TODO. 
+ // +optional + // +kubebuilder:validation:Enum=trace;debug;info;warning;error;fatal;panic + LogLevel string `json:"logLevel,omitempty"` + // TODO ALSO ADD TEST FOR DIFFERENT LOG LEVELS } // NonAdminRestoreStatus defines the observed state of NonAdminRestore type NonAdminRestoreStatus struct { + // TODO https://github.com/migtools/oadp-non-admin/pull/23 + // TODO https://github.com/migtools/oadp-non-admin/pull/13 + Conditions []metav1.Condition `json:"conditions,omitempty"` } diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index ace977b..b9665a8 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -137,7 +137,7 @@ func (in *NonAdminRestore) DeepCopyInto(out *NonAdminRestore) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } @@ -194,6 +194,11 @@ func (in *NonAdminRestoreList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NonAdminRestoreSpec) DeepCopyInto(out *NonAdminRestoreSpec) { *out = *in + if in.RestoreSpec != nil { + in, out := &in.RestoreSpec, &out.RestoreSpec + *out = new(v1.RestoreSpec) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonAdminRestoreSpec. 
diff --git a/cmd/main.go b/cmd/main.go index 8dec830..d047639 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -98,7 +98,8 @@ func main() { TLSOpts: tlsOpts, }) - if len(constant.OadpNamespace) == 0 { + // TODO create get function in common :question: + if len(os.Getenv(constant.NamespaceEnvVar)) == 0 { setupLog.Error(fmt.Errorf("%v environment variable is empty", constant.NamespaceEnvVar), "environment variable must be set") os.Exit(1) } diff --git a/config/crd/bases/nac.oadp.openshift.io_nonadminrestores.yaml b/config/crd/bases/nac.oadp.openshift.io_nonadminrestores.yaml index dabd110..434252b 100644 --- a/config/crd/bases/nac.oadp.openshift.io_nonadminrestores.yaml +++ b/config/crd/bases/nac.oadp.openshift.io_nonadminrestores.yaml @@ -39,10 +39,410 @@ spec: spec: description: NonAdminRestoreSpec defines the desired state of NonAdminRestore properties: - foo: - description: Foo is an example field of NonAdminRestore. Edit nonadminrestore_types.go - to remove/update + logLevel: + description: TODO NonAdminRestore log level, by default TODO. + enum: + - trace + - debug + - info + - warning + - error + - fatal + - panic type: string + restoreSpec: + description: Specification for a Velero restore. + properties: + backupName: + description: |- + BackupName is the unique name of the NonAdminBackup to restore + from. + type: string + excludedNamespaces: + description: |- + ExcludedNamespaces contains a list of namespaces that are not + included in the restore. + items: + type: string + nullable: true + type: array + excludedResources: + description: |- + ExcludedResources is a slice of resource names that are not + included in the restore. + items: + type: string + nullable: true + type: array + existingResourcePolicy: + description: ExistingResourcePolicy specifies the restore behavior + for the Kubernetes resource to be restored + nullable: true + type: string + hooks: + description: Hooks represent custom behaviors that should be executed + during or post restore. 
+ properties: + resources: + items: + description: |- + RestoreResourceHookSpec defines one or more RestoreResrouceHooks that should be executed based on + the rules defined for namespaces, resources, and label selector. + properties: + excludedNamespaces: + description: ExcludedNamespaces specifies the namespaces + to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + excludedResources: + description: ExcludedResources specifies the resources + to which this hook spec does not apply. + items: + type: string + nullable: true + type: array + includedNamespaces: + description: |- + IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies + to all namespaces. + items: + type: string + nullable: true + type: array + includedResources: + description: |- + IncludedResources specifies the resources to which this hook spec applies. If empty, it applies + to all resources. + items: + type: string + nullable: true + type: array + labelSelector: + description: LabelSelector, if specified, filters the + resources to which this hook spec applies. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: Name is the name of this hook. + type: string + postHooks: + description: PostHooks is a list of RestoreResourceHooks + to execute during and after restoring a resource. + items: + description: RestoreResourceHook defines a restore + hook for a resource. + properties: + exec: + description: Exec defines an exec restore hook. + properties: + command: + description: Command is the command and arguments + to execute from within a container after + a pod has been restored. + items: + type: string + minItems: 1 + type: array + container: + description: |- + Container is the container in the pod where the command should be executed. If not specified, + the pod's first container is used. + type: string + execTimeout: + description: |- + ExecTimeout defines the maximum amount of time Velero should wait for the hook to complete before + considering the execution a failure. + type: string + onError: + description: OnError specifies how Velero + should behave if it encounters an error + executing this hook. + enum: + - Continue + - Fail + type: string + waitTimeout: + description: |- + WaitTimeout defines the maximum amount of time Velero should wait for the container to be Ready + before attempting to run the command. + type: string + required: + - command + type: object + init: + description: Init defines an init restore hook. + properties: + initContainers: + description: InitContainers is list of init + containers to be added to a pod during its + restore. 
+ items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + x-kubernetes-preserve-unknown-fields: true + timeout: + description: Timeout defines the maximum amount + of time Velero should wait for the initContainers + to complete. + type: string + type: object + type: object + type: array + required: + - name + type: object + type: array + type: object + includeClusterResources: + description: |- + IncludeClusterResources specifies whether cluster-scoped resources + should be included for consideration in the restore. If null, defaults + to true. + nullable: true + type: boolean + includedNamespaces: + description: |- + IncludedNamespaces is a slice of namespace names to include objects + from. If empty, all namespaces are included. + items: + type: string + nullable: true + type: array + includedResources: + description: |- + IncludedResources is a slice of resource names to include + in the restore. If empty, all resources in the backup are included. + items: + type: string + nullable: true + type: array + itemOperationTimeout: + description: |- + ItemOperationTimeout specifies the time used to wait for RestoreItemAction operations + The default value is 1 hour. + type: string + labelSelector: + description: |- + LabelSelector is a metav1.LabelSelector to filter with + when restoring individual objects from the backup. If empty + or nil, all objects are included. Optional. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceMapping: + additionalProperties: + type: string + description: |- + NamespaceMapping is a map of source namespace names + to target namespace names to restore into. Any source + namespaces not included in the map will be restored into + namespaces of the same name. + type: object + orLabelSelectors: + description: |- + OrLabelSelectors is list of metav1.LabelSelector to filter with + when restoring individual objects from the backup. If multiple provided + they will be joined by the OR operator. LabelSelector as well as + OrLabelSelectors cannot co-exist in restore request, only one of them + can be used + items: + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + nullable: true + type: array + preserveNodePorts: + description: PreserveNodePorts specifies whether to restore old + nodePorts from backup. + nullable: true + type: boolean + resourceModifier: + description: ResourceModifier specifies the reference to JSON + resource patches that should be applied to resources before + restoration. + nullable: true + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + restorePVs: + description: |- + RestorePVs specifies whether to restore all included + PVs from snapshot + nullable: true + type: boolean + restoreStatus: + description: |- + RestoreStatus specifies which resources we should restore the status + field. If nil, no objects are included. Optional. + nullable: true + properties: + excludedResources: + description: ExcludedResources specifies the resources to + which will not restore the status. + items: + type: string + nullable: true + type: array + includedResources: + description: |- + IncludedResources specifies the resources to which will restore the status. + If empty, it applies to all resources. + items: + type: string + nullable: true + type: array + type: object + scheduleName: + description: |- + ScheduleName is the unique name of the Velero schedule to restore + from. If specified, and BackupName is empty, Velero will restore + from the most recent successful backup created from this schedule. 
+ type: string + required: + - backupName + type: object type: object status: description: NonAdminRestoreStatus defines the observed state of NonAdminRestore diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index f013316..58ffd37 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -67,3 +67,14 @@ rules: - patch - update - watch +- apiGroups: + - velero.io + resources: + - restores + verbs: + - create + - get + - list + - patch + - update + - watch diff --git a/config/samples/nac_v1alpha1_nonadminrestore.yaml b/config/samples/nac_v1alpha1_nonadminrestore.yaml index 80c51d5..c8e2e2f 100644 --- a/config/samples/nac_v1alpha1_nonadminrestore.yaml +++ b/config/samples/nac_v1alpha1_nonadminrestore.yaml @@ -9,4 +9,5 @@ metadata: app.kubernetes.io/created-by: oadp-operator name: nonadminrestore-sample spec: - # TODO(user): Add fields here + restoreSpec: + backupName: nonadminbackup-sample diff --git a/docs/non_admin_user.md b/docs/non_admin_user.md index 4902541..182b5c0 100644 --- a/docs/non_admin_user.md +++ b/docs/non_admin_user.md @@ -40,28 +40,48 @@ Choose one of the authentication method sections to follow. 
``` - Ensure non admin user have appropriate permissions in its namespace, i.e., non admin user have editor roles for the following objects - `nonadminbackups.nac.oadp.openshift.io` + - `nonadminrestores.nac.oadp.openshift.io` For example ```yaml - # config/rbac/nonadminbackup_editor_role.yaml - - apiGroups: - - nac.oadp.openshift.io - resources: - - nonadminbackups - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - nac.oadp.openshift.io - resources: - - nonadminbackups/status - verbs: - - get + # config/rbac/nonadminbackup_editor_role.yaml + - apiGroups: + - nac.oadp.openshift.io + resources: + - nonadminbackups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - nac.oadp.openshift.io + resources: + - nonadminbackups/status + verbs: + - get + # config/rbac/nonadminrestore_editor_role.yaml + - apiGroups: + - nac.oadp.openshift.io + resources: + - nonadminrestores + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - nac.oadp.openshift.io + resources: + - nonadminrestores/status + verbs: + - get ``` For example, make non admin user have `admin` ClusterRole permissions on its namespace ```sh diff --git a/go.mod b/go.mod index 00d9950..7eb4612 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/onsi/gomega v1.30.0 github.com/stretchr/testify v1.8.4 github.com/vmware-tanzu/velero v1.12.0 + k8s.io/api v0.29.0 k8s.io/apimachinery v0.29.0 k8s.io/client-go v0.29.0 sigs.k8s.io/controller-runtime v0.17.0 @@ -65,7 +66,6 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.29.0 // indirect k8s.io/apiextensions-apiserver v0.29.0 // indirect k8s.io/component-base v0.29.0 // indirect k8s.io/klog/v2 v2.110.1 // indirect diff --git a/hack/samples/restores/common.yaml b/hack/samples/restores/common.yaml new file mode 100644 index 0000000..7b47631 
--- /dev/null +++ b/hack/samples/restores/common.yaml @@ -0,0 +1,23 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: sample-nonadminrestore +objects: + - apiVersion: nac.oadp.openshift.io/v1alpha1 + kind: NonAdminRestore + metadata: + name: nonadminrestore-sample-${SUFFIX} + namespace: ${NAMESPACE} + spec: + restoreSpec: + backupName: ${NAME} +parameters: + - description: NonAdminRestore suffix + from: '[a-z0-9]{8}' + generate: expression + name: SUFFIX + - description: NonAdminRestore namespace + name: NAMESPACE + value: mysql-persistent + - description: NonAdminBackup name + name: NAME diff --git a/internal/controller/nonadminrestore_controller.go b/internal/controller/nonadminrestore_controller.go index 964c774..cd21d52 100644 --- a/internal/controller/nonadminrestore_controller.go +++ b/internal/controller/nonadminrestore_controller.go @@ -18,43 +18,95 @@ package controller import ( "context" + "fmt" + "os" + "github.com/go-logr/logr" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" + "github.com/migtools/oadp-non-admin/internal/common/constant" ) // NonAdminRestoreReconciler reconciles a NonAdminRestore object type NonAdminRestoreReconciler struct { client.Client Scheme *runtime.Scheme + Logger logr.Logger } // +kubebuilder:rbac:groups=nac.oadp.openshift.io,resources=nonadminrestores,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=nac.oadp.openshift.io,resources=nonadminrestores/status,verbs=get;update;patch // +kubebuilder:rbac:groups=nac.oadp.openshift.io,resources=nonadminrestores/finalizers,verbs=update +// 
+kubebuilder:rbac:groups=velero.io,resources=restores,verbs=get;list;watch;create;update;patch + // Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the NonAdminRestore object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.17.0/pkg/reconcile -func (*NonAdminRestoreReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) { - _ = log.FromContext(ctx) - - // TODO(user): your logic here +// move the current state of the NonAdminRestore to the desired state. +func (r *NonAdminRestoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + r.Logger = log.FromContext(ctx) + logger := r.Logger.WithValues("NonAdminRestore", req.NamespacedName) + + logger.Info("TODO") + + nonAdminRestore := nacv1alpha1.NonAdminRestore{} + err := r.Get(ctx, req.NamespacedName, &nonAdminRestore) + if err != nil { + return ctrl.Result{}, err + } + + err = r.validateSpec(ctx, req, nonAdminRestore.Spec) + if err != nil { + return ctrl.Result{}, err + } + + // TODO try to create Velero Restore return ctrl.Result{}, nil } -// SetupWithManager sets up the controller with the Manager. 
+// TODO remove functions params +func (r *NonAdminRestoreReconciler) validateSpec(ctx context.Context, req ctrl.Request, objectSpec nacv1alpha1.NonAdminRestoreSpec) error { + if len(objectSpec.RestoreSpec.ScheduleName) > 0 { + return fmt.Errorf("spec.restoreSpec.scheduleName field is not allowed in NonAdminRestore") + } + + // TODO nonAdminBackup respect restricted fields + + nonAdminBackupName := objectSpec.RestoreSpec.BackupName + nonAdminBackup := &nacv1alpha1.NonAdminBackup{} + err := r.Get(ctx, types.NamespacedName{Namespace: req.Namespace, Name: nonAdminBackupName}, nonAdminBackup) + if err != nil { + if errors.IsNotFound(err) { + // TODO add this error message to NonAdminRestore status + return fmt.Errorf("invalid spec.restoreSpec.backupName: NonAdminBackup '%s' does not exist in namespace %s", nonAdminBackupName, req.Namespace) + } + return err + } + // TODO nonAdminBackup has necessary labels (NAB controller job :question:) + // TODO nonAdminBackup is in complete state :question:!!!! + + // TODO create get function in common :question: + oadpNamespace := os.Getenv(constant.NamespaceEnvVar) + + veleroBackupName := nonAdminBackup.Labels["naoSei"] + veleroBackup := &velerov1api.Backup{} + err = r.Get(ctx, types.NamespacedName{Namespace: oadpNamespace, Name: veleroBackupName}, veleroBackup) + if err != nil { + // TODO test error messages, THEY MUST BE INFORMATIVE + return err + } + + return nil +} + +// SetupWithManager sets up the NonAdminRestore controller with the Manager. func (r *NonAdminRestoreReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&nacv1alpha1.NonAdminRestore{}). 
diff --git a/internal/controller/nonadminrestore_controller_test.go b/internal/controller/nonadminrestore_controller_test.go index 5d6f76e..fb0b087 100644 --- a/internal/controller/nonadminrestore_controller_test.go +++ b/internal/controller/nonadminrestore_controller_test.go @@ -18,66 +18,188 @@ package controller import ( "context" + "os" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/api/errors" + "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/reconcile" nacv1alpha1 "github.com/migtools/oadp-non-admin/api/v1alpha1" + "github.com/migtools/oadp-non-admin/internal/common/constant" ) -var _ = ginkgo.Describe("NonAdminRestore Controller", func() { - ginkgo.Context("When reconciling a resource", func() { - const resourceName = "test-resource" +type clusterScenario struct { + namespace string + nonAdminRestore string +} - ctx := context.Background() +type nonAdminRestoreReconcileScenario struct { + restoreSpec *v1.RestoreSpec + namespace string + nonAdminRestore string + errMessage string +} - typeNamespacedName := types.NamespacedName{ - Name: resourceName, - Namespace: "default", // TODO(user):Modify as needed +func createTestNonAdminRestore(name string, namespace string, restoreSpec v1.RestoreSpec) *nacv1alpha1.NonAdminRestore { + return &nacv1alpha1.NonAdminRestore{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: nacv1alpha1.NonAdminRestoreSpec{ + RestoreSpec: &restoreSpec, + }, + } +} + +// TODO this does not work with envtest :question: +var _ = ginkgo.Describe("Test NonAdminRestore in cluster validation", func() { + var ( + ctx = context.Background() + currentTestScenario clusterScenario + updateTestScenario = func(scenario clusterScenario) { + currentTestScenario = scenario + } + ) + + ginkgo.AfterEach(func() { + nonAdminRestore := 
&nacv1alpha1.NonAdminRestore{} + if k8sClient.Get( + ctx, + types.NamespacedName{ + Name: currentTestScenario.nonAdminRestore, + Namespace: currentTestScenario.namespace, + }, + nonAdminRestore, + ) == nil { + gomega.Expect(k8sClient.Delete(ctx, nonAdminRestore)).To(gomega.Succeed()) } - nonadminrestore := &nacv1alpha1.NonAdminRestore{} - - ginkgo.BeforeEach(func() { - ginkgo.By("creating the custom resource for the Kind NonAdminRestore") - err := k8sClient.Get(ctx, typeNamespacedName, nonadminrestore) - if err != nil && errors.IsNotFound(err) { - resource := &nacv1alpha1.NonAdminRestore{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourceName, - Namespace: "default", - }, - // TODO(user): Specify other spec details if needed. - } - gomega.Expect(k8sClient.Create(ctx, resource)).To(gomega.Succeed()) + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: currentTestScenario.namespace, + }, + } + gomega.Expect(k8sClient.Delete(ctx, namespace)).To(gomega.Succeed()) + }) + + ginkgo.DescribeTable("Validation is false", + func(scenario clusterScenario) { + updateTestScenario(scenario) + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: scenario.namespace, + }, } - }) - - ginkgo.AfterEach(func() { - // TODO(user): Cleanup logic after each test, like removing the resource instance. 
- resource := &nacv1alpha1.NonAdminRestore{} - err := k8sClient.Get(ctx, typeNamespacedName, resource) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By("Cleanup the specific resource instance NonAdminRestore") - gomega.Expect(k8sClient.Delete(ctx, resource)).To(gomega.Succeed()) - }) - ginkgo.It("should successfully reconcile the resource", func() { - ginkgo.By("Reconciling the created resource") - controllerReconciler := &NonAdminRestoreReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), + gomega.Expect(k8sClient.Create(ctx, namespace)).To(gomega.Succeed()) + + nonAdminRestore := &nacv1alpha1.NonAdminRestore{ + ObjectMeta: metav1.ObjectMeta{ + Name: scenario.nonAdminRestore, + Namespace: scenario.namespace, + }, + // Spec: nacv1alpha1.NonAdminRestoreSpec{}, } + gomega.Expect(k8sClient.Create(ctx, nonAdminRestore)).To(gomega.Not(gomega.Succeed())) + }, + ginkgo.Entry("Should NOT create NonAdminRestore without spec.restoreSpec", clusterScenario{ + namespace: "test-nonadminrestore-cluster-1", + nonAdminRestore: "test-nonadminrestore-cluster-1-cr", + }), + // TODO Should NOT create NonAdminRestore without spec.restoreSpec.backupName + ) +}) + +var _ = ginkgo.Describe("Test NonAdminRestore Reconcile function", func() { + var ( + ctx = context.Background() + currentTestScenario nonAdminRestoreReconcileScenario + updateTestScenario = func(scenario nonAdminRestoreReconcileScenario) { + currentTestScenario = scenario + } + ) - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. 
- }) + ginkgo.AfterEach(func() { + gomega.Expect(os.Unsetenv(constant.NamespaceEnvVar)).To(gomega.Succeed()) + + nonAdminRestore := &nacv1alpha1.NonAdminRestore{} + if k8sClient.Get( + ctx, + types.NamespacedName{ + Name: currentTestScenario.nonAdminRestore, + Namespace: currentTestScenario.namespace, + }, + nonAdminRestore, + ) == nil { + gomega.Expect(k8sClient.Delete(ctx, nonAdminRestore)).To(gomega.Succeed()) + } + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: currentTestScenario.namespace, + }, + } + gomega.Expect(k8sClient.Delete(ctx, namespace)).To(gomega.Succeed()) }) + + ginkgo.DescribeTable("Reconcile is false", + func(scenario nonAdminRestoreReconcileScenario) { + updateTestScenario(scenario) + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: scenario.namespace, + }, + } + gomega.Expect(k8sClient.Create(ctx, namespace)).To(gomega.Succeed()) + + nonAdminRestore := createTestNonAdminRestore(scenario.nonAdminRestore, scenario.namespace, *scenario.restoreSpec) + gomega.Expect(k8sClient.Create(ctx, nonAdminRestore)).To(gomega.Succeed()) + + gomega.Expect(os.Setenv(constant.NamespaceEnvVar, "envVarValue")).To(gomega.Succeed()) + r := &NonAdminRestoreReconciler{ + Client: k8sClient, + Scheme: testEnv.Scheme, + } + result, err := r.Reconcile( + context.Background(), + reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: scenario.namespace, + Name: scenario.nonAdminRestore, + }}, + ) + + if len(scenario.errMessage) == 0 { + gomega.Expect(result).To(gomega.Equal(reconcile.Result{Requeue: false, RequeueAfter: 0})) + gomega.Expect(err).To(gomega.Not(gomega.HaveOccurred())) + } else { + gomega.Expect(result).To(gomega.Equal(reconcile.Result{Requeue: false, RequeueAfter: 0})) + gomega.Expect(err).To(gomega.HaveOccurred()) + gomega.Expect(err.Error()).To(gomega.ContainSubstring(scenario.errMessage)) + } + }, + ginkgo.Entry("Should NOT accept scheduleName", nonAdminRestoreReconcileScenario{ + 
namespace: "test-nonadminrestore-reconcile-1", + nonAdminRestore: "test-nonadminrestore-reconcile-1-cr", + errMessage: "scheduleName", + restoreSpec: &v1.RestoreSpec{ + ScheduleName: "wrong", + }, + }), + ginkgo.Entry("Should NOT accept non existing NonAdminBackup", nonAdminRestoreReconcileScenario{ + namespace: "test-nonadminrestore-reconcile-2", + nonAdminRestore: "test-nonadminrestore-reconcile-2-cr", + errMessage: "backupName", + restoreSpec: &v1.RestoreSpec{ + BackupName: "do-not-exist", + }, + }), + // TODO Should NOT accept NonAdminBackup that is not in complete state :question: + // TODO Should NOT accept non existing related Velero Backup + ) }) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index c200c59..54bfa6b 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -61,7 +61,7 @@ var _ = ginkgov2.BeforeSuite(func() { // Note that you must have the required binaries setup under the bin directory to perform // the tests directly. When we run make test it will be setup and used automatically. BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", - fmt.Sprintf("1.29.0-%s-%s", runtime.GOOS, runtime.GOARCH)), + fmt.Sprintf("1.28.3-%s-%s", runtime.GOOS, runtime.GOARCH)), } var err error