diff --git a/docs/i18n/en/docusaurus-plugin-content-docs/current.json b/docs/i18n/en/docusaurus-plugin-content-docs/current.json index cb273109c..f00b4afc1 100644 --- a/docs/i18n/en/docusaurus-plugin-content-docs/current.json +++ b/docs/i18n/en/docusaurus-plugin-content-docs/current.json @@ -166,5 +166,13 @@ "sidebar.docs.category.Legacy Storage to New Storage": { "message": "Legacy Storage to New Storage", "description": "The label for category Legacy Storage to New Storage in sidebar docs" + }, + "sidebar.docs.category.Data Migration": { + "message": "Data Migration", + "description": "The label for category Data Migration in sidebar docs" + }, + "sidebar.docs.category.Migration using Velero": { + "message": "Migration using Velero", + "description": "The label for category Migration using Velero in sidebar docs" } } \ No newline at end of file diff --git a/docs/main/user-guides/data-migration/migration-overview.md b/docs/main/user-guides/data-migration/migration-overview.md new file mode 100644 index 000000000..038baaf33 --- /dev/null +++ b/docs/main/user-guides/data-migration/migration-overview.md @@ -0,0 +1,34 @@ +--- +id: migration-overview +title: Migration Overview +keywords: + - Migration + - Data Migration + - Migration from OpenEBS Local PV Device to OpenEBS LVM Local PV + - Local PV Device to Local PV LVM + - Local PV Device to Local PV ZFS + - Migration from OpenEBS cStor to OpenEBS Replicated + - cStor to Replicated + - cStor to Mayastor + - Jiva to Replicated + - Jiva to Mayastor +description: This section outlines the process of migrating the legacy storage to latest storage solution. +--- + +## Migration Overview + +Data migration is the process of moving data from a source storage to a destination storage. In OpenEBS context, the users can migrate the data from legacy OpenEBS storage to the latest OpenEBS storage. + +There are different techniques/methodologies for performing data migration. Users can perform data migration within the same Kubernetes cluster or across Kubernetes clusters. The following guides outline several methodologies for migrating from legacy OpenEBS storage to latest OpenEBS storage: +- [Migration using pv-migrate Utility](../migration/migration-using-pv-migrate.md) +- [Migration using velero Utility](../migration/migration-using-velero/) + +:::info +Users of non-OpenEBS storage solutions can also use these approaches described below to migrate their data to OpenEBS storage. 
+::: + +## See Also + +- [Migration from Legacy Storage to Latest Storage Solution](../data-migration/migration-using-pv-migrate.md) +- [Migration for Distrubuted DB](../data-migration/migration-using-velero/migration-for-distributed-db/distributeddb-backup.md) +- [Migration for Replicated DB](../data-migration/migration-using-velero/migration-for-replicated-db/replicateddb-backup.md) \ No newline at end of file diff --git a/docs/main/user-guides/data-migration/migration-using-pv-migrate.md b/docs/main/user-guides/data-migration/migration-using-pv-migrate.md new file mode 100644 index 000000000..9aaf995e6 --- /dev/null +++ b/docs/main/user-guides/data-migration/migration-using-pv-migrate.md @@ -0,0 +1,309 @@ +--- +id: migration-using-pv-migrate +title: Migration from Legacy Storage to Latest Storage Solution +keywords: + - Migration + - Data Migration + - Migration from OpenEBS Local PV Device to OpenEBS LVM Local PV + - Local PV Device to Local PV LVM + - Local PV Device to Local PV ZFS + - Migration from OpenEBS cStor to OpenEBS Replicated + - cStor to Replicated + - cStor to Mayastor + - Jiva to Replicated + - Jiva to Mayastor +description: This section outlines the process of migrating the legacy storage to latest storage solution. +--- + +This section describes the process of migrating the legacy storage to latest storage solution. + +## Overview + +Data migration is the process of moving data from a source storage to a destination storage. In OpenEBS context, the users can migrate the data from legacy OpenEBS storage to the latest OpenEBS storage. + +There are different techniques/methodologies for performing data migration. Users can perform data migration within the same Kubernetes cluster or across Kubernetes clusters. The following guides outline several methodologies for migrating from legacy OpenEBS storage to latest OpenEBS storage: +- [Migration using pv-migrate Utility](#migration-using-pv-migrate) +- [Migration using velero Utility](../migration/migration-using-velero/) + +:::info +Users of non-OpenEBS storage solutions can also use these approaches described below to migrate their data to OpenEBS storage. +::: + +## Migration using pv-migrate + +In this migration process, we are using [pv-migrate](https://github.com/utkuozdemir/pv-migrate) that is a CLI tool/kubectl plugin to easily migrate the contents of one Kubernetes `PersistentVolumeClaim` to another. + +This tool is binary and can be [downloaded](https://github.com/utkuozdemir/pv-migrate/releases/download/v1.7.1/pv-migrate_v1.7.1_linux_x86_64.tar.gz) from the release section for linux/amd64. For other OS and arch, download the respective binary from the latest [release section](https://github.com/utkuozdemir/pv-migrate/releases/tag/v1.7.1). + +1. Once downloaded, untar the binary as below: + +``` +tar -xvf pv-migrate_v1.7.1_linux_x86_64.tar.gz +``` + +2. Add the binary to `PATH` or move it to `/usr/local/bin` to use the binary like any usual binary. + +``` +mv pv-migrate /usr/local/bin +``` + +The binary can be used as specified in the migrate flows. + +## Migration from Local PV Device to Local PV LVM + +:::info +.The following example describes the steps to migrate data from legacy OpenEBS Local PV Device storage to OpenEBS Local PV LVM storage. Legacy OpenEBS Local PV ZFS storage users can also use the below steps to migrate to OpenEBS Local PV LVM storage. +::: + +### Assumptions + +- Local PV Device is already deployed. +- MongoDB Standalone is deployed as below using the Local PV Device PVC. 
(Here, MongoDB Standalone is an example.) + +``` +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: localpv-vol +spec: + storageClassName: openebs-device + accessModes: ["ReadWriteOnce"] + volumeMode: Filesystem + resources: + requests: + storage: 5Gi +``` + +- For validation, some data has been inserted in the MongoDB as an example below: + +``` +db.admin.insertMany([{name: "Max"}, {name:"Alex"}]) + +[ + { _id: ObjectId('65eaafa01cd2b6de45285d86'), name: 'Max' }, + { _id: ObjectId('65eaafa01cd2b6de45285d87'), name: 'Alex' } +] +``` +### Steps to migrate Local PV Device to Local PV LVM + +Follow the steps below to migrate OpenEBS Local PV Device to OpenEBS Local PV LVM. + +1. [Install Local Engine](../../../quickstart-guide/installation.md) on your cluster. + +2. Create a LVM PVC of the same [configuration](../../../user-guides/local-engine-user-guide/lvm-localpv.md#configuration). + +:::info +For the LVM volume to be created, the node (where the application was deployed) needs to be same as that of where Volume Group (VG) is created. +::: + +See the example below: + +``` +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: openebs-lvmpv +allowVolumeExpansion: true +parameters: + storage: "lvm" + volgroup: "lvmvg" +provisioner: local.csi.openebs.io +allowedTopologies: +- matchLabelExpressions: + - key: kubernetes.io/hostname + values: + - node-1-152720 +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: csi-lvmpv +spec: + storageClassName: openebs-lvmpv + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi +``` + +3. Scale down the MongoDB pod. + +:::note +In your case, scale down or delete the concerned application pod. +::: + +4. Start the migration and let it complete. + +:::info +Use the correct Local PV Device PVC name that your application has. +::: + +See the example below: + +``` +pv-migrate migrate \ + --source-namespace default \ + --dest-namespace default \ + localpv-vol csi-lvmpv + +πŸš€ Starting migration +πŸ’­ Will attempt 3 strategies: mnt2, svc, lbsvc +🚁 Attempting strategy: mnt2 +πŸ“‚ Copying data... 100% |β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| (3.4 GB/s) +πŸ“‚ Copying data... 0% | | [0s:0s]🧹 Cleaning up +πŸ“‚ Copying data... 100% |β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| +✨ Cleanup done +βœ… Migration succeeded +``` + +5. Deploy the MongoDB application using the LVM PVC. + +6. Once the MongoDB pod is created, check the data that was persisted previously. + +``` +root@mongo-lvm-556f58cd7d-rws6l:/# mongosh -u admin -p admin123 +Current Mongosh Log ID: 65eabe0ee915a8cf7d9eee57 +Connecting to: mongodb://@127.0.0.1:27017/?directConnection=true&serverSelectionTimeoutMS=2000&appName=mongosh+2.1.5 +Using MongoDB: 7.0.6 +Using Mongosh: 2.1.5 + +For mongosh info see: https://docs.mongodb.com/mongodb-shell/ + +------ + The server generated these startup warnings when booting + 2024-03-08T07:27:19.404+00:00: Using the XFS filesystem is strongly recommended with the WiredTiger storage engine. See http://dochub.mongodb.org/core/prodnotes-filesystem + 2024-03-08T07:27:19.747+00:00: vm.max_map_count is too low +------ + +test> db.admin.find().pretty() +[ + { _id: ObjectId('65eab75b8f5d183790d7bbd5'), name: 'Max' }, + { _id: ObjectId('65eab75b8f5d183790d7bbd6'), name: 'Alex' } +] +``` + +The migration is successful. + +The Local PV Device volumes and pools can now be removed and Local PV Device can be uninstalled. 
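+
+As a final cleanup, the old claim can be removed once the application is running against the LVM volume. The sketch below assumes the PVC names used in this example (`localpv-vol` and `csi-lvmpv`) and the `default` namespace; adjust both before running anything destructive.
+
+```
+# Confirm the new LVM volume is bound and in use by the application
+kubectl get pvc csi-lvmpv -n default
+
+# Remove the old Local PV Device claim only after the data has been verified
+kubectl delete pvc localpv-vol -n default
+```
+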
+ +## Migration from cStor to Replicated + +:::info +The following example describes the steps to migrate data from legacy OpenEBS CStor storage to OpenEBS Replicated (f.k.a Mayastor) storage. Legacy OpenEBS Jiva storage users can also use the below steps to migrate to OpenEBS Replicated. +::: + +### Assumptions + +- cStor is already deployed. +- MongoDB Standalone is deployed as below using the cStor PVC. (Here, MongoDB Standalone is an example.) + +``` +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: cstor-pvc +spec: + storageClassName: cstor-csi-disk + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi +``` + +- For validation, some data has been inserted in the MongoDB as an example below: + +``` +db.admin.insertMany([{name: "Max"}, {name:"Alex"}]) + +[ + { _id: ObjectId('65eaafa01cd2b6de45285d86'), name: 'Max' }, + { _id: ObjectId('65eaafa01cd2b6de45285d87'), name: 'Alex' } +] +``` +### Steps to migrate cStor to Replicated + +Follow the steps below to migrate OpenEBS cStor to OpenEBS Replicated (fka Mayastor). + +1. [Install Replicated Engine](../../../quickstart-guide/installation.md) on your cluster. + +2. Create a replicated PVC of the same [configuration](../../../user-guides/replicated-engine-user-guide/replicated-engine-deployment.md). See the example below: + +``` +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ms-volume-claim +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + storageClassName: mayastor-2 +``` + +3. Scale down the MongoDB pod. + +:::note +In your case, scale down or delete the concerned application pod. +::: + +4. Start the migration and let it complete. + +:::info +Use the correct cStor PVC name that your application has. +::: + +See the example below: + +``` +pv-migrate migrate \ + --source-namespace default \ + --dest-namespace default \ + cstor-pvc ms-volume-claim + +πŸš€ Starting migration +πŸ’­ Will attempt 3 strategies: mnt2, svc, lbsvc +🚁 Attempting strategy: mnt2 +πŸ“‚ Copying data... 100% |β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| (2.8 GB/s) +πŸ“‚ Copying data... 0% | | [0s:0s]🧹 Cleaning up +πŸ“‚ Copying data... 100% |β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| +✨ Cleanup done +βœ… Migration succeeded +``` + +5. Deploy the MongoDB application using the Replicated PVC. + +6. Once the MongoDB pod is created, check the data that was persisted previously. + +``` +root@mongo-mayastor-c7d645666-b98pc:/# mongosh -u admin -p admin123 +Current Mongosh Log ID: 65eab3877cce529ad560c3e8 +Connecting to: mongodb://@127.0.0.1:27017/?directConnection=true&serverSelectionTimeoutMS=2000&appName=mongosh+2.1.5 +Using MongoDB: 7.0.6 +Using Mongosh: 2.1.5 + +For mongosh info see: https://docs.mongodb.com/mongodb-shell/ + +------ + The server generated these startup warnings when booting + 2024-03-08T06:41:42.650+00:00: Using the XFS filesystem is strongly recommended with the WiredTiger storage engine. See http://dochub.mongodb.org/core/prodnotes-filesystem + 2024-03-08T06:41:44.268+00:00: vm.max_map_count is too low +------ + +test> db.admin.find().pretty() +[ + { _id: ObjectId('65eaafa01cd2b6de45285d86'), name: 'Max' }, + { _id: ObjectId('65eaafa01cd2b6de45285d87'), name: 'Alex' } +] +``` + +The migration is successful. + +The cStor volume and pools can now be removed and cStor can be uninstalled. 
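+
+Before uninstalling cStor, it is worth confirming that the new replicated volume is healthy. The sketch below assumes the `kubectl mayastor` plugin is installed and uses the PVC names from this example (`ms-volume-claim` and `cstor-pvc`); adjust them to your setup.
+
+```
+# The replicated volume backing ms-volume-claim should report Online
+kubectl mayastor get volumes
+
+# Remove the old cStor claim only after the data has been verified
+kubectl delete pvc cstor-pvc -n default
+```
+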
+ +## See Also + + - [Migration using Velero](../data-migration/migration-using-velero/overview.md) \ No newline at end of file diff --git a/docs/main/user-guides/migration/migration-for-distributed-db/distributeddb-backup.md b/docs/main/user-guides/data-migration/migration-using-velero/migration-for-distributed-db/distributeddb-backup.md similarity index 90% rename from docs/main/user-guides/migration/migration-for-distributed-db/distributeddb-backup.md rename to docs/main/user-guides/data-migration/migration-using-velero/migration-for-distributed-db/distributeddb-backup.md index 33b7a9265..86a829f8f 100644 --- a/docs/main/user-guides/migration/migration-for-distributed-db/distributeddb-backup.md +++ b/docs/main/user-guides/data-migration/migration-using-velero/migration-for-distributed-db/distributeddb-backup.md @@ -11,57 +11,50 @@ description: This section explains how to backup from cStor for Distributed DBs. In the current setup, we have a CStor cluster serving as the source, with Cassandra running as a StatefulSet, utilizing CStor volumes. -{% tabs %} -{% tab title="Command" %} -```text +**Command** + +``` kubectl get pods -n cassandra ``` -{% endtab %} -{% tab title="Example Output" %} -```text +**Example Output** + +``` NAME READY STATUS RESTARTS AGE cassandra-0 1/1 Running 0 6m22s cassandra-1 1/1 Running 0 4m23s cassandra-2 1/1 Running 0 2m15s ``` -{% endtab %} -{% endtabs %} -{% tabs %} -{% tab title="Command" %} -```text +**Command** + +``` kubectl get pvc -n cassandra ``` -{% endtab %} -{% tab title="Example Output" %} -```text +**Example Output** + +``` NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE data-cassandra-0 Bound pvc-05c464de-f273-4d04-b915-600bc434d762 3Gi RWO cstor-csi-disk 6m37s data-cassandra-1 Bound pvc-a7ac4af9-6cc9-4722-aee1-b8c9e1c1f8c8 3Gi RWO cstor-csi-disk 4m38s data-cassandra-2 Bound pvc-0980ea22-0b4b-4f02-bc57-81c4089cf55a 3Gi RWO cstor-csi-disk 2m30s ``` -{% endtab %} -{% endtabs %} -{% tabs %} -{% tab title="Command" %} -```text +**Command** + +``` kubectl get cvc -n openebs ``` -{% endtab %} -{% tab title="Example Output" %} -```text +**Example Output** + +``` NAME CAPACITY STATUS AGE pvc-05c464de-f273-4d04-b915-600bc434d762 3Gi Bound 6m47s pvc-0980ea22-0b4b-4f02-bc57-81c4089cf55a 3Gi Bound 2m40s pvc-a7ac4af9-6cc9-4722-aee1-b8c9e1c1f8c8 3Gi Bound 4m48s ``` -{% endtab %} -{% endtabs %} - ## Step 2: Velero Installation @@ -73,7 +66,6 @@ velero install --use-node-agent --provider gcp --plugins velero/velero-plugin-fo Verify the Velero namespace for Node Agent and Velero pods: - ``` kubectl get pods -n velero ``` @@ -84,7 +76,6 @@ In this example, we create a new database with sample data in Cassandra, a distr ![](https://hackmd.io/_uploads/ryvcoj-l6.png) - The data is distributed across all replication instances. 
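+
+If you prefer to verify this from the command line rather than the screenshots, a minimal sketch is shown below. It assumes the keyspace and table used later in this guide (`openebs.data`), the `cassandra` namespace, and that `nodetool` and `cqlsh` are available inside the Cassandra image; substitute your own credentials.
+
+```
+# Each Cassandra node should own a share of the data
+kubectl exec -it cassandra-0 -n cassandra -- nodetool status
+
+# Query the sample data from inside a pod
+kubectl exec -it cassandra-0 -n cassandra -- cqlsh -u <user> -p <password> -e "SELECT * FROM openebs.data;"
+```
+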
![](https://hackmd.io/_uploads/ryzoojZgT.png) @@ -119,4 +110,7 @@ Check the backup status, run the following command: velero get backup | grep cassandra-backup-19-09-23 ``` +## See Also +- [Migration from Legacy Storage to Latest Storage Solution](../data-migration/migration-using-pv-migrate.md) +- [Migration for Replicated DB](../data-migration/migration-using-velero/migration-for-replicated-db/replicateddb-backup.md) \ No newline at end of file diff --git a/docs/main/user-guides/migration/migration-for-distributed-db/distributeddb-restore.md b/docs/main/user-guides/data-migration/migration-using-velero/migration-for-distributed-db/distributeddb-restore.md similarity index 88% rename from docs/main/user-guides/migration/migration-for-distributed-db/distributeddb-restore.md rename to docs/main/user-guides/data-migration/migration-using-velero/migration-for-distributed-db/distributeddb-restore.md index 9bd448e70..1d24cfbf0 100644 --- a/docs/main/user-guides/migration/migration-for-distributed-db/distributeddb-restore.md +++ b/docs/main/user-guides/data-migration/migration-using-velero/migration-for-distributed-db/distributeddb-restore.md @@ -1,21 +1,21 @@ --- id: distributeddb-restore -title: Restoring Mayastor +title: Restoring to Replicated Storage keywords: - - Restoring Mayastor -description: This section explains how to Restore from cStor Backup to Mayastor for Distributed DBs. + - Restoring to Mayastor + - Restoring to Replicated Storage +description: This section explains how to Restore from cStor Backup to Replicated Storage for Distributed DBs. --- -# Steps to Restore from cStor Backup to Mayastor for Distributed DBs (Cassandra) +# Steps to Restore from cStor Backup to Replicated Storage for Distributed DBs (Cassandra) -Cassandra is a popular NoSQL database used for handling large amounts of data with high availability and scalability. In Kubernetes environments, managing and restoring Cassandra backups efficiently is crucial. In this article, we'll walk you through the process of restoring a Cassandra database in a Kubernetes cluster using Velero, and we'll change the storage class to Mayastor for improved performance. +Cassandra is a popular NoSQL database used for handling large amounts of data with high availability and scalability. In Kubernetes environments, managing and restoring Cassandra backups efficiently is crucial. In this article, we will walk you through the process of restoring a Cassandra database in a Kubernetes cluster using Velero, and we will change the storage class to Replicated Storage (f.k.a Mayastor) for improved performance. - -{% hint style="info" %} +:::info Before you begin, make sure you have the following: - Access to a Kubernetes cluster with Velero installed. - A backup of your Cassandra database created using Velero. -- Mayastor configured in your Kubernetes environment. -{% endhint %} +- Replicated Storage configured in your Kubernetes environment. +::: ## Step 1: Set Up Kubernetes Credentials and Install Velero @@ -29,7 +29,6 @@ gcloud container clusters get-credentials CLUSTER_NAME --zone ZONE --project PRO Install Velero with the necessary plugins, specifying your backup bucket, secret file, and uploader type. Make sure to replace the placeholders with your specific values: - ``` velero get backup | grep YOUR_BACKUP_NAME ``` @@ -42,7 +41,7 @@ Confirm that your Cassandra backup is available in Velero. 
This step ensures tha velero get backup | grep YOUR_BACKUP_NAME ``` -Check the status of the BackupStorageLocation to ensure it's available: +Check the status of the BackupStorageLocation to ensure it is available: ``` kubectl get backupstoragelocation -n velero @@ -58,38 +57,37 @@ velero restore create RESTORE_NAME --from-backup YOUR_BACKUP_NAME ## Step 4: Monitor Restore Progress -Monitor the progress of the restore operation using the below commands. -Velero initiates the restore process by creating an initialization container within the application pod. This container is responsible for restoring the volumes from the backup. As the restore operation proceeds, you can track its status, which typically transitions from **in progress** to **Completed**. - +Monitor the progress of the restore operation using the below commands: +Velero initiates the restore process by creating an initialization container within the application pod. This container is responsible for restoring the volumes from the backup. As the restore operation proceeds, you can track its status, which typically transitions from **in progress** to **Completed**. In this scenario, the storage class for the PVCs remains as `cstor-csi-disk` since these PVCs were originally imported from a cStor volume. -{% hint style="note" %} +:::note Your storage class was originally set to `cstor-csi-disk` because you imported this PVC from a cStor volume, the status might temporarily stay as **In Progress** and your PVC will be in **Pending** status. -{% endhint %} - +::: ``` velero get restore | grep RESTORE_NAME ``` + Inspect the status of the PVCs in the cassandra namespace: ``` kubectl get pvc -n cassandra ``` + ``` kubectl get pods -n cassandra ``` - -## Step 5: Back Up PVC YAML +## Step 5: Back Up PVC YAML Create a backup of the Persistent Volume Claims (PVCs) and then modify their storage class to `mayastor-single-replica`. -{% hint style="note" %} +:::note The statefulset for Cassandra will still have the `cstor-csi-disk` storage class at this point. This will be addressed in the further steps. -{% endhint %} +::: ``` kubectl get pvc -n cassandra -o yaml > cassandra_pvc_19-09.yaml @@ -216,16 +214,16 @@ Run this command to check if all the pods are running: kubectl get pods -n cassandra ``` -## Step 8: Verify Cassandra Data and StatefulSet +## Step 8: Verify Cassandra Data and StatefulSet -### Access a Cassandra pod using cqlsh and check the data +### Access a Cassandra Pod using cqlsh and Check the Data - You can use the following command to access the Cassandra pods. This command establishes a connection to the Cassandra database running on pod `cassandra-1`: ``` cqlsh -u -p cassandra-1.cassandra-headless.cassandra.svc.cluster.local 9042 ``` -- The query results should display the data you backed up from cStor. In your output, you're expecting to see the data you backed up. +- The query results should display the data you backed up from cStor. In your output, youare expecting to see the data you backed up. ``` cassandra@cqlsh> USE openebs; @@ -245,13 +243,7 @@ cassandra@cqlsh:openebs> select * from openebs.data; - After verifying the data, you can exit the Cassandra shell by typing `exit`. 
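+
+This guide assumes a `mayastor-single-replica` StorageClass already exists; it was used when the PVCs were recreated earlier in this guide. For reference, a minimal sketch of such a class is shown below; the provisioner and parameters reflect a typical Replicated Storage (f.k.a Mayastor) installation and should be verified against your own setup.
+
+```
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: mayastor-single-replica
+parameters:
+  protocol: nvmf
+  repl: "1"
+provisioner: io.openebs.csi-mayastor
+```
+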
- - - - - - -### Modify your Cassandra StatefulSet YAML to use the mayastor-single-replica storage class +### Modify your Cassandra StatefulSet YAML to use the Replicated Storage-Single-Replica Storage Class - Before making changes to the Cassandra StatefulSet YAML, create a backup to preserve the existing configuration by running the following command: @@ -259,7 +251,8 @@ cassandra@cqlsh:openebs> select * from openebs.data; kubectl get sts cassandra -n cassandra -o yaml > cassandra_sts_backup.yaml ``` -- You can modify the Cassandra StatefulSet YAML to change the storage class to `mayastor-single-replica`. Here's the updated YAML: +- You can modify the Cassandra StatefulSet YAML to change the storage class to `mayastor-single-replica`. Here is the updated YAML: + ``` apiVersion: apps/v1 kind: StatefulSet @@ -314,9 +307,7 @@ spec: kubectl apply -f cassandra_sts_modified.yaml ``` - - -### Delete the Cassandra StatefulSet with the --cascade=orphan flag +### Delete the Cassandra StatefulSet with the --cascade=orphan Flag Delete the Cassandra StatefulSet while keeping the pods running without controller management: @@ -324,9 +315,7 @@ Delete the Cassandra StatefulSet while keeping the pods running without controll kubectl delete sts cassandra -n cassandra --cascade=orphan ``` - - -### Recreate the Cassandra StatefulSet using the updated YAML +### Recreate the Cassandra StatefulSet using the Updated YAML - Use the kubectl apply command to apply the modified StatefulSet YAML configuration file, ensuring you are in the correct namespace where your Cassandra deployment resides. Replace with the actual path to your YAML file. @@ -346,7 +335,7 @@ kubectl get sts -n cassandra kubectl get pods -n cassandra ``` +## See Also - - - +- [Migration from Legacy Storage to Latest Storage Solution](../data-migration/migration-using-pv-migrate.md) +- [Migration for Replicated DB](../data-migration/migration-using-velero/migration-for-replicated-db/replicateddb-backup.md) \ No newline at end of file diff --git a/docs/main/user-guides/migration/migration-for-replicated-db/replicateddb-backup.md b/docs/main/user-guides/data-migration/migration-using-velero/migration-for-replicated-db/replicateddb-backup.md similarity index 83% rename from docs/main/user-guides/migration/migration-for-replicated-db/replicateddb-backup.md rename to docs/main/user-guides/data-migration/migration-using-velero/migration-for-replicated-db/replicateddb-backup.md index 276be9d3f..7f88a844f 100644 --- a/docs/main/user-guides/migration/migration-for-replicated-db/replicateddb-backup.md +++ b/docs/main/user-guides/data-migration/migration-using-velero/migration-for-replicated-db/replicateddb-backup.md @@ -7,24 +7,23 @@ description: This section explains how to backup from cStor for Replicated DBs. --- # Steps to take a Backup from cStor for Replicated DB (Mongo) -{% hint style="note" %} +:::note If you are deploying databases using operators, you need to find a way to actively modify the entire deployment through the operator. This ensures that you control and manage changes effectively within the operator-driven database deployment. -{% endhint %} +::: ## Step 1: Backup from cStor Cluster Currently, we have a cStor cluster as the source, with a clustered MongoDB running as a StatefulSet using cStor volumes. 
+**Command** -{% tabs %} -{% tab title="Command" %} -```text +``` kubectl get pods ``` -{% endtab %} -{% tab title="Output" %} -```text +**Output** + +``` NAME READY STATUS RESTARTS AGE mongo-client-758ddd54cc-h2gwl 1/1 Running 0 47m mongod-0 1/1 Running 0 47m @@ -32,89 +31,74 @@ mongod-1 1/1 Running 0 44m mongod-2 1/1 Running 0 42m ycsb-775fc86c4b-kj5vv 1/1 Running 0 47m ``` -{% endtab %} -{% endtabs %} +**Command** -{% tabs %} -{% tab title="Command" %} -```text +``` kubectl get pvc ``` -{% endtab %} -{% tab title="Output" %} -```text +**Output** + +``` NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE mongodb-persistent-storage-claim-mongod-0 Bound pvc-cb115a0b-07f4-4912-b686-e160e8a0690d 3Gi RWO cstor-csi-disk 54m mongodb-persistent-storage-claim-mongod-1 Bound pvc-c9214764-7670-4cda-87e3-82f0bc59d8c7 3Gi RWO cstor-csi-disk 52m mongodb-persistent-storage-claim-mongod-2 Bound pvc-fc1f7ed7-d99e-40c7-a9b7-8d6244403a3e 3Gi RWO cstor-csi-disk 50m ``` -{% endtab %} -{% endtabs %} -{% tabs %} -{% tab title="Command" %} -```text +**Command** + +``` kubectl get cvc -n openebs ``` -{% endtab %} -{% tab title="Output" %} -```text +**Output** + +``` NAME CAPACITY STATUS AGE pvc-c9214764-7670-4cda-87e3-82f0bc59d8c7 3Gi Bound 53m pvc-cb115a0b-07f4-4912-b686-e160e8a0690d 3Gi Bound 55m pvc-fc1f7ed7-d99e-40c7-a9b7-8d6244403a3e 3Gi Bound 50m ``` -{% endtab %} -{% endtabs %} - ## Step 2: Install Velero -{% hint style="note" %} -For the prerequisites, refer to the [overview](replicateddb-overview.md) section. -{% endhint %} +:::info +For the prerequisites, see to the [overview](replicateddb-overview.md) section. +::: Run the following command to install Velero: -{% tabs %} -{% tab title="Command" %} -```text +**Command** + +``` velero install --use-node-agent --provider gcp --plugins velero/velero-plugin-for-gcp:v1.6.0 --bucket velero-backup-datacore --secret-file ./credentials-velero --uploader-type restic ``` -{% endtab %} -{% tab title="Output" %} -```text +**Output** + +``` [Installation progress output] ``` -{% endtab %} -{% endtabs %} Verify the Velero namespace for Node Agent and Velero pods: -{% tabs %} -{% tab title="Command" %} -```text +**Command** + +``` kubectl get pods -n velero ``` -{% endtab %} -{% tab title="Output" %} -```text +**Output** + +``` NAME READY STATUS RESTARTS AGE node-agent-cwkrn 1/1 Running 0 43s node-agent-qg6hd 1/1 Running 0 43s node-agent-v6xbk 1/1 Running 0 43s velero-56c45f5c64-4hzn7 1/1 Running 0 43s ``` -{% endtab %} -{% endtabs %} - - - ## Step 3: Data Validation @@ -138,7 +122,7 @@ Velero supports two approaches for discovering pod volumes to be backed up using 1. **Opt-in approach**: Annotate pods containing volumes to be backed up. 2. **Opt-out approach**: Backup all pod volumes with the ability to opt-out specific volumes. 
-### Opt-In for Primary MongoDB Pod: +### Opt-In for Primary MongoDB Pod To ensure that our primary MongoDB pod, which receives writes and replicates data to secondary pods, is included in the backup, we need to annotate it as follows: @@ -146,7 +130,7 @@ To ensure that our primary MongoDB pod, which receives writes and replicates dat kubectl annotate pod/mongod-0 backup.velero.io/backup-volumes=mongodb-persistent-storage-claim ``` -### Opt-Out for Secondary MongoDB Pods and PVCs: +### Opt-Out for Secondary MongoDB Pods and PVCs To exclude secondary MongoDB pods and their associated Persistent Volume Claims (PVCs) from the backup, we can label them as follows: @@ -170,42 +154,42 @@ kubectl label pvc mongodb-persistent-storage-claim-mongod-2 velero.io/exclude-fr persistentvolumeclaim/mongodb-persistent-storage-claim-mongod-2 labeled ``` -### Backup Execution: +### Backup Execution Create a backup of the entire namespace. If any other applications run in the same namespace as MongoDB, we can exclude them from the backup using labels or flags from the Velero CLI: -{% tabs %} -{% tab title="Command" %} -```text +**Command** + +``` velero backup create mongo-backup-13-09-23 --include-namespaces default --default-volumes-to-fs-backup --wait ``` -{% endtab %} -{% tab title="Output" %} -```text +**Output** + +``` Backup request "mongo-backup-13-09-23" submitted successfully. Waiting for backup to complete. You may safely press ctrl-c to stop waiting - your backup will continue in the background. ........... Backup completed with status: Completed. You may check for more information using the commands `velero backup describe mongo-backup-13-09-23` and `velero backup logs mongo-backup-13-09-23`. ``` -{% endtab %} -{% endtabs %} -### Backup Verification: +### Backup Verification To check the status of the backup using the Velero CLI, you can use the following command. If the backup fails for any reason, you can inspect the logs with the velero backup logs command: +**Command** -{% tabs %} -{% tab title="Command" %} -```text +``` velero get backup | grep 13-09-23 ``` -{% endtab %} -{% tab title="Output" %} -```text +**Output** + +``` mongo-backup-13-09-23 Completed 0 0 2023-09-13 13:15:32 +0000 UTC 29d default ``` -{% endtab %} -{% endtabs %} \ No newline at end of file + +## See Also + +- [Migration from Legacy Storage to Latest Storage Solution](../data-migration/migration-using-pv-migrate.md) +- [Migration for Distrubuted DB](../data-migration/migration-using-velero/migration-for-distributed-db/distributeddb-backup.md) diff --git a/docs/main/user-guides/migration/migration-for-replicated-db/replicateddb-restore.md b/docs/main/user-guides/data-migration/migration-using-velero/migration-for-replicated-db/replicateddb-restore.md similarity index 90% rename from docs/main/user-guides/migration/migration-for-replicated-db/replicateddb-restore.md rename to docs/main/user-guides/data-migration/migration-using-velero/migration-for-replicated-db/replicateddb-restore.md index 261239e39..40f302f7f 100644 --- a/docs/main/user-guides/migration/migration-for-replicated-db/replicateddb-restore.md +++ b/docs/main/user-guides/data-migration/migration-using-velero/migration-for-replicated-db/replicateddb-restore.md @@ -1,32 +1,33 @@ --- id: replicateddb-restore -title: Restoring Mayastor +title: Restoring to Replicated Storage keywords: - - Restoring Mayastor -description: This section explains how to Restore from cStor Backup to Mayastor for Replicated DBs. 
+ - Restoring to Mayastor + - Restoring to Replicated Storage +description: This section explains how to Restore from cStor Backup to Replicated Storage for Replicated DBs. --- -# Steps to Restore from cStor Backup to Mayastor for Replicated DBs (Mongo) +# Steps to Restore from cStor Backup to Replicated Storage for Replicated DBs (Mongo) -{% hint style=β€œinfo” %} +:::info Before you begin, make sure you have the following: - Access to a Kubernetes cluster with Velero installed. - A backup of your Mongo database created using Velero. -- Mayastor configured in your Kubernetes environment. -{% endhint %} +- Replicated Storage (f.k.a Mayastor) configured in your Kubernetes environment. +::: -## Step 1: Install Velero with GCP Provider on Destination (Mayastor Cluster) +## Step 1: Install Velero with GCP Provider on Destination (Replicated Storage Cluster) Install Velero with the GCP provider, ensuring you use the same values for the `BUCKET-NAME` and `SECRET-FILENAME` placeholders that you used originally. These placeholders should be replaced with your specific values: -{% tabs %} -{% tab title="Command" %} -```text +**Command** + +``` velero install --use-node-agent --provider gcp --plugins velero/velero-plugin-for-gcp:v1.6.0 --bucket BUCKET-NAME --secret-file SECRET-FILENAME --uploader-type restic ``` -{% endtab %} -{% tab title="Output" %} -```text +**Output** + +``` CustomResourceDefinition/backuprepositories.velero.io: attempting to create resource CustomResourceDefinition/backuprepositories.velero.io: attempting to create resource client CustomResourceDefinition/backuprepositories.velero.io: created @@ -88,68 +89,51 @@ DaemonSet/node-agent: created Velero is installed! β›΅ Use 'kubectl logs deployment/velero -n velero' to view the status. thulasiraman_ilangovan@cloudshell:~$ ``` -{% endtab %} -{% endtabs %} - - ## Step 2: Verify Backup Availability -Check the availability of your previously-saved backups. If the credentials or bucket information doesn't match, you won't be able to see the backups: +Check the availability of your previously-saved backups. If the credentials or bucket information does not match, the backups cannot be seen: +**Command** -{% tabs %} -{% tab title="Command" %} -```text +``` velero get backup | grep 13-09-23 ``` -{% endtab %} -{% tab title="Output" %} -```text +**Output** + +``` mongo-backup-13-09-23 Completed 0 0 2023-09-13 13:15:32 +0000 UTC 29d default ``` -{% endtab %} -{% endtabs %} -{% tabs %} -{% tab title="Command" %} -```text +**Command** + +``` kubectl get backupstoragelocation -n velero ``` -{% endtab %} +**Output** -{% tab title="Output" %} -```text +``` NAME PHASE LAST VALIDATED AGE DEFAULT default Available 23s 3m32s true ``` -{% endtab %} -{% endtabs %} - - ## Step 3: Restore Using Velero CLI Initiate the restore process using Velero CLI with the following command: -{% tabs %} -{% tab title="Command" %} -```text +**Command** + +``` velero restore create mongo-restore-13-09-23 --from-backup mongo-backup-13-09-23 ``` -{% endtab %} -{% tab title="Output" %} -```text +**Output** + +``` Restore request "mongo-restore-13-09-23" submitted successfully. Run `velero restore describe mongo-restore-13-09-23` or `velero restore logs mongo-restore-13-09-23` for more details. ``` -{% endtab %} -{% endtabs %} - - - ## Step 4: Check Restore Status @@ -161,11 +145,9 @@ velero get restore When Velero performs a restore, it deploys an init container within the application pod, responsible for restoring the volume. 
Initially, the restore status will be `InProgress`. -{% hint style="note" %} +:::note Your storage class was originally set to `cstor-csi-disk` because you imported this PVC from a cStor volume, the status might temporarily stay as **In Progress** and your PVC will be in **Pending** status. -{% endhint %} - - +::: ## Step 5: Backup PVC and Change Storage Class @@ -183,10 +165,9 @@ ls -lrt | grep pvc-mongo.yaml - Edit the `pvc-mongo.yaml` file to update its storage class. Below is the modified PVC configuration with `mayastor-single-replica` set as the new storage class: -{% hint style="note" %} +:::note The statefulset for Mongo will still have the `cstor-csi-disk` storage class at this point. This will be addressed in the further steps. -{% endhint %} - +::: ``` apiVersion: v1 @@ -210,7 +191,7 @@ spec: volumeMode: Filesystem ``` -## Step 6: Resolve issue where PVC is in a Pending +## Step 6: Resolve Issue where PVC is Pending - Begin by deleting the problematic PVC with the following command: @@ -224,7 +205,7 @@ kubectl delete pvc mongodb-persistent-storage-claim-mongod-0 kubectl apply -f pvc-mongo.yaml ``` -## Step 7: Check Velero init container +## Step 7: Check Velero Init Container After recreating the PVC with Mayastor storageClass, you will observe the presence of a Velero initialization container within the application pod. This container is responsible for restoring the required volumes. @@ -241,20 +222,18 @@ The output will display the pods' status, including the Velero initialization co You can track the progress of the restore by running: -{% tabs %} -{% tab title="Command" %} -```text +**Command** + +``` velero get restore ``` -{% endtab %} -{% tab title="Output" %} -```text +**Output** + +``` NAME BACKUP STATUS STARTED COMPLETED ERRORS WARNINGS CREATED SELECTOR mongo-restore-13-09-23 mongo-backup-13-09-23 Completed 2023-09-13 13:56:19 +0000 UTC 2023-09-13 14:06:09 +0000 UTC 0 4 2023-09-13 13:56:19 +0000 UTC ``` -{% endtab %} -{% endtabs %} You can then verify the data restoration by accessing your MongoDB instance. In the provided example, we used the "mongosh" shell to connect to the MongoDB instance and check the databases and their content. The data should reflect what was previously backed up from the cStor storage. @@ -268,7 +247,6 @@ Due to the statefulset's configuration with three replicas, you will notice that ## Step 9: Capture the StatefulSet Configuration and Modify Storage Class - Capture the current configuration of the StatefulSet for MongoDB by running the following command: ``` @@ -377,10 +355,6 @@ spec: ``` - - - - ## Step 10: Delete StatefulSet (Cascade=False) Delete the StatefulSet while preserving the pods with the following command: @@ -421,29 +395,27 @@ kubectl delete pvc mongodb-persistent-storage-claim-mongod-1 Recreate the StatefulSet with the Yaml file. 
-{% tabs %} -{% tab title="Command" %} -```text +**Command** + +``` kubectl apply -f sts-mongo-mayastor.yaml ``` -{% endtab %} -{% tab title="Output" %} -```text +**Output** + +``` statefulset.apps/mongod created ``` -{% endtab %} -{% endtabs %} -{% tabs %} -{% tab title="Command" %} -```text +**Command** + +``` kubectl get pods ``` -{% endtab %} -{% tab title="Output" %} -```text +**Output** + +``` NAME READY STATUS RESTARTS AGE mongo-client-758ddd54cc-h2gwl 1/1 Running 0 31m mongod-0 1/1 Running 0 31m @@ -451,27 +423,21 @@ mongod-1 1/1 Running 0 7m54s mongod-2 1/1 Running 0 6m13s ycsb-775fc86c4b-kj5vv 1/1 Running 0 31m ``` -{% endtab %} -{% endtabs %} -{% tabs %} -{% tab title="Command" %} -```text +**Command** + +``` kubectl mayastor get volumes ``` -{% endtab %} -{% tab title="Output" %} -```text +**Output** + +``` ID REPLICAS TARGET-NODE ACCESSIBILITY STATUS SIZE THIN-PROVISIONED ALLOCATED f41c2cdc-5611-471e-b5eb-1cfb571b1b87 1 gke-mayastor-pool-2acd09ca-ppxw nvmf Online 3GiB false 3GiB 113882e1-c270-4c72-9c1f-d9e09bfd66ad 1 gke-mayastor-pool-2acd09ca-4v3z nvmf Online 3GiB false 3GiB fb4d6a4f-5982-4049-977b-9ae20b8162ad 1 gke-mayastor-pool-2acd09ca-q30r nvmf Online 3GiB false 3GiB ``` -{% endtab %} -{% endtabs %} - - ## Step 13: Verify Data Replication on Secondary DB @@ -517,4 +483,9 @@ rs0 [direct: secondary] mydb> db.accounts.find() } ] rs0 [direct: secondary] mydb> -``` \ No newline at end of file +``` + +## See Also + +- [Migration from Legacy Storage to Latest Storage Solution](../data-migration/migration-using-pv-migrate.md) +- [Migration for Distrubuted DB](../data-migration/migration-using-velero/migration-for-distributed-db/distributeddb-backup.md) diff --git a/docs/main/user-guides/data-migration/migration-using-velero/overview.md b/docs/main/user-guides/data-migration/migration-using-velero/overview.md new file mode 100644 index 000000000..b3940bd55 --- /dev/null +++ b/docs/main/user-guides/data-migration/migration-using-velero/overview.md @@ -0,0 +1,24 @@ +--- +id: overview +title: Overview +keywords: + - Overview + - Velero +description: This section provides an overview on velero. +--- +# Overview + +This documentation outlines the process of migrating application volumes from CStor to Replicated Storage (f.k.a Mayastor). We will leverage Velero for backup and restoration, facilitating the transition from a CStor cluster to a Replicated Storage cluster. This example specifically focuses on a Google Kubernetes Engine (GKE) cluster. + +**Velero Support**: Velero supports the backup and restoration of Kubernetes volumes attached to pods through File System Backup (FSB) or Pod Volume Backup. This process involves using modules from popular open-source backup tools like Restic (which we will utilize). + +- For **cloud provider plugins**, see the [Velero Docs - Providers section](https://velero.io/docs/main/supported-providers/). +- **Velero GKE Configuration (Prerequisites)**: You can find the prerequisites and configuration details for Velero in a Google Kubernetes Engine (GKE) environment on the GitHub [here](https://github.com/vmware-tanzu/velero-plugin-for-gcp#setup). +- **Object Storage Requirement**: To store backups, Velero necessitates an object storage bucket. In our case, we utilize a Google Cloud Storage (GCS) bucket. Configuration details and setup can be found on the GitHub [here](https://github.com/vmware-tanzu/velero-plugin-for-gcp#setup). 
+- **Velero Basic Installation**: For a step-by-step guide on the basic installation of Velero, see the [Velero Docs - Basic Install section](https://velero.io/docs/v1.11/basic-install/). + +## See Also + +- [Migration from Legacy Storage to Latest Storage Solution](../data-migration/migration-using-pv-migrate.md) +- [Migration for Distrubuted DB](../data-migration/migration-using-velero/migration-for-distributed-db/distributeddb-backup.md) +- [Migration for Replicated DB](../data-migration/migration-using-velero/migration-for-replicated-db/replicateddb-backup.md) \ No newline at end of file diff --git a/docs/main/user-guides/migration/legacy-storage-to-new-storage/cstor-to-replicated.md b/docs/main/user-guides/migration/legacy-storage-to-new-storage/cstor-to-replicated.md deleted file mode 100644 index 1f17dbb58..000000000 --- a/docs/main/user-guides/migration/legacy-storage-to-new-storage/cstor-to-replicated.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -id: cstor-to-replicated -title: Migration from OpenEBS cStor to OpenEBS Replicated -keywords: - - Migration from OpenEBS cStor to OpenEBS Replicated - - cStor to Replicated - - cStor to Mayastor - - Jiva to Replicated - - Jiva to Mayastor -description: This section outlines the process of migrating OpenEBS cStor to OpenEBS Replicated. ---- - -:::info -The following steps are an example about migrating from legacy storage to latest storage solution. -You can also migrate OpenEBS Jiva to OpenEBS Replicated using the steps below. -::: - -## Assumptions - -- cStor is already deployed. -- MongoDB Standalone is deployed as below using the cStor PVC. (Here, MongoDB Standalone is an example.) - -``` -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: cstor-pvc -spec: - storageClassName: cstor-csi-disk - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 5Gi -``` - -- For validation, some data has been inserted in the MongoDB as an example below: - -``` -db.admin.insertMany([{name: "Max"}, {name:"Alex"}]) - -[ - { _id: ObjectId('65eaafa01cd2b6de45285d86'), name: 'Max' }, - { _id: ObjectId('65eaafa01cd2b6de45285d87'), name: 'Alex' } -] -``` -## Steps to migrate cStor to Replicated - -Follow the steps below to migrate OpenEBS cStor to OpenEBS Replicated (fka Mayastor). - -1. [Install Replicated Engine](../../../quickstart-guide/installation.md) on your cluster. - -2. Create a replicated PVC of the same [configuration](../../../user-guides/replicated-engine-user-guide/replicated-engine-deployment.md). See the example below: - -``` -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: ms-volume-claim -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 5Gi - storageClassName: mayastor-2 -``` - -3. Scale down the MongoDB pod. - -:::note -In your case, scale down or delete the concerned application pod. -::: - -4. Start the migration and let it complete. - -:::info -Use the correct cStor PVC name that your application has. -::: - -See the example below: - -``` -pv-migrate migrate \ - --source-namespace default \ - --dest-namespace default \ - cstor-pvc ms-volume-claim - -πŸš€ Starting migration -πŸ’­ Will attempt 3 strategies: mnt2, svc, lbsvc -🚁 Attempting strategy: mnt2 -πŸ“‚ Copying data... 100% |β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| (2.8 GB/s) -πŸ“‚ Copying data... 0% | | [0s:0s]🧹 Cleaning up -πŸ“‚ Copying data... 
100% |β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| -✨ Cleanup done -βœ… Migration succeeded -``` - -5. Deploy the MongoDB application using the Replicated PVC. - -6. Once the MongoDB pod is created, check the data that was persisted previously. - -``` -root@mongo-mayastor-c7d645666-b98pc:/# mongosh -u admin -p admin123 -Current Mongosh Log ID: 65eab3877cce529ad560c3e8 -Connecting to: mongodb://@127.0.0.1:27017/?directConnection=true&serverSelectionTimeoutMS=2000&appName=mongosh+2.1.5 -Using MongoDB: 7.0.6 -Using Mongosh: 2.1.5 - -For mongosh info see: https://docs.mongodb.com/mongodb-shell/ - ------- - The server generated these startup warnings when booting - 2024-03-08T06:41:42.650+00:00: Using the XFS filesystem is strongly recommended with the WiredTiger storage engine. See http://dochub.mongodb.org/core/prodnotes-filesystem - 2024-03-08T06:41:44.268+00:00: vm.max_map_count is too low ------- - -test> db.admin.find().pretty() -[ - { _id: ObjectId('65eaafa01cd2b6de45285d86'), name: 'Max' }, - { _id: ObjectId('65eaafa01cd2b6de45285d87'), name: 'Alex' } -] -``` - -The migration is successful. - -The cStor volume and pools can now be removed and cStor can be uninstalled. \ No newline at end of file diff --git a/docs/main/user-guides/migration/legacy-storage-to-new-storage/device-to-lvm.md b/docs/main/user-guides/migration/legacy-storage-to-new-storage/device-to-lvm.md deleted file mode 100644 index f4ccbab5d..000000000 --- a/docs/main/user-guides/migration/legacy-storage-to-new-storage/device-to-lvm.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -id: device-to-lvm -title: Migration from OpenEBS Local PV Device to OpenEBS Local PV LVM -keywords: - - Migration from OpenEBS Local PV Device to OpenEBS LVM Local PV - - Local PV Device to Local PV LVM - - Local PV Device to Local PV ZFS - - Local PV Rawfile to Local PV LVM - - Local PV Rawfile to Local PV ZFS -description: This section outlines the process of migrating OpenEBS Local PV Device to OpenEBS Local PV LVM. ---- - -:::info -The following steps are an example about migrating from legacy storage to latest storage solution. -You can also migrate OpenEBS Local PV Device to OpenEBS Local PV ZFS. -::: - -## Assumptions - -- Local PV Device is already deployed. -- MongoDB Standalone is deployed as below using the Local PV Device PVC. (Here, MongoDB Standalone is an example.) - -``` -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: localpv-vol -spec: - storageClassName: openebs-device - accessModes: ["ReadWriteOnce"] - volumeMode: Filesystem - resources: - requests: - storage: 5Gi -``` - -- For validation, some data has been inserted in the MongoDB as an example below: - -``` -db.admin.insertMany([{name: "Max"}, {name:"Alex"}]) - -[ - { _id: ObjectId('65eaafa01cd2b6de45285d86'), name: 'Max' }, - { _id: ObjectId('65eaafa01cd2b6de45285d87'), name: 'Alex' } -] -``` -## Steps to migrate Local PV Device to Local PV LVM - -Follow the steps below to migrate OpenEBS Local PV Device to OpenEBS Local PV LVM. - -1. [Install Local Engine](../../../quickstart-guide/installation.md) on your cluster. - -2. Create a LVM PVC of the same [configuration](../../../user-guides/local-engine-user-guide/lvm-localpv.md#configuration). - -:::info -For the LVM volume to be created, the node (where the application was deployed) needs to be same as that of where Volume Group (VG) is created. 
-::: - -See the example below: - -``` -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: openebs-lvmpv -allowVolumeExpansion: true -parameters: - storage: "lvm" - volgroup: "lvmvg" -provisioner: local.csi.openebs.io -allowedTopologies: -- matchLabelExpressions: - - key: kubernetes.io/hostname - values: - - node-1-152720 ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: csi-lvmpv -spec: - storageClassName: openebs-lvmpv - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 5Gi -``` - -3. Scale down the MongoDB pod. - -:::note -In your case, scale down or delete the concerned application pod. -::: - -4. Start the migration and let it complete. - -:::info -Use the correct Local PV Device PVC name that your application has. -::: - -See the example below: - -``` -pv-migrate migrate \ - --source-namespace default \ - --dest-namespace default \ - localpv-vol csi-lvmpv - -πŸš€ Starting migration -πŸ’­ Will attempt 3 strategies: mnt2, svc, lbsvc -🚁 Attempting strategy: mnt2 -πŸ“‚ Copying data... 100% |β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| (3.4 GB/s) -πŸ“‚ Copying data... 0% | | [0s:0s]🧹 Cleaning up -πŸ“‚ Copying data... 100% |β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| -✨ Cleanup done -βœ… Migration succeeded -``` - -5. Deploy the MongoDB application using the LVM PVC. - -6. Once the MongoDB pod is created, check the data that was persisted previously. - -``` -root@mongo-lvm-556f58cd7d-rws6l:/# mongosh -u admin -p admin123 -Current Mongosh Log ID: 65eabe0ee915a8cf7d9eee57 -Connecting to: mongodb://@127.0.0.1:27017/?directConnection=true&serverSelectionTimeoutMS=2000&appName=mongosh+2.1.5 -Using MongoDB: 7.0.6 -Using Mongosh: 2.1.5 - -For mongosh info see: https://docs.mongodb.com/mongodb-shell/ - ------- - The server generated these startup warnings when booting - 2024-03-08T07:27:19.404+00:00: Using the XFS filesystem is strongly recommended with the WiredTiger storage engine. See http://dochub.mongodb.org/core/prodnotes-filesystem - 2024-03-08T07:27:19.747+00:00: vm.max_map_count is too low ------- - -test> db.admin.find().pretty() -[ - { _id: ObjectId('65eab75b8f5d183790d7bbd5'), name: 'Max' }, - { _id: ObjectId('65eab75b8f5d183790d7bbd6'), name: 'Alex' } -] -``` - -The migration is successful. - -The Local PV Device volumes and pools can now be removed and Local PV Device can be uninstalled. \ No newline at end of file diff --git a/docs/main/user-guides/migration/legacy-storage-to-new-storage/migration-overview.md b/docs/main/user-guides/migration/legacy-storage-to-new-storage/migration-overview.md deleted file mode 100644 index 1131e1e67..000000000 --- a/docs/main/user-guides/migration/legacy-storage-to-new-storage/migration-overview.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -id: migration-overview -title: Migration from Legacy Storage to Latest Storage Solution -keywords: - - Migration - - Data Migration -description: This section outlines the process of migrating the legacy storage to latest storage solution. ---- - -# Data Migration - -Data migration is the process of transferring data from the legacy storage to the latest storage solution. -Also, the data can be migrated from any non-OpenEBS storage to OpenEBS storage. - - - -In this migration process, we are using [pv-migrate](https://github.com/utkuozdemir/pv-migrate) that is a CLI tool/kubectl plugin to easily migrate the contents of one Kubernetes `PersistentVolumeClaim` to another. 
- -This tool is binary and can be [downloaded](https://github.com/utkuozdemir/pv-migrate/releases/download/v1.7.1/pv-migrate_v1.7.1_linux_x86_64.tar.gz) from the release section for linux/amd64. For other OS and arch, download the respective binary from the latest [release section](https://github.com/utkuozdemir/pv-migrate/releases/tag/v1.7.1). - -1. Once downloaded, untar the binary as below: - -``` -tar -xvf pv-migrate_v1.7.1_linux_x86_64.tar.gz -``` - -2. Add the binary to `PATH` or move it to `/usr/local/bin` to use the binary like any usual binary. - -``` -mv pv-migrate /usr/local/bin -``` - -The binary can be used as specified in the migrate flows. \ No newline at end of file diff --git a/docs/main/user-guides/migration/migration-for-distributed-db/distributeddb-overview.md b/docs/main/user-guides/migration/migration-for-distributed-db/distributeddb-overview.md deleted file mode 100644 index 01959de4a..000000000 --- a/docs/main/user-guides/migration/migration-for-distributed-db/distributeddb-overview.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -id: distributeddb-overview -title: Distributed DB Overview -keywords: - - Distributed DB Overview -description: This section outlines the process of migrating application volumes from CStor to Mayastor for Distributed Databases. ---- -# Migration from CStor to Mayastor for Distributed Databases (Cassandra) - - -This documentation outlines the process of migrating application volumes from CStor to Mayastor. We will leverage Velero for backup and restoration, facilitating the transition from a CStor cluster to a Mayastor cluster. This example specifically focuses on a Google Kubernetes Engine (GKE) cluster. - -**Velero Support**: Velero supports the backup and restoration of Kubernetes volumes attached to pods through File System Backup (FSB) or Pod Volume Backup. This process involves using modules from popular open-source backup tools like Restic (which we will utilize). - -- For **cloud provider plugins**, refer to the [Velero Docs - Providers section](https://velero.io/docs/main/supported-providers/). -- **Velero GKE Configuration (Prerequisites)**: You can find the prerequisites and configuration details for Velero in a Google Kubernetes Engine (GKE) environment on the GitHub [here](https://github.com/vmware-tanzu/velero-plugin-for-gcp#setup). -- **Object Storage Requirement**: To store backups, Velero necessitates an object storage bucket. In our case, we utilize a Google Cloud Storage (GCS) bucket. Configuration details and setup can be found on the GitHub [here](https://github.com/vmware-tanzu/velero-plugin-for-gcp#setup). -- **Velero Basic Installation**: For a step-by-step guide on the basic installation of Velero, refer to the [Velero Docs - Basic Install section](https://velero.io/docs/v1.11/basic-install/). - diff --git a/docs/main/user-guides/migration/migration-for-replicated-db/replicateddb-overview.md b/docs/main/user-guides/migration/migration-for-replicated-db/replicateddb-overview.md deleted file mode 100644 index 7be772c35..000000000 --- a/docs/main/user-guides/migration/migration-for-replicated-db/replicateddb-overview.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -id: replicateddb-overview -title: Replicated DB Overview -keywords: - - Replicated DB Overview -description: This section outlines the process of migrating application volumes from CStor to Mayastor for Replicated Databases. 
---- -# Migrating from CStor to Mayastor for Replicated Databases (MongoDB) - - -This documentation provides a comprehensive guide on migrating CStor application volumes to Mayastor. We utilize Velero for the backup and restoration process, enabling a seamless transition from a CStor cluster to Mayastor. This example specifically focuses on a GKE cluster. - -Velero offers support for the backup and restoration of Kubernetes volumes attached to pods directly from the volume's file system. This is known as File System Backup (FSB) or Pod Volume Backup. The data movement is facilitated through the use of modules from free, open-source backup tools such as Restic (which is the tool of choice in this guide). - -- For cloud providers, you can find the necessary plugins [here](https://velero.io/docs/main/supported-providers/). -- For detailed Velero GKE configuration prerequisites, refer to [this resource](https://github.com/vmware-tanzu/velero-plugin-for-gcp#setup). -- It's essential to note that Velero requires an object storage bucket for storing backups, and in our case, we use a [Google Cloud Storage (GCS) bucket](https://github.com/vmware-tanzu/velero-plugin-for-gcp#setup). -- For detailed instructions on Velero basic installation, visit https://velero.io/docs/v1.11/basic-install/. - - - - - diff --git a/docs/sidebars.js b/docs/sidebars.js index 628269c0c..d485dbeac 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -302,84 +302,74 @@ module.exports = { { collapsed: true, type: "category", - label: "Migration", + label: "Data Migration", customProps: { icon: "Repeat" }, items: [ { - collapsed: true, - type: "category", - label: "Legacy Storage to New Storage", - customProps: { - icon: "File" - }, - items: [ - { - type: "doc", - id: "user-guides/migration/legacy-storage-to-new-storage/migration-overview", - label: "Data Migration" - }, - { - type: "doc", - id: "user-guides/migration/legacy-storage-to-new-storage/cstor-to-replicated", - label: "Migration - cStor to Replicated" - }, - { - type: "doc", - id: "user-guides/migration/legacy-storage-to-new-storage/device-to-lvm", - label: "Migration - Local PV Device to Local PV LVM" - } - ] + type: "doc", + id: "user-guides/data-migration/migration-overview", + label: "Migration Overview" }, { - collapsed: true, - type: "category", - label: "Migration for Distributed DB", - customProps: { - icon: "File" - }, - items: [ - { - type: "doc", - id: "user-guides/migration/migration-for-distributed-db/distributeddb-overview", - label: "Distributed DB Overview" - }, - { - type: "doc", - id: "user-guides/migration/migration-for-distributed-db/distributeddb-backup", - label: "Backing up from cStor" - }, - { - type: "doc", - id: "user-guides/migration/migration-for-distributed-db/distributeddb-restore", - label: "Restoring Mayastor" - } - ] + type: "doc", + id: "user-guides/data-migration/migration-using-pv-migrate", + label: "Migration using pv-migrate" }, { collapsed: true, type: "category", - label: "Migration for Replicated DB", + label: "Migration using Velero", customProps: { - icon: "File" + icon: "Book" }, items: [ { type: "doc", - id: "user-guides/migration/migration-for-replicated-db/replicateddb-overview", - label: "Replicated DB Overview" + id: "user-guides/data-migration/migration-using-velero/overview", + label: "Overview" }, { - type: "doc", - id: "user-guides/migration/migration-for-replicated-db/replicateddb-backup", - label: "Backing up from cStor" + collapsed: true, + type: "category", + label: "Migration for Distributed DB", + 
customProps: { + icon: "File" + }, + items: [ + { + type: "doc", + id: "user-guides/data-migration/migration-using-velero/migration-for-distributed-db/distributeddb-backup", + label: "Backing up from cStor" + }, + { + type: "doc", + id: "user-guides/data-migration/migration-using-velero/migration-for-distributed-db/distributeddb-restore", + label: "Restoring to Replicated Storage" + } + ] }, { - type: "doc", - id: "user-guides/migration/migration-for-replicated-db/replicateddb-restore", - label: "Restoring Mayastor" - } + collapsed: true, + type: "category", + label: "Migration for Replicated DB", + customProps: { + icon: "File" + }, + items: [ + { + type: "doc", + id: "user-guides/data-migration/migration-using-velero/migration-for-replicated-db/replicateddb-backup", + label: "Backing up from cStor" + }, + { + type: "doc", + id: "user-guides/data-migration/migration-using-velero/migration-for-replicated-db/replicateddb-restore", + label: "Restoring to Replicated Storage" + } + ] + }, ] }, ]