diff --git a/docs/i18n/en/code.json b/docs/i18n/en/code.json
index b995deeb5..9b1d981d4 100644
--- a/docs/i18n/en/code.json
+++ b/docs/i18n/en/code.json
@@ -235,4 +235,4 @@
"message": "New contributors",
"description": "New contributors label in footer"
}
-}
+}
\ No newline at end of file
diff --git a/docs/i18n/en/docusaurus-plugin-content-docs/current.json b/docs/i18n/en/docusaurus-plugin-content-docs/current.json
index 26ed23e89..8980989ae 100644
--- a/docs/i18n/en/docusaurus-plugin-content-docs/current.json
+++ b/docs/i18n/en/docusaurus-plugin-content-docs/current.json
@@ -118,5 +118,45 @@
"version.label": {
"message": "main",
"description": "The label for version current"
+ },
+ "sidebar.docs.category.Introduction to OpenEBS": {
+ "message": "Introduction to OpenEBS",
+ "description": "The label for category Introduction to OpenEBS in sidebar docs"
+ },
+ "sidebar.docs.category.Data Engines": {
+ "message": "Data Engines",
+ "description": "The label for category Data Engines in sidebar docs"
+ },
+ "sidebar.docs.category.Quickstart Guide": {
+ "message": "Quickstart Guide",
+ "description": "The label for category Quickstart Guide in sidebar docs"
+ },
+ "sidebar.docs.category.Local Engine User Guide": {
+ "message": "Local Engine User Guide",
+ "description": "The label for category Local Engine User Guide in sidebar docs"
+ },
+ "sidebar.docs.category.Additional Information": {
+ "message": "Additional Information",
+ "description": "The label for category Additional Information in sidebar docs"
+ },
+ "sidebar.docs.category.Replicated Engine User Guide": {
+ "message": "Replicated Engine User Guide",
+ "description": "The label for category Replicated Engine User Guide in sidebar docs"
+ },
+ "sidebar.docs.category.Advanced Operations": {
+ "message": "Advanced Operations",
+ "description": "The label for category Advanced Operations in sidebar docs"
+ },
+ "sidebar.docs.category.Migration for Distributed DB": {
+ "message": "Migration for Distributed DB",
+ "description": "The label for category Migration for Distributed DB in sidebar docs"
+ },
+ "sidebar.docs.category.Migration for Replicated DB": {
+ "message": "Migration for Replicated DB",
+ "description": "The label for category Migration for Replicated DB in sidebar docs"
+ },
+ "sidebar.docs.category.Support": {
+ "message": "Support",
+ "description": "The label for category Support in sidebar docs"
}
-}
+}
\ No newline at end of file
diff --git a/docs/i18n/en/docusaurus-plugin-content-docs/version-2.12.x.json b/docs/i18n/en/docusaurus-plugin-content-docs/version-2.12.x.json
index 7f2889c4d..365abcff4 100644
--- a/docs/i18n/en/docusaurus-plugin-content-docs/version-2.12.x.json
+++ b/docs/i18n/en/docusaurus-plugin-content-docs/version-2.12.x.json
@@ -35,4 +35,4 @@
"message": "cStor",
"description": "The label for category cStor in sidebar docs"
}
-}
+}
\ No newline at end of file
diff --git a/docs/main/commercial-support.md b/docs/main/commercial-support.md
index a4ef357eb..485b07d31 100644
--- a/docs/main/commercial-support.md
+++ b/docs/main/commercial-support.md
@@ -9,7 +9,7 @@ description: This is a list of third-party companies and individuals who provide
OpenEBS is an independent open source project which does not endorse any company.
-This is a list of third-party companies and individuals who provide products or services related to OpenEBS. If you are providing commercial support for OpenEBS, please [edit this page](https://github.com/openebs/website/edit/main/docs/main/introduction/commercial.md) to add yourself or your organization to the list.
+This is a list of third-party companies and individuals who provide products or services related to OpenEBS. If you are providing commercial support for OpenEBS, [edit this page](https://github.com/openebs/website/edit/main/docs/main/commercial-support.md) to add yourself or your organization to the list.
The list is provided in alphabetical order.
@@ -18,9 +18,9 @@ The list is provided in alphabetical order.
- [DataCore](https://www.datacore.com/support/openebs/)
- [Gridworkz Cloud Services](https://www.gridworkz.com/)
-## See Also:
+## See Also
-- [Community Support](/docs/introduction/community)
-- [Troubleshooting](/docs/troubleshooting)
-- [FAQs](/docs/additional-info/faqs)
+- [Community Support](community.md)
+- [Troubleshooting](../main/troubleshooting/)
+- [FAQs](../main/faqs/faqs.md)
diff --git a/docs/main/community.md b/docs/main/community.md
index 8b40203ce..21b06dce4 100644
--- a/docs/main/community.md
+++ b/docs/main/community.md
@@ -9,7 +9,7 @@ description: You can reach out to OpenEBS contributors and maintainers through S
## GitHub
-Raise an [GitHub issue](https://github.com/openebs/openebs/issues/new)
+Raise a [GitHub issue](https://github.com/openebs/openebs/issues/new)
## Slack
@@ -33,9 +33,10 @@ Join our OpenEBS CNCF Mailing lists
Join our weekly or monthly [community meetings](https://github.com/openebs/openebs/tree/master/community#community-meetings).
-## See Also:
+## See Also
+
+- [Releases](releases.md)
+- [Commercial Support](commercial-support.md)
+- [Troubleshooting](../main/troubleshooting/)
+- [FAQs](../main/faqs/faqs.md)
-- [Commercial Support](/docs/introduction/commercial)
-- [Troubleshooting](/docs/troubleshooting)
-- [FAQs](/docs/additional-info/faqs)
-- [Release details](/docs/introduction/releases)
diff --git a/docs/main/concepts/architecture.md b/docs/main/concepts/architecture.md
index 6eaf80726..516ec507c 100644
--- a/docs/main/concepts/architecture.md
+++ b/docs/main/concepts/architecture.md
@@ -10,7 +10,7 @@ keywords:
description: This document contains detailed description of OpenEBS Architecture
---
-OpenEBS is the leading Open Source implementation of the [Container Attached Storage(CAS)](/docs/concepts/cas) pattern. As a part of this approach, OpenEBS uses containers to dynamically provision volumes and provide data services like high availability. OpenEBS relies on and extends [Kubernetes](/docs/concepts/basics) itself to orchestrate its volume services.
+OpenEBS is the leading Open Source implementation of the [Container Native Storage (CNS)](container-native-storage.md) pattern. As a part of this approach, OpenEBS uses containers to dynamically provision volumes and provide data services like high availability. OpenEBS relies on and extends [Kubernetes](basics.md) itself to orchestrate its volume services.
![openebs hld](../assets/openebs-hld.svg)
@@ -25,7 +25,7 @@ The data engines are at the core of OpenEBS and are responsible for performing t
The data engines are responsible for:
- Aggregating the capacity available in the block devices allocated to them and then carving out volumes for applications.
-- Provide standard system or network transport interfaces(NVMe/iSCSI) for connecting to local or remote volumes
+- Provide standard system or network transport interfaces (NVMe) for connecting to local or remote volumes
- Provide volume services like - synchronous replication, compression, encryption, maintain snapshots, access to the incremental or full snapshots of data and so forth
- Provide strong consistency while persisting the data to the underlying storage devices
@@ -37,11 +37,11 @@ The OpenEBS Data Engines comprise of the following layers:
### Volume Access Layer
-Stateful Workloads use standard POSIX compliant mechanisms to perform read and write operations. Depending on the type of workloads, the application can prefer to perform the reads and writes either directly to the raw block device or using standard filesystems like XFS, Ext4.
+Stateful workloads use standard POSIX compliant mechanisms to perform read and write operations. Depending on the type of workloads, the application can prefer to perform the reads and writes either directly to the raw block device or using standard filesystems like XFS, Ext4.
The CSI node driver or the Kubelet will take care of attaching the volume to the required node where pod is running, formatting if necessary and mounting the filesystem to be accessible by the pod. Users have the option of setting the mount options and filesystem permissions at this layer which will be carried out by the CSI node driver or kubelet.
-The details required for attaching the volumes (using local, iSCSI or NVMe) and mounting (Ext4, XFS, etc) are available through the Persistent Volume Spec.
+The details required for attaching the volumes (using local or NVMe transports) and mounting them (Ext4, XFS, etc.) are available through the Persistent Volume spec.
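+
+For example, a filesystem choice and mount options can be declared on a StorageClass and are then applied by the CSI node driver or kubelet at mount time. The sketch below is illustrative only: the class name is hypothetical and the `provisioner` value shown is an assumption (the Local Engine LVM CSI driver); use the driver for the engine you deploy.
+
+```
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: openebs-xfs-example          # hypothetical class name
+provisioner: local.csi.openebs.io    # assumption: Local Engine (LVM) CSI driver
+parameters:
+  csi.storage.k8s.io/fstype: "xfs"   # filesystem created on the volume
+mountOptions:
+  - noatime                          # standard StorageClass field passed to the mount call
+volumeBindingMode: WaitForFirstConsumer
+```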
### Volume Services Layer
@@ -52,21 +52,20 @@ The implementation pattern used by data engines to provide high availability is
Using a single controller to implement synchronous replication of data to fixed set of nodes (instead of distribution via multiple metadata controller), reduces the overhead in managing the metadata and also reduces the blast radius related to a node failure and other nodes participating in the rebuild of the failed node.
The OpenEBS volume services layer exposes the volumes as:
-- Device or Directory paths in case of Local PV,
-- iSCSI Target in case of cStor and Jiva
-- NVMe Target in case of Mayastor.
+- Device or Directory paths in case of Local Engine
+- NVMe Target in case of Replicated Engine
### Volume Data Layer
-OpenEBS Data engines create a Volume Replica on top of the Storage Layer. Volume Replicas are pinned to a node and are created on top of the storage layer. The replica can be any of the following:
+OpenEBS Data Engines create a Volume Replica on top of the storage layer. Volume Replicas are pinned to a node. The replica can be any of the following:
- Sub-directory - in case the storage layer used is a filesystem directory
- Full Device or Partitioned Device - in case the storage layer used is block devices
-- Logical Volume - in case the storage layer used is a device pool coming from LVM or ZFS.
+- Logical Volume - in case the storage layer used is a device pool provided by the Local Engine
-In case the applications require only local storage, then the Persistent Volume will be created using one of the above directories, device (or partition) or logical volume. OpenEBS [control plane](#control-plane) will be used to provision one of the above replicas.
+If the applications require only local storage, the persistent volume will be created using one of the above: a directory, a device (or partition), or a logical volume. The OpenEBS [control plane](#control-plane) will be used to provision one of the above replicas.
-OpenEBS can add the layer of high availability on top of the local storage using one of its replicated engines - Jiva, cStor and Mayastor. In this case, OpenEBS uses a light-weight storage defined storage controller software that can receive the read/write operations over a network end-point and then be passed on to the underlying storage layer. OpenEBS then uses this Replica network end-points to maintain a synchronous copy of the volume across nodes.
+OpenEBS can add a layer of high availability on top of the local storage using the replicated engine. In this case, OpenEBS uses a lightweight, software-defined storage controller that receives read/write operations over a network endpoint and passes them on to the underlying storage layer. OpenEBS then uses these replica network endpoints to maintain a synchronous copy of the volume across nodes.
OpenEBS Volume Replicas typically go through the following states:
- Initializing, during initial provisioning and is being registered to its volume
@@ -77,9 +76,9 @@ OpenEBS Volume Replicas typically go through the following states:
### Storage Layer
-Storage Layer forms the basic building blocks for persisting the data. The Storage Layer comprise of block devices attached to the node (either locally via PCIe, SAS, NVMe or via remote SAN/Cloud). The Storage Layer could also be a sub-directory on top of a mounted filesystem.
+The storage layer forms the basic building block for persisting data. It comprises block devices attached to the node (either locally via PCIe, SAS, or NVMe, or via remote SAN/Cloud). The storage layer could also be a sub-directory on top of a mounted filesystem.
-Storage Layer is outside the purview of the OpenEBS Data Engines and are available to the Kubernetes storage constructs using standard operating system or Linux software constructs.
+The storage layer is outside the purview of the OpenEBS Data Engines and is made available to the Kubernetes storage constructs using standard operating system or Linux software constructs.
The Data Engines consume the storage as a device or a device pool or a filesystem directory.
@@ -91,39 +90,39 @@ The control plane in the context of OpenEBS refers to a set of tools or componen
- Interfacing with CSI to manage the lifecycle of volumes
- Interfacing with CSI and other tools carrying out operations like - snapshots, clones, resize, backup, restore, etc.
- Integrating into other tools like Prometheus/Grafana for telemetry and monitoring
-- Integrating into other tools for debugging, troubleshooting or log management
+- Integrating into other tools for debugging, troubleshooting, or log management
-OpenEBS Control Plane comprises of a set of micro-services that are themselves managed by Kubernetes, making OpenEBS truly Kubernetes native. The configuration managed by the OpenEBS Control Plane is saved as Kubernetes custom resources. The functionality of the control plane can be decomposed into the various stages as follows:
+OpenEBS Control Plane comprises a set of micro-services that are themselves managed by Kubernetes, making OpenEBS truly Kubernetes native. The configuration managed by the OpenEBS Control Plane is saved as Kubernetes custom resources. The functionality of the control plane can be decomposed into the various stages as follows:
![openebs control plane](../assets/openebs-control-plane.svg)
-### YAML or Helm chart
+### Helm Chart
-OpenEBS components can be installed by the administrator using a highly configurable Helm chart or kubectl/YAML. OpenEBS installation is also supported via the Management Kubernetes offerings such as OpenShift, EKS, DO, Rancher as marketplace applications or as add-on or plugins tightly integrated into Kubernetes distributions such as MicroK8s, Kinvolk, Kubesphere.
+OpenEBS components can be installed by the administrator using a highly configurable Helm chart. OpenEBS installation is also supported via managed Kubernetes offerings such as OpenShift, EKS, DO, and Rancher as marketplace applications, or as add-ons or plugins tightly integrated into Kubernetes distributions such as MicroK8s, Kinvolk, and Kubesphere.
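+
+For example, a typical Helm-based installation looks like the sketch below. The repository URL, chart name, and namespace are the commonly documented defaults and can differ between OpenEBS releases, so verify them against the installation guide for your version.
+
+```
+# Add the OpenEBS Helm repository and install the chart into its own namespace
+helm repo add openebs https://openebs.github.io/openebs
+helm repo update
+helm install openebs openebs/openebs --namespace openebs --create-namespace
+
+# Verify that the control plane components are running
+kubectl get pods -n openebs
+```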
-As part of the OpenEBS install, the control plane components for the selected data engines will be installed as cluster and/or node components using standard Kubernetes primitives like Deployments, DaemonSets, Statefulsets and such. The OpenEBS installation also takes care of loading the OpenEBS custom resource definitions into the Kubernetes.
+As part of the OpenEBS install, the control plane components for the selected data engines will be installed as cluster and/or node components using standard Kubernetes primitives like Deployments, DaemonSets, StatefulSets, and such. The OpenEBS installation also takes care of loading the OpenEBS custom resource definitions into Kubernetes.
-OpenEBS control plane components are all stateless and depend on the Kubernetes etcd server (custom resources) to managing their internal configuration state and reporting the status of the various components.
+OpenEBS control plane components are all stateless. They depend on Kubernetes custom resources (persisted in the etcd server) to manage their internal configuration state and to report the status of the various components.
### Declarative API
OpenEBS supports Declarative API for managing all of its operations and the APIs are exposed as Kubernetes custom resources. Kubernetes CRD validators and admission webhooks are used to validate the inputs provided by the users and to validate if the operations are allowed.
-The Declarative API is a natural extension to what Kubernetes administrators and user are accustomed to, where they can define the intent via a YAML and then Kubernetes and associated OpenEBS Operators will reconcile the state with the user's intent.
+The Declarative API is a natural extension to what Kubernetes administrators and users are accustomed to: they define the intent via a Helm chart, and Kubernetes and the associated OpenEBS Operators reconcile the state with the user's intent.
-The Declarative API can be used to configure the Data Engines and setup volume profiles/policies. Even upgrades of the data engines are performed using this API. The API can be used to:
-- Manage the configuration for each of the Data engine
+The Declarative API can be used to configure the Data Engines and set up volume profiles/policies. Even upgrades of the Data Engines are performed using this API. The API can be used to:
+- Manage the configuration for each Data Engine
- Manage the way the storage needs to be managed or storage pools
-- Manage the volumes and its services - creation, snapshots, clones, backup, restore, deletion
+- Manage the volumes and their services - creation, snapshots, clones, backup, restore, and deletion
- Manage upgrades of pools and volumes
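+
+For example, the intent "provision local volumes under a specific host directory" can be declared as a StorageClass. This is a minimal sketch; the class name and `BasePath` value are illustrative.
+
+```
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: local-hostpath-example       # illustrative name
+  annotations:
+    openebs.io/cas-type: local
+    cas.openebs.io/config: |
+      - name: StorageType
+        value: hostpath
+      - name: BasePath
+        value: /var/local-hostpath
+provisioner: openebs.io/local        # Local PV hostpath provisioner
+volumeBindingMode: WaitForFirstConsumer
+reclaimPolicy: Delete
+```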
### Data Engine Operators
All of the Data Engine operations from discovering the underlying storage to creation of pools and volumes is packaged as Kubernetes Operators. Each of the Data Engine either operates on top of a configuration provided during the installation or controlled via the corresponding Kubernetes custom resources.
-The Data engine operators can either be at the cluster scope or operating on a specific node. The cluster scope operators are usually involved in operations where interactions with the Kubernetes components are involved - in orchestrating the scheduling or migration of pools and volumes on various nodes. The node level operators operate on the local operations like creating volumes, replicas, snapshots and such on the storage or pools available on the node.
+The Data Engine operators can either be at the cluster scope or operate on a specific node. The cluster scope operators are usually involved in operations that require interaction with Kubernetes components - orchestrating the scheduling or migration of pools and volumes across nodes. The node level operators handle local operations like creating volumes, replicas, snapshots, and such on the storage or pools available on the node.
-Data Engine Operators are often also called as control plane of the Data engines as they facilitate in managing the volumes and the data services offered by the corresponding data engines. Depending on the features provided or needed, some data engines like cstor, jiva and mayastor can have multiple operators, where as Local Volume operations can be embedded directly into the corresponding CSI controller / provisioner.
+Data Engine Operators are often also called the control plane of the Data Engines, as they facilitate managing the volumes and the data services offered by the corresponding Data Engines. Depending on the features provided or needed, some Data Engines can have multiple operators, whereas local volume operations can be embedded directly into the corresponding CSI controller/provisioner.
### CSI Driver (Dynamic Volume Provisioner)
@@ -133,25 +132,26 @@ CSI Drivers act as the facilitators for managing the life-cycle of volumes withi
- Storage Drivers - which are CSI complaint and work very closely with the Kubernetes CSI layer to receive the requests and process them.
The Storage Drivers are responsible for:
-- Exposing the capabilities of the Data engines
+- Exposing the capabilities of the Data Engines
- Either directly interacting with the Data Engine or the Data Engine Operators to perform volume creation and deletion operations
-- Interface with the Data engines to attach/detach the volumes to the nodes where containers consuming the volumes are running
+- Interface with the Data Engines to attach/detach the volumes to the nodes where containers consuming the volumes are running
- Interface with standard linux utilities to format, mount/unmount the volumes to the containers
### Plugins
OpenEBS focuses on storage operations and provides plugins for other popular tools for performing the operations that fall outside of the core storage functionality but are very important for running OpenEBS in production. Examples of such operations are:
- Application Consistent Backup and Recovery (provided via integrations into Velero)
-- Monitoring and Alerting ( provided via integrations into Prometheus, Grafana, Alert manager)
-- Enforcing Security Policies ( provided via integrations with PodSecurityPolicies or Kyerno)
-- Logging ( provide via integration to any standard Logging stack setup by administrators like ELK, Loki, Logstash)
-- Visualizations (provided via standard Kubernetes Dashboards or custom Grafana dashboards)
+- Monitoring and Alerting (provided via integrations into Prometheus, Grafana, and Alertmanager)
+- Enforcing Security Policies (provided via integrations with PodSecurityPolicies or Kyverno)
+- Logging (provided via integration with any standard logging stack set up by administrators, like ELK, Loki, and Logstash)
+- Visualizations (provided via standard Kubernetes Dashboards or custom Grafana dashboards)
+
### CLI
All the management functions on OpenEBS can be carried out via `kubectl` as OpenEBS uses Custom Resources for managing all of its configurations and reporting the status of the components.
-In addition, OpenEBS also has released as alpha version `kubectl plugin` to help with providing information about the pools and volumes using a single command that aggregates the information obtained via multiple `kubectl` commands.
+In addition, OpenEBS has also released an alpha version of a `kubectl plugin` that provides information about the pools and volumes using a single command, aggregating the information otherwise obtained via multiple `kubectl` commands.
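+
+A few representative commands are shown below. The `kubectl` queries read the OpenEBS custom resources directly; the plugin invocation is indicative only, as the plugin is alpha and its command set may change between releases.
+
+```
+# OpenEBS configuration and component status are exposed as Kubernetes custom resources
+kubectl get crds | grep openebs.io
+
+# With the alpha kubectl plugin (openebsctl) installed, aggregate volume details
+kubectl openebs get volumes
+```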
- ## See Also:
+ ## See Also
-[Understanding Data Engines](/docs/concepts/casengines) [Understanding Mayastor](https://mayastor.gitbook.io/introduction/) [Understanding Local PV](/docs/concepts/localpv) [Understanding cStor](/docs/concepts/cstor) [Understanding Jiva](/docs/concepts/jiva)
+[Understanding Data Engines](../concepts/data-engines/data-engines.md)
diff --git a/docs/main/concepts/basics.md b/docs/main/concepts/basics.md
index 3471ed0eb..174904952 100644
--- a/docs/main/concepts/basics.md
+++ b/docs/main/concepts/basics.md
@@ -13,10 +13,10 @@ description: This document provides you with a quick overview of the Kubernetes
---
:::note
-This page provides you with a quick overview of the [Kubernetes concepts](https://kubernetes.io/docs/concepts/storage/) you need to know for running Stateful Workloads. If you are already familiar with running Stateful workloads in Kubernetes, header over to the next section on [Container Attached Storage](/docs/concepts/cas).
+This page provides you with a quick overview of the [Kubernetes concepts](https://kubernetes.io/docs/concepts/storage/) you need to know for running Stateful Workloads. If you are already familiar with running Stateful workloads in Kubernetes, head over to the next section on [Container Native Storage](container-native-storage.md).
:::
-Kubernetes has made several enhancements to support running Stateful Workloads by providing the required abstractions for Platform (or Cluster Administrators) and Application developers. The abstractions ensure that different types of file and block storage (whether ephemeral or persistent, local or remote) are available wherever a container is scheduled (including provisioning/creating, attaching, mounting, unmounting, detaching, and deleting of volumes), storage capacity management (container ephemeral storage usage, volume resizing, etc.), influencing scheduling of containers based on storage (data gravity, availability, etc.), and generic operations on storage (snapshoting, etc.).
+Kubernetes has made several enhancements to support running Stateful workloads by providing the required abstractions for platform (or cluster) administrators and application developers. The abstractions ensure that different types of file and block storage (whether ephemeral or persistent, local or remote) are available wherever a container is scheduled (including provisioning/creating, attaching, mounting, unmounting, detaching, and deleting of volumes), storage capacity management (container ephemeral storage usage, volume resizing, etc.), influencing scheduling of containers based on storage (data gravity, availability, etc.), and generic operations on storage (snapshotting, etc.).
The most important Kubernetes Storage abstractions to be aware of for running Stateful workloads using OpenEBS are:
@@ -33,36 +33,36 @@ The [Container Storage Interface (CSI)](https://github.com/container-storage-int
When cluster administrators install OpenEBS, the required OpenEBS CSI driver components are installed into the Kubernetes cluster.
:::note
-Prior to CSI, Kubernetes supported adding storage providers using out-of-tree provisioners referred to as [external provisioners](https://github.com/kubernetes-sigs/sig-storage-lib-external-provisioner). And Kubernetes in-tree volumes pre-date the external provisioners. There is an ongoing effort in the Kubernetes community to deprecate in-tree volumes with CSI based volumes.
+Prior to CSI, Kubernetes supported adding storage providers using out-of-tree provisioners referred to as [external provisioners](https://github.com/kubernetes-sigs/sig-storage-lib-external-provisioner); Kubernetes in-tree volumes pre-date the external provisioners. There is an ongoing effort in the Kubernetes community to deprecate in-tree volumes with CSI based volumes.
:::
## Storage Classes and Dynamic Provisioning
A [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) provides a way for administrators to describe the "classes" of storage they offer. Different classes might map to quality-of-service levels, or to backup policies, or to arbitrary policies determined by the cluster administrators. This concept is sometimes called "profiles" in other storage systems.
-The dynamic provisioning feature eliminates the need for cluster administrators to pre-provision storage. Instead, it automatically provisions storage when it is requested by users. The implementation of dynamic volume provisioning is based on the `StorageClass` abstraction A cluster administrator can define as many StorageClass objects as needed, each specifying a volume plugin (aka provisioner) that provisions a volume and the set of parameters to pass to that provisioner when provisioning.
+The dynamic provisioning feature eliminates the need for cluster administrators to pre-provision storage. Instead, it automatically provisions storage when it is requested by users. The implementation of dynamic volume provisioning is based on the `StorageClass` abstraction. A cluster administrator can define as many StorageClass objects as needed, each specifying a volume plugin (aka provisioner) that provisions a volume and the set of parameters to pass to that provisioner when provisioning.
-A cluster administrator can define and expose multiple flavors of storage (from the same or different storage systems) within a cluster, each with a custom set of parameters. This design also ensures that end users don't have to worry about the complexity and nuances of how storage is provisioned, but still have the ability to select from multiple storage options.
+A cluster administrator can define and expose multiple flavors of storage (from the same or different storage systems) within a cluster, each with a custom set of parameters. This design also ensures that end users do not have to worry about the complexity and nuances of how storage is provisioned, but still have the ability to select from multiple storage options.
-When OpenEBS is installed, it ships with a couple of default storage classes that allow users to create either local (OpenEBS Local PV hostPath) or replicated (OpenEBS Jiva) volumes. The cluster administrator can enable the required storage engines and then create Storage Classes for the required Data Engines.
+When OpenEBS is installed, it ships with a couple of default Storage Classes that allow users to create either local or replicated volumes. The cluster administrator can enable the required storage engines and then create Storage Classes for the required Data Engines.
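+
+The Storage Classes available in a cluster can be inspected with `kubectl`; the default class names depend on the OpenEBS release and the engines that are enabled, so `openebs-hostpath` below is only an example.
+
+```
+kubectl get storageclass
+kubectl describe storageclass openebs-hostpath
+```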
## Persistent Volume Claims
-PersistentVolumeClaim (PVC) is a user’s storage request that is served by a StorageClass offered by the cluster administrator. An application running on a container can request a certain type of storage. For example, a container can specify the size of storage it needs or the way it needs to access the data (read only, read/write, read-write many, etc., ).
+PersistentVolumeClaim (PVC) is a user’s storage request that is served by a Storage Class offered by the cluster administrator. An application running on a container can request a certain type of storage. For example, a container can specify the size of storage it needs or the way it needs to access the data (read only, read/write, etc.).
-Beyond storage size and access mode, administrators create Storage Classes to provided PVs with custom properties, such as the type of disk (HDD vs. SSD), the level of performance, or the storage tier (regular or cold storage).
+Beyond storage size and access mode, administrators create Storage Classes to provide PVs with custom properties, such as the type of disk (HDD vs. SSD), the level of performance, or the storage tier (regular or cold storage).
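+
+A minimal PVC sketch is shown below; the storage class name is an example and should match one offered by the cluster administrator.
+
+```
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: example-app-data
+spec:
+  accessModes:
+    - ReadWriteOnce                  # single-node read/write access
+  storageClassName: openebs-hostpath # example OpenEBS storage class
+  resources:
+    requests:
+      storage: 5Gi                   # requested capacity
+```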
## Persistent Volumes
-The PersistentVolume(PV) is dynamically provisioned by the storage providers when users request for a PVC. PV contains the details on how the storage can be consumed by the container. Kubernetes and the Volume Drivers use the details in the PV to attach/detach the storage to the node where the container is running and mount/unmount storage to a container.
+The PersistentVolume (PV) is dynamically provisioned by the storage providers when users request for a PVC. PV contains the details on how the storage can be consumed by the container. Kubernetes and the volume drivers use the details in the PV to attach/detach the storage to the node where the container is running and mount/unmount storage to a container.
-OpenEBS Control Plane dynamically provisions OpenEBS Local and Replicated volumes and helps in creating the PV objects in the cluster.
+OpenEBS Control Plane dynamically provisions OpenEBS local and replicated volumes and helps in creating the PV objects in the cluster.
## StatefulSets and Deployments
Kubernetes provides several built-in workload resources such as StatefulSets and Deployments that let application developers define an application running on Kubernetes. You can run a stateful application by creating a Kubernetes Deployment/Statefulset and connecting it to a PersistentVolume using a PersistentVolumeClaim.
-For example, you can create a MySQL Deployment YAML that references a PersistentVolumeClaim. The MySQL PersistentVolumeClaim referenced by the Deployment should be created with the requested size and StorageClass. Once the OpenEBS control plane provisions a PersistenceVolume for the required StorageClass and requested capacity, the claim is set as satisfied. Kubernetes will then mount the PersistentVolume and launch the MySQL Deployment.
+For example, you can create a MongoDB Deployment YAML that references a PersistentVolumeClaim. The MongoDB PersistentVolumeClaim referenced by the Deployment should be created with the requested size and StorageClass. Once the OpenEBS control plane provisions a PersistentVolume for the required StorageClass and requested capacity, the claim is set as satisfied. Kubernetes will then mount the PersistentVolume and launch the MongoDB Deployment.
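+
+A trimmed-down sketch of such a Deployment is shown below; the names, image tag, and referenced claim are illustrative.
+
+```
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: mongo
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: mongo
+  template:
+    metadata:
+      labels:
+        app: mongo
+    spec:
+      containers:
+        - name: mongo
+          image: mongo:6.0
+          volumeMounts:
+            - name: mongo-data
+              mountPath: /data/db          # MongoDB data directory
+      volumes:
+        - name: mongo-data
+          persistentVolumeClaim:
+            claimName: example-app-data    # PVC similar to the sketch above
+```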
## Kubernetes Persona
@@ -72,7 +72,7 @@ There are primarily two types of users that interact with Kubernetes and OpenEBS
These users are responsible for managing the life-cycle of the cluster and are often referred to as administrators or platform SREs. Administrators have full access to the cluster resources and can create policies and roles for other users that have access to the cluster.
-Cluster administrators are responsible for installing OpenEBS and configuring the OpenEBS StorageClasses that will be used by other application users.
+Cluster administrators are responsible for installing OpenEBS and configuring the OpenEBS Storage Classes that will be used by other application users.
### Application Owners
diff --git a/docs/main/concepts/container-native-storage.md b/docs/main/concepts/container-native-storage.md
index 8bd56ca7e..99e16e9c3 100644
--- a/docs/main/concepts/container-native-storage.md
+++ b/docs/main/concepts/container-native-storage.md
@@ -1,49 +1,51 @@
---
-id: cas
-title: Container Attached Storage (CAS)
+id: cns
+title: Container Native Storage (CNS)
keywords:
- - CAS
- - Container Attached Storage
-description: Container Attached Storage(CAS) is a software that includes microservice based storage controllers that are orchestrated by Kubernetes.
+ - CNS
+ - Container Native Storage
+description: Container Native Storage (CNS) is a software that includes microservice based storage controllers that are orchestrated by Kubernetes.
---
-## What is CAS?
+## What is CNS?
-Container Attached Storage(CAS) is software that includes microservice based storage controllers that are orchestrated by Kubernetes. These storage controllers can run anywhere that Kubernetes can run which means any cloud or even bare metal server or on top of a traditional shared storage system. Critically, the data itself is also accessed via containers as opposed to being stored in an off platform shared scale out storage system.
+Container Native Storage (CNS) is software that includes microservice based storage controllers that are orchestrated by Kubernetes. These storage controllers can run anywhere that Kubernetes can run which means any cloud or even bare metal server or on top of a traditional shared storage system. Critically, the data itself is also accessed via containers as opposed to being stored in an off-platform, shared, scale-out storage system.
-[![Container Attached Storage](../assets/cas.svg)](../assets/cas.svg)
+[![Container Native Storage](../assets/cas.svg)](../assets/cas.svg)
-CAS is a pattern very much in line with the trend towards disaggregated data and the rise of small, autonomous teams running small, loosely coupled workloads. For example, my team might need Postgres for our microservice, and yours might depend on Redis and MongoDB. Some of our use cases might require performance, some might be gone in 20 minutes, others are write intensive, others read intensive, and so on. In a large organization, the technology that teams depend on will vary more and more as the size of the organization grows and as organizations increasingly trust teams to select their own tools.
+CNS is a pattern very much in line with the trend towards disaggregated data and the rise of small, autonomous teams running small, loosely coupled workloads. For example, my team might need Postgres for our microservice, and yours might depend on Redis and MongoDB. Some of our use cases might require performance, some might be gone in 20 minutes, others are write intensive, others read intensive, and so on. In a large organization, the technology that teams depend on will vary more and more as the size of the organization grows and as organizations increasingly trust teams to select their own tools.
-CAS means that developers can work without worrying about the underlying requirements of their organizations' storage architecture. To CAS, a cloud disk is the same as a SAN which is the same as bare metal or virtualized hosts. Developers and Platform SREs don’t have meetings to select the next storage vendor or to argue for settings to support their use case, instead Developers remain autonomous and can spin up their own CAS containers with whatever storage is available to the Kubernetes clusters.
+CNS means that developers can work without worrying about the underlying requirements of their organizations' storage architecture. To CNS, a cloud disk is the same as a SAN which is the same as bare metal or virtualized hosts. Developers and Platform SREs don’t have meetings to select the next storage vendor or to argue for settings to support their use case; instead, developers remain autonomous and can spin up their own CNS containers with whatever storage is available to the Kubernetes clusters.
-CAS reflects a broader trend of solutions – many of which are now part of Cloud Native Foundation – that reinvent particular categories or create new ones – by being built on Kubernetes and microservice and that deliver capabilities to Kubernetes based microservice environments. For example, new projects for security, DNS, networking, network policy management, messaging, tracing, logging and more have emerged in the cloud-native ecosystem and often in CNCF itself.
+CNS reflects a broader trend of solutions – many of which are now part of the Cloud Native Computing Foundation (CNCF) – that reinvent particular categories or create new ones by being built on Kubernetes and microservices, and that deliver capabilities to Kubernetes based microservice environments. For example, new projects for security, DNS, networking, network policy management, messaging, tracing, logging, and more have emerged in the cloud-native ecosystem and often in CNCF itself.
-## Advantages of CAS
+## Advantages of CNS
### Agility
-Each storage volume in CAS has a containerized storage controller and corresponding containerized replicas. Hence, maintenance and tuning of the resources around these components are truly agile. The capability of Kubernetes for rolling upgrades enables seamless upgrades of storage controllers and storage replicas. Resources such as CPU and memory can be tuned using container cGroups.
+Each storage volume in CNS has a containerized storage controller and corresponding containerized replicas. Hence, maintenance and tuning of the resources around these components are truly agile. The capability of Kubernetes for rolling upgrades enables seamless upgrades of storage controllers and storage replicas. Resources such as CPU and memory can be tuned using container cGroups.
+
### Granularity of Storage Policies
-Containerizing the storage software and dedicating the storage controller to each volume brings maximum granularity in storage policies. With CAS architecture, you can configure all storage policies on a per-volume basis. In addition, you can monitor storage parameters of every volume and dynamically update storage policies to achieve the desired result for each workload. The control of storage throughput, IOPS, and latency increases with this additional level of granularity in the volume storage policies.
+Containerizing the storage software and dedicating the storage controller to each volume brings maximum granularity in storage policies. With CNS architecture, you can configure all storage policies on a per-volume basis. In addition, you can monitor storage parameters of every volume and dynamically update storage policies to achieve the desired result for each workload. The control of storage throughput, IOPS, and latency increases with this additional level of granularity in the volume storage policies.
### Avoids Lock-in
-Avoiding cloud vendor lock-in is a common goal for many Kubernetes users. However, the data of stateful applications often remains dependent on the cloud provider and technology or on an underlying traditional shared storage system, NAS or SAN. With the CAS approach, storage controllers can migrate the data in the background per workload and live migration becomes simpler. In other words, the granularity of control of CAS simplifies the movement of stateful workloads from one Kubernetes cluster to another in a non-disruptive way.
+Avoiding cloud vendor lock-in is a common goal for many Kubernetes users. However, the data of stateful applications often remains dependent on the cloud provider and technology or on an underlying traditional shared storage system, NAS or SAN. With the CNS approach, storage controllers can migrate the data in the background per workload and live migration becomes simpler. In other words, the granularity of control of CNS simplifies the movement of stateful workloads from one Kubernetes cluster to another in a non-disruptive way.
### Cloud Native
-CAS containerizes the storage software and uses Kubernetes Custom Resource Definitions (CRDs) to represent low-level storage resources, such as disks and storage pools. This model enables storage to be integrated into other cloud-native tools seamlessly. The storage resources can be provisioned, monitored, and managed using cloud-native tools such as Prometheus, Grafana, Fluentd, Weavescope, Jaeger, and others.
+CNS containerizes the storage software and uses Kubernetes Custom Resource Definitions (CRDs) to represent low-level storage resources, such as disks and storage pools. This model enables storage to be integrated into other cloud-native tools seamlessly. The storage resources can be provisioned, monitored, and managed using cloud-native tools such as Prometheus, Grafana, Fluentd, Weavescope, Jaeger, and others.
-Similar to hyperconverged systems, storage and performance of a volume in CAS are scalable. As each volume has it's own storage controller, the storage can scale up within the permissible limits of a storage capacity of a node. As the number of container applications increases in a given Kubernetes cluster, more nodes are added, which increases the overall availability of storage capacity and performance, thereby making the storage available to the new application containers. This process of scalability is similar to successful hyperconverged systems like Nutanix.
+Similar to hyperconverged systems, storage and performance of a volume in CNS are scalable. As each volume has its own storage controller, the storage can scale up within the permissible limits of the storage capacity of a node. As the number of container applications increases in a given Kubernetes cluster, more nodes are added, which increases the overall availability of storage capacity and performance, thereby making the storage available to the new application containers. This process of scalability is similar to successful hyperconverged systems like Nutanix.
-### Lower blast radius
+### Lower Blast Radius
-Because the CAS architecture is per workload and components are loosely coupled, CAS has a much smaller blast radius than a typical distributed storage architecture.
+As the CNS architecture is per workload and components are loosely coupled, CNS has a much smaller blast radius than a typical distributed storage architecture.
-CAS can deliver high availability through synchronous replication from storage controllers to storage replicas. The metadata required to maintain the replicas is simplified to saving the information of the nodes that have replicas and information about the status of replicas to help with quorum. If a node fails, the storage controller, which is a stateless container in this case, is spun on a node where second or third replica is running and data continues to be available. Hence, with CAS the blast radius is much lower and also localized to the volumes that have replicas on that node.
+CNS can deliver high availability through synchronous replication from storage controllers to storage replicas. The metadata required to maintain the replicas is simplified to saving the information of the nodes that have replicas and information about the status of replicas to help with quorum. If a node fails, the storage controller, which is a stateless container in this case, is spun up on a node where a second or third replica is running, and data continues to be available. Hence, with CNS the blast radius is much lower and also localized to the volumes that have replicas on that node.
## See Also:
-[OpenEBS Architecture](/docs/concepts/architecture) [Blog: Container Attached Storage is Cloud Native Storage (CAS)](https://www.cncf.io/blog/2020/09/22/container-attached-storage-is-cloud-native-storage-cas/) [Blog: Container Attached Storage](https://www.cncf.io/blog/2018/04/19/container-attached-storage-a-primer/) [Webinar: Need for Container Attached Storage](https://www.cncf.io/webinars/kubernetes-for-storage-an-overview/) [Connect with Community](/docs/introduction/community)
+[OpenEBS Architecture](architecture.md)
+[Connect with Community](../community.md)
\ No newline at end of file
diff --git a/docs/main/concepts/cstor.md b/docs/main/concepts/cstor.md
deleted file mode 100644
index bb31ed0af..000000000
--- a/docs/main/concepts/cstor.md
+++ /dev/null
@@ -1,334 +0,0 @@
----
-id: cstor
-title: cStor Overview
-keywords:
- - cStor
- - cStor pools
- - cStor volumes
- - cStor cli
-description: This document provides you with a detailed overview of cStor
----
-
-cStor is the recommended way to provide additional resilience to workloads via OpenEBS and is the second most widely deployed storage engine behind LocalPV. cStor was originally introduced in OpenEBS 0.7 release and has been tested well in the community and in production deployments. The primary function of cStor is to serve iSCSI block storage using the underlying disks or cloud volumes in a cloud native way. cStor is a very light weight and feature rich storage engine. It provides enterprise grade features such as synchronous data replication, snapshots, clones, thin provisioning of data, high resiliency of data, data consistency and on-demand increase of capacity or performance.
-
-When the stateful application desires the storage to provide high availability of data, cStor is configured to have 3 replicas where data is written synchronously to all the three replicas. As shown below, the cStor target is replicating the data to Node1 (R1), Node3(R2) and Node4(R3). The data is written to the three replicas before the response is sent back to the application. It is important to note that the replicas R1, R2 and R3 are copies of the same data written by the target, data is not striped across replicas or across nodes.
-
-[![cStor components](../assets/cstor-for-deployment.png)](../assets/cstor-for-deployment.png)
-
-When the stateful application itself is taking care of data replication, it is typically deployed as a Kubernetes *statefulset*. For a statefulset, it is typical to configure cStor with single replica. This is a use case where the use of LocalPV may be preferred.
-
-[![cStor components](../assets/cstor-for-statefulset.png)](../assets/cstor-for-statefulset.png)
-
-cStor has two main components:
-
-(a) **cStor Pool Pods:** cStor pool pods are responsible for persisting data into the disks. The cStor pool pods are instantiated on a node and are provided with one or more disks on which data will be saved. Each cStor pool pod can save data of one or more cStor volumes. cStor pool pod carves out space for each volume replica, manages the snapshots and clones of the replica. A set of such cStor pool pods form a single Storage Pool. The administrator will have to create a Storage Pool of type cStor, before creating a StorageClass for cStor Volumes.
-
-(b) **cStor Target Pods:** When a cStor Volume is provisioned, it creates a new cStor target pod that is responsible for exposing the iSCSI LUN. cStor target pod receives the data from the workloads, and then passes it on to the respective cStor Volume Replicas (on cStor Pools). cStor target pod handles the synchronous replication and quorum management of its replicas.
-
-## cStor targets
-
-cStor target runs as a pod and exposes an iSCSI LUN on 3260 port. It also exports the volume metrics that can be scraped by Prometheus.
-
-## cStor pools
-
-A cStor pool is local to a node in OpenEBS. A pool on a node is an aggregation of set of disks which are attached to that node. A pool contains replicas of different volumes, with not more than one replica of a given volume. OpenEBS scheduler at run time decides to schedule replica in a pool according to the policy. A pool can be expanded dynamically without affecting the volumes residing in it. An advantage of this capability is the thin provisioning of cStor volumes. A cStor volume size can be much higher at the provisioning time than the actual capacity available in the pool.
-
-[![cStor components](../assets/cstor-pool.png)](../assets/cstor-pool.png)
-
-A pool is an important OpenEBS component for the Kubernetes administrators in the design and planning of storage classes which are the primary interfaces to consume the persistent storage by applications.
-
-**Benefits of a cStor pool**
-
-- Aggregation of disks to increase the available capacity and/or performance on demand.
-- Thin provisioning of capacity. Volumes can be allocated more capacity than what is available in the node.
-- When the pool is configured in mirror mode, High Availability of storage is achieved when disk loss happens.
-
-### Relationship between cStor volumes and cStor pools
-
-cStor pool is a group of individual pools with one pool instance on each participating node. Individual pools in the group are named as pool instances and the corresponding pod for each pool instance is referred to as cStor pool pod. The pools are totally independent from each other in that each one is a different pool itself and could host different number of volumes. They simply contain volume replicas. For example, replica3 of pool1 in Node3 has two volumes whereas the other two pool replicas have only one volume each. The pool replicas are related to each other only at the level of target where target decides where to host the volume/data replicas/copies.
-
-Replication of data does not happen at the pool level. Synchronous data replication and rebuilding happen at volume level by the cStor target. Volume replicas are created on cStor pools located on different nodes. In the following example figure, a pool configuration is defined to have three replicas or three independent pools .
-
-![cStor Pools in OpenEBS](../assets/cstorpools.png)
-
-### Relationship among PVC, PV, Storage Class, cStor pool and disks
-
-Storage administrators or DevOps administrators first build cStor pools using discovered disks on the designated nodes. Once the pools are built, they are used to design and build storage classes. Application developers then use storage class to dynamically provision PV for the applications.
-
-[![PVC and Storage Pool relationship](../assets/pvcspc.png)](../assets/pvcspc.png)
-
-### cStor pool spec details
-
-A cStor pool spec consists of :
-
-- Number of pools
-- List of nodes that host the pools
-- List of blockdevices on each node that constitute the pool on that given node
-- RAID type within the pool. Supported RAID types are striped, mirrored, raidz and raidz2.
-
-**Number of pools:** It is good to start with 3 pools as the number of volume replicas will be typically three or one. The pool capacity can be increased on the fly and different types of pool expansion can be found [here](/docs/deprecated/spc-based-cstor#expanding-cStor-pool-to-a-new-node).
-
-**List of nodes that host the pools:** This information and the number of pool replicas are implicitly provided by analyzing the provided blockdevice CRs in the Storage Pool Claim spec file. For example, if the Storage Pool Claim spec file has 3 blockdevice CRs, which belong to 3 different nodes, it implicitly means the number of pool replicas are 3 and the list of nodes taken from the blockdevice CR's metadata information.
-
-**List of blockdevices:** This is perhaps the most important information in the pool specification. The blockdevice CRs need to be listed and carefully taken by first determining the list of nodes and the number of disks on each node.
-
-**Type of pool:** This is also another important configuration information in the pool specification. It defines how these disks are pooled together within a node for creating the Storage pool. Possible configurations are
-
-- STRIPE
-- MIRROR
-- RAIDZ1
-- RAIDZ2
-
-***Note:*** A pool cannot be extended beyond a node. When the pool type is STRIPE, it should not be assumed that data is striped across the nodes. The data is striped across the disks within the pool on that given node. As mentioned in the data replication section, the data is synchronously replicated to as many number of volume replicas in to as many number of pools irrespective of the type of pool.
-
-### Operations on cStor Pool
-
-cStor Pool is an important component in the storage management. It is fundamental to storage planning especially in the context of hyperconvergence planning. The following are the different operations that can be performed on cStor Pools.
-
-**Create a Pool :** Create a new pool with all the configuration. It is typical to start a pool with 3 pool instances on three nodes. Currently RAID types supported for a given pool instance are striped, mirrored, raidz and raidz2. A pool needs to be created before storage classes are created. So, pool creation is the first configuration step in the cStor configuration process.
-
-**Add a new pool instance** : A new pool instance may need to be added for many different reasons. The steps for expanding a cStor pool to a new node can be found [here](/docs/deprecated/spc-based-cstor#expanding-cStor-pool-to-a-new-node). Few example scenarios where the need of cStor pool expansion to new nodes are:
-
-- New node is being added to the Kubernetes cluster and the blockdevices in new node needs to be considered for persistent volume storage.
-- An existing pool instance is full in capacity and it cannot be expanded as either local disks or network disks are not available. Hence, a new pool instance may be needed for hosting the new volume replicas.
-- An existing pool instance is fully utilized in performance and it cannot be expanded either because CPU is saturated or more local disks are not available or more network disks or not available. A new pool instance may be added and move some of the existing volumes to the new pool instance to free up some disk IOs on this instance.
-
-**Expand a given pool instance :** cStor Pool supports thin provisioning, which means that the volume replica that resides on a given cStor pool can be given much bigger size or quota than the physical storage capacity available in the pool. When the actual capacity becomes nearly full (80% or more for example), the pool instance is expanded by adding a set of blockdevices to it. If the pool instance's disk RAID type is STRIPE, then the disks can be added in any multiples of disks (1 disk or more) at a time, but if the type is any of the RAIDZx, then the expansion is done by adding any multiples of RAIDZ groups (1 group or more).
-
-**Delete a pool instance** : When a Kubernetes node needs to be drained in a planned manner, then the volume replicas in the pool instance that resides on that node need to be drained by moving them to other pool instance(s). Once all the volume replicas are drained, the pool instance can be deleted.
-
-## cStor Volume snapshots
-
-cStor snapshots are taken instantaneously as they are point-in-time copy of the volume data. OpenEBS supports standard Kubernetes API to take snapshots and restore volumes from snapshots.
-
-Example specification for a snapshot is shown below.
-
-*Note that the snapshot is taken on a PVC and not on PV.*
-
-```
-apiVersion: volumesnapshot.external-storage.k8s.io/v1
-kind: VolumeSnapshot
-metadata:
- name: snapshot-cstor-volume
- namespace:
-spec:
- persistentVolumeClaimName: cstor-vol1-claim
-```
-
-When the above snapshot specification is run through *kubectl* it creates two Kubernetes resources.
-
-- Volume snapshot
-- Volume snapshot data
-
-Following command is used to list the snapshots in a given namespace
-
-```
-kubectl get volumesnapshots -n
-```
-
-*Note 1: When cStor volume has three replicas, creation of volume snapshots is possible when the volume is in quorum, which means that least two of the replicas are online and fully synced.*
-
-[![cStor components](../assets/snapshot-scope.png)](../assets/snapshot-scope.png)
-
-## cStor volume clones
-
-Clones in cStor are also instantaneous. They are created in the same namespace as that of the snapshot and belong to the same cStor Pool. In Kubernetes storage architecture, clone from a snapshot is the same as creating a new PV.
-
-Example specification for creating a clone out of snapshot
-
-```
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
- name: vol-claim-cstor-snapshot
- namespace:
- annotations:
- snapshot.alpha.kubernetes.io/snapshot: snapshot-cstor-volume
-spec:
- storageClassName: openebs-snapshot-promoter
-```
-
-For StatefulSet, snapshots are created against each PV. For creating a clone, any one of the snapshot is used in the volumeClaimTemplates specification. Kubernetes will launch the PVs for all the statefulset pods from the chosen snapshot. Example specification for creating a clone out of snapshot for a StatefulSet application is shown below.
-
-
-```
-volumeClaimTemplates:
-
-- metadata:
- name: elasticsearch-logging-clone
- annotations:
- snapshot.alpha.kubernetes.io/snapshot: snapshot-20181121103439-078f
- spec:
- storageClassName: openebs-snapshot-promoter
-```
-
-*Note: One can mix and match the snapshots between deployments and statefulsets while cloning. For example, a snapshot taken using the PVC of a deployment can be used to clones for statefulset.*
-
-
-## cStor cli
-
-| CLI command | Purpose or remarks |
-| ---------------------------------- | ------------------------------------------------------------ |
-| kubectl get spc | Get the list of storage pool claims (SPC is the spec for each cStor storage pool) |
-| kubectl get csp | Get the list of storage pools (cstor storage pools). One SPC refers to one or more CSPs. If there are two pools - cstor-ssd-pool on 3 nodes and cstor-sas-pool on 4 nodes, then there will be two SPCs and 7 CSPs |
-| kubectl get blockdevice --labels | Get the list of all blockdevice CRs in the entire cluster |
-| kubectl get cstorvolume -n openebs | Get the list of cStor volumes in the entire cluster |
-| kubectl get cvr -n openebs | Get the list of cStor volumes in the entire cluster |
-| kubectl get volumesnapshot | Get the list of volumesnapshots in the entire cluster |
-
-## High Availability of cStor
-
-cStor volumes when deployed in 3 replica mode, it provides high availability of data as long as the replicas are in quorum. At least two replicas are required to be healthy to call the volume is in quorum. In a 3 replicas setup, if two replicas become unavailable due to either pool failure or unavailability, the volume is set to read-only by the target. When the volume replicas are back online, it will start rebuilding data from the healthy replica one by one and the volume is set to be read-write as soon as the quorum is achieved.
-
-## Ephemeral Disk Support
-
-Kubernetes services such as GKE, EKS and AKS have cloud VMs where when a node is lost a new replacement node is provided with formatted new blockdevice as part of their Auto Scaling policy which means that the data on local disks of the original node is lost permanently. However, with cStor, you can still build a reliable and highly available persistent storage solution using these ephemeral local disks by using cStor's replication feature.
-
-For this to work, cStor StorageClass has to be configured with `ReplicaCount=3`. With this setting data on cStor volume is replicated to three copies on different nodes. In the ephemeral nodes scenario, when a node is lost, Kubernetes brings up a new node with the same label. Data of cStor volumes continues to be available and will be served from one of the other two remaining replicas. OpenEBS detects that a new node has come up with all new disks and it will automatically reconfigure the blockdevices CR to the existing StoragePoolClaim config or StoragePool configuration. The net effect is that the cStorPool instance that was gone with the lost node is recreated on the newly replaced node. cStor will then start rebuilding the cStor volume replicas onto this new cStorPool instance.
-
-**Note:** Rebuilding of data onto the new cStorPool instance can take time depending on the amount of data to be rebuilt. During this time the volume quorum needs to be maintained. In other words, during rebuilding time, the cStorPool is in an unprotected state where losing another node will cause permanent loss of data. Hence, during Kubernetes node upgrades, administrators need to make sure that the cStorPools are fully rebuilt and volumes are healthy/online before starting the upgrade of the next node.
-
-## Thin provisioning in cStor
-
-cStor supports thin provisioning of volumes. By default, a volume is provisioned with whatever capacity that is mentioned in StorageClass. Capacity of the pool can be expanded on demand by adding more disks to the cStor pool. cStor architecture also supports the resizing of a provisioned volume on the fly using CSI provisioner supported cStor volume.
-
-
-## Performance testing of cStor
-
-Performance testing includes setting up the pools, StorageClasses and iSCSI server tunables. Some best practices include
-
-- Number of replicas - For Statefulsets, when the application is doing the required replication, one replica per volume may be sufficient
-
-- Network latency - Latency between the pods and zones (if the replicas are placed across AZs) plays a major role in the performance results and it needs to be in the expected range
-
-## Known limitations
-
-**After a node shutdown, I see application stuck in container creating waiting for PV to be attached.:**
-
-When a Kubernetes node is involved in an unplanned shutdown like a power loss or software hang etc, the PVs which are mounted on that node will not be mounted by Kubelet till the timeout of 30 minutes or 1800 seconds. In such scenarios, the application will lose connectivity to persistent storage. This limitation of Kubernetes will be resolved for OpenEBS PVs when the CSI driver support is available for OpenEBS. With OpenEBS CSI driver in place, the unavailability of the node can be detected by the CSI driver node agent and do the force mount of the PV on the new node. The alpha version of CSI support is available from OpenEBS 1.2.0.
-
-**Cannot disable thin provisioning**
-
-By default, cStor supports thin provisioning, which means that when a storage class or PVC specifies the size of the volume and the pool from which the volume must be provisioned, and volume of that size is provisioned irrespective of whether that much free space is available in the pool or not. There is no option to specify thick provision while requesting a volume provisioning.
-
-**Delayed snapshots**
-
-In cStor, snapshots are taken only when the volume replicas are in quorum. For example, as soon as the volume is provisioned on cStor, the volume will be in ready state but the quorum attainment may take few minutes. Snapshot commands during this period will be delayed or queued till the volumes replicas attain quorum. The snapshot commands received by the target are also delayed when the cStor volume is marked read-only because of no-quorum.
-
-## Troubleshooting areas
-
-Following are most commonly observed areas of troubleshooting
-
-1. **iSCSI tools are not installed or iscsi.d service is not running**
-
- **Symptom:**
-
- If iSCSI tools are not installed on the nodes, after launching the application with PVC pointing to OpenEBS provisioner, you may observe the application is in "ContainerCreating" state.
-
- `kubectl describe pod ` may give an error that looks like the following
-
- ```
- MountVolume.WaitForAttach failed for volume "pvc-33408bf6-2307-11e9-98c4-000d3a028f9a" : executable file not found in $PATH
- ```
-
- **Resolution**:
-
- Install iSCSI tools and make sure that iSCSI service is running. See [iSCSI installation](/docs/next/prerequisites.html)
-
-2. **Multi-attach error is seen in the logs**
-
- **Symptom:**
-
- In a node failure case, it is sometimes observed that the application pods that were running on the node that went down are moved, the cStor target pods are also moved to other nodes, but the application throws up an error similar to below:
-
- ```
- Warning FailedAttachVolume 4m15s attachdetach-controller
- Multi-Attach error for volume "pvc-d6d52184-1f24-11e9-bf1d-0a0c03d0e1ae"
- Volume is already exclusively attached to one node and can't be attached to another
- ```
-
- **Resolution :**
-
- This is because of the Kubernetes limitation [explained above](#known-limitations). OpenEBS CSI driver will resolve this issue. See [roadmap](#cstor-roadmap). In this situation if the node is stuck in a non-responsive state and if the node has to be permanently deleted, remove it from the cluster.
-
-3. **Application is in ContainerCreating state, observed connection refused error**
-
- **Symptom:**
-
- Application is seen to be in the `ContainerCreating` state, with kubectl describe showing `Connection refused` error to the cStor PV.
-
- **Resolution**:
-
- This error eventually could get rectified on the further retries, volume gets mounted and application is started. This error is usually seen when cStor target takes some time to initialize on low speed networks as it takes time to download cStor image binaries from repositories ( or ) or because the cstor target is waiting for the replicas to connect and establish quorum. If the error persists beyond 5 minutes, logs need to be verified, contact [OpenEBS Community](/docs/introduction/community) for support.
-
-4. **Kubelet seen consuming high RAM usage with cStor volumes**
-
- The cause of high memory consumption of Kubelet is seen on Fedora 29 mainly due to the following.
-
- There are 3 modules are involved - `cstor-istgt`, `kubelet` and `iscsiInitiator(iscsiadm)`.
- kubelet runs iscsiadm command to do discovery on cstor-istgt. If there is any delay in receiving response of discovery opcode (either due to network or delay in processing on target side), iscsiadm retries few times and gets into infinite loop dumping error messages as below:
-
- ```
- iscsiadm: Connection to Discovery Address 127.0.0.1 failed
- iscsiadm: failed to send SendTargets PDU
- iscsiadm: connection login retries (reopen_max) 5 exceeded
- iscsiadm: Connection to Discovery Address 127.0.0.1 failed
- iscsiadm: failed to send SendTargets PDU
- ```
-
- kubelet keeps taking this response and accumulates the memory.More details can be seen [here](https://github.com/openebs/openebs/issues/2382).
-
-**Resolution:**
-
-This issue is fixed in 0.8.1 version.
-
-## cStor roadmap
-
-| Feature | Release |
-| ------------------------------------------------------------ | ----------------- |
-| **cStor Pool features** | |
-| cStor pool creation and initial use with either stripe mode or RAIDZ0 (mirror) mode | 0.8.0 |
-| Adding a new cStorPool instance to the existing cstor-pool-config(SPC) | 0.8.1 |
-| Ephemeral disk/pool support for rebuilding | 0.8.1 |
-| Expanding a given pool replica (add disks to a pool after it is created) (Alpha) | 1.2.0 |
-| Support for RAIDZ1 in cStorPool | 1.1.0 |
-| Support for RAIDZ2 in cStorPool | 1.1.0 |
-| Deleting a pool replica (Alpha) | 1.2.0 |
-| Disk replacement in a given cStor pool instance created using CSPC way(Alpha) | 1.5.0 |
-| |
-| **cStor volume features** | |
-| Expanding the size of a cStor volume using CSI provisioner (Alpha) | 1.2.0 |
-| CSI driver support(Alpha) | 1.1.0 |
-| Snapshot and Clone of cStor volume provisioned via CSI provisioner(Alpha) | 1.4.0 |
-| Scaling up of cStor Volume Replica | 1.3.0 |
-| Scaling down of cStor Volume Replica | 1.4.0 |
-
-## Advanced concepts in cStor
-
-### Custom resources related to cStor
-
-[![cStor custom resources](../assets/cstor-cr.png)](../assets/cstor-cr.png)
-
-**Storage Pool Claim or SPC:**
-
-Pool specification or Pool aggregate that holds all CSPs together
-
-**cStor Storage Pool or CSP :**
-
-An individual cStor pool on one node. There will also be a cStor-Pool pod corresponding to each CSP custom resource. *When a new node is added to the Kubernetes cluster and configured to host a cStor pool, a new CSP CR and a cStor Pool Pod are provisioned on that node and CVRs are migrated from other nodes for volume rebalancing. (CSP auto scaling feature is in the roadmap)*
-
-**cStor Volume or CV :**
-
-An individual persistent volume. For each PV provisioned through CAS type as `cStor`, there will be a corresponding CV custom resource
-
-**cStor Volume Replica or CVR :**
-
-Each CV will have as many CVRs as the number of replicas configured in the corresponding cStor storage class.
-
-**Blockdevice :**
-
-Each discovered blockdevice on a node is added as a blockdevice CR. This is needed to identify a blockdevice uniquely across the Kubernetes cluster. SPC specification contains the information about the blockdevice CRs that correspond to a CSP on that given node
-
-## See Also:
-
-[Storage Engines in OpenEBS](/docs/concepts/casengines) [Creating cStor Pool](/docs/deprecated/spc-based-cstor#creating-cStor-storage-pools) [Provisioning cStor volumes](/docs/deprecated/spc-based-cstor#provisioning-a-cStor-volume)
diff --git a/docs/main/concepts/data-engines/data-engines.md b/docs/main/concepts/data-engines/data-engines.md
index 95a9ffd91..dd9bd6bfb 100644
--- a/docs/main/concepts/data-engines/data-engines.md
+++ b/docs/main/concepts/data-engines/data-engines.md
@@ -1,6 +1,6 @@
---
-id: casengines
-title: OpenEBS Data Engines
+id: dataengines
+title: Overview of OpenEBS Data Engines
keywords:
- Data Engines
- OpenEBS Data Engines
@@ -11,49 +11,49 @@ OpenEBS Data Engine is the core component that acts as an end-point for serving
OpenEBS provides a set of Data Engines, where each of the engines is built and optimized for running stateful workloads of varying capabilities and running them on Kubernetes nodes with varying range of resources.
-Platform SRE or administrators typically select one or more [data engines](#data-engine-capabilities) to be used in their Kubernetes cluster. The selection of the data engines depend on the following two aspects:
+Platform Site Reliability Engineering (SRE) or administrators typically select one or more [data engines](#data-engine-capabilities) to be used in their Kubernetes cluster. The selection of the data engines depend on the following two aspects:
- [Node Resources or Capabilities](#node-capabilities)
- [Stateful Application Capabilities](#stateful-workload-capabilities)
## Node Capabilities
-Node Resources or Capabilities refer to the CPU, RAM, Network and Storage available to Kubernetes nodes.
+Node Resources or Capabilities refer to the CPU, RAM, Network, and Storage available to Kubernetes nodes.
-Based on the CPU, RAM and Network bandwidth available to the nodes, the nodes can be classified as:
+Based on the CPU, RAM, and Network bandwidth available to the nodes, the nodes can be classified as:
-* Small Instances that typically have up to 4 cores, 16GB RAM and Gigabit Ethernet
-* Medium Instances that typically have up to 16 cores, 32GB RAM and up to 10G Networks
-* Large Instances that typically have more than 16 - even 96 cores, up to 256G or more RAM and 10 to 25G Networks
+* Small instances that typically have up to 4 cores, 16GB RAM and Gigabit Ethernet
+* Medium instances that typically have up to 16 cores, 32GB RAM and up to 10G Networks
+* Large instances that typically have more than 16 - even 96 cores, up to 256G or more RAM and 10 to 25G Networks
The Storage to the above instance can be made available in the following ways:
-* Ephemeral storage - where storage is lost when node is taken out of the cluster as part of auto-scaling or upgrades.
-* Cloud Volumes or Network Attached storage - that can be re-attached to new nodes if the older node is removed from cluster.
+* Ephemeral Storage - where storage is lost when node is taken out of the cluster as part of auto-scaling or upgrades
+* Cloud Volumes or Network Attached Storage - that can be re-attached to new nodes if the older node is removed from cluster
* Direct Attached Storage
* Categorize based on the performance of the storage like slow (HDD via SAS), medium (SSD via SAS), fast (SSD or Persistent Flash via NVMe)
-Another key aspect that needs to be considered is the nature of the Kubernetes cluster size:
-- Is it for an Edge or Home cluster with single node
-- Hyperconverged nodes - where Stateful workload and its storage can be co-located.
-- Disaggregated - where Stateful workload and its storage will be served from different nodes.
+Another key aspect that must be considered is the nature of the Kubernetes cluster size:
+- Is it for an edge or home cluster with single node?
+- Hyperconverged nodes - where Stateful workload and its storage can be co-located
+- Disaggregated - where Stateful workload and its storage will be served from different nodes
The following table summarizes the recommendation for small to medium instances, with HDDs, SSDs limited to 2000 IOPS:
| Node Capabilities | | | |
| ----------------------------| :--------------: | :---------------------: | :---------------: |
-| Ephemeral Node or Storage | Non-ephemeral | Non-Ephemeral | Ephemeral |
-| Size of cluster | Single Node | Multiple nodes | Multiple nodes |
-| Storage Deployment type | Hyperconverged | Hyperconverged | Disaggregated |
-| Recommended Data Engines | Local PV | Local PV, cStor, Jiva | cStor, Jiva |
+| Ephemeral Node or Storage | Non-Ephemeral | Non-Ephemeral | Ephemeral |
+| Size of Cluster | Single Node | Multiple Nodes | Multiple Nodes |
+| Storage Deployment Type | Hyperconverged | Hyperconverged | Disaggregated |
+| Recommended Data Engines | Local Engine | Local Engine and Replicated Engine | Replicated Engine |
The following table summarizes the recommendation for small to medium instances with fast SSDs capable of higher IOPS and Throughput, typically connected using NVMe:
| Node Capabilities | | | |
| ----------------------------| :--------------: | :---------------------: | :---------------: |
-| Ephemeral Node or Storage | Non-ephemeral | Non-Ephemeral | Ephemeral |
-| Size of cluster | Single Node | Multiple nodes | Multiple nodes |
-| Storage Deployment type | Hyperconverged | Hyperconverged | Disaggregated |
-| Recommended Data Engines | Local PV | Local PV, Mayastor | Mayastor |
+| Ephemeral Node or Storage | Non-Ephemeral | Non-Ephemeral | Ephemeral |
+| Size of Cluster | Single Node | Multiple Nodes | Multiple Nodes |
+| Storage Deployment Type | Hyperconverged | Hyperconverged | Disaggregated |
+| Recommended Data Engines | Local Engine | Local Engine and Replicated Engine | Replicated Engine |
## Stateful Workload Capabilities
@@ -61,10 +61,10 @@ The following table summarizes the recommendation for small to medium instances
Often storage is an integral part of any application, used without realizing that it actually exists.
Storage can be further decomposed into two distinct layers:
-- Stateful Workloads or the Data Platform Layer - which comprises of SQL/NoSQL Database, Object and Key/Value stores, Message Bus and so forth.
-- Storage engine or Data Engine layer that provides block storage to the Stateful workloads to persist the data onto the storage devices.
+- Stateful Workloads or the Data Platform Layer - which comprises of SQL/NoSQL Database, Object and Key/Value stores, Message Bus, and so forth.
+- Storage Engine or Data Engine layer that provides block storage to the Stateful workloads to persist the data onto the storage devices.
-The key features or capabilities provided by the Storage can be classified as:
+The key features or capabilities provided by the storage can be classified as:
- Availability
- Consistency
- Durability
@@ -73,17 +73,17 @@ The key features or capabilities provided by the Storage can be classified as:
- Security
- Ease of Use
-With serverless and cloud native becoming mainstream a key shift has happened in terms of how the Stateful workloads are developed, with many of the workloads natively supporting the key storage features like Availability, Consistency and Durability. For example:
-- **Distributed:** Stateful workloads like MongoDB have availability features like protecting against node failures built into them. Such systems will expect the Data engines to provide capacity and performance required with the data consistency/durability at the block level.
-- **Distributed and Standalone:** Stateful workloads like Cassandra can benefit from the availability features from the data engines as it might help speed up the rebuild times required to rebuild a failed cassandra node. However, this comes at the cost of using extra storage by the data engines.
-- **Standalone:** Stateful workloads like MySQL (standalone) focus more on Consistency and Database features and depending on the underlying data engine for providing Availability, Performance, Durability and other features.
+With serverless and cloud native becoming mainstream a key shift has happened in terms of how the Stateful workloads are developed, with many of the workloads natively supporting the key storage features like availability, consistency, and durability. For example:
+- **Distributed:** Stateful workloads like MongoDB have availability features like protecting against node failures built into them. Such systems will expect the Data Engines to provide capacity and performance required with the data consistency/durability at the block level.
+- **Distributed and Standalone:** Stateful workloads like Cassandra can benefit from the availability features from the Data Engines as it might help speed up the rebuild times required to rebuild a failed Cassandra node. However, this comes at the cost of using extra storage by the Data Engines.
+- **Standalone:** Stateful workloads like MongoDB (standalone) focus more on consistency and database features. It depends on the underlying Data Engine for providing availability, performance, durability, and other features.
-Each stateful application comes with a certain capabilities and depends on the [data engines](#data- engine-capabilities) for complementary capabilities. The following table summarizes the recommendation on data engines based on the capabilities required by Applications:
+Each stateful application comes with a certain capabilities and depends on the [Data Engines](#data- engine-capabilities) for complementary capabilities. The following table summarizes the recommendation on Data Engines based on the capabilities required by Applications:
| Workload Type | Distributed | Stand-alone | Distributed and/or Stand-alone |
| ----------------------------| :--------------: | :---------------------: | :---------------------------: |
| Required Capabilities | Performance | Availability | Performance and Availability |
-| Recommended Data Engines | Local PV | Jiva,cStor, Mayastor | Mayastor |
+| Recommended Data Engines | Local Engine | Replicated Engine | Replicated Engine |
## Data Engine Capabilities
@@ -94,55 +94,46 @@ All OpenEBS Data Engines support:
- Dynamic Provisioning of Persistent Volumes
- Strong Data Consistency
-OpenEBS data engines can be classified into two categories.
+OpenEBS Data Engines can be classified into two categories.
### Local Engines
-OpenEBS Local Engines can create persistent volumes or PVs out of local disks or hostpaths or using the volume managers like LVM or ZFS on the Kubernetes worker nodes. Local Engines are well suited for cloud native applications that have the availability, scalability features built into them. Local Engines are also well suited for stateful workloads that are short lived like Machine Learning jobs or Edge cases where there is a single node Kubernetes cluster.
-
-Depending on the type of storage attached to the Kubernetes worker nodes and your preference of local filesystem, you can select from different flavors of Dynamic [Local PV](/docs/concepts/localpv) - Hostpath, Device, LVM, ZFS or Rawfile.
-- [Local PV hostpath](/docs/user-guides/localpv-hostpath)
-- [Local PV device](/docs/user-guides/localpv-device)
-- [ZFS Local PV](https://github.com/openebs/zfs-localpv)
-- [LVM Local PV](https://github.com/openebs/lvm-localpv)
-- [Rawfile Local PV](https://github.com/openebs/rawfile-localpv)
+OpenEBS Local Engines can create Persistent Volumes (PVs) out of local disks or hostpaths or using the volume managers on the Kubernetes worker nodes. Local Engines are well suited for cloud native applications that have the availability, scalability features built into them. Local Engines are also well suited for stateful workloads that are short lived like Machine Learning (ML) jobs or edge cases where there is a single node Kubernetes cluster.
:::note
-Local Volumes are only available from the the node on which the persistent volume is created. If that node fails, the application pod will not be re-scheduled to another node.
+Local volumes are only available from the the node on which the persistent volume is created. If that node fails, the application pod will not be re-scheduled to another node.
:::
-The below table identifies few differences among the different OpenEBS Local engines.
+The below table identifies few differences among the different OpenEBS Local Engines.
| Feature | Hostpath | Rawfile | Device | ZFS | LVM |
| -------------------------------------------- | :---: | :------: | :--------: | :------: | :------: |
-| Near disk performance | Yes | Yes | Yes | No | Yes |
+| Near Disk Performance | Yes | Yes | Yes | No | Yes |
| Full Backup and Restore using Velero | Yes | Yes | Yes | Yes | Yes |
| Thin Provisioning | Yes | Yes | No | Yes | Yes |
-| On demand capacity expansion | Yes | Yes | No | Yes | Yes |
-| Disk pool or aggregate support | Yes | Yes | No | Yes | Yes |
-| Disk resiliency (RAID support ) | Yes | Yes | No | Yes | Yes |
+| On-demand Capacity Expansion | Yes | Yes | No | Yes | Yes |
+| Disk Pool or Aggregate Support | Yes | Yes | No | Yes | Yes |
+| Disk Resiliency (RAID Support) | Yes | Yes | No | Yes | Yes |
| Snapshots | No | No | No | Yes | Yes |
| Incremental Backups | No | No | No | Yes | Yes |
| Clones | No | No | No | Yes | No |
-| Works on OS mounted storage | Yes | Yes | No | No | No |
+| Works on OS Mounted storage | Yes | Yes | No | No | No |
### Replicated Engines
-Replicated Volumes as the name suggests, are those that can synchronously replicate the data to multiple nodes. These engines provide protection against node failures, by allowing the volume to be accessible from one of the other nodes where the data was replicated to. The replication can also be setup across availability zones helping applications move across availability zones. Replicated Volumes are also capable of enterprise storage features like snapshots, clone, volume expansion and so forth.
-
-Depending on the type of storage attached to your Kubernetes worker nodes and application performance requirements, you can select from [Jiva](/docs/concepts/jiva), [cStor](/docs/concepts/cstor) or [Mayastor](/docs/concepts/mayastor).
+Replicated Engines (aka Replicated Volumes) are those that can synchronously replicate the data to multiple nodes. These engines provide protection against node failures, by allowing the volume to be accessible from one of the other nodes where the data was replicated to. The replication can also be setup across availability zones helping applications move across availability zones. Replicated Volumes are also capable of enterprise storage features like snapshots, clone, volume expansion, and so forth.
-- [Mayastor](https://mayastor.gitbook.io/introduction/)
-- [cStor](https://github.com/openebs/cstor-operators/blob/master/docs/quick.md)
-- [Jiva](https://github.com/openebs/jiva-operator)
+:::tip
+Depending on the type of storage attached to your Kubernetes worker nodes and application performance requirements, you can select from [Local Engine](local-engine.md) or[Replicated Engine](replicated-engine.md).
+:::
:::note
-An important aspect of the OpenEBS Data Layer is that each volume replica is a full copy of the data. This leads to the following capacity constraints that need to be kept in mind when using OpenEBS replicated volumes.
+An important aspect of the OpenEBS Data Layer is that each volume replica is a full copy of the data. This leads to the following capacity constraints that need to be kept in mind when using OpenEBS Replicated Volumes.
- Volumes can only be provisioned with capacity that can be accommodated in a single node by a single storage device or a pool of devices. Volume replica data will not be stripped or sharded across different nodes.
- Depending on the number of replicas configured, OpenEBS will use as many Volume Replicas. Example: A 10GB volume with 3 replicas will result in using 10GB on 3 different nodes where replicas are provisioned.
-- Volume Replicas are thin provisioned, so the used capacity will increase only when the applications really write data into Volumes.
-- When Volume Snapshots is taken, the snapshot is taken on all its healthy volume replicas
+- Volume Replicas are thin provisioned, so the used capacity will increase only when the applications really write data into volumes.
+- When Volume Snapshots is taken, the snapshot is taken on all its healthy volume replicas.
:::
Below table identifies few differences among the different OpenEBS Replicated engines.
@@ -163,70 +154,30 @@ Below table identifies few differences among the different OpenEBS Replicated en
| Suitable for high capacity (>50GB) workloads | No | Yes | Yes |
| Near disk performance | No | No | Yes |
-
-## When to choose which OpenEBS engine? {#cstor-vs-jiva-vs-localpv-features-comparison}
-
-As indicated in the above table, each storage engine has its own advantage. Choosing an engine depends completely on your platform (resources and type of storage), the application workload as well as its current and future growth in capacity and/or performance. Below guidelines provide some help in choosing a particular engine.
-
-### Ideal conditions for choosing cStor:
-
-- When you want synchronous replication of data and have multiple disks on the nodes.
-- When you are managing storage for multiple applications from a common pool of local or network disks on each node. Features such as thin provisioning, on demand capacity expansion of the pool and volume will help manage the storage layer.
-- When you want to to build Kubernetes native storage services similar to AWS EBS or Google PD on the Kubernetes clusters running on-premise.
-- When you need storage level snapshot and clone capability.
-- When you need enterprise grade storage protection features like data consistency, resiliency (RAID protection).
-- When you need to provide cross-az available volumes in Cloud or On-premise.
-
-Do not use cStor when your underlying storage devices are NVMe SSDs and your applications need high performance.
-
-
-### Ideal conditions for choosing Jiva:
-
-- When you want synchronous replication of data and have a single local disk or a single managed disk such as cloud disks (EBS, GPD) and you don't need snapshots or clones feature.
-- Jiva is easiest to manage as disk management or pool management is not in the scope of this engine. A Jiva pool is a mounted path of a local disk or a network disk or a virtual disk or a cloud disk.
-- Jiva is a preferred engine than cStor when
- - Your application does not require snapshots and clones features.
- - When you do not have free disks on the node. Jiva can be used on a `hostdir` and still achieve replication.
- - When you do not need to expand storage dynamically on local disk. Adding more disks to a Jiva pool is not possible, so the size of Jiva pool is fixed if it on a physical disk. However if the underlying disk is a virtual or network or cloud disk then, it is possible to change the Jiva pool size on the fly.
-- Capacity requirements are small.
-- When you need to provide cross-az available volumes in Cloud or On-premise.
-
-### Ideal conditions for choosing OpenEBS hostpath LocalPV:
+### Ideal Conditions for selecting OpenEBS Local Engine:
- When applications are managing replication and availability themselves and there is no need of replication at storage layer. In most such situations, the applications are deployed as `statefulset`.
-- When higher performance than Jiva or cStor is desired.
-- hostpath is recommended when dedicated local disks are not available for a given application or dedicated storage is not needed for a given application. If you want to share a local disk across many applications hostpath, Rawfile, ZFS and LVM LocalPV is the right approach.
-
-### Ideal conditions for choosing OpenEBS device LocalPV:
-
-- When applications are managing replication themselves and there is no need of replication at storage layer. In most such situations, the applications are deployed as `statefulset`
-- When higher performance than Jiva or cStor is desired.
-- When near disk performance is a need and you need to avoid noisy neighbor effect of shared volumes. The device volume is dedicated to write a single SSD or NVMe interface to get the highest performance.
-
-### Ideal conditions for choosing OpenEBS ZFS or LVM LocalPV:
-
-- When applications are managing replication themselves and there is no need of replication at storage layer. In most such situations, the applications are deployed as `statefulset`
-- When higher performance than Jiva or cStor is desired.
+- Local Engine is recommended when dedicated local disks are not available for a given application or dedicated storage is not needed for a given application.
- When near disk performance is a need along with features like snapshots, volume expansion, pooling of storage from multiple storage devices.
-### Ideal conditions for choosing OpenEBS Mayastor:
+### Ideal Conditions for selecting OpenEBS Replicated Engine:
- When you need high performance storage using NVMe SSDs and the cluster is capable of NVMeoF.
- When you need replication or availability features to protected against node failures.
-- Mayastor is designed for the next gen compute and storage technology and is under active development.
+- Replicated Engine is designed for the next gen compute and storage technology and is under active development.
-As Mayastor is under active development, you can also influence the features that are being built by joining [OpenEBS community on Kubernetes Slack](https://kubernetes.slack.com). If you are already signed up, head to our discussions at [#openebs](https://kubernetes.slack.com/messages/openebs/) channel.
+As Replicated Engine is under active development, you can also influence the features that are being built by joining [OpenEBS community on Kubernetes Slack](https://kubernetes.slack.com). If you are already signed up, head to our discussions at [#openebs](https://kubernetes.slack.com/messages/openebs/) channel.
### Summary
A short summary is provided below.
-- LocalPV is preferred if your application is in production and does not need storage level replication.
-- cStor is preferred if your application is in production and requires storage level replication.
-- Jiva is preferred if your application is small, requires storage level replication but does not need snapshots or clones.
-- Mayastor is preferred if your application needs low latency and near disk throughput, requires storage level replication and your nodes have high CPU, RAM and NVMe capabilities.
+- Local Engine is preferred if your application is in production and does not need storage level replication.
+- Replicated Engine is preferred if your application needs low latency and near disk throughput, requires storage level replication and your nodes have high CPU, RAM, and NVMe capabilities.
## See Also:
-[Mayastor User Guide](https://mayastor.gitbook.io/introduction/) [cStor User Guide](/docs/user-guides/cstor-csi) [Jiva User Guide](/docs/user-guides/jiva-guide) [Local PV Hostpath User Guide](/docs/user-guides/localpv-hostpath) [Local PV Device User Guide](/docs/user-guides/localpv-device)
+[User Guides](../../user-guides/)
+[Local Engine User Guide](../../user-guides/local-engine-user-guide/)
+[Replicated Engine User Guide](../../user-guides/replicated-engine-user-guide/)
\ No newline at end of file
diff --git a/docs/main/concepts/data-engines/local-engine.md b/docs/main/concepts/data-engines/local-engine.md
index 1e494ca72..e01e8c867 100644
--- a/docs/main/concepts/data-engines/local-engine.md
+++ b/docs/main/concepts/data-engines/local-engine.md
@@ -1,52 +1,46 @@
---
-id: localpv
-title: OpenEBS Local PV
+id: localengine
+title: OpenEBS Local Engine
keywords:
- - Local PV
- - OpenEBS Local PV
-description: This document provides you with a brief explanation of OpenEBS Local PV, quick start guides, and when to use OpenEBS Local PV, and when not to use OpenEBS Local PV and its limitations.
+ - Local Engine
+ - OpenEBS Local Engine
+description: This document provides you with a brief explanation of OpenEBS Local Engine, quick start guides, and when to use OpenEBS Local Engine, and when not to use OpenEBS Local Engine, and its limitations.
---
-## Overview
+## Local Engine Overview
-OpenEBS provides Dynamic PV provisioners for [Kubernetes Local Volumes](https://kubernetes.io/docs/concepts/storage/volumes/#local). A local volume implies that storage is available only from a single node. A local volume represents a mounted local storage device such as a disk, partition or directory.
+OpenEBS provides Dynamic PV provisioners for [Kubernetes Local Volumes](https://kubernetes.io/docs/concepts/storage/volumes/#local). A Local Engine (aka Local Volume) implies that storage is available only from a single node. A local volume represents a mounted local storage device such as a disk, partition, or directory.
-As the Local Volume is accessible only from a single node, local volumes are subject to the availability of the underlying node and are not suitable for all applications. If a node becomes unhealthy, then the local volume will also become inaccessible, and a Pod using it will not be able to run. Applications using local volumes must be able to tolerate this reduced availability, as well as potential data loss, depending on the durability characteristics of the underlying disk.
+As the local volume is accessible only from a single node, local volumes are subject to the availability of the underlying node and are not suitable for all applications. If a node becomes unhealthy, then the local volume will also become inaccessible and a Pod using it will not be able to run. Applications using local volumes must be able to tolerate this reduced availability, as well as potential data loss, depending on the durability characteristics of the underlying disk.
## Use Cases
Examples of good workloads that can benefit from local volumes are:
-- Replicated databases like MongoDB, Cassandra
+- Replicated databases like MongoDB and Cassandra
- Stateful workloads that can be configured with their own high-availability configuration like Elastic, MinIO
-- Edge workloads that typically run on a single node or in Single node Kubernetes Clusters.
+- Edge workloads that typically run on a single node or in single node Kubernetes Clusters.
OpenEBS helps users to take local volumes into production by providing features that are currently missing in Kubernetes like:
-- Dynamic PV Provisioners for local volumes.
-- Local Volumes backed by hostpath on filesystems like Ext3, XFS, LVM or ZFS.
-- Monitoring the health of underlying devices or storage used to create Local Volumes.
+- Dynamic PV provisioners for local volumes.
+- Local volumes backed by hostpath on filesystems like Ext3, XFS, LVM, or ZFS.
+- Monitoring the health of underlying devices or storage used to create local volumes.
- Capacity management features like over-provisioning and/or quota enforcement.
-- Make use of the underlying storage capabilities like snapshot, clone, compression and so forth when local volumes are backed by advanced filesystem like ZFS.
+- Make use of the underlying storage capabilities like snapshot, clone, compression, and so forth when local volumes are backed by advanced filesystem like LVM and ZFS.
- Backup and Restore via Velero.
-- Secure the local volumes via LUKS or by using in-build encryption support of the underlying filesystem like ZFS.
+- Secure the local volumes via LUKS or by using in-built encryption support of the underlying filesystem.
## Quick Start Guides
-OpenEBS provides different types of Local Volumes that can be used to provide locally mounted storage to Kubernetes Stateful workloads. Follow these guides to get started with each type of Local Volume.
+OpenEBS provides Local Volume that can be used to provide locally mounted storage to Kubernetes Stateful workloads. Refer to the [Quickstart Guide](../../quickstart-guide/) for more information.
-- [OpenEBS Local PV using Hostpath](/docs/user-guides/localpv-hostpath)
-- [OpenEBS Local PV using Block Devices](/docs/user-guides/localpv-device)
-- [OpenEBS Local PV using LVM](https://github.com/openebs/lvm-localpv)
-- [OpenEBS Local PV using ZFS](https://github.com/openebs/zfs-localpv)
-- [OpenEBS Local PV using Rawfile (sparse file)](https://github.com/openebs/rawfile-localpv)
+## When to use OpenEBS Local Engine?
-## When to use OpenEBS Local PVs
-
-- High performance is needed by those applications which manage their own replication, data protection and other features such as snapshots and clones.
+- High performance is needed by those applications which manage their own replication, data protection, and other features such as snapshots and clones.
- When local disks need to be managed dynamically and monitored for impending notice of them going bad.
-## When not to use OpenEBS Local PVs
+## When not to use OpenEBS Local Engine?
- When applications expect replication from storage.
- When the volume size may need to be changed dynamically but the underlying disk is not resizable.
@@ -55,20 +49,18 @@ OpenEBS provides different types of Local Volumes that can be used to provide lo
OpenEBS Local Volumes can be backed up and restored along with the application using [Velero](https://velero.io).
-Velero uses [Restic](https://github.com/restic/restic) for backing up and restoring Kubernetes local volumes. Velero can be configured to save the backups either in the cloud or on-premise with any S3 compatible storage like Minio. When user initiates the backup, Velero via the Restic, will copy the entire data from the Local PV to the remote location. Later, when the user wants to restore the application, velero injects an init container into the application that will download and populate the data into the volume from the backed up location. For more details on how Velero Restic works, please see documentation on [Velero Restic integration](https://velero.io/docs/v1.3.2/restic/).
+Velero uses [Restic](https://github.com/restic/restic) for backing up and restoring Kubernetes local volumes. Velero can be configured to save the backups either in the cloud or on-premise with any S3 compatible storage like MinIO. When user initiates the backup, Velero via the Restic, will copy the entire data from the Local Engine to the remote location. Later, when the user wants to restore the application, Velero injects an init container into the application that will download and populate the data into the volume from the backed up location. For more details on how Velero Restic works, refer to the [Velero Restic integration](https://velero.io/docs/v1.3.2/restic/) documentation.
While the preferred way for Backup and Restore for cloud native applications using Local Volumes is to use the application specific backup solution, you can use the Velero based Backup and Restore in the following cases:
-- Application doesn't natively provide a Backup and Restore solution
+- Application does not natively provide a Backup and Restore solution
- Schedule a daily or weekly backups of the data during off-peak hours
-- Migrating the application using Local Volumes to a new Cluster.
-
-You can refer to the [Local PV user guides](#/docs/next/uglocalpv-hostpath.html#backup-and-restore) for detailed instructions on Backup and Restore.
+- Migrating the application using Local Volumes to a new Cluster
A quick summary of the steps to backup include:
1. Install and Setup Velero by following the [Velero Documentation](https://velero.io/docs/).
-2. Prepare the application that needs to be backed up. Label and annotate the application, indicating that you would like to use velero to backup the volumes. For example, if you would like to backup an application pod named `hello-local-hostpath-pod` with a volume mount `local-storage`, you would need to run the following commands.
+2. Prepare the application that needs to be backed up. Label and annotate the application, indicating that you want to use Velero to backup the volumes. For example, if you want to backup an application pod named `hello-local-hostpath-pod` with a volume mount `local-storage`, run the following commands.
```
kubectl label pod hello-local-hostpath-pod app=test-velero-backup
@@ -83,23 +75,27 @@ A quick summary of the steps to restore include:
1. Install and Setup Velero, with the same provider where backups were saved.
-2. Local PVs are created with node affinity. As the node names will change when a new cluster is created, create the required PVC(s) prior to proceeding with restore.
+2. Local Engines are created with node affinity. As the node names will change when a new cluster is created, create the required PVC(s) prior to proceeding with restore.
```
kubectl apply -f https://openebs.github.io/charts/examples/local-hostpath/local-hostpath-pvc.yaml
```
-3. Use velero to restore the application and populate the data into the volume from the backup.
+3. Use Velero to restore the application and populate the data into the volume from the backup.
```
velero restore create rbb-01 --from-backup bbb-01 -l app=test-velero-backup
```
-## Limitations (or Roadmap items) of OpenEBS Local PVs
+## Limitations (or Roadmap items) of OpenEBS Local Engine
-- Size of the Local PV cannot be increased dynamically. LVM type of functionality inside Local PVs is a potential feature in the roadmap.
-- Disk quotas are not enforced by Local PV. An underlying device or hostpath can have more data than requested by a PVC or storage class. Enforcing the capacity is a roadmap feature.
+- Size of the Local Engine cannot be increased dynamically.
+- Disk quotas are not enforced by Local Engine. An underlying device or hostpath can have more data than requested by a PVC or storage class. Enforcing the capacity is a roadmap feature.
- Enforce capacity and PVC resource quotas on the local disks or host paths.
- SMART statistics of the managed disks is also a potential feature in the roadmap.
## See Also:
-[OpenEBS Architecture](/docs/concepts/architecture) [Understanding NDM](/docs/concepts/ndm) [Local PV Hostpath User Guide](/docs/user-guides/localpv-hostpath) [Local PV Device User Guide](/docs/user-guides/localpv-device)
+[OpenEBS Architecture](../architecture.md)
+[Local Engine Prerequisites](../../user-guides/local-engine-user-guide/prerequisites.mdx)
+[Installation](../../quickstart-guide/installation.md)
+[Local Engine User Guide](../../user-guides/local-engine-user-guide/)
+
diff --git a/docs/main/concepts/data-engines/replicated-engine.md b/docs/main/concepts/data-engines/replicated-engine.md
index c54831715..c55aaacc4 100644
--- a/docs/main/concepts/data-engines/replicated-engine.md
+++ b/docs/main/concepts/data-engines/replicated-engine.md
@@ -1,52 +1,49 @@
---
-id: mayastor
-title: Mayastor
+id: replicated-engine
+title: Replicated Engine
keywords:
- - Mayastor
-description: In this document you will learn about Mayastor and it's design goals.
+ - Replicated Engine
+description: In this document you will learn about Replicated Engine and it's design goals.
---
-## Mayastor Overview
+## Replicated Engine Overview
-**Mayastor** is a progressive sub-project of the CNCF (Cloud Native Computing Foundation) Open Source initiative [**OpenEBS**](https://openebs.io/). OpenEBS is a "Container Attached Storage" (CAS) solution that extends Kubernetes by providing a declarative data plane, offering resilient and adaptable storage for stateful applications.
+**Replicated Engine** is a progressive sub-project of the CNCF (Cloud Native Computing Foundation) Open Source initiative [**OpenEBS**](https://openebs.io/). OpenEBS is a Container Native Storage (CNS) solution that extends Kubernetes by providing a declarative data plane, offering resilient and adaptable storage for stateful applications.
-----
+## Replicated Engine Design Goals
-## Mayastor Design Goals
+The fundamental design objectives driving Replicated Engine's development are as follows:
-The fundamental design objectives driving Mayastor's development are as follows:
-
-- **High Availability and Durability**: Mayastor aims to ensure the persistence of data with high levels of availability and durability, contributing to the reliability of applications in a Kubernetes environment.
-- **Simplified Deployment and Management**: The project endeavors to achieve seamless deployment and effortless management, empowering autonomous SRE (Site Reliability Engineering) or development teams to handle the storage infrastructure efficiently.
-- **Low Overhead Abstraction**: Mayastor is designed to be a lightweight abstraction, minimizing resource overhead while delivering optimal storage performance for workloads.
-
-----
+- **High Availability and Durability**: Replicated Engine aims to ensure the persistence of data with high levels of availability and durability, contributing to the reliability of applications in a Kubernetes environment.
+- **Simplified Deployment and Management**: The project endeavors to achieve seamless deployment and effortless management, empowering autonomous Site Reliability Engineering (SRE), or development teams to handle the storage infrastructure efficiently.
+- **Low Overhead Abstraction**: Replicated Engine is designed to be a lightweight abstraction, minimizing resource overhead while delivering optimal storage performance for workloads.
## NVMe-oF Semantics and Performance
-Mayastor is built on the foundation of Intel's cutting-edge [Storage Performance Development Kit (SPDK)](https://spdk.io/). The project fully leverages the protocol and computational efficiency of NVMe-oF (Non-Volatile Memory Express over Fabrics) semantics. This approach harnesses the immense performance capabilities of the latest generation solid-state storage devices, delivering a storage abstraction that incurs performance overhead within single-digit percentages.
+Replicated Engine is built on the foundation of Intel's cutting-edge [Storage Performance Development Kit (SPDK)](https://spdk.io/). The project fully leverages the protocol and computational efficiency of Non-Volatile Memory Express over Fabrics (NVMe-oF) semantics. This approach harnesses the immense performance capabilities of the latest generation solid-state storage devices, delivering a storage abstraction that incurs performance overhead within single-digit percentages.
-In contrast, traditional pre-CAS shared storage systems are known to introduce overhead, often exceeding 40% and occasionally reaching as high as 80% of the underlying device or cloud volume capabilities. Moreover, pre-CAS shared storage can scale unpredictably as various workloads compete for access to shared storage resources.
+In contrast, traditional pre-CNS shared storage systems are known to introduce overhead, often exceeding 40% and occasionally reaching as high as 80% of the underlying device or cloud volume capabilities. Moreover, pre-CNS shared storage can scale unpredictably as various workloads compete for access to shared storage resources.
{% hint style=“note” %}
-Although Mayastor utilizes NVMe-oF, it doesn't impose any requirements for the use of NVMe devices or cloud volumes.
+Although Replicated Engine utilizes NVMe-oF, it does not impose any requirements for the use of NVMe devices or cloud volumes.
{% endhint %}
----
-
-## Getting Started and User Documentation
+## Quick Start Guides
-For comprehensive insights into Mayastor's architecture, core concepts, and to begin using the platform, refer to the official user documentation available in GitBook format: [mayastor.gitbook.io](https://mayastor.gitbook.io/).
-
-----
+OpenEBS provides Local Volume that can be used to provide locally mounted storage to Kubernetes Stateful workloads. Refer to the [Quickstart Guide](../../quickstart-guide/) for more information.
## Source Code and Contributions
-To access the Mayastor source code or actively contribute to the project, visit the GitHub repository: https://github.com/openebs/mayastor.
+To access the Replicated Engine source code or actively contribute to the project, visit the [GitHub repository](https://github.com/openebs/mayastor).
----
## Community Support via Slack
-Join the vibrant[OpenEBS community on Kubernetes Slack](https://kubernetes.slack.com) for assistance and discussions related to OpenEBS and Mayastor. If you have questions or seek further information, visit the [#openebs](https://kubernetes.slack.com/messages/openebs/) channel. If you're not already part of the community, you can sign up on Kubernetes Slack for a collaborative experience.
+Join the vibrant [OpenEBS community on Kubernetes Slack](https://kubernetes.slack.com) for assistance and discussions related to OpenEBS and Replicated Engine. If you have questions or seek further information, visit the[#openebs](https://kubernetes.slack.com/messages/openebs/) channel. If you are not already part of the community, you can sign up on Kubernetes Slack for a collaborative experience.
+
+## See Also:
+[OpenEBS Architecture](../architecture.md)
+[Replicated Engine Prerequisites](../../user-guides/replicated-engine-user-guide/prerequisites.md)
+[Installation](../../quickstart-guide/installation.md)
+[Replicated Engine User Guide](../../user-guides/replicated-engine-user-guide/)
\ No newline at end of file
diff --git a/docs/main/concepts/jiva.md b/docs/main/concepts/jiva.md
deleted file mode 100644
index c76b09c9e..000000000
--- a/docs/main/concepts/jiva.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-id: jiva
-title: Jiva Overview
-keywords:
- - Jiva
- - OpenEBS
- - OpenEBS community
-description: This document provides you with a detailed overview of Jiva
----
-
-### Jiva
-
-Each Jiva Volume comprises of a Controller (or Target) and a set of Replicas. Both Controller and Replica functionalities are provided by the same binary and hence the same [docker image](https://hub.docker.com/r/openebs/jiva/). Jiva simulates a block device which is exposed via an iSCSI target implementation(gotgt - part of the Controller). This block device is discovered and mounted remotely on the host where the application pod is running. The Jiva Controller parallelly replicates the incoming IOs to its replicas. The Replica, in turn, writes these IOs to a sparse file.
-
-![Jiva storage engine of OpenEBS](../assets/jiva.png)
-
-#### Jiva Sparse File Layout
-
-The following content is modified with some architectural change as compared to Rancher's LongHorn [documentation](https://rancher.com/microservices-block-storage/).
-
-**Replica Operations of Jiva**
-
-------
-
-Jiva replicas are built using Linux sparse files, which support thin provisioning. Jiva does not maintain additional metadata to indicate which blocks are used. The block size is 4K. When a replica gets added to the controller, it creates an auto-generated snapshot(differencing disk). As the number of snapshots grows, the differencing disk chain could get quite long. To improve read performance, Jiva, therefore, maintains a read index table that records which differencing disk holds valid data for each 4K block. In the following figure, the volume has eight blocks. The read index table has eight entries and is filled up lazily as read operation takes place. A write operation writes the data on the latest file(head), deletes(fallocate) the corresponding block from the older snapshots(or differencing disks) and updates the index in the table, which now points to the live data.
-
-![Longhorn read index](../assets/Longhorn-blog-new.png)
-
-The read index table is kept in memory and consumes two bytes for each 4K block. A maximum of 512 auto-generated snapshots can be created for each volume. The read index table consumes a certain amount of in-memory space for each replica. A 1TB volume, for example, consumes 512MB of in-memory space.
-
-**Replica Rebuild**
-
-------
-
-The Jiva volume controller is responsible for initiating and coordinating the process of syncing the replicas. Once a replica comes up, it tries to get added to controller and controller will mark it as WO(Write Only). Then the replica initiates the rebuilding process from other healthy replicas. After the sync is completed, the volume controller sets the new replica to RW (read-write) mode.
-
-When the controller detects failures in one of its replicas, it marks the replica as being in an error state and the rebuilding process is triggered.
-
-**Note:** If REPLICATION_FACTOR is still met even after a replica is marked faulty, the controller will continue to serve R/W IOs. Else, it will wait for satisfying REPLICATION_FACTOR((n/2)+1; where n is the number of replicas).
-
-## See Also:
-
-[Which storage engine should I use?](/docs/concepts/casengines#cstor-vs-jiva-vs-localpv-features-comparison) [Jiva User Guide](/docs/user-guides/jiva-guide)
diff --git a/docs/main/concepts/ndm.md b/docs/main/concepts/ndm.md
deleted file mode 100644
index cf241b06d..000000000
--- a/docs/main/concepts/ndm.md
+++ /dev/null
@@ -1,70 +0,0 @@
----
-id: ndm
-title: Node Disk Manager
-keywords:
- - Node Disk Manager
- - NDM
- - NDM daemonset
- - NDM Roadmap
-description: Node Disk Manager(NDM) is an important component in the OpenEBS architecture. It is a daemonset which runs on each node, detects attached block devices based on the filters and loads them as block devices custom resource into Kubernetes.
----
-
-Node Disk Manager (NDM) is an important component in the OpenEBS architecture. NDM treats block devices as resources that need to be monitored and managed just like other resources such as CPU, Memory, and Network. It is a daemonset which runs on each node, detects attached block devices based on the configured filters, and loads them as block device custom resources into Kubernetes. These custom resources are aimed at helping hyper-converged Storage Operators by providing abilities like:
-
-- Easy to access inventory of Block Devices available across the Kubernetes Cluster.
-- Predict failures on the Disks to help with taking preventive actions.
-- Allow dynamically attaching/detaching disks to a storage pod, without restarting the corresponding NDM pod running on the Node where the disk is attached/detached.
-
-By doing all of the above, NDM contributes to the overall ease of provisioning persistent volumes.
-
-[![NDM Architecture](../assets/ndm.svg)](../assets/ndm.svg)
-
-NDM is deployed as a daemonset during the installation of OpenEBS. The NDM daemonset discovers the disks on each node and creates a custom resource called Block Device or BD, which can be listed as shown below.
-
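-For example, assuming OpenEBS is installed in the `openebs` namespace (adjust the namespace for your cluster; `<blockdevice-name>` is a placeholder), the discovered devices can be listed and inspected with:
-
-```
-# List the blockdevice custom resources created by NDM
-kubectl get blockdevices -n openebs
-
-# Show the details (device path, capacity, state) recorded for one device
-kubectl describe blockdevice <blockdevice-name> -n openebs
-```
-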
-## Privileged access
-
-The NDM daemon runs in containers and has to access the underlying storage devices, so it runs in privileged mode. NDM requires privileged mode because it needs access to the /dev, /proc, and /sys directories for monitoring the attached devices and for fetching the details of the attached devices using various probes. NDM is responsible for the discovery of block devices and for filtering out devices that should not be used by OpenEBS, for example, detecting the disk that holds the OS filesystem. By default, the NDM pod mounts the host's `/proc` directory inside the container and then loads the `/proc/1/mounts` file to find the disks used by the OS.
-
-To allow OpenEBS to run in privileged mode on `selinux=on` nodes, the cluster should be configured to grant privileged access to the OpenEBS service account.
-
-## NDM daemonset functions:
-
-- *Discover* block devices attached to a Kubernetes Node
- - Discover block devices on startup - create and/or update status.
- - Maintain cluster-wide unique id of the disk using the following scheme:
- - md5 hash of WWN / PartitionUUID / FileSystemUUID / DeviceMapperUUID.
-- Detect block device addition/removal from a node and update the status of Block device.
-- Add `blockDevice` as Kubernetes custom resource with the following properties:
- - spec: The following will be updated if they are available.
- - Device Path
- - Device Links (by id, by name)
- - Vendor and Model information
- - WWN and Serial
- - Capacity
- - Sector and Block Size
- - labels:
- - hostname (kubernetes.io/hostname)
- - blockdevice-type (ndm.io/blockdevice-type)
- - Managed (ndm.io/managed)
- - status can have the following values:
- - Active : Block device is available on the node
- - Inactive : Block device is not available on the given node anymore
- - Unknown : NDM was stopped on the node where Block device was last detected / not able to determine the status
-
-## Filters:
-
-- Configure filters for the type of block device to be created as a blockdevice CR. The filters can be configured via vendor type, device path patterns, or mount point.
-- Filters are either `include filters` or `exclude filters`, and they are configured as a configmap. An admin user can configure these filters at the time of OpenEBS installation by changing the NDM configmap either in the OpenEBS operator YAML file or in the helm `values.yaml` file. If these filters need to be updated after the installation, one of the following methods can be followed (see the example after this list).
- - If OpenEBS is installed using operator.yaml file, update the filters in the configmap and apply the operator.yaml
- - If OpenEBS is installed using helm, update the filters in the configmap of values.yaml and do the helm upgrade
- - Or, directly edit NDM configmap using `kubectl edit` and update the filters
-
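-For example, a rough sketch of the last method above; the configmap name `openebs-ndm-config` and the `openebs` namespace assume a default installation and may differ in your cluster:
-
-```
-# Open the NDM configmap for editing; the filters live under the
-# node-disk-manager.config key as include/exclude lists per filter.
-kubectl edit configmap openebs-ndm-config -n openebs
-
-# Verify which devices are (or are no longer) created as blockdevice CRs.
-# A restart of the NDM daemonset pods may be needed for new filters to take effect.
-kubectl get blockdevices -n openebs
-```
-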
-More details can be found [here](/docs/user-guides/ndm).
-
-## NDM Roadmap:
-
-- **Auto provisioning of disks using CSI drivers of external storage:** NDM provisioner will invoke the NDM agent which in turn will initiate the provisioning of an external disk through a corresponding CSI driver
-
-## See Also:
-
-[OpenEBS Architecture](/docs/concepts/architecture) [Local PV User Guide](/docs/user-guides/localpv-device) [cStor User Guide](/docs/deprecated/spc-based-cstor) [Understanding Disk Mount Status on Node](/docs/additional-info/faqs#what-must-be-the-disk-mount-status-on-node-for-provisioning-openebs-volume)
diff --git a/docs/main/concepts/read-write-many.md b/docs/main/concepts/read-write-many.md
deleted file mode 100644
index 59e04f265..000000000
--- a/docs/main/concepts/read-write-many.md
+++ /dev/null
@@ -1,81 +0,0 @@
----
-id: rwm
-title: Provisioning Read-Write-Many (RWX) PVCs
-keywords:
- - Read-Write-Many
- - RWX
- - NFS server
- - OpenEBS
- - OpenEBS community
-description: In this document, you learn about Provisioning Read-Write-Many (RWX) PVCs, Setting up RWM NFS share on OpenEBS, Setting up a single NFS server, and Setting up multiple NFS servers
----
-
-The NFS server provisioner stable Helm chart is widely used for deploying NFS servers on a Kubernetes cluster. This server provides PVCs in RWX mode so that multiple web applications can access the data in a shared fashion. OpenEBS cStor volumes are used as persistent backend storage for these NFS servers to provide a scalable and manageable RWX shared storage solution.
-
-:::note OpenEBS Dynamic NFS Provisioner
-OpenEBS includes an alpha version of the OpenEBS Dynamic NFS provisioner that allows users to create an NFS PV that sets up a new kernel NFS instance for each PV on top of the user's choice of backend storage.
-
-This project is currently under active development. For getting started and getting involved in developing this project, check out https://github.com/openebs/dynamic-nfs-provisioner.
-
-The rest of this document contains instructions about the NFS server provisioner maintained by the Kubernetes SIGs community.
-:::
-
-[![OpenEBS and NFS provisioner](../assets/rwm-single.svg)](../assets/rwm-single.svg)
-
-Below are the advantages of using the NFS provisioner on top of OpenEBS cStor volumes:
-
-- NFS data is replicated and, if configured accordingly, highly available across zones.
-- Data is thin provisioned. Persistent volume mounts are configured at the required size, while cStor physical pools can be started with as little as one disk per pool instance and grown as the storage is used up.
-
-## Setting up a single NFS server
-
-**Select or create a cStor pool**
-
-Select or [create a cStor pool](/docs/deprecated/spc-based-cstor#creating-cStor-storage-pools) that satisfies the performance and availability requirements.
-
-**Select or create a cStor storage Class**
-
-[Create a storage class](/docs/deprecated/spc-based-cstor#creating-cStor-storage-class) that points to the selected pool and also sets the number of replicas and the default size of the volume.
-
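-For example, a minimal sketch of such a StorageClass; the StorageClass name, pool name, and replica count below are illustrative placeholders rather than values prescribed by this guide:
-
-```
-kubectl apply -f - <<EOF
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: openebs-cstor-disk
-  annotations:
-    openebs.io/cas-type: cstor
-    cas.openebs.io/config: |
-      # cStor pool selected/created in the previous step
-      - name: StoragePoolClaim
-        value: "cstor-disk-pool"
-      # Number of cStor volume replicas
-      - name: ReplicaCount
-        value: "3"
-provisioner: openebs.io/provisioner-iscsi
-EOF
-```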
-
-**Create a namespace for deploying NFS server provisioner**
-
-```
-kubectl create ns <nfs-provisioner-namespace>
-```
-
-**Deploy NFS server provisioner**
-
-Deploy NFS server provisioner into the above namespace using stable helm chart. Pass the following main parameter values.
-
- - **namespace**: Namespace for the NFS server provisioner which you have created in the previous section.
- - **name:** Release name for helm installation.
- - **persistence.storageClass:** StorageClass used for provisioning cStor volume.
- - **persistence.size:** cStor volume size which will be used for running nfs provisioner.
- - **storageClass.name:** Provide a name for NFS StorageClass to be created which can be used by the web application PVCs.
-
-```
-helm install stable/nfs-server-provisioner --namespace=<nfs-provisioner-namespace> --name=<release-name> --set=persistence.enabled=true,persistence.storageClass=<cstor-storageclass-name>,persistence.size=<cstor-volume-size>,storageClass.name=<nfs-storageclass-name>,storageClass.provisionerName=openebs.io/nfs
-```
-
-An example helm install command is
-
-```
-helm install stable/nfs-server-provisioner --namespace=nfs-wp-provisioner --name=openebs-nfs-wordpress --set=persistence.enabled=true,persistence.storageClass=openebs-cstor-disk,persistence.size=5Gi,storageClass.name=wordpress-nfs-sc1,storageClass.provisionerName=openebs.io/nfs
-```
-
-**Note:** It is recommended that the OpenEBS storage class specifies 10% more space than what is required by the RWX PVC. For example, if RWX PVC requires 100G, then provision cStor volume with 110G.
-
-**Provision RWX volume using the PVC**
-
-Use the StorageClass created by the above command to create a new PVC, and use the volume in your applications. A sample PVC is shown below.
-
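-The following is a minimal sketch of such a PVC; the PVC name, namespace, and size are illustrative, and the StorageClass name matches the one used in the example helm command above:
-
-```
-kubectl apply -f - <<EOF
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: wordpress-shared-pvc
-  namespace: default
-spec:
-  # NFS StorageClass created by the NFS server provisioner helm release
-  storageClassName: wordpress-nfs-sc1
-  accessModes:
-    - ReadWriteMany
-  resources:
-    requests:
-      # Keep the RWX size roughly 10% below the backing cStor volume size, per the note above
-      storage: 4Gi
-EOF
-```
-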
-## Setting up multiple NFS servers
-
-When multiple NFS shares are needed, use multiple NFS provisioners. Each NFS provisioner manages one NFS server. The same or different OpenEBS StorageClasses can be used by the multiple NFS provisioners.
-
-![OpenEBS and NFS provisioner](../assets/rwm-multiple.svg)
-
-## See Also:
-
-[cStor Overview](/docs/user-guides/cstor-csi) [cStorPools](/docs/deprecated/spc-based-cstor#creating-cStor-storage-pools) [Setting up Object Storage](/docs/stateful-applications/minio)
diff --git a/docs/main/deprecated/mayactl.md b/docs/main/deprecated/mayactl.md
deleted file mode 100644
index 856ba3cfd..000000000
--- a/docs/main/deprecated/mayactl.md
+++ /dev/null
@@ -1,269 +0,0 @@
----
-id: mayactl
-title: mayactl
-keywords:
- - mayactl
- - Commands used with mayactl
- - Accessing mayactl
- - Using mayactl
- - mayactl for OpenEBS Storage Volume
- - mayactl for OpenEBS Storage Pools
- - mayactl Version
-description: The mayactl is the command line tool for interacting with OpenEBS volumes and Pools. The mayactl is not used or required while provisioning or managing the OpenEBS volumes, but it is currently used while debugging and troubleshooting.
----
-
-The `mayactl` is a command line tool for interacting with OpenEBS volumes and Pools. The `mayactl` is not used or required while provisioning or managing the OpenEBS volumes, but it is currently used while debugging and troubleshooting. OpenEBS volume and pool status can be obtained using the `mayactl` command.
-
-### Summary
-
-[Commands used with mayactl](#commands-used-with-mayactl)
-
-[Accessing mayactl](#accessing-mayactl)
-
-[Using mayactl](#using-mayactl)
-
-## Commands used with mayactl
-
-The following commands can be run using mayactl to get the details of OpenEBS volume, StoragePool and installed version.
-
-1. OpenEBS volume related
- - mayactl volume list
- - mayactl volume stats
- - mayactl volume describe
-2. OpenEBS StoragePool related
- - mayactl pool list
- - mayactl pool describe
-3. OpenEBS version related
- - mayactl version
-
-## Accessing mayactl
-
-To get access to the `mayactl` command line tool, you have to exec into the maya-apiserver pod on Kubernetes. The steps are outlined below.
-
-1. Find out the name of the maya-apiserver pod.
-
- ```
- kubectl get pod -n openebs | grep -i api
- ```
-
- Following is an example output.
-
- ```shell hideCopy
- maya-apiserver-7f5689b96b-p1p2p 1/1 Running 0 10d
- ```
-
-2. It is possible that there are multiple instances of maya-apiserver pods for scaling purposes. You can run mayactl in any one of them. Shell into one of the pods using the `kubectl exec` command, as shown below.
-
- ```
- kubectl exec -it <maya-apiserver-pod-name> -n openebs -- /bin/bash
- ```
-
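- For example, using the pod name from the sample output above (your pod name will differ):
-
- ```
- kubectl exec -it maya-apiserver-7f5689b96b-p1p2p -n openebs -- /bin/bash
- ```
-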
- You will get access to the bash shell of maya-apiserver pod like shown below.
-
- ```bash hideCopy
- bash-4.3#
- ```
-
-## Using mayactl
-
-Once you are inside the maya-apiserver pod, use the `mayactl help` command for more details.
-
-```
-mayactl help
-```
-
-**Example Output:**
-
-```shell hideCopy
-Maya means 'Magic' a tool for storage orchestration
-Usage:
- mayactl [command]
-Available Commands:
- completion Outputs shell completion code for the specified shell (bash or zsh)
- help Help about any command
- pool Provides operations related to a storage pool
- version Prints version and other details relevant to maya
- volume Provides operations related to a Volume
-```
-
-### mayactl for OpenEBS Storage Volume
-OpenEBS storage volume command usage examples are shown below.
-
-```
-mayactl volume
-```
-
- **Example Output:**
-
-```shell hideCopy
-The following commands helps in operating a Volume such as create, list, and so on.
-Usage: mayactl volume [options] [args]
-Examples:
- > List Volumes:
- $ mayactl volume list
- > Statistics of a Volume:
- $ mayactl volume stats --volname <vol>
- > Statistics of a Volume created in 'test' namespace:
- $ mayactl volume stats --volname <vol> --namespace test
- > Info of a Volume:
- $ mayactl volume describe --volname <vol>
- > Info of a Volume created in 'test' namespace:
- $ mayactl volume describe --volname <vol> --namespace test
-Usage:
- mayactl volume [command]
-Available Commands:
- describe Displays Openebs Volume information
- list Displays status information about Volume(s)
- stats Displays the runtime statistics of Volume
-```
-
-The following command shows the list of all OpenEBS volumes including both Jiva and cStor.
-
-```
-mayactl volume list
-```
-
-**Example Output:**
-
-```shell hideCopy
-Namespace Name Status Type Capacity StorageClass Access Mode
---------- ---- ------ ---- -------- ------------- -----------
-openebs pvc-dc3cb979-51ec-11e9-803f-42010a800179 Running cstor 8G openebs-cstor-sparse ReadWriteOnce
-```
-
-The following command shows the description of an OpenEBS volume.
-
-```
-mayactl volume describe --volname pvc-dc3cb979-51ec-11e9-803f-42010a800179 -n openebs
-```
-
-**Example Output:**
-
-```shell hideCopy
-Portal Details :
--------- --------
-IQN : iqn.2016-09.com.openebs.cstor:pvc-dc3cb979-51ec-11e9-803f-42010a800179
-Volume : pvc-dc3cb979-51ec-11e9-803f-42010a800179
-Portal : 10.67.247.34:3260
-Size : 8G
-Controller Status : running,running,running
-Controller Node : gke-ranjith-082-default-pool-2cd2b6cb-l4ck
-Replica Count : 3
-Replica Details :
------------------
-NAME STATUS POOL NAME NODE
----- ------ --------- -----
-pvc-dc3cb979-51ec-11e9-803f-42010a800179-cstor-sparse-pool-ejs2 Running cstor-sparse-pool-ejs2 gke-ranjith-082-default-pool-2cd2b6cb-d456
-pvc-dc3cb979-51ec-11e9-803f-42010a800179-cstor-sparse-pool-gf1d Running cstor-sparse-pool-gf1d gke-ranjith-082-default-pool-2cd2b6cb-l4ck
-pvc-dc3cb979-51ec-11e9-803f-42010a800179-cstor-sparse-pool-m8cy Running cstor-sparse-pool-m8cy gke-ranjith-082-default-pool-2cd2b6cb-x571
-```
-
-The following command shows the live statistics of an OpenEBS volume.
-
-```
-mayactl volume stats --volname pvc-448deccf-40d9-11e9-a23b-0050569331ce -n openebs
-```
-
-**Example Output:**
-
-```shell hideCopy
-Portal Details :
------- ---------
-Volume : pvc-dc3cb979-51ec-11e9-803f-42010a800179
-Size : 5.000000
-Performance Stats :
------- ---------
-r/s w/s r(MB/s) w(MB/s) rLat(ms) wLat(ms)
----- ---- -------- -------- --------- ---------
-0 121 0.000 0.013 0.000 9.495
-Capacity Stats :
------- ---------
-LOGICAL(GB) USED(GB)
------------- ---------
-0.000 3.246
-```
-
-### mayactl for OpenEBS Storage Pools
-OpenEBS storage pool command usage examples are shown below.
-
-```
-mayactl pool
-```
-
-It will show the available commands which can be run with `mayactl` to get details of OpenEBS pools.
-
-```shell hideCopy
-Command provides operations related to a storage pools.
-Usage: mayactl pool [options] [args]
-Examples:
- > Lists pool:
- $ mayactl pool list
-Usage:
- mayactl pool [command]
-Available Commands:
- describe Describes the pools
- list Lists all the pools
-```
-
-The following command shows the list of all OpenEBS StoragePools.
-
-```
-mayactl pool list
-```
-
-**Example Output:**
-
-```shell hideCopy
-POOL NAME NODE NAME POOL TYPE
---------- --------- ---------
-cstor-pool1-5lwv node3.mayalab.com striped
-cstor-pool1-qba6 node2.mayalab.com striped
-cstor-pool1-v4oy node4.mayalab.com striped
-```
-
-The following command shows the description of an OpenEBS StoragePool.
-
-```
- mayactl pool describe --poolname cstor-pool1-5lwv
-```
-
-**Example Output:**
-
-```shell hideCopy
-Pool Details :
------- ------
-Storage Pool Name : cstor-pool1-5lwv
-Node Name : node3.mayalab.com
-CAS Template Used : cstor-pool-create-default-0.9.0
-CAS Type : cstor
-StoragePoolClaim : cstor-pool1
-UID : fb2bd1d8-2f88-11e9-a23b-0050569331ce
-Pool Type : striped
-Over Provisioning : false
-Disk List :
------- ------
-disk-42b4fb20cd36896dfc2a486b977363de
-```
-
-### mayactl Version
-The installed OpenEBS version can be obtained using the following command. It will also show the status of the maya-apiserver and its URL.
-
-```
-mayactl version
-```
-
- **Example Output:**
-
-```shell hideCopy
-Version: 1.2.0-released
-Git commit: c00fc22aab1425e824ed24ed4b7e6f49c9c1468c
-GO Version: go1.11.2
-GO ARCH: amd64
-GO OS: linux
-m-apiserver url: http://10.44.1.5:5656
-m-apiserver status: running
-```
-
-## See Also:
-
-[FAQ](/additional-info/faqs) [Troubleshooting Guide](/troubleshooting)
diff --git a/docs/main/deprecated/releases-0x.md b/docs/main/deprecated/releases-0x.md
deleted file mode 100644
index 6c0abcb9a..000000000
--- a/docs/main/deprecated/releases-0x.md
+++ /dev/null
@@ -1,311 +0,0 @@
----
-id: releases-0x
-title: OpenEBS 0.x Deprecated Releases
-keywords:
- - Change summary
- - Alpha features
- - Upgrade steps
- - Release notes
-description: A release notes for all the versions of OpenEBS 0.x, which contains the change summary, alpha features and upgrade steps.
----
-
-## 0.9.0 - May 24 2019
-
-:::note Deprecation notice
-This release has been deprecated. Please upgrade to the latest release. See [upgrade instructions](/docs/user-guides/upgrade).
-:::
-
-**Change summary:**
-
-- Enhanced the cStor Data Engine containers to contain troubleshooting utilities.
-- Enhanced cStor Data Engine to allow interoperability of cStor Replicas across different versions.
-- Support for using Block Devices for OpenEBS Local PV.
-- Support for Dynamic Provisioning of Local PV
-- Enhanced the cStor Volumes to support Backup/Restore to S3 compatible storage using the incremental snapshots supported by cStor Volumes.
-- Enhanced the cStor Volume Replica to support an anti-affinity feature that works across PVs.
-- Enhanced the cStor Volume to support scheduling the cStor Volume Targets alongside the application pods that interact with the cStor Volume.
-- Enhanced the Jiva Volume provisioning to provide an option called DeployInOpenEBSNamespace.
-- Enhanced the cStor Volume Provisioning to be customized for varying workload or platform type during the volume provisioning.
-- Enhanced the cStor Pools to export usage statistics as prometheus metrics.
-- Enhanced the Jiva Volume replica rebuild process by eliminating the need to do a rebuild if the Replica already has all the required data to serve the IO.
-- Enhanced the Jiva Volume replica provisioning to pin the Replicas to the nodes where they are initially scheduled using Kubernetes nodeAffinity.
-- Fixes an issue where NDM pods failed to start on nodes with selinux=on.
-- Fixes an issue where cStor Volumes with a single replica were shown to be in a Degraded, rebuilding state.
-- Fixes an issue where user was able to delete a PVC, even if there were clones created from it, resulting in data loss for the cloned volumes.
-- Fixes an issue where cStor Volumes failed to provision if the `/var/openebs/` directory was not editable by cStor pods like in the case of SuSE Platforms.
-- Fixes an issue where Jiva Volume - Target can mark a replica as offline if the replica takes longer than 30s to complete the sync/unmap IO.
-- Fixes an issue with Jiva volume - space reclaim thread, that was erroring out with an exception if the replica is disconnected from the target.
-
-**Additional details:**
-
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/0.9)
-
-## 0.8.2 - Apr 15 2019
-
-:::note Deprecation notice
-This release has been deprecated. Please upgrade to the latest release. See [upgrade instructions](/docs/user-guides/upgrade).
-:::
-
-**Change summary:**
-
-- Enhanced the metrics exported by cStor Pools to include details of the provisioning errors.
-- Fixed an issue causing cStor Volume Replica CRs to be stuck, when the OpenEBS namespace was being deleted.
-- Fixed an issue where a newly added cStor Volume Replica may not be successfully registered with the cStor target, if the cStor tries to connect to Replica before the replica is completely initialized.
-- Fixed an issue with Jiva Volumes where target can mark the Replica as Timed out on IO, even when the Replica might actually be processing the Sync IO.
-- Fixed an issue with Jiva Volumes that would not allow for Replicas to re-connect with the Target, if the initial Registration failed to successfully process the hand-shake request.
-- Fixed an issue with Jiva Volumes that would cause Target to restart when a send diagnostic command was received from the client
-- Fixed an issue causing PVC to be stuck in pending state, when there were more than one PVCs associated with an Application Pod
-- Toleration policy support for cStorStoragePool.
-
-**Additional details:**
-
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/0.8.2)
-
-## 0.8.1 - Feb 23 2019
-
-:::note Deprecation notice
-This release has been deprecated. Please upgrade to the latest release. See [upgrade instructions](/docs/user-guides/upgrade).
-:::
-
-**Change summary:**
-
-- Ephemeral Disk Support
-- Enhanced the placement of cStor volume replicas to be distributed randomly between the available pools.
-- Enhanced the NDM to fetch additional details about the underlying disks via SeaChest.
-- Enhanced the NDM to add additional information to the DiskCRs, such as whether the disk is partitioned or has a filesystem on it.
-- Enhanced the OpenEBS CRDs to include custom columns to be displayed in the `kubectl get` output of the CR. This feature requires K8s 1.11 or higher.
-- Fixed an issue where cStor volume causes timeout for iSCSI discovery command and can potentially trigger a K8s vulnerability that can bring down a node with high RAM usage.
-
-**Additional details:**
-
-- [Release Blog](https://openebs.io/blog/openebs-releases-0-8-1-with-stability-fixes-and-improved-documentation/)
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/0.8.1)
-
-
-## 0.8.0 - Dec 07 2018
-
-:::note Deprecation notice
-This release has been deprecated. Please upgrade to the latest release. See [upgrade instructions](/docs/user-guides/upgrade).
-:::
-
-**Change summary:**
-
-- cStor Snapshot & Clone
-- cStor volume & Pool runtime status
-- Target Affinity for both Jiva & cStor
-- Target namespace for cStor
-- Enhance the volume metrics exporter
-- Enhance Jiva to clear up internal snapshot taken during Replica rebuild
-- Enhance Jiva to support sync and unmap IOs
-- Enhance cStor for recreating pool by automatically selecting the disks.
-
-**Additional details:**
-
-- [Release Blog](https://openebs.io/blog/openebs-0-8-release-allows-you-to-snapshot-and-clone-cstor-volumes/)
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/0.8)
-
-## 0.7.2 - Nov 20 2018
-
-:::note Deprecation notice
-This release has been deprecated. Please upgrade to the latest release. See [upgrade instructions](/docs/user-guides/upgrade).
-:::
-
-**Change summary:**
-
-- Fixes an issue where cStor volume used space was showing a very low value than actually used.
-- Fixes an issue where cStor replica snapshots created for the rebuild were not deleted, causing space reclamation to fail.
-- Support for clearing space used by Jiva replica after the volume is deleted using Cron Job.
-- Support for a storage policy that can disable the Jiva Volume Space reclaim.
-- Support Target Affinity for the Jiva target Pod on the same node as the Application Pod.
-- Enhanced Jiva handling of the internal snapshots used for rebuilding Jiva replicas.
-- Enhanced exporting cStor volume metrics to prometheus
-
-**Additional details:**
-
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/0.7.2)
-
-## 0.7.0 - Sep 09 2018
-
-:::note Deprecation notice
-This release has been deprecated. Please upgrade to the latest release. See [upgrade instructions](/docs/user-guides/upgrade).
-:::
-
-**Change summary:**
-
-- Enhanced NDM to discover block devices attached to Nodes.
-- Alpha support for cStor Engine
-- Naming convention of Jiva Storage pool as 'default' and StorageClass as 'openebs-jiva-default'
-- Naming convention of cStor Storage pool as 'cstor-sparse-pool' and StorageClass as 'openebs-cstor-sparse'
-- Support for specifying the replica count, CPU/Memory limits per PV, choice of storage engine, and the nodes on which data copies should be placed.
-
-**Additional details:**
-
-- [Release Blog](https://openebs.io/blog/openebs-0-7-release-pushes-cstor-storage-engine-to-field-trials/)
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/v0.7)
-
-
-## 0.6.0 - Jul 20 2018
-
-:::note Deprecation notice
-This release has been deprecated. Please upgrade to the latest release. See [upgrade instructions](/docs/user-guides/upgrade).
-:::
-
-**Change summary:**
-
-- Fixes an issue where Jiva replica data was not cleaned up if the PVC and its namespace were deleted prior to scrub job completion.
-- Fixes an issue where Jiva replicas failed to register with their target if there was an error during initial registration.
-- Integrate the Volume Snapshot capabilities with Kubernetes Snapshot controller.
-- Enhance maya-apiserver to use CAS Templates for orchestrating new Storage Engines.
-- Enhance mayactl to show details about replica and Node details where replicas are running.
-- Enhance maya-apiserver to schedule Replica Pods on specific nodes using nodeSelector.
-- Enhance e2e tests to simulate chaos at different layers such as - CPU, RAM, Disk, Network, and Node.
-- Enhanced Jiva volume to handle more read only volume scenarios
-
-
-**Additional details:**
-
-- [Release Blog](https://openebs.io/blog/openebs-0-6-serves-ios-amidst-chaos-and-much-more/)
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/v0.6)
-
-## 0.5.4 - May 14 2018
-
-:::note Deprecation notice
-This release has been deprecated. Please upgrade to the latest release. See [upgrade instructions](/docs/user-guides/upgrade).
-:::
-
-**Change summary:**
-
-- Fixes an issue where NDM would create a partitioned OS device as a block device.
-- Provision to specify filesystems other than ext4 (default).
-- Support for XFS filesystem format for mongodb StatefulSet using OpenEBS Persistent Volume.
-- Increased integration test & e2e coverage in the CI
-- OpenEBS is now available as a stable chart in the Kubernetes charts repository.
-
-
-**Additional details:**
-
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/v0.5.4)
-
-## 0.5.3 - Mar 14 2018
-
-:::note Deprecation notice
-This release has been deprecated. Please upgrade to the latest release. See [upgrade instructions](/docs/user-guides/upgrade).
-:::
-
-**Change summary:**
-
-- Fixes an issue where Jiva replica data was not cleaned up if the PVC and its namespace were deleted prior to scrub job completion.
-- Fixed a StoragePool usage issue when RBAC settings are applied.
-- Improved memory consumption of Jiva Volumes.
-
-**Additional details:**
-
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/v0.5.3)
-
-## 0.5.2 - Feb 07 2018
-
-:::note Deprecation notice
-This release has been deprecated. Please upgrade to the latest release. See [upgrade instructions](/docs/user-guides/upgrade).
-:::
-
-**Change summary:**
-
-- Support to set non-SSL Kubernetes endpoints to use by specifying the ENV variables on maya-apiserver and openebs-provisioner.
-
-**Additional details:**
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/v0.5.2)
-
-## 0.5.1 - Jan 10 2018
-
-:::note Deprecation notice
-This release has been deprecated. Please upgrade to the latest release. See [upgrade instructions](/docs/user-guides/upgrade).
-:::
-
-**Change summary:**
-
-- Upgraded the base ubuntu images for the containers to fix the security vulnerabilities reported in Ubuntu Xenial.
-- Support to use Jiva volume from CentOS iSCSI Initiator
-- Support openebs-k8s-provisioner to be launched in non-default namespace
-
-
-**Additional details:**
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/v0.5.1)
-
-## 0.5.0 - Nov 30 2017
-
-:::note Deprecation notice
-This release has been deprecated. Please upgrade to the latest release. See [upgrade instructions](/docs/user-guides/upgrade).
-:::
-
-**Change summary:**
-
-- Enhanced Storage Policy Enforcement Framework for Jiva.
-- Extend OpenEBS API Server to expose volume snapshot API.
-- Support for deploying OpenEBS via helm charts.
-- Sample Prometheus configuration for collecting OpenEBS Volume Metrics.
-- Sample Grafana OpenEBS Volume Dashboard - using the prometheus Metrics
-
-**Additional details:**
-
-- [Release Blog](https://openebs.io/blog/openebs-0-5-enables-storage-policies-for-kubernetes-persistent-volumes/)
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/v0.5.0)
-
-## 0.4.0 - Sep 08 2017
-
-:::note Deprecation notice
-This release has been deprecated. Please upgrade to the latest release. See [upgrade instructions](/docs/user-guides/upgrade).
-:::
-
-**Change summary:**
-
-- Enhanced MAYA cli support for managing snapshots and usage statistics.
-- The OpenEBS Maya API Server now uses the Kubernetes scheduler logic to place OpenEBS Volume Replicas on different nodes.
-- Extended support for deployment of OpenEBS in AWS.
-- OpenEBS can now be deployed in a minikube setup.
-- Enhanced openebs-k8s-provisioner to recover from the crashloopbackoff state.
-
-**Additional details:**
-- [Release Blog](https://openebs.io/blog/quick-update-on-openebs-v0-4-a-developer-friendly-release/)
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/v0.4.0)
-
-## 0.3.0 - Jun 29 2017
-
-:::note Deprecation notice
-This release has been deprecated. Please upgrade to the latest release. See [upgrade instructions](/docs/user-guides/upgrade).
-:::
-
-**Change summary:**
-
-- Support OpenEBS hyper-converged with Kubernetes Minion Nodes.
-- Enable OpenEBS via the openebs-operator.yaml
-- Supports creation of OpenEBS volumes using Dynamic Provisioner.
-- Storage functionality and Orchestration/Management functionality is delivered as container images on DockerHub.
-
-**Additional details:**
-
-- [Release Blog](https://openebs.io/blog/openebs-on-the-growth-path-releases-0-3/)
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/v0.3)
-
-## 0.2.0 - Apr 07 2017
-
-:::note Deprecation notice
-This release has been deprecated. Please upgrade to the latest release. See [upgrade instructions](/docs/user-guides/upgrade).
-:::
-
-**Change summary:**
-
-- Integrated OpenEBS FlexVolume Driver and Dynamically Provision OpenEBS Volumes into Kubernetes.
-- The Maya API server now provides a new AWS EBS-like API for provisioning Block Storage.
-- Enhanced the Maya API server to run hyper-converged with the Nomad scheduler.
-- Backup/Restore Data from Amazon S3.
-- Node Failure Resiliency Fixes
-
-**Additional details:**
-
-- [Release Blog](https://openebs.io/blog/openebs-sprinting-ahead-0-2-released/)
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/v0.2)
-
-
-## See Also:
-
-[OpenEBS Upgrade](/docs/user-guides/upgrade) [OpenEBS Releases](/docs/introduction/releases) [OpenEBS FAQ](/docs/additional-info/faqs) [Container Attached Storage or CAS](/docs/concepts/cas)
diff --git a/docs/main/deprecated/releases-1x.md b/docs/main/deprecated/releases-1x.md
deleted file mode 100644
index 4fee595c7..000000000
--- a/docs/main/deprecated/releases-1x.md
+++ /dev/null
@@ -1,216 +0,0 @@
----
-id: releases-1x
-title: OpenEBS 1.x Deprecated Releases
-keywords:
- - Change for OpenEBS 1.x
- - Alpha features for OpenEBS 1.x
- - Upgrade steps for OpenEBS 1.x
- - Release notes for OpenEBS 1.x
-description: A release notes for all the versions of OpenEBS 1.x, which contains the change summary, alpha features and upgrade steps.
-
----
-
-## 1.7.0 - Feb 15 2020
-
-**Change summary:**
-
-- Fixes an issue where Jiva Replicas could get stuck in WO or NA state, when the size of the replica data grows beyond 300GB.
-- Fixes an issue where unused custom resources from older versions are left in the etcd, even after openebs is upgraded.
-- Fixes an issue where cleanup of Jiva volumes on OpenShift 4.2 environment was failing.
-- Fixes an issue where custom resources used by cStor Volumes fail to get deleted when the underlying pool was removed prior to deleting the volumes.
-- Fixes an issue where a cStor Volume Replica would be incorrectly marked as invalid due to a race condition caused between a terminating and its corresponding newly launched pool pods.
-
-**Alpha Features**
-
-- Support for generating automated ARM builds for NDM.
-- Support for managing snapshot and clones of ZFS Local PV.
-- Support for setting up PDB and PriorityClasses on cStor Pool Pods. Increasing the e2e coverage and fixing the issue uncovered.
-- Support for resizing Jiva Volume via CSI driver and other bug fixes.
-
-**Additional details:**
-
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/1.7.0)
-- [Upgrade Steps](https://docs.openebs.io/v170/docs/next/upgrade.html)
-
-## 1.6.0 - Jan 15 2020
-
-**Change summary:**
-
-- Add support for building cStor on ARM builds by moving cstor-pool-mgmt, cstor-volume-mgmt, maya-exporter and cspi-mgmt build scripts to their specific folders and add arm build scripts.
-- Add support for provisioning Local PV hostpath when the nodes are tainted. It also handles the implementation of Local PV helper pods where the cleanup pod will be launched with the tolerations.
-- Enhance the logging mechanism for cStor pool pods during pool import time. These changes of logs will help to identify the existence of bad disk on the node.
-- Support for enabling core dump by adding an ENV variable ENABLE_COREDUMP= “1” for cStor pool pod to control whether cores need to be dumped in case of process crashes. By default, dumping cores will be disabled. Make sure this environment variable is not enabled if mountPoint of `SparseDir` has been changed in CAS Templates.
-- Enhance upgrade logs by providing pool and volume status information during the upgrade and also helps in estimating the time taken for deployment pods to come up.
-- Improved the Jiva rebuilding process by checkpointing the IO numbers. Now only those snapshots which have fewer IOs will be synced.
-- Fixes an issue with the Jiva controller by removing a WO replica if a new replica with a greater revision count gets added to the controller.
-- Disable core dump in NDM daemon by default. This can be enabled by setting an ENV variable `ENABLE_COREDUMP` to `1`. Core files will be stored inside `/var/openebs/ndm/core`.
-- Fixes issues in default core dumping location for NDM. System core pattern which is common for all processes on the node will not be modified. NDM will dump the cores in a location under openebs base directory. NDM process will be launched from the openebs directory, so core files will get automatically written to the $PWD, without requiring to change the core pattern.
-- Fixes an issue in NDM which caused cleanup pods not being scheduled on nodes with taints, causing BD to be stuck in release state. The fix will add tolerations for the node taints to the cleanup pod.
-- Fixes an issue in `cstor-velero plugin` for getting the StorageClass name from the annotation if StorageClass is mentioned in PVC Annotation, not in PVC spec.
-- Fixes an issue while cloning a filesystem-based cStor volume by getting the FSType info from the CAS config instead of using the default ext4 FSType.
-- Fixes an issue during upgrade where image tag contains multiple “:”. This usually happens where the proxy image URL is used which can contain multiple “:”.
-
-**Alpha Features**
-
-- Adding support to have the ZFS Local PV Provisioner in HA mode. The default replica count is set to 1; increasing the replica count will enable a new Provisioner pod with anti-affinity so that no two pods get scheduled on the same node.
-- Adding volume metric information for ZFS Local PV. These metrics can be collected using Prometheus.
-- Add support for configuring volume policies such as tolerations, nodeSelector , priorityClass, resourceLimits for main and sidecar target pod containers of cStor CSI volume using CStorVolumePolicy resource.
-- Add metrics support for cStor CSI volume which can be pulled by Prometheus to show the metrics in k8s cluster. Available metrics are `Total capacity`, `Used capacity` and `Available capacity` in Bytes.
-- Add raw block volume support for cStor CSI volume to be attached as Raw Block Volume to pods.
-- Add support for performing CSPC general validation in admission server. Some of the checks are included based on scenarios like use of duplicate block devices, duplicate nodes, block device should not be claimed by other CSPC/third party, the capacity validations of replacing block devices etc.
-- Add support for setting requests and limits for cStor pool pod sidecar containers via poolConfig. Default auxResource values are used if they are not specified in the poolConfig for a particular pool. The defaults can be specified in `spec.auxResources.requests` and `spec.auxResources.limits`, and these values will be applied to all the pool configurations mentioned in the CSPC spec. It is also possible to specify separate auxResource values for each pool by adding those details to the poolConfig of that pool.
-- Configure Jiva CSI Driver to listen to custom ports for metrics.
-- Add metrics support for Jiva CSI volume which can be pulled by Prometheus to show the metrics in k8s cluster. Available metrics are `Total capacity`, `Used capacity` and `Available capacity` in Bytes.
-
-**Additional details:**
-
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/1.6.0)
-- [Upgrade Steps](https://docs.openebs.io/v160/docs/next/upgrade.html)
-
-## 1.5.0 - Dec 15 2019
-
-**Change summary:**
-
-- Support BlockVolumeMode for OpenEBS Local PV backed by devices
-- Support ZFS as a filesystem type for OpenEBS ZFS Local PV.
-- Support for Block Device Replacement via the cStor YAML file (using new schema)
-- Support resizing and remounting of Volumes when using cStor CSI Driver
-- Support for generating of ARM builds for cStor Data Engine.
-- Introduce block device hierarchy to NDM. 4 fields `Parent` ,`Partitions`, `Holders` and `Slaves` are used in defining the hierarchy tree of a device. Also, all the dependent devices of the discovered block device will be logged during the initial udev scan to get the disk level hierarchy of each node in the cluster.
-- Add support for applications to provision a "zfs" filesystem directly in the ZFS POOL storage which will get the optimal performance.
-- Enhanced the cStor pools to handle auto scale down scenarios to avoid shutting down the node where cStor pool is running. This is achieved by adding `cluster-autoscaler.kubernetes.io/safe-to-evict": "false"` to the annotation of each cStor pool pod.
-- Fixes an issue with liveness probe on `cstor-pool` container by adding a timeout setting for command execution. Setting the timeout value as 120 sec will kill the process if command exceeds more than 120 seconds.
-- Fixes an issue in cStor CSI volume size unit conversion while transitioning from PVC to CVC storage capacity, by converting to Gi the way Kubernetes handles it.
-- Fixes a bug where OpenEBS Local PV with hostpaths in OpenShift 4.1 environments was failing.
-- Fixes a vulnerability issue with the default helper pod image by using the latest tag for helper pods, so new versions of OpenEBS will automatically get updated with new images.
-
-**Additional details:**
-
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/1.5.0)
-- [Upgrade Steps](https://docs.openebs.io/v150/docs/next/upgrade.html)
-
-## 1.4.0 - Nov 15 2019
-
-**Change summary:**
-
-- Support for scale down of cStor volume replicas
-- Support an alpha feature in NDM to add Prometheus exporter for exposing disk-level metrics. This Cluster level exporter gives details of blockdevice such as state, claim state etc.
-- Support for setting up ARM builds for the apiserver and local provisioner. The images will be built on top of arm64v8/ubuntu:18.04.
-- Supporting automated creation of TLS certificate signed by Kubernetes cluster root Certificate Authority for external admission-webhook-server. This will establish a trust to secure the communication between admission-webhook-server and kube-apiserver.
-- Support the snapshot and clone feature in cStor volume provisioned via CSI provisioner. This feature will be available in alpha.
-- Support of encryption feature in Local PV created on a ZFS based storage pool.
-- Support of adding topology information for Local PV on ZFS based storage pool. The details of appropriate ZFS storage pool can be mentioned in corresponding StorageClass via topology so that the scheduler will take care of scheduling the application on the appropriate node.
-- Support for scheduling Local PV on appropriate ZFS storage pool on a node which has less number of Local PV volume provisioned in the given pool.
-- Support of XFS file system for Local PV creation on ZFS based storage pools.
-- Enhanced the cStor volume description by fixing the output of the `knownreplicas` information, which helps maintain trusted/known replica information of the particular cStor volume in the cStor volume specification.
-- Enhanced the cStor volume replica status with 2 additional phases based on different scenarios. The additional phases are `NewReplicaDegraded` and `ReconstructingNewReplica`.
-- Enhanced `maya-exporter` by adding a pool last sync time metric as `openebs_zpool_last_sync_time`. This also modifies the value of `openebs.io:livenesstimestamp` in the cStor pool YAML to set the date as an epoch timestamp.
-- Enhanced the admission webhook server by adding missing labels in the config, secret, and service, and it now fails fatally when required ENVs or configs are missing.
-- Fixes a bug in Jiva where Jiva replica pods are stuck in `crashloopbackoff` state after a restart.
-- Fixes a bug in cStor target while rebuilding process in a single replica quorum case.
-- Fixes a bug in NDM for device detection in KVM-based virtual machines. A new disk model `QEMU_HARDDISK` is added to the list of disk models.
-- Fixes a bug in NDM, where the os-disk filter was not able to exclude the blockdevices if the OS was installed on an NVMe device.
-
-**Additional details:**
-
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/1.4.0)
-- [Upgrade Steps](https://docs.openebs.io/v140/docs/next/upgrade.html)
-
-## 1.3.0 - Oct 15 2019
-
-**Change summary:**
-
-- Add support to scale up replicas, replica movement across pools and replica replacement scenarios. This feature is in alpha state. This feature will work for cStor volumes which are created with existing SPC configuration.
-- Availability of NDM on different platforms like amd64 and arm64. NDM can now also be compiled in ARM architecture using manual steps.
-- Added support for provisioning CSI based volume using lease leader election API.
-- Support for running OpenEBS on Kubernetes 1.16. The k8s v1.16 release stops serving the deprecated API versions in favour of newer and more stable API versions.
-- Support for adding resource limits to cStor pool pods using the CSPC configuration. The `resource` field on CSPC is used to pass resource limits and requests to the `cstor-pool` container, and the `auxResource` field on CSPC is used to pass resource limits and requests to the other 2 containers, `cstor-mgmt` and `m-exporter`.
-- Enhanced the backup capability of the openebs-velero plugin by checking the status of `Healthy` cStor Volume Replicas. In the previous version, a check was performed for healthy CVRs during setup only, so a cStor pod restart could leave a CVR degraded by the time the backup was triggered.
-- Enhanced CVC(cStor Volume Claim) CR by adding provisioning failure events while provisioning cStor volume using CSI provisioner.
-- Fixed a bug where a cStor volume became read-only due to a restart of the cstor-volume-mgmt container alone in the target pod.
-- Fixed a wrong status on CVR, changing it from Duplicate to Online. The Duplicate state on CVR was blocking reconciliation of volume creation in the ephemeral case.
-- Fixed a bug in the cStor storage pool YAML. The livenessProbe command `zfs set io.openebs:livenesstimestamp` sets the value of io.openebs:livenesstimestamp to the current timestamp. In previous versions, this value was not set properly because of shell quoting issues in the command.
-- Fixed a bug where a Jiva volume running on CentOS 7 / RHEL in an OpenShift cluster goes read-only when the controller pod is restarted due to a node restart, an upgrade, or any other reason. This is because the iSCSI default timeout is replaced with 5 sec if multipath support is enabled on the node.
-
-**Additional details:**
-
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/1.3.0)
-- [Upgrade Steps](https://docs.openebs.io/v130/docs/next/upgrade.html)
-
-## 1.2.0 - Sep 10 2019
-
-**Change summary:**
-
-- CSI Driver for cStor Volumes (currently in Alpha) has added support for resizing and volume expansion feature.
-- The new version of cStor Schema has been introduced to address the user feedback in terms of ease of use for cStor provisioning as well as to make a way in the schema to perform Day 2 Operations using GitOps.
-- Enhanced error logging of cStor storage pool with a new format for automatic alert generation.
-- Enhanced Jiva internal snapshot deletion when the number of internal snapshots is more than 10. The deletion happens automatically, one by one.
-- Enhanced velero-plugin to support backup/restore for OpenEBS installed in a different namespace other than `openebs` namespace.
-- Enhanced NDM to include NodeAttributes in BD and BDC. This will support storing of node name along with the hostname on the BD and BDC CRs.
-- Fixes the BlockDevice CRD by adding the node name to the printer columns. This shows the name of the node to which the BD is attached when performing `kubectl get bd -n <namespace>`.
-- Fixes a bug in Jiva where patching and cleanup operations of Jiva deployments were failing on nodes where `hostname` is not the same as `nodename`. The fix sets the nodeSelector in the deployment and clean-up job after converting the nodename into the hostname.
-- Support for provisioning Local PV in clusters where `nodename` and `hostname` are different.
-- Support for customization of the default hostpath for Jiva and Local PV. With the current implementation, the customization will not be persisted when the node where the maya-apiserver pod is running restarts or when the maya-apiserver pod is restarted.
-- Fixes a bug in NDM where all devices on a node were getting excluded when os-disk-exclude-filter is failed to find the device where OS is installed.
-- Fixes a bug in the snapshot controller where the snapshot operation was not throwing any error for an invalid `cas-type`. This fix adds `cas-type` validation before triggering the snapshot operations. The valid `cas-type` values are cStor and Jiva.
-- Fixes a bug where more BlockDeviceClaims than required were created for the requested SPC in the auto pool method.
-- Fixes an issue in the maya-api installer to skip re-applying the default SPC and SC resources if they were installed previously by older version(s) of maya or prior to maya-api-server restart(s).
-- Fixes a bug where cStor Storage Pool management would create a pool if the pool import failed because a disk was momentarily not accessible at the time of import; the cStor storage pool would be in the pending state when this scenario occurred. The fix creates the pool only when `Status.Phase` is `Init` or `PoolCreationFailed`; if `Status.Phase` is any other string, cStor Storage Pool management will try to import the pool. This can impact the current workflow of ephemeral disks, which works as of now, since NDM can't detect it as a different disk and recognizes it as the previous disk.
-- Fixes a bug where the cleanup job for a BlockDevice was not getting canceled when the state of the BlockDevice changed from `Active` to other states.
-- Fixes a bug in NDM where cleanup jobs remain in pending state in Openshift cluster. The fix will add service account to cleanup jobs, so that clean-up job pods acquire privileged access to perform the action.
-
-**Additional details:**
-
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/1.2.0)
-- [Upgrade Steps](https://docs.openebs.io/v120/docs/next/upgrade.html)
-
-## 1.1.0 - Aug 03 2019
-
-**Change summary:**
-
-- Support for an alpha version of CSI driver with limited functionality for provisioning and de-provisioning of cStor volumes.
-- Support for the upgrade of OpenEBS storage pools and volumes through Kubernetes Job. As a user, you no longer have to download scripts to upgrade from 1.0 to 1.1, like in earlier releases.
-- Enhanced Prometheus metrics exported by Jiva for identifying whether an iSCSI Initiator is connected to Jiva target.
-- Enhanced NDM operator capabilities for handling NDM CRDs installation and upgrade. Earlier this process was handled through maya-apiserver.
-- Enhanced velero-plugin to take backup based on the `openebs.io/cas-type:cstor` and it will skip backup for unsupported volumes(or storage providers).
-- Enhanced velero-plugin to allow users to specify a `backupPathPrefix` for storing the volume snapshots in a custom location. This allows users to save/backup configuration and volume snapshot data under the same location.
-- Added an ENV flag which can be used to disable default config creation. The default storage configuration can be modified after installation, but it is going to be overwritten by the OpenEBS API Server. The recommended approach for customizing is to create your own storage configuration using the default options as examples/guidance.
-- Fixes an issue where rebuilding cStor volume replica failed if the cStor volume capacity was changed after the initial provisioning of the cStor volume.
-- Fixes an issue with cStor snapshot taken during transition of replica's rebuild status.
-- Fixes an issue where application file system was breaking due to the deletion of Jiva auto-generated snapshots.
-- Fixes an issue where NDM pod was getting restarted while probing for details from the devices that had write cache supported.
-- Fixes an issue in NDM where Seachest probe was holding open file descriptors to LVM devices and LVM devices were unable to detach from the Node due to NDM hold on device.
-- Fixes a bug where backup was failing where `openebs operator` was installed through helm. `velero-plugin` was checking `maya-apiserver` name and it was different when you have installed via helm based method. Updated velero-plugin to check label of maya-apiserver service name.
-
-**Additional details:**
-
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/1.1.0)
-- [Upgrade Steps](https://docs.openebs.io/v110/docs/next/upgrade.html)
-
-## 1.0.0 - Jun 22 2019
-
-**Change summary:**
-
-- Introduced a cluster level component called the NDM operator to manage access to block devices, select & bind BDs to BDCs, and clean up data from released BDs.
-- Support for using Block Devices for OpenEBS Local PV.
-- Enhanced cStor Data Engine to allow interoperability of cStor Replicas across different versions.
-- Enhanced the cStor Data Engine containers to contain troubleshooting utilities.
-- Enhanced the metrics exported by cStor Pools to include details of the provisioning errors.
-- Fixes an issue where cStor replica snapshots created for the rebuild were not deleted, causing space reclamation to fail.
-- Fixes an issue where cStor volume used space was showing a very low value than actually used.
-- Fixes an issue where Jiva replicas failed to register with its target if there was an error during initial registration.
-- Fixes an issue where NDM would create a partitioned OS device as a block device.
-- Fixes an issue where Jiva replica data was not cleaned up if the PVC and its namespace were deleted prior to scrub job completion.
-- Fixes an issue where Velero Backup/Restore was not working with hostpath Local PVs.
-- Upgraded the base ubuntu images for the containers to fix the security vulnerabilities reported in Ubuntu Xenial.
-- Custom resource (Disk) used in earlier releases has been changed to Block Device.
-
-**Additional details:**
-
-- [Release Notes](https://github.com/openebs/openebs/releases/tag/1.0.0)
-- [Release Blog](https://openebs.io/blog/openebs-announces-the-availability-of-version-1-0/)
-- [Upgrade Steps](https://docs.openebs.io/v100/docs/next/upgrade.html)
-
-## See Also:
-
-[OpenEBS Upgrade](/docs/user-guides/upgrade) [OpenEBS Releases](/docs/introduction/releases) [OpenEBS FAQ](/docs/additional-info/faqs) [Container Attached Storage or CAS](/docs/concepts/cas)
diff --git a/docs/main/faqs/cStor.md b/docs/main/faqs/cStor.md
deleted file mode 100644
index 48a8bf950..000000000
--- a/docs/main/faqs/cStor.md
+++ /dev/null
@@ -1,328 +0,0 @@
----
-id: cstor-faq
-title: cStor FAQs
-keywords:
- - cStor FAQ
- - FAQs
-description: The FAQ section about cStor helps to address common concerns, questions, and objections that users have about cStor.
----
-
-[Prerequisites to run CStor-CSI in rancher-based clusters](#prerequisites-for-rancher)
-
-[How to verify cStor volume is running fine?](#verify-cstor-volume-running-fine)
-
-[How to handle replicas with slow disks or slow connectivity in case of cStor volumes?](#slow-replicas-in-cstor-volumes)
-
-[How OpenEBS detects disks for creating cStor Pool?](#how-openebs-detects-disks-for-creating-cstor-pool)
-
-[What is the difference between cStor Pool creation using manual method and auto method?](#what-is-the-difference-between-cstor-pool-creation-using-manual-method-and-auto-method)
-
-[How the data is distributed when cStor maxPools count is 3 and replicaCount as 2 in StorageClass?](#how-the-data-is-distributed-when-cstor-maxpools-count-is-3-and-replicacount-as-2-in-storageclass)
-
-[How to create a cStor volume on single cStor disk pool?](#create-cstor-volume-single-disk-pool)
-
-[How to get the details of cStor Pool, cStor Volume Replica , Cstor Volumes and Disks ?](#more-info-pool-cvr-cv-disk)
-
-
----
-
-### What are the prerequisites to run CStor-CSI in rancher-based clusters? {#prerequisites-for-rancher}
-
-For RancherOS:
-If the operating system used is RancherOS, the iSCSI service needs to be enabled. Once it is enabled, it must be started on each of the worker nodes.
-To run the iSCSI services, execute the following commands on each of the cluster hosts or nodes.
-
-```
-sudo ros s enable open-iscsi
-sudo ros s up open-iscsi
-```
-
-Next, run the below mentioned commands on all the nodes. This ensures that these directories are persistent; by default they are ephemeral.
-
-```
-ros config set rancher.services.user-volumes.volumes [/home:/home,/opt:/opt,/var/lib/kubelet:/var/lib/kubelet,/etc/kubernetes:/etc/kubernetes,/var/openebs]
-system-docker rm all-volumes
-reboot
-```
-
- For Ubuntu or RHEL,
-
- If the operating system is Ubuntu or RHEL, the following needs to be done:
- + Verify if iSCSI initiator is installed and its services are running.
-
- The following table lists the iSCSI package to install and verify per operating system; an example install-and-verify sequence is shown after the kubelet configuration below.
-
-| OPERATING SYSTEM | ISCSI PACKAGE         |
-| ---------------- | --------------------- |
-| RHEL/CentOS      | iscsi-initiator-utils |
-
- + Add the extra_binds under Kubelet service in cluster YAML file to mount the iSCSI binary and configuration inside the kubelet.
- After installing the iSCSI initiator on your nodes, bind them into the kubelet container by editing rancher cluster.yaml, as shown in the sample below.
-
- ```
- services:
- kubelet:
- extra_binds:
- - "/etc/iscsi:/etc/iscsi"
- - "/sbin/iscsiadm:/sbin/iscsiadm"
- - "/var/lib/iscsi:/var/lib/iscsi"
- - "/lib/modules"
- ```
-
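-For reference, installing and verifying the iSCSI initiator on a RHEL/CentOS node typically looks like the following; the package and service names are distribution specific, so treat this as a sketch rather than an exact command list:
-
-```
-# Install the iSCSI initiator utilities (RHEL/CentOS)
-sudo yum install -y iscsi-initiator-utils
-
-# Check the initiator name and make sure the iscsid service is enabled and running
-cat /etc/iscsi/initiatorname.iscsi
-sudo systemctl enable --now iscsid
-sudo systemctl status iscsid
-```
-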
-[Go to top](#top)
-
-### How to verify cStor volume is running fine? {#verify-cstor-volume-running-fine}
-
-The following steps will help to verify the cStor volume running status.
-
-1. Check PVC is created successfully using the following command.
-
- ```shell
- kubectl get pvc -n <namespace>
- ```
-
-2. If PVC is created successfully, check corresponding PV is also created successfully.
-
- ```
- kubectl get pv
- ```
-
-3. Check the corresponding target pod of the cStor volume is running using the following command.
-
- ```
- kubectl get pod -n openebs
- ```
-
- The target pod should be in running state.
-
-4. Now check the status of cStor volume using the following command.
-
- ```
- kubectl get cstorvolume -n openebs
- ```
-
- The output of the above command will show the status as `Init`, `Offline`, `Degraded`, or `Healthy`. The following are the definitions for each of these statuses.
-
- **Init:** Init status of cStor volume is due to the following cases:
-
- - when the cStor volume is created.
- - when the replicas are not connected to target pod.
-
- **Healthy:** Healthy status of a cStor volume represents that at least 51% of the replicas are healthy and connected to the target, and the volume is ready to perform IO operations.
-
- **Degraded:** At least 51% of the replicas are connected, but some of them are in a degraded state. The volume runs in a degraded state and IOs remain operational.
-
- **Offline:** The number of replicas required to meet the Consistency Factor are not yet connected to the target, due to network issues or other reasons. In this case, the volume is not ready to perform IOs.
-
- Note: If the target pod of the corresponding cStor volume is not running, then the status of the cStor volume shown in the output of the above command may be stale.
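-
- For more detail on why a volume is in a particular state, `kubectl describe` can be used on the same resource (replace `<cstor-volume-name>` with the name shown in the previous output):
-
- ```
- kubectl describe cstorvolume <cstor-volume-name> -n openebs
- ```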
-
-5. Check the cStorVolumeReplica(CVR) status of the corresponding cStor volume using the following command.
-
- ```
- kubectl get cvr -n openebs
- ```
-
- Status of each cStor volume Replica can be found under `STATUS` field.
-
- **Note:** If the pool pod of corresponding cStor volume replica is not running, then the status of CVR shown in the output of the above command may be stale.
-
- The following are the different types of STATUS information of cStor Volume Replicas and their definitions.
-
- **Healthy:** Healthy state represents that the volume is healthy and the data on this replica is up to date.
-
- **Offline:** cStor volume replica status is offline due to the following cases:
-
- - when the corresponding cStor pool is not available to create volume.
- - when the creation of cStor volume fails.
- - when the replica is not yet connected to the target.
-
- **Degraded:** cStor volume replica status is degraded due to the following case:
-
- - when the cStor volume replica is connected to the target and rebuilding is not yet started on this replica.
-
- **Rebuilding:** cStor volume replica status is rebuilding when the replica is undergoing a rebuild, that is, syncing data from another replica.
-
- **Error:** cStor volume replica status is in error state due to the following cases:
-
- - when the volume replica data set is not existing in the pool.
- - when an error occurs while getting the stats of cStor volume.
- - when the unit of size is not mentioned in PVC spec. For example, if the size is 5 instead of 5G.
-
- **DeletionFailed:** cStor volume replica status is deletion failed when destroying the cStor volume fails.
-
- **Invalid:** cStor volume replica status is invalid when a new cstor-pool-mgmt container in a new pod is communicating with the old cstor-pool container in an old pod.
-
- **Init:** cStor volume replica status init represents the volume is not yet created.
-
- **Recreate:** cStor volume replica status recreate represents an intermediate state before importing the volume (this can happen only when the pool pod is restarted) in the case of a non-ephemeral disk. If the disk is ephemeral, this status represents that the volume is going to be recreated.
-
- **NewReplicaDegraded:** cStor volume replica is newly created and has made a successful connection with the target pod.
-
- **ReconstructingNewReplica:** cStor volume replica is newly created and has started reconstructing the entire data from another healthy replica.
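-
- A replica that is `Rebuilding` should eventually transition back to `Healthy`. One way to observe this, for example, is to watch the CVRs:
-
- ```
- kubectl get cvr -n openebs -w
- ```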
-
-
-### How to handle replicas with slow disks or slow connectivity in case of cStor volumes? {#slow-replicas-in-cstor-volumes}
-
-The cStor target pod disconnects a replica if an IO response is not received from it within 60 seconds. This can happen due to slow disks in cStor pools or slow connectivity between the target pod and the cStor pool pods. To allow tuning of the IO wait time from its default value of 60 seconds, there is an environment variable `IO_MAX_WAIT_TIME` in the `cstor-istgt` container of the target pod.
-Add the following configuration to the target pod deployment under the `env` section of the `cstor-istgt` container:
-
-```
- env:
- - name: IO_MAX_WAIT_TIME
-   value: "120"
-```
-
-Note that the target pod gets restarted, which can impact ongoing IOs.
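-
-As an alternative to editing the deployment YAML by hand, the same change can be sketched with `kubectl set env`; the deployment name below is a placeholder for the target deployment of your cStor volume:
-
-```
-kubectl set env deployment/<target-deployment-name> -n openebs -c cstor-istgt IO_MAX_WAIT_TIME=120
-```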
-
-### How OpenEBS detects disks for creating cStor Pool?
-
-Any block disks available on the node (that can be listed with, say, `lsblk`) will be discovered by OpenEBS.
-Node Disk Manager (NDM) forms the BlockDevice CRs in the following way:
-
-* Scan the list of disks.
-* Filter out the OS disks
-* Filter out any other disk patterns that are mentioned in `openebs-ndm-config` under `Configmap` in `openebs-operator.yaml`.
-
-NDM does some filtering on the disks to exclude, for example, the boot disk. By default, NDM excludes the following device paths while creating BlockDevice CRs. This configuration is added in `openebs-ndm-config` under `Configmap` in `openebs-operator.yaml`.
-
-```
-/dev/loop - loop devices.
-/dev/fd - file descriptors.
-/dev/sr - CD-ROM devices.
-/dev/ram - ramdisks.
-/dev/dm - LVM devices.
-/dev/md - multiple device (software RAID) devices.
-/dev/rbd - Ceph block devices.
-/dev/zd - ZFS volumes.
-```
-
-It is also possible to customize the filters by adding more disk types associated with your nodes, for example, used disks, unwanted disks, and so on. This change must be made in the `openebs-operator.yaml` file that you downloaded before OpenEBS installation.
-
-**Example:**
-
-```
- filterconfigs:
- - key: path-filter
- name: path filter
- state: true
- include: ""
- exclude: "/dev/loop,/dev/fd0,/dev/sr0,/dev/ram,/dev/dm-,/dev/md,/dev/rbd,/dev/zd"
-```
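-
-To check which filters are currently active on a running cluster, the ConfigMap can be inspected directly in the `openebs` namespace, for example:
-
-```
-kubectl get configmap openebs-ndm-config -n openebs -o yaml
-```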
-
-[Go to top](#top)
-### What is the difference between cStor Pool creation using manual method and auto method?
-
-With the manual method, you must provide the selected disk names listed by NDM. These details have to be entered in the StoragePoolClaim YAML under `diskList`. See [storage pool](/deprecated/spc-based-cstor#creating-cStor-storage-pools) for more information.
-It is also possible to change the `maxPools` count and `poolType` in the StoragePoolClaim YAML.
-Consider you have 4 nodes with 2 disks each. If you select a `maxPools` count of 3, cStor pools will be created on any 3 nodes out of 4. The disks belonging to the remaining node can be used for horizontal scale-up in the future.
-The advantage is that there is no restriction on the number of disks for the creation of a cStor storage pool using the `striped` or `mirrored` type. A sketch of a manual-method SPC is shown below.
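-
-For illustration only (field names may differ across OpenEBS versions, and the disk names must be replaced with the ones reported by NDM), a manual-method SPC could be sketched and applied like this:
-
-```
-cat <<EOF | kubectl apply -f -
-apiVersion: openebs.io/v1alpha1
-kind: StoragePoolClaim
-metadata:
-  name: cstor-disk-manual
-spec:
-  name: cstor-disk-manual
-  type: disk
-  maxPools: 3
-  poolSpec:
-    poolType: striped
-  disks:
-    diskList:
-    - disk-<id-reported-by-ndm>
-EOF
-```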
-
-With the auto method, there is no need to provide the disk details in the StoragePoolClaim YAML. You only have to specify the `maxPools` count to limit storage pool creation in the OpenEBS cluster and the `poolType` for the type of storage pool, such as mirrored or striped. See [storage pool](/deprecated/spc-based-cstor#creating-cStor-storage-pools) for more info.
-
-However, the following are the limitations of this approach.
-
-1. For a striped pool, it will take only one disk per node even if the node has multiple disks.
-2. For a mirrored pool, it will take only 2 disks per node even if the node has multiple disks.
-
-Consider you have 4 nodes with 4 disks each. If you set `maxPools` to 3 and `poolType` to `striped`, then a striped pool will be created with a single disk on 3 nodes out of 4.
-If you set `maxPools` to 3 and `poolType` to `mirrored`, then a mirrored cStor pool will be created with 2 disks on each of 3 nodes out of 4.
-
-[Go to top](#top)
-
-### How the data is distributed when cStor maxPools count is 3 and replicaCount as 2 in StorageClass?
-
-If the `maxPools` count is 3 in the StoragePoolClaim, then 3 cStor storage pools will be created, provided the required number of nodes (3 in this example) is available.
-If `replicaCount` is 2 in the StorageClass, then 2 replicas of an OpenEBS volume will be created on top of any 2 of the 3 cStor storage pools.
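-
-To see which two of the three pools received the replicas, list the CVRs; the pool name is embedded in each CVR name (as in the example output shown further below):
-
-```
-kubectl get cvr -n openebs
-```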
-
-[Go to top](#top)
-### How to create a cStor volume on single cStor disk pool?
-
-You can set the `maxPools` count to 1 in the StoragePoolClaim YAML and `replicaCount` to `1` in the StorageClass YAML. In the following sample SPC and SC YAML, the cStor pool is created using the auto method. After applying this YAML, one cStor pool named `cstor-disk` will be created on only one node, along with a `StorageClass` named `openebs-cstor-disk`. The only requirement is that at least one node has a disk attached but unmounted. See [here](/additional-info/faqs#what-must-be-the-disk-mount-status-on-node-for-provisioning-openebs-volume) to understand more about disk mount status.
-
-```
----
-apiVersion: openebs.io/v1alpha1
-kind: StoragePoolClaim
-metadata:
- name: cstor-disk
-spec:
- name: cstor-disk
- type: disk
- maxPools: 1
- poolSpec:
- poolType: striped
----
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
- name: openebs-cstor-disk
- annotations:
- openebs.io/cas-type: cstor
- cas.openebs.io/config: |
- - name: StoragePoolClaim
- value: "cstor-disk"
- - name: ReplicaCount
- value: "1"
-provisioner: openebs.io/provisioner-iscsi
-```
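-
-A volume can then be requested from this setup with an ordinary PVC that references the `openebs-cstor-disk` StorageClass; the claim name and size below are placeholders:
-
-```
-cat <<EOF | kubectl apply -f -
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
-  name: demo-cstor-claim
-spec:
-  storageClassName: openebs-cstor-disk
-  accessModes:
-    - ReadWriteOnce
-  resources:
-    requests:
-      storage: 4G
-EOF
-```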
-
-[Go to top](#top)
-
-### How to get the details like status, capacity etc. of cStor Pool, cStor Volume Replica, cStor Volumes and Disks using kubectl command? {#more-info-pool-cvr-cv-disk}
-
-From 0.8.1 onwards, the following commands list information such as status and size using `kubectl get`. These commands produce output similar to the following only if the Kubernetes version of both the client and the server is above 1.11.
-
-The following command will give the details of cStor Storage Pool.
-
-```
-kubectl get csp -n openebs
-```
-
-Following is an example output.
-
-```shell hideCopy
-NAME ALLOCATED FREE CAPACITY STATUS TYPE AGE
-sparse-claim-auto-lja7 125K 9.94G 9.94G Healthy striped 1h
-```
-
-The following command will give the replica status details of each cStor volume created in the `openebs` namespace.
-
-```
-kubectl get cvr -n openebs
-```
-
-Following is an example output.
-
-```shell hideCopy
-NAME USED ALLOCATED STATUS AGE
-pvc-9ca83170-01e3-11e9-812f-54e1ad0c1ccc-sparse-claim-auto-lja7 6K 6K Healthy 1h
-```
-
-The following command will give the details of cStor volume created in `openebs` namespace.
-
-```
-kubectl get cstorvolume -n openebs
-```
-
-Following is an example output.
-
-```shell hideCopy
-NAME STATUS AGE
-pvc-9ca83170-01e3-11e9-812f-54e1ad0c1ccc Healthy 4h
-```
-
-The following command will give the details of the disks that are attached to the nodes in the cluster.
-
-```
-kubectl get disk
-```
-
-Following is an example output.
-
-```shell hideCopy
-NAME SIZE STATUS AGE
-sparse-5a92ced3e2ee21eac7b930f670b5eab5 10737418240 Active 10m
-```
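-
-On newer installs where NDM creates BlockDevice CRs (as mentioned above) rather than Disk CRs, the equivalent listing is:
-
-```
-kubectl get blockdevice -n openebs
-```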
-
-[Go to top](#top)
\ No newline at end of file
diff --git a/docs/main/faqs/faqs.md b/docs/main/faqs/faqs.md
index 30ac76d48..f78dafe4b 100644
--- a/docs/main/faqs/faqs.md
+++ b/docs/main/faqs/faqs.md
@@ -8,57 +8,6 @@ keywords:
description: The FAQ section about OpenEBS helps to address common concerns, questions, and objections that users have about OpenEBS.
---
-[What is most distinctive about the OpenEBS architecture?](#What-is-most-distinctive-about-the-OpenEBS-architecture)
-
-[Why did you choose iSCSI? Does it introduce latency and decrease performance? ](#Why-did-you-choose-iSCSI)
-
-[Where is my data stored and how can I see that?](#where-is-my-data)
-
-[What changes are needed for Kubernetes or other subsystems to leverage OpenEBS?](#changes-on-k8s-for-openebs)
-
-[How do you get started and what is the typical trial deployment?](#get-started)
-
-[What is the default OpenEBS Reclaim policy?](#default-reclaim-policy)
-
-[Why NDM daemon set required privileged mode?](#why-ndm-privileged)
-
-[Is OpenShift supported?](#openebs-in-openshift)
-
-[Can I use replica count as 2 in StorageClass if it is a single node cluster?](#replica-count-2-in-a-single-node-cluster)
-
-[How backup and restore is working with OpenEBS volumes?](#backup-restore-openebs-volumes)
-
-[Why customized parameters set on default OpenEBS StorageClasses are not getting persisted?](#customized-values-not-persisted-after-reboot)
-
-[Why NDM listens on host network?](#why-ndm-listens-on-host-network)
-
-[How is data protected? What happens when a host, client workload, or a data center fails?](#how-is-data-protected-what-happens-when-a-host-client-workload-or-a-data-center-fails)
-
-[How does OpenEBS provide high availability for stateful workloads?](#how-does-openebs-provide-high-availability-for-stateful-workloads)
-
-[What are the recommended iscsi timeout settings on the host?](#what-are-the-recommended-iscsi-timeout-settings-on-the-host)
-
-[What changes must be made to the containers on which OpenEBS runs?](#what-changes-must-be-made-to-the-containers-on-which-openebs-runs)
-
-[What are the minimum requirements and supported container orchestrators?](#what-are-the-minimum-requirements-and-supported-container-orchestrators)
-
-[Why would you use OpenEBS on EBS?](#why-would-you-use-openebs-on-ebs)
-
-[Can I use the same PVC for multiple Pods?](#can-i-use-the-same-pvc-for-multiple-pods)
-
-[Warning Messages while Launching PVC](#warning-messages-while-launching-pvc)
-
-[Why *OpenEBS_logical_size* and *OpenEBS_actual_used* are showing in different size?](#why-openebs-logical-size-and-openebs-actual-used-are-showing-in-different-size)
-
-[What must be the disk mount status on Node for provisioning OpenEBS volume?](#what-must-be-the-disk-mount-status-on-node-for-provisioning-openebs-volume)
-
-[Does OpenEBS support encryption at rest?](#encryption-rest)
-
-[Can the same BDC name be used for claiming a new block device?](#same-bdc-claim-new-bd)
-
------
-
-
### What is most distinctive about the OpenEBS architecture? {#What-is-most-distinctive-about-the-OpenEBS-architecture}
The OpenEBS architecture is an example of Container Attached Storage (CAS). These approaches containerize the storage controller, called IO controllers, and underlying storage targets, called “replicas”, allowing an orchestrator such as Kubernetes to automate the management of storage. Benefits include automation of management, a delegation of responsibility to developer teams, and the granularity of the storage policies which in turn can improve performance.
@@ -123,7 +72,6 @@ You can then begin running a workload against OpenEBS. There is a large and grow
[Go to top](#top)
-
### What is the default OpenEBS Reclaim policy? {#default-reclaim-policy}
The default retention is the same used by K8s. For dynamically provisioned PersistentVolumes, the default reclaim policy is “Delete”. This means that a dynamically provisioned volume is automatically deleted when a user deletes the corresponding PersistentVolumeClaim.
@@ -150,18 +98,26 @@ Yes. See the [detailed installation instructions for OpenShift](../user-guides/l
While creating a StorageClass, if user mention replica count as 2 in a single node cluster, OpenEBS will not create the volume from 0.9 version onwards. It is required to match the number of replica count and number of nodes available in the cluster for provisioning OpenEBS Jiva and cStor volumes.
+[Go to top](#top)
+
### How backup and restore is working with OpenEBS volumes? {#backup-restore-openebs-volumes}
OpenEBS cStor volume is working based on cStor/ZFS snapshot using Velero. For OpenEBS Local PV and Jiva volume, it is based on restic using Velero.
+[Go to top](#top)
+
### Why customized parameters set on default OpenEBS StorageClasses are not getting persisted? {#customized-values-not-persisted-after-reboot}
The customized parameters set on default OpenEBS StorageClasses will not persist after restarting `maya-apiserver` pod or restarting the node where `maya-apiserver` pod is running. StorageClasses created by maya-apiserver are owned by it and it tries to overwrite them upon its creation.
+[Go to top](#top)
+
### Why NDM listens on host network?
NDM uses `udev` to monitor dynamic disk attach and detach events. `udev` listens on netlink socket of the host system to get those events. A container requires host network access so that it can listen on the socket. Therefore NDM requires host network access for the `udev` running inside the container to listen those disk related events.
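+
+This can be confirmed on a running cluster by checking the DaemonSet spec; the DaemonSet name below assumes the default chart naming:
+
+```
+kubectl get daemonset openebs-ndm -n openebs -o jsonpath='{.spec.template.spec.hostNetwork}'
+```
+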
+[Go to top](#top)
+
### How is data protected? What happens when a host, client workload, or a data center fails?
Kubernetes provides many ways to enable resilience. OpenEBS leverages these wherever possible. For example, say the IO container that has the iSCSI target fails. Well, it is spun back up by Kubernetes. The same applies to the underlying replica containers, where the data is actually stored. They are spun back up by Kubernetes. Now, the point of replicas is to ensure that when one or more of these replicas are being respond and then repopulated in the background by OpenEBS, the client applications still run. OpenEBS takes a simple approach to ensuring that multiple replicas can be accessed by an IO controller using a configurable quorum or the minimum number of replica requirements. In addition, our new cStor checks for silent data corruption and in some cases can fix it in the background. Silent data corruption, unfortunately, can occur from poorly engineered hardware and from other underlying conditions including those that your cloud provider is unlikely to report or identify.
@@ -174,6 +130,8 @@ An OpenEBS Jiva volume is a controller deployed during OpenEBS installation. Vol
**Note:** Each replica is scheduled in a unique K8s node, and a K8s node never has two replicas of one OpenEBS volume.
+[Go to top](#top)
+
### What are the recommended iSCSI timeout settings on the host?
There are cases when application pod and OpenEBS cStor target pod are running on different nodes. In such cases, there may be chances that application can go to read only when K8s takes around 5 mins to re-schedule OpenEBS target pod to a new Node. To avoid such scenarios, default iscsi timeout values can be configured to the recommended one.
@@ -192,8 +150,6 @@ Do below configuration settings on the host node to change the default iscsi tim
2. Modify **node.session.timeo.replacement_timeout** with 300 seconds.
-
-
**For those sessions already logged in to iSCSI target:**
Below command can be used to change the setting for logged in sessions:
@@ -266,7 +222,6 @@ In case you need to use Local SSDs as block devices for provisioning cStor volum
[Go to top](#top)
-
### Does OpenEBS support encryption at rest? {#encryption-rest}
OpenEBS recommends LUKS encrypted drives with dm-crypt to achieve block-device encryption at rest.
@@ -290,5 +245,3 @@ Although block-level encryption is faster than filesystem encryption such as eCr
No. It is recommended to create different BDC name for claiming an unclaimed disk manually.
[Go to top](#top)
-
------
\ No newline at end of file
diff --git a/docs/main/introduction-to-openebs/benefits.mdx b/docs/main/introduction-to-openebs/benefits.mdx
index a8e6065ac..68ca41472 100644
--- a/docs/main/introduction-to-openebs/benefits.mdx
+++ b/docs/main/introduction-to-openebs/benefits.mdx
@@ -9,7 +9,7 @@ description: Some key aspects that make OpenEBS different compared to other trad
import { TwoColumn } from "@site/src/components/TwoColumn";
:::tip
-For information on how OpenEBS is used in production, visit the [use cases](/docs/introduction/usecases) section or read what [OpenEBS Adopters have shared](https://github.com/openebs/openebs/blob/HEAD/ADOPTERS.md).
+For information on how OpenEBS is used in production, visit the [use cases](use-cases-and-examples.mdx) section or read what [OpenEBS Adopters have shared](https://github.com/openebs/openebs/blob/HEAD/ADOPTERS.md).
:::
Containers and Kubernetes have disrupted the way platforms and technology stacks are built; OpenEBS is a result of applying the patterns of containers and container orchestratation to storage software. Therefore the benefits of using OpenEBS are inline with benefits of moving to cloud native architectures. A few benefits worth highlighting include:
@@ -29,7 +29,7 @@ Containers and Kubernetes have disrupted the way platforms and technology stacks
where "cloud native" means following a loosely coupled architecture. As such
the normal benefits to cloud native, loosely coupled architectures apply.
For example, developers and DevOps architects can use standard Kubernetes
- skills and utilities to configure, use, scale, customize and manage OpenEBS itself.
+ skills and utilities to configure, use, scale, customize, and manage OpenEBS itself.
![Cloud Native Storage Icon](../assets/b-cn.svg)
@@ -41,7 +41,7 @@ Some key aspects that make OpenEBS different compared to other traditional stora
- Built using the _micro-services architecture_ like the applications it serves. OpenEBS is itself deployed as a set of containers on Kubernetes worker nodes. Uses Kubernetes itself to orchestrate and manage OpenEBS components.
- Built completely in userspace making it highly portable to run across _any OS/platform_.
- Completely intent-driven, inheriting the same principles that drive the _ease of use_ with Kubernetes.
-- OpenEBS supports a range of storage engines so that developers can deploy the storage technology appropriate to their application design objectives. Distributed applications like Cassandra can use a _LocalPV_ engine for lowest latency writes. Monolithic applications like MySQL and PostgreSQL can use _Mayastor built using NVMe and SPDK_ or _cStor based on ZFS_ for resilience. Streaming applications like Kafka can use the NVMe engine [Mayastor](https://github.com/openebs/Mayastor) for best performance in edge environments or, again, a LocalPV option.
+- OpenEBS supports a range of storage engines so that developers can deploy the storage technology appropriate to their application design objectives. Distributed applications like Cassandra can use a _local_ engine for lowest-latency writes. Monolithic applications like MongoDB and PostgreSQL can use a _replicated engine_ for resilience. Streaming applications like Kafka can use the replicated engine for best performance in edge environments or, again, a local engine option.
### Avoid Cloud Lock-in
@@ -49,8 +49,8 @@ Some key aspects that make OpenEBS different compared to other traditional stora
Even though Kubernetes provides an increasingly ubiquitous control plane,
concerns about data gravity resulting in lock-in and other challenges remain.
- With OpenEBS, the data can be written to the OpenEBS layer - if cStor, Jiva
- or Mayastor are used - and if so OpenEBS acts as a data abstraction layer.
+ With OpenEBS, the data can be written to the OpenEBS layer - if a replicated engine
+ is used - and if so OpenEBS acts as a data abstraction layer.
Using this data abstraction layer, data can be much more easily moved amongst
Kubernetes environments, whether they are on premise and attached to traditional
storage systems or in the cloud and attached to local storage or managed storage services.
@@ -86,15 +86,10 @@ Some key aspects that make OpenEBS different compared to other traditional stora
- Node Disk Manager (NDM) in OpenEBS can be used to enable disk management in
- a Kubernetes way by using Kubernetes constructs. Using NDM and OpenEBS,
- nodes in the Kubernetes cluster can be horizontally scaled without worrying
- about managing persistent storage needs of stateful applications. The
- storage needs (capacity planning, performance planning, and volume
- management) of a cluster can be automated using the volume and pool policies
- of OpenEBS thanks in part to the role played by NDM in identifying and
- managing underlying storage resources, including local disks and cloud
- volumes.
+ Using OpenEBS, nodes in the Kubernetes cluster can be horizontally scaled without
+ worrying about managing persistent storage needs of stateful applications.
+ The storage needs (capacity planning, performance planning, and volume management)
+ of a cluster can be automated using the pool and volume policies of OpenEBS.
![Natively HCI on K8s Icon](../assets/b-hci.svg)
@@ -105,8 +100,8 @@ Some key aspects that make OpenEBS different compared to other traditional stora
- Because OpenEBS follows the CAS architecture, upon node failure the OpenEBS
- controller will be rescheduled by Kubernetes while the underlying data is
+ Upon node failure, the OpenEBS controller will be rescheduled by Kubernetes
+ because OpenEBS follows the CNS architecture, while the underlying data is
protected via the use of one or more replicas. More importantly - because
each workload can utilize its own OpenEBS - there is no risk of a system
wide outage due to the loss of storage. For example, metadata of the volume
@@ -124,8 +119,8 @@ Some key aspects that make OpenEBS different compared to other traditional stora
## See Also:
-- [Use cases and Examples](/docs/introduction/usecases)
-- [OpenEBS Features](/docs/introduction/features)
-- [OpenEBS Architecture](/docs/concepts/architecture)
-- [OpenEBS Local PV](/docs/concepts/localpv)
-- [OpenEBS Mayastor](/docs/concepts/mayastor)
+- [Use Cases and Examples](use-cases-and-examples.mdx)
+- [OpenEBS Features](features.mdx)
+- [OpenEBS Architecture](../concepts/architecture.md)
+- [OpenEBS Local Engine](../concepts/data-engines/local-engine.md)
+- [OpenEBS Replicated Engine](../concepts/data-engines/replicated-engine.md)
diff --git a/docs/main/introduction-to-openebs/features.mdx b/docs/main/introduction-to-openebs/features.mdx
index 127a1f5ca..dc5474ed4 100644
--- a/docs/main/introduction-to-openebs/features.mdx
+++ b/docs/main/introduction-to-openebs/features.mdx
@@ -14,7 +14,7 @@ description: OpenEBS features includes containerized storage for containers, syn
import { TwoColumn } from "@site/src/components/TwoColumn";
:::tip
-For information on how OpenEBS is used in production, visit the [use cases](/docs/introduction/usecases) section or read what [OpenEBS Adopters have shared](https://github.com/openebs/openebs/blob/HEAD/ADOPTERS.md).
+For information on how OpenEBS is used in production, visit the [use cases](use-cases-and-examples.mdx) section or read what [OpenEBS Adopters have shared](https://github.com/openebs/openebs/blob/HEAD/ADOPTERS.md).
:::
OpenEBS Features, like any storage solution, can be broadly classified into following categories:
@@ -28,11 +28,11 @@ OpenEBS Features, like any storage solution, can be broadly classified into foll
- OpenEBS2 is an example of Container Attached Storage or CAS. Volumes
+ OpenEBS is an example of Container Native Storage (CNS). Volumes
provisioned through OpenEBS are always containerized. Each volume has a
dedicated storage controller that increases the agility and granularity of
persistent storage operations of the stateful applications. Benefits and
- more details on CAS architecture are found here.
+ more details on CNS architecture are found here.
![Containerized Storage Icon](../assets/f-cas.svg)
@@ -43,7 +43,7 @@ OpenEBS Features, like any storage solution, can be broadly classified into foll
- Synchronous Replication is an optional and popular feature of OpenEBS. When used with the Jiva, cStor and Mayastor storage engines, OpenEBS can synchronously replicate the data volumes for high availability. The replication happens across Kubernetes zones resulting in high availability for cross AZ setups. This feature is especially useful to build highly available stateful applications using local disks on cloud providers services such as GKE, EKS and AKS.
+ Synchronous Replication is an optional and popular feature of OpenEBS. When used with the replicated engine, OpenEBS can synchronously replicate the data volumes for high availability. The replication happens across Kubernetes zones resulting in high availability for cross-AZ setups. This feature is especially useful to build highly available stateful applications using local disks on cloud provider services such as GKE, EKS, and AKS.
![Synchronous Replication Icon](../assets/f-replication.svg)
@@ -54,7 +54,7 @@ OpenEBS Features, like any storage solution, can be broadly classified into foll
- Copy-on-write snapshots are another optional and popular feature of OpenEBS. When using the cStor engine, snapshots are created instantaneously and there is no limit on the number of snapshots. The incremental snapshot capability enhances data migration and portability across Kubernetes clusters and across different cloud providers or data centers. Operations on snapshots and clones are performed in completely Kubernetes native method using the standard kubectl commands. Common use cases include efficient replication for back-ups and the use of clones for troubleshooting or development against a read only copy of data.
+ Copy-on-write snapshots are another optional and popular feature of OpenEBS. When using the replicated engine, snapshots are created instantaneously and there is no limit on the number of snapshots. The incremental snapshot capability enhances data migration and portability across Kubernetes clusters and across different cloud providers or data centers. Operations on snapshots and clones are performed in a completely Kubernetes-native way using standard kubectl commands. Common use cases include efficient replication for backups and the use of clones for troubleshooting or development against a read-only copy of data.
![Snapshots and Clones Icon](../assets/f-snapshots.svg)
@@ -76,7 +76,7 @@ OpenEBS Features, like any storage solution, can be broadly classified into foll
- OpenEBS volumes are instrumented for granular data metrics such as volume IOPS, throughput, latency and data patterns. As OpenEBS follows the CAS pattern, stateful applications can be tuned for better performance by observing the traffic data patterns on Prometheus and modifying the storage policy parameters without worrying about neighboring workloads that are using OpenEBS thereby minimizing the incidence of "noisy neighbor" issues.
+ OpenEBS volumes are instrumented for granular data metrics such as volume IOPS, throughput, latency and data patterns. As OpenEBS follows the CNS pattern, stateful applications can be tuned for better performance by observing the traffic data patterns on Prometheus and modifying the storage policy parameters without worrying about neighboring workloads that are using OpenEBS thereby minimizing the incidence of "noisy neighbor" issues.
![Prometheus and Tuning Icon](../assets/f-prometheus.svg)
@@ -85,8 +85,8 @@ OpenEBS Features, like any storage solution, can be broadly classified into foll
## See Also:
-- [Use cases and Examples](/docs/introduction/usecases)
-- [OpenEBS Benefits](/docs/introduction/benefits)
-- [OpenEBS Architecture](/docs/concepts/architecture)
-- [OpenEBS Local PV](/docs/concepts/localpv)
-- [OpenEBS Mayastor](/docs/concepts/mayastor)
+- [Use Cases and Examples](use-cases-and-examples.mdx)
+- [OpenEBS Benefits](benefits.mdx)
+- [OpenEBS Architecture](../concepts/architecture.md)
+- [OpenEBS Local Engine](../concepts/data-engines/local-engine.md)
+- [OpenEBS Replicated Engine](../concepts/data-engines/replicated-engine.md)
diff --git a/docs/main/introduction-to-openebs/introduction-to-openebs.md b/docs/main/introduction-to-openebs/introduction-to-openebs.md
index 7e1999c16..580e0deb0 100644
--- a/docs/main/introduction-to-openebs/introduction-to-openebs.md
+++ b/docs/main/introduction-to-openebs/introduction-to-openebs.md
@@ -5,16 +5,16 @@ slug: /
keywords:
- OpenEBS
- OpenEBS overview
-description: OpenEBS builds on Kubernetes to enable Stateful applications to easily access Dynamic Local or Distributed Container Attached Kubernetes Persistent Volumes. By using the Container Attached Storage pattern users report lower costs, easier management, and more control for their teams.
+description: OpenEBS builds on Kubernetes to enable Stateful applications to easily access Dynamic Local or Distributed Container Attached Kubernetes Persistent Volumes. By using the Container Native Storage pattern users report lower costs, easier management, and more control for their teams.
---
## What is OpenEBS?
-OpenEBS turns any storage available to Kubernetes worker nodes into Local or Distributed Kubernetes Persistent Volumes. OpenEBS helps Application and Platform teams easily deploy Kubernetes Stateful Workloads that require fast and highly durable, reliable and scalable [Container Attached Storage](/docs/concepts/cas).
+OpenEBS turns any storage available to Kubernetes worker nodes into Local or Distributed Kubernetes Persistent Volumes. OpenEBS helps application and platform teams easily deploy Kubernetes stateful workloads that require fast and highly durable, reliable, and scalable [Container Native Storage](../concepts/container-native-storage.md).
OpenEBS is also a leading choice for NVMe based storage deployments.
-OpenEBS was originally built by [MayaData](https://mayadata.io) and donated to the _Cloud Native Computing Foundation_ and is now a [CNCF sandbox project](https://www.cncf.io/sandbox-projects/).
+OpenEBS was originally built by MayaData and donated to the _Cloud Native Computing Foundation_ and is now a [CNCF sandbox project](https://www.cncf.io/sandbox-projects/).
## Why do users prefer OpenEBS?
@@ -34,17 +34,16 @@ OpenEBS manages the storage available on each of the Kubernetes nodes and uses t
In case of [Local Volumes](#local-volumes):
-- OpenEBS can create Persistent Volumes using raw block devices or partitions, or using sub-directories on Hostpaths or by using LVM,ZFS, or sparse files.
+- OpenEBS can create persistent volumes using raw block devices or partitions, sub-directories on hostpaths, or by using the local engine or sparse files.
- The local volumes are directly mounted into the Stateful Pod, without any added overhead from OpenEBS in the data path, decreasing latency.
-- OpenEBS provides additional tooling for Local Volumes for monitoring, backup/restore, disaster recovery, snapshots when backed by ZFS or LVM, capacity based scheduling, and more.
+- OpenEBS provides additional tooling for local volumes for monitoring, backup/restore, disaster recovery, snapshots when backed by local engine, capacity based scheduling, and more.
In case of [Distributed (aka Replicated) Volumes](#replicated-volumes):
-- OpenEBS creates a Micro-service for each Distributed Persistent volume using one of its engines - Mayastor, cStor or Jiva.
-- The Stateful Pod writes the data to the OpenEBS engines that synchronously replicate the data to multiple nodes in the cluster. The OpenEBS engine itself is deployed as a pod and orchestrated by Kubernetes. When the node running the Stateful pod fails, the pod will be rescheduled to another node in the cluster and OpenEBS provides access to the data using the available data copies on other nodes.
-- The Stateful Pods connect to the OpenEBS Distributed Persistent volume using iSCSI (cStor and Jiva) or NVMeoF (Mayastor).
-- OpenEBS cStor and Jiva focus on ease of use and durability of the storage. These engines use customized versions of ZFS and Longhorn technology respectively for writing the data onto the storage.
-- OpenEBS Mayastor is the latest engine and has been developed with durability and performance as design goals; OpenEBS Mayastor efficiently manages the compute (hugepages, cores) and storage (NVMe Drives) to provide fast distributed block storage.
+- OpenEBS creates a Micro-service for each Distributed Persistent Volume using the replicated engine.
+- The Stateful Pod writes the data to the OpenEBS engine that synchronously replicates the data to multiple nodes in the cluster. The OpenEBS engine itself is deployed as a pod and orchestrated by Kubernetes. When the node running the Stateful pod fails, the pod will be rescheduled to another node in the cluster and OpenEBS provides access to the data using the available data copies on other nodes.
+- The Stateful Pods connect to the OpenEBS distributed persistent volume using NVMe-oF (replicated engine).
+- OpenEBS replicated engine is developed with durability and performance as design goals. It efficiently manages the compute (hugepages and cores) and storage (NVMe Drives) to provide fast distributed block storage.
:::tip NOTE
OpenEBS contributors prefer to call the Distributed Block Storage volumes as **Replicated Volumes**, to avoid confusion with traditional distributed block storage for the following reasons:
@@ -58,45 +57,31 @@ OpenEBS Data Engines and Control Plane are implemented as micro-services, deploy
## Local Volumes
-Local Volumes are accessible only from a single node in the cluster. Pods using Local Volume have to be scheduled on the node where volume is provisioned. Local Volumes are typically preferred for distributed workloads like Cassandra, MongoDB, Elastic, etc that are distributed in nature and have high availability built into them.
-
-Depending on the type of storage attached to your Kubernetes worker nodes and the requirements of your workloads, you can select from different flavors of Dynamic Local PV - Hostpath, Device, LVM, ZFS or Rawfile.
-
-### Quickstart Guides
-
-Installing OpenEBS in your cluster is as simple as running a few `kubectl` or `helm` commands. Here are the list of our Quickstart guides with detailed instructions for each storage engine.
-
-- [Local PV hostpath](/docs/user-guides/localpv-hostpath)
-- [Local PV device](/docs/user-guides/localpv-device)
-- [ZFS Local PV](https://github.com/openebs/zfs-localpv)
-- [LVM Local PV](https://github.com/openebs/lvm-localpv)
-- [Rawfile Local PV](https://github.com/openebs/rawfile-localpv)
+Local Volumes are accessible only from a single node in the cluster. Pods using a local volume have to be scheduled on the node where the volume is provisioned. Local volumes are typically preferred for workloads like Cassandra, MongoDB, Elastic, etc. that are distributed in nature and have high availability built into them.
## Replicated Volumes
Replicated Volumes, as the name suggests, are those that have their data synchronously replicated to multiple nodes. Volumes can sustain node failures. The replication also can be setup across availability zones helping applications move across availability zones.
-Replicated Volumes also are capable of enterprise storage features like snapshots, clone, volume expansion and so forth. Replicated Volumes are a preferred choice for Stateful workloads like Percona/MySQL, Jira, GitLab, etc.
+Replicated Volumes are also capable of enterprise storage features like snapshots, clones, volume expansion, and so forth. Replicated Volumes are a preferred choice for Stateful workloads like Percona/MongoDB, Jira, GitLab, etc.
-Depending on the type of storage attached to your Kubernetes worker nodes and application performance requirements, you can select from Jiva, cStor or Mayastor.
-
-### Quickstart Guides
+:::tip NOTE
+Depending on the type of storage attached to your Kubernetes worker nodes and the requirements of your workloads, you can select either the local engine or the replicated engine.
+:::
-Installing OpenEBS in your cluster is as simple as running a few `kubectl` or `helm` commands. Here are the list of our Quickstart guides with detailed instructions for each storage engine.
+## Quickstart Guides
-- [Mayastor](/docs/user-guides/mayastor)
-- [cStor](https://github.com/openebs/cstor-operators/blob/master/docs/quick.md)
-- [Jiva](https://github.com/openebs/jiva-operator)
+Installing OpenEBS in your cluster is as simple as running a few `kubectl` or `helm` commands. Refer to our [Quickstart guide](../quickstart-guide/quickstart.md) for more information.
## Community Support via Slack
-OpenEBS has a vibrant community that can help you get started. If you have further question and want to learn more about OpenEBS, please join [OpenEBS community on Kubernetes Slack](https://kubernetes.slack.com). If you are already signed up, head to our discussions at[#openebs](https://kubernetes.slack.com/messages/openebs/) channel.
+OpenEBS has a vibrant community that can help you get started. If you have further questions and want to learn more about OpenEBS, join the [OpenEBS community on Kubernetes Slack](https://kubernetes.slack.com). If you are already signed up, head to our discussions in the [#openebs](https://kubernetes.slack.com/messages/openebs/) channel.
## See Also:
-- [Quickstart](/docs/user-guides/quickstart)
-- [Use cases and Examples](/docs/introduction/usecases)
-- [Container Attached Storage (CAS)](/docs/concepts/cas)
-- [OpenEBS Architecture](/docs/concepts/architecture)
-- [OpenEBS Local PV](/docs/concepts/localpv)
-- [OpenEBS Mayastor](/docs/concepts/mayastor)
+- [Quickstart](../quickstart-guide/quickstart.md)
+- [Use Cases and Examples](use-cases-and-examples.mdx)
+- [Container Native Storage (CNS)](../concepts/container-native-storage.md)
+- [OpenEBS Architecture](../concepts/architecture.md)
+- [OpenEBS Local Engine](../concepts/data-engines/local-engine.md)
+- [OpenEBS Replicated Engine](../concepts/data-engines/replicated-engine.md)
diff --git a/docs/main/introduction-to-openebs/use-cases-and-examples.mdx b/docs/main/introduction-to-openebs/use-cases-and-examples.mdx
index 81f79c71d..fe5945c4a 100644
--- a/docs/main/introduction-to-openebs/use-cases-and-examples.mdx
+++ b/docs/main/introduction-to-openebs/use-cases-and-examples.mdx
@@ -24,13 +24,13 @@ Following are a few examples of how OpenEBS is being used:
### Self Managed Database Service like RDS
-As per the [CNCF Database Technology Radar report](https://radar.cncf.io/2020-11-database-storage), many companies working with sensitive data are more likely to host databases in-house and may even be required to. Also, if a company has a large amount of data, for instance, there can be significant cost overhead to using a managed database solution available from cloud providers. Additionally the data mesh pattern is leading to a proliferation of small self managed DBs.
+As per the [CNCF Database Technology Radar report](https://radar.cncf.io/2020-11-database-storage), many companies working with sensitive data are more likely to host databases in-house and may even be required to. Also, if a company has a large amount of data, for instance, there can be significant cost overhead to using a managed database solution available from cloud providers. Additionally, the data mesh pattern is leading to a proliferation of small self-managed DBs.
OpenEBS through its simplicity in setup and configuration and built on the resilience of Kubernetes orchestration can be used to easily setup a managed database service. Using OpenEBS you get the benefits of:
- Fast local storage for cloud native databases
- Synchronously replicated storage for protecting against node or AZ failures if needed
-- Enterprise storage features like Thin provisioning, Storage Expansion, Data Protection and more.
+- Enterprise storage features like Thin Provisioning, Storage Expansion, Data Protection, and more.
Examples:
@@ -74,9 +74,9 @@ Examples:
]}
/>
-### Open source durable storage for Observability stack
+### Open Source Durable Storage for Observability Stack
-Open standards such as OpenMetrics and OpenTelemetry and open source tools like Prometheus, Grafana, Elastic have become widely adopted projects to run the [Cloud Native Observability Stack](https://radar.cncf.io/2020-09-observability). It shouldn't come as any surprise that OpenEBS, being a prominent cloud native open source technology, is often the choice for running these open source observability stacks.
+Open standards such as OpenMetrics and OpenTelemetry and open source tools like Prometheus, Grafana, Elastic have become widely adopted projects to run the [Cloud Native Observability Stack](https://radar.cncf.io/2020-09-observability). It should not come as any surprise that OpenEBS, being a prominent cloud native open source technology, is often the choice for running these open source observability stacks.
Examples:
@@ -97,7 +97,7 @@ Examples:
### Running CI/CD on Kubernetes
-Jenkins, Gitlab, Gerrit, Sonarqube and many of the tools built in-house are moving towards Kubernetes for better DevOps experience and agility. With Kubernetes becoming a standard to run the applications, the CI/CD tools that manage them are following suite, with many provides now providing Kubernetes Operators.
+Jenkins, Gitlab, Gerrit, Sonarqube, and many of the tools built in-house are moving towards Kubernetes for a better DevOps experience and agility. With Kubernetes becoming a standard to run the applications, the CI/CD tools that manage them are following suit, with many providers now offering Kubernetes Operators.
Examples:
@@ -111,9 +111,9 @@ Examples:
]}
/>
-### Self managed Object storage service
+### Self-managed Object Storage Service
-Use OpenEBS and Minio on Kubernetes to build cross AZ cloud native object storage solution. Kubernetes PVCs are used by Minio to seamlessly scale Minio nodes. OpenEBS provides easily scalable and manageable storage pools including localPV. Scalability of Minio is directly complimented by OpenEBS's feature of cloud-native scalable architecture.
+Use OpenEBS and MinIO on Kubernetes to build a cross-AZ cloud native object storage solution. Kubernetes PVCs are used by MinIO to seamlessly scale MinIO nodes. OpenEBS provides easily scalable and manageable storage pools including the local engine. Scalability of MinIO is directly complemented by OpenEBS's cloud-native scalable architecture.
Examples:
@@ -127,7 +127,7 @@ Examples:
]}
/>
-### Building scalable websites and ML pipelines
+### Building Scalable Websites and ML Pipelines
Many applications such as WordPress require shared storage with RWM access mode as do many ML pipelines. OpenEBS acting as a persistent storage backend for NFS solves this need very well.
@@ -145,8 +145,8 @@ Examples:
## See Also:
-- [Use cases and Examples](/docs/introduction/usecases)
-- [OpenEBS Benefits](/docs/introduction/benefits)
-- [OpenEBS Architecture](/docs/concepts/architecture)
-- [OpenEBS Local PV](/docs/concepts/localpv)
-- [OpenEBS Mayastor](/docs/concepts/mayastor)
+- [Use Cases and Examples](use-cases-and-examples.mdx)
+- [OpenEBS Benefits](benefits.mdx)
+- [OpenEBS Architecture](../concepts/architecture.md)
+- [OpenEBS Local Engine](../concepts/data-engines/local-engine.md)
+- [OpenEBS Replicated Engine](../concepts/data-engines/replicated-engine.md)
diff --git a/docs/main/quickstart-guide/deploy-a-test-application.md b/docs/main/quickstart-guide/deploy-a-test-application.md
index 0c634d9f5..1f6ceb57a 100644
--- a/docs/main/quickstart-guide/deploy-a-test-application.md
+++ b/docs/main/quickstart-guide/deploy-a-test-application.md
@@ -1,256 +1,202 @@
---
id: deployment
-title: Deploy a Test Application
+title: Deploy an Application
keywords:
- Deploy
- Deployment
- - Deploy a Test Application
-description: This guide will help you to deploy a test application
+ - Deploy an Application
+description: This section will help you to deploy an application.
---
-# Deploy a Test Application
-
-## Objective
-
-If all verification steps in the preceding stages were satisfied, then Mayastor has been successfully deployed within the cluster. In order to verify basic functionality, we will now dynamically provision a Persistent Volume based on a Mayastor StorageClass, mount that volume within a small test pod which we'll create, and use the [**Flexible I/O Tester**](https://github.com/axboe/fio) utility within that pod to check that I/O to the volume is processed correctly.
-
-## Define the PVC
-
-Use `kubectl` to create a PVC based on a StorageClass that you created in the [previous stage](installation.md#create-mayastor-storageclass-s). In the example shown below, we'll consider that StorageClass to have been named "mayastor-1". Replace the value of the field "storageClassName" with the name of your own Mayastor-based StorageClass.
-
-For the purposes of this quickstart guide, it is suggested to name the PVC "ms-volume-claim", as this is what will be illustrated in the example steps which follow.
-
-{% tabs %}
-{% tab title="Command" %}
-```text
-cat <<EOF | kubectl create -f -
-  IO depths    : ..., >=64=0.0%
- submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
- complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.1%, 32=0.0%, 64=0.0%, >=64=0.0%
- issued rwts: total=40801,40696,0,0 short=0,0,0,0 dropped=0,0,0,0
- latency : target=0, window=0, percentile=100.00%, depth=16
-
-Run status group 0 (all jobs):
- READ: bw=2720KiB/s (2785kB/s), 2720KiB/s-2720KiB/s (2785kB/s-2785kB/s), io=159MiB (167MB), run=60011-60011msec
- WRITE: bw=2713KiB/s (2778kB/s), 2713KiB/s-2713KiB/s (2778kB/s-2778kB/s), io=159MiB (167MB), run=60011-60011msec
-
-Disk stats (read/write):
- sdd: ios=40795/40692, merge=0/9, ticks=375308/568708, in_queue=891648, util=99.53%
-```
-{% endtab %}
-{% endtabs %}
-
-If no errors are reported in the output then Mayastor has been correctly configured and is operating as expected. You may create and consume additional Persistent Volumes with your own test applications.
+:::info
+- See [LVM Local PV User Guide](../user-guides/local-engine-user-guide/lvm-localpv.md) to deploy LVM Local PV.
+- See [ZFS Local PV User Guide](../user-guides/local-engine-user-guide/zfs-localpv.md) to deploy ZFS Local PV.
+- See [Replicated Engine Deployment](../user-guides/replicated-engine-user-guide/replicated-engine-deployment.md) to deploy Replicated Engine.
+:::
+
+# Deploy an Application
+
+This section will help you to deploy an application.
+
+## Create a PersistentVolumeClaim
+
+The next step is to create a PersistentVolumeClaim. Pods will use PersistentVolumeClaims to request Hostpath Local PV from the *OpenEBS Dynamic Local PV provisioner*.
+
+1. Here is the configuration file for the PersistentVolumeClaim. Save the following PersistentVolumeClaim definition as `local-hostpath-pvc.yaml`
+
+ ```
+ kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: local-hostpath-pvc
+ spec:
+ storageClassName: openebs-hostpath
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5G
+ ```
+
+2. Create the PersistentVolumeClaim
+
+ ```
+ kubectl apply -f local-hostpath-pvc.yaml
+ ```
+
+3. Look at the PersistentVolumeClaim:
+
+ ```
+ kubectl get pvc local-hostpath-pvc
+ ```
+
+ The output shows that the `STATUS` is `Pending`. This means PVC has not yet been used by an application pod. The next step is to create a Pod that uses your PersistentVolumeClaim as a volume.
+
+ ```shell hideCopy
+ NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
+ local-hostpath-pvc Pending openebs-hostpath 3m7s
+ ```
+
+## Create Pod to Consume OpenEBS Local PV Hostpath Storage
+
+1. Here is the configuration file for the Pod that uses Local PV. Save the following Pod definition to `local-hostpath-pod.yaml`.
+
+ ```
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: hello-local-hostpath-pod
+ spec:
+ volumes:
+ - name: local-storage
+ persistentVolumeClaim:
+ claimName: local-hostpath-pvc
+ containers:
+ - name: hello-container
+ image: busybox
+ command:
+ - sh
+ - -c
+ - 'while true; do echo "`date` [`hostname`] Hello from OpenEBS Local PV." >> /mnt/store/greet.txt; sleep $(($RANDOM % 5 + 300)); done'
+ volumeMounts:
+ - mountPath: /mnt/store
+ name: local-storage
+ ```
+
+ :::note
+ As the Local PV storage classes use `waitForFirstConsumer`, do not use `nodeName` in the Pod spec to specify node affinity. If `nodeName` is used in the Pod spec, then the PVC will remain in the `Pending` state. For more details, refer to https://github.com/openebs/openebs/issues/2915.
+ :::
+
+2. Create the Pod:
+
+ ```
+ kubectl apply -f local-hostpath-pod.yaml
+ ```
+
+3. Verify that the container in the Pod is running.
+
+ ```
+ kubectl get pod hello-local-hostpath-pod
+ ```
+4. Verify that the data is being written to the volume.
+
+ ```
+ kubectl exec hello-local-hostpath-pod -- cat /mnt/store/greet.txt
+ ```
+
+5. Verify that the container is using the Local PV Hostpath.
+ ```
+ kubectl describe pod hello-local-hostpath-pod
+ ```
+
+ The output shows that the Pod is running on `Node: gke-user-helm-default-pool-3a63aff5-1tmf` and using the persistent volume provided by `local-hostpath-pvc`.
+
+ ```shell hideCopy
+ Name: hello-local-hostpath-pod
+ Namespace: default
+ Priority: 0
+ Node: gke-user-helm-default-pool-3a63aff5-1tmf/10.128.0.28
+ Start Time: Thu, 16 Apr 2020 17:56:04 +0000
+ ...
+ Volumes:
+ local-storage:
+ Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
+ ClaimName: local-hostpath-pvc
+ ReadOnly: false
+ ...
+ ```
+
+6. Look at the PersistentVolumeClaim again to see the details about the dynamically provisioned Local PersistentVolume
+ ```
+ kubectl get pvc local-hostpath-pvc
+ ```
+
+ The output shows that the `STATUS` is `Bound`. A new Persistent Volume `pvc-864a5ac8-dd3f-416b-9f4b-ffd7d285b425` has been created.
+
+ ```shell hideCopy
+ NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
+ local-hostpath-pvc Bound pvc-864a5ac8-dd3f-416b-9f4b-ffd7d285b425 5G RWO openebs-hostpath 28m
+ ```
+
+7. Look at the PersistentVolume details to see where the data is stored. Replace the PVC name with the one that was displayed in the previous step.
+ ```
+ kubectl get pv pvc-864a5ac8-dd3f-416b-9f4b-ffd7d285b425 -o yaml
+ ```
+ The output shows that the PV was provisioned in response to PVC request `spec.claimRef.name: local-hostpath-pvc`.
+
+ ```shell hideCopy
+ apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: pvc-864a5ac8-dd3f-416b-9f4b-ffd7d285b425
+ annotations:
+ pv.kubernetes.io/provisioned-by: openebs.io/local
+ ...
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ capacity:
+ storage: 5G
+ claimRef:
+ apiVersion: v1
+ kind: PersistentVolumeClaim
+ name: local-hostpath-pvc
+ namespace: default
+ resourceVersion: "291148"
+ uid: 864a5ac8-dd3f-416b-9f4b-ffd7d285b425
+ ...
+ ...
+ local:
+ fsType: ""
+ path: /var/openebs/local/pvc-864a5ac8-dd3f-416b-9f4b-ffd7d285b425
+ nodeAffinity:
+ required:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ - gke-user-helm-default-pool-3a63aff5-1tmf
+ persistentVolumeReclaimPolicy: Delete
+ storageClassName: openebs-hostpath
+ volumeMode: Filesystem
+ status:
+ phase: Bound
+ ```
+
+
+:::note
+A few important characteristics of a *OpenEBS Local PV* can be seen from the above output:
+- `spec.nodeAffinity` specifies the Kubernetes node where the Pod using the Hostpath volume is scheduled.
+- `spec.local.path` specifies the unique subdirectory under the `BasePath (/var/openebs/local)` defined in the corresponding StorageClass.
+:::
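+
+To see the data on the node itself, you can list the PV's directory under the Hostpath base directory. Run this on the node reported in `spec.nodeAffinity`; the PV name below is the one from the example output above:
+
+```
+sudo ls /var/openebs/local/pvc-864a5ac8-dd3f-416b-9f4b-ffd7d285b425
+```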
+
+## See Also
+
+[Installation](installation.md)
+
+[Local PV Hostpath](../user-guides/local-engine-user-guide/localpv-hostpath.md)
+
+[LVM Local PV](../user-guides/local-engine-user-guide/lvm-localpv.md)
+
+[ZFS Local PV](../user-guides/local-engine-user-guide/zfs-localpv.md)
\ No newline at end of file
diff --git a/docs/main/quickstart-guide/installation.md b/docs/main/quickstart-guide/installation.md
index 180788eaa..499fff4ff 100644
--- a/docs/main/quickstart-guide/installation.md
+++ b/docs/main/quickstart-guide/installation.md
@@ -4,7 +4,6 @@ title: Installing OpenEBS
keywords:
- Installing OpenEBS
- Installing OpenEBS through helm
- - Installing OpenEBS through kubectl
description: This guide will help you to customize and install OpenEBS
---
@@ -12,582 +11,169 @@ This guide will help you to customize and install OpenEBS.
## Prerequisites
-If this is your first time installing OpenEBS, make sure that your Kubernetes nodes meet the [required prerequisites](/user-guides/prerequisites). At a high level OpenEBS requires:
+If this is your first time installing OpenEBS Local Engine, make sure that your Kubernetes nodes meet the [required prerequisites](../user-guides/local-engine-user-guide/prerequisites.mdx).
-- Verify that you have the admin context. If you do not have admin permissions to your cluster, please check with your Kubernetes cluster administrator to help with installing OpenEBS or if you are the owner of the cluster, check out the [steps to create a new admin context](#set-cluster-admin-user-context) and use it for installing OpenEBS.
+For OpenEBS Replicated Engine, make sure that your Kubernetes nodes meet the [required prerequisites](../user-guides/replicated-engine-user-guide/prerequisites.md).
+
+At a high level OpenEBS requires:
+
+- Verify that you have the admin context. If you do not have admin permissions to your cluster, check with your Kubernetes cluster administrator to help with installing OpenEBS or if you are the owner of the cluster, check out the [steps to create a new admin context](#set-cluster-admin-user-context) and use it for installing OpenEBS.
- You have Kubernetes 1.18 version or higher.
- Each storage engine may have few additional requirements like having:
- - iSCSI initiator utils installed for Jiva and cStor volumes
- Depending on the managed Kubernetes platform like Rancher or MicroK8s - set up the right bind mounts
- Decide which of the devices on the nodes should be used by OpenEBS or if you need to create LVM Volume Groups or ZFS Pools
-- Join [OpenEBS community on Kubernetes slack](/introduction/commercial).
-
-## Installation through helm
+- Join [OpenEBS community on Kubernetes slack](../community.md).
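+
+A quick way to confirm that you have these cluster-level permissions is with `kubectl auth can-i` (a minimal sketch; all four checks should return `yes` for a cluster-admin context):
+
+```
+kubectl auth can-i 'create' 'namespace' -A
+kubectl auth can-i 'create' 'crd' -A
+kubectl auth can-i 'create' 'sa' -A
+kubectl auth can-i 'create' 'clusterrole' -A
+```
+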
-:::note
-With OpenEBS v3.4, the OpenEBS helm chart now supports installation of Mayastor v2.0 storage engine.
-:::
+## Installation via Helm
Verify helm is installed and helm repo is updated. You need helm 3.2 or more.
Setup helm repository
```
-helm repo add openebs https://openebs.github.io/charts
+helm repo add openebs https://openebs.github.io/openebs
helm repo update
```
OpenEBS provides several options that you can customize during install like:
- specifying the directory where hostpath volume data is stored or
-- specifying the nodes on which OpenEBS components should be deployed, and so forth.
+- specifying the nodes on which OpenEBS components should be deployed and so forth.
-The default OpenEBS helm chart will only install Local PV hostpath and Jiva data engines. Please refer to [OpenEBS helm chart documentation](https://github.com/openebs/charts/tree/master/charts/openebs) for full list of customizable options and using cStor and other flavors of OpenEBS data engines by setting the correct helm values.
+The default OpenEBS helm chart will install both the local engines and the replicated engine. Refer to the [OpenEBS helm chart documentation](https://github.com/openebs/charts/tree/master/charts/openebs) for the full list of customizable options and for enabling other flavors of OpenEBS data engines by setting the correct helm values.
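+
+For example, to review what can be customized and install with your own overrides, you can export the chart's default values into a file and pass an edited copy back to `helm install` (a minimal sketch; the file name is illustrative):
+
+```
+# Dump the chart's configurable values for review
+helm show values openebs/openebs > openebs-values.yaml
+# Edit openebs-values.yaml as needed, then install using the overrides
+helm install openebs openebs/openebs --namespace openebs --create-namespace -f openebs-values.yaml
+```
+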
Install OpenEBS helm chart with default values.
```
helm install openebs --namespace openebs openebs/openebs --create-namespace
```
-The above commands will install OpenEBS Jiva and Local PV components in `openebs` namespace and chart name as `openebs`. To install and enable other engines you can modified the above command as follows:
-
-- cStor
- ```
- helm install openebs --namespace openebs openebs/openebs --create-namespace --set cstor.enabled=true
- ```
-
-To view the chart
-```
-helm ls -n openebs
-```
-
-As a next step [verify](#verifying-openebs-installation) your installation and do the [post installation](#post-installation-considerations) steps.
-
-## Installation through kubectl
-
-OpenEBS provides a list of YAMLs that will allow you to easily customize and run OpenEBS in your Kubernetes cluster. For custom installation, [download](https://openebs.github.io/charts/openebs-operator.yaml) the **openebs-operator** YAML file, update the configurations and use the customized YAML for installation in the below `kubectl` command.
-
-To continue with default installation mode, use the following command to install OpenEBS. OpenEBS is installed in `openebs` namespace.
-
-```
-kubectl apply -f https://openebs.github.io/charts/openebs-operator.yaml
-```
-
-The above command installs Jiva and Local PV components. To install and enable other engines you will need to run additional command like:
-- cStor
- ```
- kubectl apply -f https://openebs.github.io/charts/cstor-operator.yaml
- ```
-- Local PV ZFS
- ```
- kubectl apply -f https://openebs.github.io/charts/zfs-operator.yaml
- ```
-- Local PV LVM
- ```
- kubectl apply -f https://openebs.github.io/charts/lvm-operator.yaml
- ```
-
-## Verifying OpenEBS installation
-
-
-**Verify pods:**
-
-List the pods in `` namespace
-
-```
- kubectl get pods -n openebs
-```
-
-In the successful installation of OpenEBS, you should see an example output like below.
-
-```shell hideCopy
-NAME READY STATUS RESTARTS AGE
-maya-apiserver-d77867956-mv9ls 1/1 Running 3 99s
-openebs-admission-server-7f565bcbb5-lp5sk 1/1 Running 0 95s
-openebs-localpv-provisioner-7bb98f549d-ljcc5 1/1 Running 0 94s
-openebs-ndm-dn422 1/1 Running 0 96s
-openebs-ndm-operator-84849677b7-rhfbk 1/1 Running 1 95s
-openebs-ndm-ptxss 1/1 Running 0 96s
-openebs-ndm-zpr2l 1/1 Running 0 96s
-openebs-provisioner-657486f6ff-pxdbc 1/1 Running 0 98s
-openebs-snapshot-operator-5bdcdc9b77-v7n4w 2/2 Running 0 97s
-```
-
-`openebs-ndm` is a daemon set, it should be running on all nodes or on the nodes that are selected through nodeSelector configuration.
-
-The control plane pods `openebs-provisioner`, `maya-apiserver` and `openebs-snapshot-operator` should be running. If you have configured nodeSelectors , check if they are scheduled on the appropriate nodes by listing the pods through `kubectl get pods -n openebs -o wide`
-
-**Verify StorageClasses:**
-
-List the storage classes to check if OpenEBS has installed with default StorageClasses.
-
-```
-kubectl get sc
-```
-
-In the successful installation, you should have the following StorageClasses are created.
-
-```shell hideCopy
-NAME PROVISIONER AGE
-openebs-device openebs.io/local 64s
-openebs-hostpath openebs.io/local 64s
-openebs-jiva-default openebs.io/provisioner-iscsi 64s
-openebs-snapshot-promoter volumesnapshot.external-storage.k8s.io/snapshot-promoter 64s
-```
-
-## Post-Installation considerations
-
-For testing your OpenEBS installation, you can use the below default storage classes
-
-- `openebs-jiva-default` for provisioning Jiva Volume (this uses `default` pool which means the data replicas are created in the /var/openebs/ directory of the Jiva replica pod)
-
-- `openebs-hostpath` for provisioning Local PV on hostpath.
-
-You can follow through the below user guides for each of the engines to use storage devices available on the nodes instead of the `/var/openebs` directory to save the data.
-- [cStor](/user-guides/cstor-csi)
-- [Jiva](/user-guides/jiva-guide)
-- [Local PV](/user-guides/localpv-hostpath)
-## Troubleshooting
+The above commands will install the OpenEBS LocalPV Hostpath, OpenEBS LocalPV LVM, OpenEBS LocalPV ZFS, and OpenEBS Replicated Engine components in the `openebs` namespace, with the chart name `openebs`.
-### Set cluster-admin user context
-
-For installation of OpenEBS, cluster-admin user context is a must. OpenEBS installs service accounts and custom resource definitions that are only allowed for cluster administrators.
-
-Use the `kubectl auth can-i` commands to verify that you have the cluster-admin context. You can use the following commands to verify if you have access:
-
-```
-kubectl auth can-i 'create' 'namespace' -A
-kubectl auth can-i 'create' 'crd' -A
-kubectl auth can-i 'create' 'sa' -A
-kubectl auth can-i 'create' 'clusterrole' -A
-```
-
-If there is no cluster-admin user context already present, create one and use it. Use the following command to create the new context.
-
-```
-kubectl config set-context NAME [--cluster=cluster_nickname] [--user=user_nickname] [--namespace=namespace]
-```
-
-Example:
-
-```
-kubectl config set-context admin-ctx --cluster=gke_strong-eon-153112_us-central1-a_rocket-test2 --user=cluster-admin
-```
-
-Set the existing cluster-admin user context or the newly created context by using the following command.
-
-Example:
+:::note
+If you do not want to install OpenEBS Replicated Engine, use the following command:
```
-kubectl config use-context admin-ctx
+helm install openebs --namespace openebs openebs/openebs --set mayastor.enabled=false --create-namespace
```
-## See Also:
-
-[OpenEBS Architecture](/concepts/architecture) [OpenEBS Examples](/introduction/usecases) [Troubleshooting](/troubleshooting)
-
-(Below listed are the Mayastor Contents - For Internal reference)
-# Scope
-
-This quickstart guide describes the actions necessary to perform a basic installation of Mayastor on an existing Kubernetes cluster, sufficient for evaluation purposes. It assumes that the target cluster will pull the Mayastor container images directly from OpenEBS public container repositories. Where preferred, it is also possible to [build Mayastor locally from source](https://github.com/openebs/Mayastor/blob/develop/doc/build.md) and deploy the resultant images but this is outside of the scope of this guide.
-
-Deploying and operating Mayastor in production contexts requires a foundational knowledge of Mayastor internals and best practices, found elsewhere within this documentation.
-
-# Preparing the Cluster
-
-### Verify / Enable Huge Page Support
-
-_2MiB-sized_ Huge Pages must be supported and enabled on the mayastor storage nodes. A minimum number of 1024 such pages \(i.e. 2GiB total\) must be available _exclusively_ to the Mayastor pod on each node, which should be verified thus:
-
-```text
-grep HugePages /proc/meminfo
-
-AnonHugePages: 0 kB
-ShmemHugePages: 0 kB
-HugePages_Total: 1024
-HugePages_Free: 671
-HugePages_Rsvd: 0
-HugePages_Surp: 0
+To view the installed chart, use the following command:
+**Command**
```
-
-If fewer than 1024 pages are available then the page count should be reconfigured on the worker node as required, accounting for any other workloads which may be scheduled on the same node and which also require them. For example:
-
-```text
-echo 1024 | sudo tee /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
+helm ls -n openebs
```
-
-This change should also be made persistent across reboots by adding the required value to the file`/etc/sysctl.conf` like so:
-
-```text
-echo vm.nr_hugepages = 1024 | sudo tee -a /etc/sysctl.conf
+**Output**
```
-
-{% hint style="warning" %}
-If you modify the Huge Page configuration of a node, you _MUST_ either restart kubelet or reboot the node. Mayastor will not deploy correctly if the available Huge Page count as reported by the node's kubelet instance does not satisfy the minimum requirements.
-{% endhint %}
-
-### Label Mayastor Node Candidates
-
-All worker nodes which will have Mayastor pods running on them must be labelled with the OpenEBS engine type "mayastor". This label will be used as a node selector by the Mayastor Daemonset, which is deployed as a part of the Mayastor data plane components installation. To add this label to a node, execute:
-
-```text
-kubectl label node openebs.io/engine=mayastor
+NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
+openebs openebs 1 2024-03-25 09:13:00.903321318 +0000 UTC deployed openebs-4.0.0 4.0.0
```
+:::
-# Deploy Mayastor
-
-## Overview
-
-{% hint style="note" %}
-Before deploying and using Mayastor please consult the [Known Issues](../troubleshooting/troubleshooting-replicated-engine.md#known-issues) section of this guide.
-{% endhint %}
-
-The steps and commands which follow are intended only for use in conjunction with Mayastor version(s) 2.1.x and above.
+As a next step, [verify](#verifying-openebs-installation) your installation and complete the [post-installation](#post-installation-considerations) steps.
-## Installation via helm
+## Verifying OpenEBS Installation
-{% hint style="info" %}
-The Mayastor Helm chart now includes the Dynamic Local Persistent Volume (LocalPV) provisioner as the default option for provisioning storage to etcd and Loki. This simplifies storage setup by utilizing local volumes within your Kubernetes cluster.
-For etcd, the chart uses the `mayastor-etcd-localpv` storage class, and for Loki, it utilizes the `mayastor-loki-localpv` storage class. These storage classes are bundled with the Mayastor chart, ensuring that your etcd and Loki instances are configured to use openEbs localPV volumes efficiently.
-`/var/local/{{ .Release.Name }}` paths should be persistent accross reboots.
-{% endhint %}
+### Verify Pods
-1. Add the OpenEBS Mayastor Helm repository.
-{% tabs %}
-{% tab title="Command" %}
-```text
-helm repo add mayastor https://openebs.github.io/mayastor-extensions/
-```
-{% endtab %}
-{% tab title="Output" %}
-```text
-"mayastor" has been added to your repositories
-```
-{% endtab %}
-{% endtabs %}
+#### Default Installation
+List the pods in the `openebs` namespace:
-Run the following command to discover all the _stable versions_ of the added chart repository:
-
-{% tabs %}
-{% tab title="Command" %}
-```text
-helm search repo mayastor --versions
```
-{% endtab %}
-{% tab title="Sample Output" %}
-```text
- NAME CHART VERSION APP VERSION DESCRIPTION
-mayastor/mayastor 2.4.0 2.4.0 Mayastor Helm chart for Kubernetes
+ kubectl get pods -n openebs
```
-{% endtab %}
-{% endtabs %}
-
-{% hint style="info" %}
-To discover all the versions (including unstable versions), execute:
-`helm search repo mayastor --devel --versions`
-{% endhint %}
+If OpenEBS has been installed successfully, you should see an output similar to the following:
+
+```shell hideCopy
+NAME READY STATUS RESTARTS AGE
+openebs-agent-core-674f784df5-7szbm 2/2 Running 0 11m
+openebs-agent-ha-node-nnkmv 1/1 Running 0 11m
+openebs-agent-ha-node-pvcrr 1/1 Running 0 11m
+openebs-agent-ha-node-rqkkk 1/1 Running 0 11m
+openebs-api-rest-79556897c8-b824j 1/1 Running 0 11m
+openebs-csi-controller-b5c47d49-5t5zd 6/6 Running 0 11m
+openebs-csi-node-flq49 2/2 Running 0 11m
+openebs-csi-node-k8d7h 2/2 Running 0 11m
+openebs-csi-node-v7jfh 2/2 Running 0 11m
+openebs-etcd-0 1/1 Running 0 11m
+openebs-etcd-1 1/1 Running 0 11m
+openebs-etcd-2 1/1 Running 0 11m
+openebs-io-engine-7t6tf 2/2 Running 0 11m
+openebs-io-engine-9df6r 2/2 Running 0 11m
+openebs-io-engine-rqph4 2/2 Running 0 11m
+openebs-localpv-provisioner-6ddf7c7978-4fkvs 1/1 Running 0 11m
+openebs-loki-0 1/1 Running 0 11m
+openebs-lvm-localpv-controller-7b6d6b4665-fk78q 5/5 Running 0 11m
+openebs-lvm-localpv-node-mcch4 2/2 Running 0 11m
+openebs-lvm-localpv-node-pdt88 2/2 Running 0 11m
+openebs-lvm-localpv-node-r9jn2 2/2 Running 0 11m
+openebs-nats-0 3/3 Running 0 11m
+openebs-nats-1 3/3 Running 0 11m
+openebs-nats-2 3/3 Running 0 11m
+openebs-obs-callhome-854bc967-5f879 2/2 Running 0 11m
+openebs-operator-diskpool-5586b65c-cwpr8 1/1 Running 0 11m
+openebs-promtail-2vrzk 1/1 Running 0 11m
+openebs-promtail-mwxk8 1/1 Running 0 11m
+openebs-promtail-w7b8k 1/1 Running 0 11m
+openebs-zfs-localpv-controller-f78f7467c-blr7q 5/5 Running 0 11m
+openebs-zfs-localpv-node-h46m5 2/2 Running 0 11m
+openebs-zfs-localpv-node-svfgq 2/2 Running 0 11m
+openebs-zfs-localpv-node-wm9ks 2/2 Running 0 11m
+```
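+
+Rather than scanning the list manually, you can also wait for every pod in the namespace to report `Ready` (a minimal sketch; adjust the timeout to your cluster):
+
+```
+kubectl wait --for=condition=Ready pods --all -n openebs --timeout=300s
+```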
+
+#### Installation with Replicated Engine Disabled
-3. Run the following command to install Mayastor _version 2.4.
-{% tabs %}
-{% tab title="Command" %}
-```text
-helm install mayastor mayastor/mayastor -n mayastor --create-namespace --version 2.4.0
-```
-{% endtab %}
-{% tab title="Sample Output" %}
-```text
-NAME: mayastor
-LAST DEPLOYED: Thu Sep 22 18:59:56 2022
-NAMESPACE: mayastor
-STATUS: deployed
-REVISION: 1
-NOTES:
-OpenEBS Mayastor has been installed. Check its status by running:
-$ kubectl get pods -n mayastor
-
-For more information or to view the documentation, visit our website at https://openebs.io.
-```
-{% endtab %}
-{% endtabs %}
-
-Verify the status of the pods by running the command:
+List the pods in the `openebs` namespace:
-{% tabs %}
-{% tab title="Command" %}
-```text
-kubectl get pods -n mayastor
```
-{% endtab %}
-{% tab title="Sample Output for a three Mayastor node cluster" %}
-```text
-NAME READY STATUS RESTARTS AGE
-mayastor-agent-core-6c485944f5-c65q6 2/2 Running 0 2m13s
-mayastor-agent-ha-node-42tnm 1/1 Running 0 2m14s
-mayastor-agent-ha-node-45srp 1/1 Running 0 2m14s
-mayastor-agent-ha-node-tzz9x 1/1 Running 0 2m14s
-mayastor-api-rest-5c79485686-7qg5p 1/1 Running 0 2m13s
-mayastor-csi-controller-65d6bc946-ldnfb 3/3 Running 0 2m13s
-mayastor-csi-node-f4fgd 2/2 Running 0 2m13s
-mayastor-csi-node-ls9m4 2/2 Running 0 2m13s
-mayastor-csi-node-xtcfc 2/2 Running 0 2m13s
-mayastor-etcd-0 1/1 Running 0 2m13s
-mayastor-etcd-1 1/1 Running 0 2m13s
-mayastor-etcd-2 1/1 Running 0 2m13s
-mayastor-io-engine-f2wm6 2/2 Running 0 2m13s
-mayastor-io-engine-kqxs9 2/2 Running 0 2m13s
-mayastor-io-engine-m44ms 2/2 Running 0 2m13s
-mayastor-loki-0 1/1 Running 0 2m13s
-mayastor-obs-callhome-5f47c6d78b-fzzd7 1/1 Running 0 2m13s
-mayastor-operator-diskpool-b64b9b7bb-vrjl6 1/1 Running 0 2m13s
-mayastor-promtail-cglxr 1/1 Running 0 2m14s
-mayastor-promtail-jc2mz 1/1 Running 0 2m14s
-mayastor-promtail-mr8nf 1/1 Running 0 2m14s
+ kubectl get pods -n openebs
```
-{% endtab %}
-{% endtabs %}
-
-# Configure Mayastor
-
-## Create DiskPool\(s\)
-
-
-### What is a DiskPool?
-
-When a node allocates storage capacity for a replica of a persistent volume (PV) it does so from a DiskPool. Each node may create and manage zero, one, or more such pools. The ownership of a pool by a node is exclusive. A pool can manage only one block device, which constitutes the entire data persistence layer for that pool and thus defines its maximum storage capacity.
-
-A pool is defined declaratively, through the creation of a corresponding `DiskPool` custom resource on the cluster. The DiskPool must be created in the same namespace where Mayastor has been deployed. User configurable parameters of this resource type include a unique name for the pool, the node name on which it will be hosted and a reference to a disk device which is accessible from that node. The pool definition requires the reference to its member block device to adhere to a discrete range of schemas, each associated with a specific access mechanism/transport/ or device type.
-{% hint style="info" %}
-Mayastor versions before 2.0.1 had an upper limit on the number of retry attempts in the case of failure in `create events` in the DSP operator. With this release, the upper limit has been removed, which ensures that the DiskPool operator indefinitely reconciles with the CR.
-{% endhint %}
-
-#### Permissible Schemes for `spec.disks` under DiskPool CR
-
-{% hint style="info" %}
-It is highly recommended to specify the disk using a unique device link that remains unaltered across node reboots. Examples of such device links are: by-path or by-id.
-
-Easy way to retrieve device link for a given node:
-`kubectl mayastor get block-devices worker`
+If OpenEBS has been installed successfully, you should see an output similar to the following:
```
-DEVNAME DEVTYPE SIZE AVAILABLE MODEL DEVPATH MAJOR MINOR DEVLINKS
-/dev/nvme0n1 disk 894.3GiB yes Dell DC NVMe PE8010 RI U.2 960GB /devices/pci0000:30/0000:30:02.0/0000:31:00.0/nvme/nvme0/nvme0n1 259 0 "/dev/disk/by-id/nvme-eui.ace42e00164f0290", "/dev/disk/by-path/pci-0000:31:00.0-nvme-1", "/dev/disk/by-dname/nvme0n1", "/dev/disk/by-id/nvme-Dell_DC_NVMe_PE8010_RI_U.2_960GB_SDA9N7266I110A814"
+NAME READY STATUS RESTARTS AGE
+openebs-localpv-provisioner-6ddf7c7978-jsstg 1/1 Running 0 3m9s
+openebs-lvm-localpv-controller-7b6d6b4665-wfw64 5/5 Running 0 3m9s
+openebs-lvm-localpv-node-62lnq 2/2 Running 0 3m9s
+openebs-lvm-localpv-node-lhndx 2/2 Running 0 3m9s
+openebs-lvm-localpv-node-tlcqv 2/2 Running 0 3m9s
+openebs-zfs-localpv-controller-f78f7467c-k7ldb 5/5 Running 0 3m9s
+openebs-zfs-localpv-node-5mwbz 2/2 Running 0 3m9s
+openebs-zfs-localpv-node-g45ft 2/2 Running 0 3m9s
+openebs-zfs-localpv-node-g77g6 2/2 Running 0 3m9s
```
-Usage of the device name (for example, /dev/sdx) is not advised, as it may change if the node reboots, which might cause data corruption.
-{% endhint %}
-
-| Type | Format | Example |
-| :--- | :--- | :--- |
-| Disk(non PCI) with disk-by-guid reference (Best Practice) | Device File | aio:///dev/disk/by-id/ OR uring:///dev/disk/by-id/ |
-| Asynchronous Disk\(AIO\) | Device File | /dev/sdx |
-| Asynchronous Disk I/O \(AIO\) | Device File | aio:///dev/sdx |
-| io\_uring | Device File | uring:///dev/sdx |
-
-
-Once a node has created a pool it is assumed that it henceforth has exclusive use of the associated block device; it should not be partitioned, formatted, or shared with another application or process. Any pre-existing data on the device will be destroyed.
-
-{% hint style="warning" %}
-A RAM drive isn't suitable for use in production as it uses volatile memory for backing the data. The memory for this disk emulation is allocated from the hugepages pool. Make sure to allocate sufficient additional hugepages resource on any storage nodes which will provide this type of storage.
-{% endhint %}
-
-### Configure Pools for Use with this Quickstart
-
-To get started, it is necessary to create and host at least one pool on one of the nodes in the cluster. The number of pools available limits the extent to which the synchronous N-way mirroring (replication) of PVs can be configured; the number of pools configured should be equal to or greater than the desired maximum replication factor of the PVs to be created. Also, while placing data replicas ensure that appropriate redundancy is provided. Mayastor's control plane will avoid placing more than one replica of a volume on the same node. For example, the minimum viable configuration for a Mayastor deployment which is intended to implement 3-way mirrored PVs must have three nodes, each having one DiskPool, with each of those pools having one unique block device allocated to it.
+### Verify StorageClasses
-Using one or more the following examples as templates, create the required type and number of pools.
+List the StorageClasses to check whether OpenEBS has been installed with the default StorageClasses.
-{% tabs %}
-{% tab title="Example DiskPool definition" %}
-```text
-cat <"]
-EOF
```
-{% endtab %}
-
-{% tab title="YAML" %}
-```text
-apiVersion: "openebs.io/v1beta1"
-kind: DiskPool
-metadata:
- name: INSERT_POOL_NAME_HERE
- namespace: mayastor
-spec:
- node: INSERT_WORKERNODE_HOSTNAME_HERE
- disks: ["INSERT_DEVICE_URI_HERE"]
+kubectl get sc
```
-{% endtab %}
-{% endtabs %}
-
-{% hint style="info" %}
-When using the examples given as guides to creating your own pools, remember to replace the values for the fields "metadata.name", "spec.node" and "spec.disks" as appropriate to your cluster's intended configuration. Note that whilst the "disks" parameter accepts an array of values, the current version of Mayastor supports only one disk device per pool.
-{% endhint %}
-
-{% hint style="note" %}
-
-Existing schemas in Custom Resource (CR) definitions (in older versions) will be updated from v1alpha1 to v1beta1 after upgrading to Mayastor 2.4 and above. To resolve errors encountered pertaining to the upgrade, click [here](../troubleshooting/troubleshooting-replicated-engine.md)
-{% endhint %}
+In a successful installation, you should see the following StorageClasses created:
-### Verify Pool Creation and Status
-
-The status of DiskPools may be determined by reference to their cluster CRs. Available, healthy pools should report their State as `online`. Verify that the expected number of pools have been created and that they are online.
-
-{% tabs %}
-{% tab title="Command" %}
-```text
-kubectl get dsp -n mayastor
-```
-{% endtab %}
-
-{% tab title="Example Output" %}
-```text
-NAME NODE STATE POOL_STATUS CAPACITY USED AVAILABLE
-pool-on-node-1 node-1-14944 Created Online 10724835328 0 10724835328
-pool-on-node-2 node-2-14944 Created Online 10724835328 0 10724835328
-pool-on-node-3 node-3-14944 Created Online 10724835328 0 10724835328
```
-{% endtab %}
-{% endtabs %}
-
-
-----------
-
-
-## Create Mayastor StorageClass\(s\)
-
-Mayastor dynamically provisions PersistentVolumes \(PVs\) based on StorageClass definitions created by the user. Parameters of the definition are used to set the characteristics and behaviour of its associated PVs. For a detailed description of these parameters see [storage class parameter description](https://mayastor.gitbook.io/introduction/reference/storage-class-parameters). Most importantly StorageClass definition is used to control the level of data protection afforded to it \(that is, the number of synchronous data replicas which are maintained, for purposes of redundancy\). It is possible to create any number of StorageClass definitions, spanning all permitted parameter permutations.
-
-We illustrate this quickstart guide with two examples of possible use cases; one which offers no data redundancy \(i.e. a single data replica\), and another having three data replicas.
-{% hint style="info" %}
-Both the example YAMLs given below have [thin provisioning](https://mayastor.gitbook.io/introduction/quickstart/configure-mayastor/storage-class-parameters#thin) enabled. You can modify these as required to match your own desired test cases, within the limitations of the cluster under test.
-{% endhint %}
-
-{% tabs %}
-{% tab title="Command \(1 replica example\)" %}
-```text
-cat < 10 Gigabytes cannot be created, as Mayastor currently does not support pool expansion.
-3. The replicas for a given volume can be either all thick or all thin. Same volume cannot have a combination of thick and thin replicas
-{% endhint %}
-
-
-## "stsAffinityGroup"
-
- `stsAffinityGroup` represents a collection of volumes that belong to instances of Kubernetes StatefulSet. When a StatefulSet is deployed, each instance within the StatefulSet creates its own individual volume, which collectively forms the `stsAffinityGroup`. Each volume within the `stsAffinityGroup` corresponds to a pod of the StatefulSet.
-
-This feature enforces the following rules to ensure the proper placement and distribution of replicas and targets so that there isn't any single point of failure affecting multiple instances of StatefulSet.
-
-1. Anti-Affinity among single-replica volumes :
- This rule ensures that replicas of different volumes are distributed in such a way that there is no single point of failure. By avoiding the colocation of replicas from different volumes on the same node.
-
-2. Anti-Affinity among multi-replica volumes :
-
-If the affinity group volumes have multiple replicas, they already have some level of redundancy. This feature ensures that in such cases, the replicas are distributed optimally for the stsAffinityGroup volumes.
-
-
-3. Anti-affinity among targets :
-
-The [High Availability](https://mayastor.gitbook.io/introduction/advanced-operations/ha) feature ensures that there is no single point of failure for the targets.
-The `stsAffinityGroup` ensures that in such cases, the targets are distributed optimally for the stsAffinityGroup volumes.
+You can follow the user guides below for each of the engines to use the storage devices available on the nodes, instead of the `/var/openebs` directory, to save the data.
+- [Local Engine User Guide](../user-guides/local-engine-user-guide/)
+- [Replicated Engine User Guide](../user-guides/replicated-engine-user-guide/)
-By default, the `stsAffinityGroup` feature is disabled. To enable it, modify the storage class YAML by setting the `parameters.stsAffinityGroup` parameter to true.
+## See Also
-## "cloneFsIdAsVolumeId"
+[OpenEBS Architecture](../concepts/architecture.md)
-`cloneFsIdAsVolumeId` is a setting for volume clones/restores with two options: `true` and `false`. By default, it is set to `false`.
-- When set to `true`, the created clone/restore's filesystem `uuid` will be set to the restore volume's `uuid`. This is important because some file systems, like XFS, do not allow duplicate filesystem `uuid` on the same machine by default.
-- When set to `false`, the created clone/restore's filesystem `uuid` will be same as the orignal volume `uuid`, but it will be mounted using the `nouuid` flag to bypass duplicate `uuid` validation.
+[OpenEBS Use Cases and Examples](../introduction-to-openebs/use-cases-and-examples.mdx)
-{% hint style="note" %}
-This option needs to be set to true when using a `btrfs` filesystem, if the application using the restored volume is scheduled on the same node where the original volume is mounted, concurrently.
-{% endhint %}
+[Troubleshooting](../troubleshooting/)
\ No newline at end of file
diff --git a/docs/main/quickstart-guide/quickstart.md b/docs/main/quickstart-guide/quickstart.md
index 3a90a5fa5..92f4a535c 100644
--- a/docs/main/quickstart-guide/quickstart.md
+++ b/docs/main/quickstart-guide/quickstart.md
@@ -15,7 +15,7 @@ description: This guide will help you to setup OpenEBS and use OpenEBS Volumes t
With OpenEBS v3.4, the OpenEBS helm chart now supports installation of Mayastor v2.0 storage engine.
:::
-This guide will help you to setup OpenEBS and use OpenEBS Volumes to run your Kubernetes Stateful Workloads. If you are new to running Stateful workloads in Kubernetes, you will need to familiarize yourself with [Kubernetes Storage Concepts](/concepts/basics).
+This guide will help you to set up OpenEBS and use OpenEBS Volumes to run your Kubernetes Stateful Workloads. If you are new to running Stateful workloads in Kubernetes, you will need to familiarize yourself with [Kubernetes Storage Concepts](../concepts/basics.md).
In most cases, the following steps is all you need to install OpenEBS. You can read through the rest of the document to understand the choices you have and optimize OpenEBS for your Kubernetes cluster.
@@ -27,11 +27,6 @@ In most cases, the following steps is all you need to install OpenEBS. You can r
helm repo update
helm install openebs --namespace openebs openebs/openebs --create-namespace
```
-
- Install using kubectl
- ```
- kubectl apply -f https://openebs.github.io/charts/openebs-operator.yaml
- ```
:::
## How to setup and use OpenEBS?
@@ -44,32 +39,32 @@ The OpenEBS workflow fits nicely into the reconcilation pattern introduced by Ku
### 1. Kubernetes Cluster Design
-As a Kubernetes cluster administrator, you will have to work with your Platform or Infrastructure teams on the composition of the Kubernetes worker nodes like - RAM, CPU, Network and the storage devices attached to the worker nodes. The [resources available to the Kubernetes nodes](/concepts/casengines#node-capabilities) determine what OpenEBS engines to use for your stateful workloads.
+As a Kubernetes cluster administrator, you will have to work with your Platform or Infrastructure teams on the composition of the Kubernetes worker nodes, such as RAM, CPU, network, and the storage devices attached to the worker nodes. The [resources available to the Kubernetes nodes](../concepts/data-engines/data-engines.md#node-capabilities) determine which OpenEBS engines to use for your stateful workloads.
-As a Kubernetes cluster administrator or Platform SREs you will have to decide which deployment strategy works best for you - either use an hyperconverged mode where Stateful applications and storage volumes are co-located or run Stateful applications and storage on different pools of nodes.
+As a Kubernetes cluster administrator or Platform SRE, you will have to decide which deployment strategy works best for you: either use a hyperconverged mode where Stateful applications and storage volumes are co-located, or run Stateful applications and storage on different pools of nodes.
For installing OpenEBS, you Kubernetes cluster should meet the following:
- Kubernetes 1.18 or newer is recommended.
- Based on the selected data engine, the nodes should be prepared with additional packages like:
- - Installing the ext4, xfs, nfs, lvm, zfs or iscsi, nvme packages.
+ - Installing the ext4, xfs, nfs, lvm, zfs, nvme packages (an example installation command is shown after this list).
- Prepare the devices for use by data engines like - making sure there are no the filesystem installed or by creating an LVM volume group or ZFS Pool or partition the drives if required.
- Based on whether you are using a upstream Kubernetes cluster or using a managed Kubernetes cluster like AKS, Rancher, OpenShift, GKE, there may be additional steps required.
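+
+For example, on Ubuntu nodes the packages above could be installed as follows (a hedged sketch; package names differ across distributions, and only the packages required by your chosen data engine are needed):
+
+```
+# Example for Ubuntu; adjust package names for your distribution
+sudo apt-get update
+sudo apt-get install -y xfsprogs nfs-common lvm2 zfsutils-linux nvme-cli
+```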
-Please read through the relevant section of the [pre-requisites](/user-guides/prerequisites) for your Kubernetes platform, Operating System of the worker nodes.
+Read through the relevant section of the [pre-requisites](../user-guides/local-engine-user-guide/prerequisites.mdx) for your Kubernetes platform and the operating system of the worker nodes.
-- [Ubuntu](/user-guides/prerequisites#ubuntu)
-- [RHEL](/user-guides/prerequisites#rhel)
-- [CentOS](/user-guides/prerequisites#centos)
-- [OpenShift](/user-guides/prerequisites#openshift)
-- [Rancher](/user-guides/prerequisites#rancher)
-- [ICP](/user-guides/prerequisites#icp)
-- [EKS](/user-guides/prerequisites#eks)
-- [GKE](/user-guides/prerequisites#gke)
-- [AKS](/user-guides/prerequisites#aks)
-- [Digital Ocean](/user-guides/prerequisites#do)
-- [Konvoy](/user-guides/prerequisites#konvoy)
+- [Ubuntu](../user-guides/local-engine-user-guide/prerequisites.mdx)
+- [RHEL](../user-guides/local-engine-user-guide/prerequisites.mdx)
+- [CentOS](../user-guides/local-engine-user-guide/prerequisites.mdx)
+- [OpenShift](../user-guides/local-engine-user-guide/prerequisites.mdx)
+- [Rancher](../user-guides/local-engine-user-guide/prerequisites.mdx)
+- [ICP](../user-guides/local-engine-user-guide/prerequisites.mdx)
+- [EKS](../user-guides/local-engine-user-guide/prerequisites.mdx)
+- [GKE](../user-guides/local-engine-user-guide/prerequisites.mdx)
+- [AKS](../user-guides/local-engine-user-guide/prerequisites.mdx)
+- [Digital Ocean](../user-guides/local-engine-user-guide/prerequisites.mdx)
+- [Konvoy](../user-guides/local-engine-user-guide/prerequisites.mdx)
-If your platform is missing in the above list, please [raise an issue on the docs](https://github.com/openebs/openebs/issues/new/choose) or reach us on the [community slack](/introduction/community) to let us know.
+If your platform is missing in the above list, [raise an issue on the docs](https://github.com/openebs/openebs/issues/new/choose) or reach us on the [community slack](../community.md) to let us know.
### 2. Install OpenEBS and Setup Storage Classes
@@ -77,41 +72,34 @@ OpenEBS is Kubernetes native, which makes it possible to install OpenEBS into yo
You can install OpenEBS only using Kubernetes admin context as you will require cluster level permissions to create Storage Classes.
-OpenEBS offers different modes of [installation](/user-guides/installation). The most popular ones are using:
-- [OpenEBS Helm chart](/user-guides/installation#installation-through-helm)
-- [OpenEBS YAML(s) via `kubectl`](/user-guides/installation#installation-through-kubectl)
+OpenEBS offers different modes of [installation](../quickstart-guide/installation.md). The most popular one is using the [OpenEBS Helm chart](/user-guides/installation#installation-through-helm).
-OpenEBS will install a couple of default storage classes that you an use for Local Volumes (`openebs-hostpath`) and Replicated Volumes (`openebs-hostpath`). The data of the volumes created by these default storage classes will be saved under `/var/openebs`.
+OpenEBS will install a couple of default Storage Classes that you can use for Local Volumes (`openebs-hostpath`) and Replicated Volumes. The data of the volumes created by these default Storage Classes will be saved under `/var/openebs`.
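+
+For example, a workload can consume the default `openebs-hostpath` class with an ordinary PersistentVolumeClaim (a minimal sketch; the claim name and size are illustrative):
+
+```
+kubectl apply -f - <<EOF
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: demo-local-hostpath-pvc   # illustrative name
+spec:
+  storageClassName: openebs-hostpath
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 5G
+EOF
+```
+
+With the hostpath class, the claim typically stays `Pending` until a Pod that uses it is scheduled, because volume binding waits for the first consumer.
+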
-As a Platform SRE / Cluster Administrator, you can customize several things about OpenEBS installer to suite your specific environment and create the setup the required Storage Classes. You can jump to the relevant sections based on your choice of [data engines](/docs/concepts/casengines#data-engine-capabilities):
+As a Platform SRE / Cluster Administrator, you can customize several things about the OpenEBS installer to suit your specific environment and set up the required Storage Classes. You can jump to the relevant sections based on your choice of [data engines](../concepts/data-engines/data-engines.md):
- [Local PV hostpath](/user-guides/localpv-hostpath)
-- [Local PV device](/user-guides/localpv-device)
- [Local PV ZFS](https://github.com/openebs/zfs-localpv)
- [Local PV LVM](https://github.com/openebs/lvm-localpv)
-- [Local PV Rawfile](https://github.com/openebs/rawfile-localpv)
-- [Replicated PV Jiva](https://github.com/openebs/jiva-operator)
-- [Replicated PV cStor](https://github.com/openebs/cstor-operators/blob/master/docs/quick.md)
-- [Replicated PV Mayastor](https://mayastor.gitbook.io/introduction/)
+- [Replicated Engine](../user-guides/replicated-engine-user-guide/)
### 3. Deploy Stateful Workloads
-The application developers will launch their application (stateful workloads) that will in turn create Persistent Volume Claims for requesting the Storage or Volumes for their pods. The Platform teams can provide templates for the applications with associated PVCs or application developers can select from the list of storage classes available for them.
+The application developers will launch their applications (stateful workloads) that will in turn create Persistent Volume Claims for requesting the Storage or Volumes for their pods. The Platform teams can provide templates for the applications with associated PVCs, or application developers can select from the list of Storage Classes available to them.
As an application developer all you have to do is substitute the `StorageClass` in your PVCs with the OpenEBS Storage Classes available in your Kubernetes cluster.
**Here are examples of some applications using OpenEBS:**
-- [MySQL](/stateful-applications/mysql)
-- [PostgreSQL](/stateful-applications/postgres)
-- [Percona](/stateful-applications/percona)
-- [Redis](/stateful-applications/redis)
-- [MongoDB](/stateful-applications/mongodb)
-- [Cassandra](/stateful-applications/cassandra)
-- [Prometheus](/stateful-applications/prometheus)
-- [Elastic](/stateful-applications/elasticsearch)
-- [Minio](/stateful-applications/minio)
-- [Wordpress using NFS](/concepts/rwm)
+- PostgreSQL
+- Percona
+- Redis
+- MongoDB
+- Cassandra
+- Prometheus
+- Elastic
+- MinIO
+- Wordpress using NFS
### 4. Dynamic Persistent Volume Provisioning
@@ -119,12 +107,14 @@ The Kubernetes CSI (provisioning layer) will intercept the requests for the Pers
OpenEBS control plane will then process the request and create the Persistent Volumes using the specified local or replicated engines. The data engine services like target and replica are deployed as Kubernetes applications as well. The containers provide storage for the containers. The new containers launched for serving the applications will be available in the `openebs` namespace.
-With the magic of OpenEBS and Kubernetes, the volumes should be provisioned, pods scheduled and application ready to serve. For this magic to happen, the prerequisites should be met. Check out our [troubleshooting section](/troubleshooting/) for some of the common errors that users run into due to setup issues.
+With the magic of OpenEBS and Kubernetes, the volumes should be provisioned, pods scheduled, and applications ready to serve. For this magic to happen, the prerequisites must be met.
+
+Check out our [troubleshooting section](../troubleshooting/) for some of the common errors that users run into due to setup issues.
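+
+If a volume does not come up as expected, the events on the PersistentVolumeClaim are usually the quickest pointer to what went wrong (a minimal sketch; substitute your claim's name and namespace):
+
+```
+kubectl get pvc -n <namespace>
+kubectl describe pvc <pvc-name> -n <namespace>
+```
+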
-### 5. Managing the Life cycle of OpenEBS components
+### 5. Managing the Life Cycle of OpenEBS Components
-Once the workloads are up and running, the platform or the operations team can observe the system using the cloud native tools like Prometheus, Grafana and so forth. The operational tasks are a shared responsibility across the teams:
+Once the workloads are up and running, the platform or the operations team can observe the system using the cloud native tools like Prometheus, Grafana, and so forth. The operational tasks are a shared responsibility across the teams:
* Application teams can watch out for the capacity and performance and tune the PVCs accordingly.
-* Platform or Cluster teams can check for the utilization and performance of the storage per node and decide on expansion and spreading out of the data engines
+* Platform or Cluster teams can check for the utilization and performance of the storage per node and decide on expansion and spreading out of the Data Engines.
* Infrastructure team will be responsible for planning the expansion or optimizations based on the utilization of the resources.
diff --git a/docs/main/releases.md b/docs/main/releases.md
index 751002017..4aa6820cb 100644
--- a/docs/main/releases.md
+++ b/docs/main/releases.md
@@ -1,6 +1,6 @@
---
id: releases
-title: Releases
+title: OpenEBS Releases
keywords:
- OpenEBS releases
description: This page contains list of supported OpenEBS releases.
diff --git a/docs/main/troubleshooting/cstor.md b/docs/main/troubleshooting/cstor.md
deleted file mode 100644
index 7090a86ce..000000000
--- a/docs/main/troubleshooting/cstor.md
+++ /dev/null
@@ -1,521 +0,0 @@
----
-id: cstor
-title: Troubleshooting OpenEBS - cStor
-keywords:
- - OpenEBS
- - cStor
- - cStor troubleshooting
-description: This page contains a list of cStor related troubleshooting information.
----
-
-## General guidelines for troubleshooting
-
-- Contact [OpenEBS Community](/docs/introduction/community) for support.
-- Search for similar issues added in this troubleshooting section.
-- Search for any reported issues on [StackOverflow under OpenEBS tag](https://stackoverflow.com/questions/tagged/openebs)
-
-[One of the cStorVolumeReplica(CVR) will have its status as `Invalid` after corresponding pool pod gets recreated](#CVR-showing-status-as-invalid-after-poolpod-gets-recreated)
-
-[cStor volume become read only state](#cstor-volume-read-only)
-
-[cStor pools, volumes are offline and pool manager pods are stuck in pending state](#pools-volume-offline)
-
-[Pool Operation Hung Due to Bad Disk](#pool-operation-hung)
-
-[Volume Migration when the underlying cStor pool is lost](#volume-migration-pool-lost)
-
-### One of the cStorVolumeReplica(CVR) will have its status as `Invalid` after corresponding pool pod gets recreated {#CVR-showing-status-as-invalid-after-poolpod-gets-recreated}
-
-When User delete a cStor pool pod, there are high chances for that corresponding pool-related CVR's can goes into `Invalid` state.
-Following is a sample output of `kubectl get cvr -n openebs`
-
-```shell hideCopy
-NAME USED ALLOCATED STATUS AGE
-pvc-738f76c0-b553-11e9-858e-54e1ad4a9dd4-cstor-sparse-p8yp 6K 6K Invalid 6m
-```
-
-**Troubleshooting**
-
-Sample logs of `cstor-pool-mgmt` when issue happens:
-
-```shell hideCopy
-rm /usr/local/bin/zrepl
-exec /usr/local/bin/cstor-pool-mgmt start
-I0802 18:35:13.814623 6 common.go:205] CStorPool CRD found
-I0802 18:35:13.822382 6 common.go:223] CStorVolumeReplica CRD found
-I0802 18:35:13.824957 6 new_pool_controller.go:103] Setting up event handlers
-I0802 18:35:13.827058 6 new_pool_controller.go:105] Setting up event handlers for CSP
-I0802 18:35:13.829547 6 new_replica_controller.go:118] will set up informer event handlers for cvr
-I0802 18:35:13.830341 6 new_backup_controller.go:104] Setting up event handlers for backup
-I0802 18:35:13.837775 6 new_restore_controller.go:103] Setting up event handlers for restore
-I0802 18:35:13.845333 6 run_pool_controller.go:38] Starting CStorPool controller
-I0802 18:35:13.845388 6 run_pool_controller.go:41] Waiting for informer caches to sync
-I0802 18:35:13.847407 6 run_pool_controller.go:38] Starting CStorPool controller
-I0802 18:35:13.847458 6 run_pool_controller.go:41] Waiting for informer caches to sync
-I0802 18:35:13.856572 6 new_pool_controller.go:124] cStorPool Added event : cstor-sparse-p8yp, 48d3b2ba-b553-11e9-858e-54e1ad4a9dd4
-I0802 18:35:13.857226 6 event.go:221] Event(v1.ObjectReference{Kind:"CStorPool", Namespace:"", Name:"cstor-sparse-p8yp", UID:"48d3b2ba-b553-11e9-858e-54e1ad4a9dd4", APIVersion:"openebs.io/v1alpha1", ResourceVersion:"1998", FieldPath:""}): type: 'Normal' reason: 'Synced' Received Resource create event
-I0802 18:35:13.867953 6 common.go:262] CStorPool found
-I0802 18:35:13.868007 6 run_restore_controller.go:38] Starting CStorRestore controller
-I0802 18:35:13.868019 6 run_restore_controller.go:41] Waiting for informer caches to sync
-I0802 18:35:13.868022 6 run_replica_controller.go:39] Starting CStorVolumeReplica controller
-I0802 18:35:13.868061 6 run_replica_controller.go:42] Waiting for informer caches to sync
-I0802 18:35:13.868098 6 run_backup_controller.go:38] Starting CStorBackup controller
-I0802 18:35:13.868117 6 run_backup_controller.go:41] Waiting for informer caches to sync
-I0802 18:35:13.946730 6 run_pool_controller.go:45] Starting CStorPool workers
-I0802 18:35:13.946931 6 run_pool_controller.go:51] Started CStorPool workers
-I0802 18:35:13.968344 6 run_replica_controller.go:47] Starting CStorVolumeReplica workers
-I0802 18:35:13.968441 6 run_replica_controller.go:54] Started CStorVolumeReplica workers
-I0802 18:35:13.968490 6 run_restore_controller.go:46] Starting CStorRestore workers
-I0802 18:35:13.968538 6 run_restore_controller.go:53] Started CStorRestore workers
-I0802 18:35:13.968602 6 run_backup_controller.go:46] Starting CStorBackup workers
-I0802 18:35:13.968689 6 run_backup_controller.go:53] Started CStorBackup workers
-I0802 18:35:43.869876 6 handler.go:456] cStorPool pending: 48d3b2ba-b553-11e9-858e-54e1ad4a9dd4
-I0802 18:35:43.869961 6 new_pool_controller.go:160] cStorPool Modify event : cstor-sparse-p8yp, 48d3b2ba-b553-11e9-858e-54e1ad4a9dd4
-I0802 18:35:43.870552 6 event.go:221] Event(v1.ObjectReference{Kind:"CStorPool", Namespace:"", Name:"cstor-sparse-p8yp", UID:"48d3b2ba-b553-11e9-858e-54e1ad4a9dd4", APIVersion:"openebs.io/v1alpha1", ResourceVersion:"2070", FieldPath:""}): type: 'Normal' reason: 'Synced' Received Resource modify event
-I0802 18:35:44.905633 6 pool.go:93] Import command successful with true dontimport: false importattr: [import -c /tmp/pool1.cache -o cachefile=/tmp/pool1.cache cstor-48d3b2ba- b553-11e9-858e-54e1ad4a9dd4] out:
-```
-
-From the above highlighted logs, we can confirm `cstor-pool-mgmt` in new pod is communicating with `cstor-pool` in old pod as first highlighted log says `cstor pool found` then next highlighted one says pool is really `imported`.
-
-**Possible Reason:**
-
-When a cstor pool pod is deleted there are high chances that two cstor pool pods of same pool can present i.e old pool pod will be in `Terminating` state(which means not all the containers completely terminated) and new pool pod will be in `Running` state(might be few containers are in running state but not all). In this scenario `cstor-pool-mgmt` container in new pool pod is communicating with `cstor-pool` in old pool pod. This can cause CVR resource to set to `Invalid`.
-
-**Note:** This issue has observed in all OpenEBS versions up to 1.2.
-
-**Resolution:**
-
-Edit the `Phase` of cStorVolumeReplica (cvr) from `Invalid` to `Offline`. After few seconds CVR will be `Healthy` or `Degraded` state depends on rebuilding progress.
-
-### cStor volume become read only state {#cstor-volume-read-only}
-
-Application mount point running on cStor volume went into read only state.
-
-**Possible Reason:**
-
-If `cStorVolume` is `Offline` or corresponding target pod is unavailable for more than 120 seconds(iSCSI timeout) then the PV will be mounted as `read-only` filesystem. For understanding different states of cStor volume, more details can be found [here](/additional-info/kb#verification-of-cstor-volume).
-
-**Troubleshooting**
-
-Check the status of corresponding cStor volume using the following command:
-
-```
-kubectl get cstorvolume -n -l openebs.io/persistent-volume=
-```
-
-If cStor volume exists in `Healthy` or `Degraded` state then restarting of the application pod alone will bring back cStor volume to `RW` mode. If cStor volume exists in `Offline`, reach out to [OpenEBS Community](/docs/introduction/community) for assistance.
-
-### cStor pools, volumes are offline and pool manager pods are stuck in pending state {#pools-volume-offline}
-
-The cStor pools and volumes are offline, the pool manager pods are stuck in a `pending` state, as shown below:
-
-```
-$ kubectl get po -n openebs -l app=cstor-pool
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME READY STATUS RESTARTS AGE
-cstor-cspc-chjg-85f65ff79d-pq9d2 0/3 Pending 0 16m
-cstor-cspc-h99x-57888d4b5-kh42k 0/3 Pending 0 15m
-cstor-cspc-xs4b-85dbbbb59b-wvhmr 0/3 Pending 0 18m
-```
-
-One such scenario that can lead to such a situation is, when the nodes have been scaled down and then scaled up. This results in nodes coming up with a different hostName and node name, i.e, the nodes that have come up are new nodes and not the same as previous nodes that existed earlier. Due to this, the disks that were attached to the older nodes now get attached to the newer nodes.
-
-**Troubleshooting**
-To bring cStor pool back to online state carry out the below mentioned steps,
-
-1. **Update validatingwebhookconfiguration resource's failurePolicy**:
- Update the `validatingwebhookconfiguration` resource's failure policy to `Ignore`. It would be previously set to `Fail`. This informs the kube-APIServer to ignore the error in case cStor admission server is not reachable.
- To edit, execute:
-
- ```
- $ kubectl edit validatingwebhookconfiguration openebs-cstor-validation-webhook
- ```
-
- Sample Output with updated `failurePolicy`
-
- ```
- kind: ValidatingWebhookConfiguration
- metadata:
- name: openebs-cstor-validation-webhook
- ...
- ...
- webhooks:
- - admissionReviewVersions:
- - v1beta1
- failurePolicy: Fail
- name: admission-webhook.cstor.openebs.io
- ...
- ...
-
- ```
-
-2. **Scale down the admission**:
-
- The openEBS admission server needs to be scaled down as this would skip the validations performed by cStor admission server when CSPC spec is updated with new node details.
-
- ```
- $ kubectl scale deploy openebs-cstor-admission-server -n openebs --replicas=0
- ```
-
- Sample Output:
-
- ```
- deployment.extensions/openebs-cstor-admission-server scaled
- ```
-
-3. **Update the CSPC spec nodeSelector**:
- The `CStorPoolCluster` needs to be updated with the new `nodeSelector` values. The updated CSPC now points to the new nodes instead of the old nodeSelectors.
-
- Update `kubernetes.io/hostname` with the new values.
-
- Sample Output:
-
- ```shell hideCopy
- apiVersion: cstor.openebs.io/v1
- kind: CStorPoolCluster
- metadata:
- name: cstor-cspc
- namespace: openebs
- spec:
- pools:
- - nodeSelector:
- kubernetes.io/hostname: "ip-192-168-25-235"
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: "blockdevice-798dbaf214f355ada15d097d87da248c"
- poolConfig:
- dataRaidGroupType: "stripe"
- - nodeSelector:
- kubernetes.io/hostname: "ip-192-168-33-15"
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: "blockdevice-4505d9d5f045b05995a5654b5493f8e0"
- poolConfig:
- dataRaidGroupType: "stripe"
- - nodeSelector:
- kubernetes.io/hostname: "ip-192-168-75-156"
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: "blockdevice-c783e51a80bc51065402e5473c52d185"
- poolConfig:
- dataRaidGroupType: "stripe"
- ```
-
-4. To apply the above configuration, execute:
-
- ```
- $ kubectl apply -f cspc.yaml
- ```
-
-5. **Update nodeSelectors, labels and NodeName**:
-
- Next, the CSPI needs to be updated with the correct node details.
- Get the node details on which the previous blockdevice was attached and after fetching node details update hostName, nodeSelector values and `kubernetes.io/hostname` values in labels of CSPI with new details.
- To update, execute:
-
- ```
- kubectl edit cspi -n openebs
- ```
-
- **NOTE**: The same process needs to be repeated for all other CSPIs which are in pending state and belongs to the updated CSPC.
-
-6. **Verification**:
- On successful implementation of the above steps, the updated CSPI generates an event, _pool is successfully imported_ which verifies the above steps have been completed successfully.
-
- ```
- kubectl describe cspi cstor-cspc-xs4b -n openebs
- ```
-
- Sample Output:
-
- ```shell hideCopy
- ...
- ...
- Events:
- Type Reason Age From Message
- ---- ------ ---- ---- -------
- Normal Pool Imported 2m48s CStorPoolInstance Pool Import successful: cstor-07c4bfd1-aa1a-4346-8c38-f81d33070ab7
- ```
-
-7. **Scale-up the cStor admission server and update validatingwebhookconfiguration**:
- This brings back the cStor admission server to running state. As well as admission server is required to validate the modifications made to CSPC API in future.
- `$ kubectl scale deploy openebs-cstor-admission-server -n openebs --replicas=1`
-
- Sample Output:
-
- ```shell hideCopy
- deployment.extensions/openebs-cstor-admission-server scaled
- ```
-
- Now, update the `failurePolicy` back to `Fail` under validatingwebhookconfiguration. To edit, execute:
-
- ```
- $ kubectl edit validatingwebhookconfiguration openebs-cstor-validation-webhook
- ```
-
- Sample Output:
-
- ```shell hideCopy
- validatingwebhookconfiguration.admissionregistration.k8s.io/openebs-cstor-validation-webhook edited
- ```
-
-### Pool Operation hung due to Bad Disk {#pool-operation-hung}
-
-cStor scans all the devices on the node while it tries to import the pool in case there is a pool manager pod restart. Pool(s) are always imported before creation.
-On pool creation all of the devices are scanned and as there are no existing pool(s), a new pool is created. Now, when the pool is created the participating devices are cached for faster import of the pool (in case of pool manager pod restart). If the import utilises cache then this issue won't be hit but there is a chance of import without cache (when the pool is being created for the first time)
-
-In such cases where pool import happens without cache file and if any of the devices(even the devices that are not part of the cStor pool) is bad and is not responding the command issued by cStor keeps on waiting and is stuck. As a result of this, pool manager pod is not able to issue any more command in order to reconcile the state of cStor pools or even perform the IO for the volumes that are placed on that particular pool.
-
-**Troubleshooting**
-This might be encountered because of one of the following situations:
-
-1. The device that has gone bad is actually a part of the cStor pool on the node. In such cases, Block device replacement needs to be done, the detailed steps to it can be found [here](/deprecated/spc-based-cstor#setting-performance-tunings).
-
-**Note**: Block device replacement is not supported for stripe raid configuration. Please visit this link for some use cases and solutions.
-
-2. The device that has gone bad is not part of the cStor pool on the node. In this case, removing the bad disk from the node and restarting the pool manager pod with fix the problem.
-
-### Volume Migration when the underlying cStor pool is lost {#volume-migration-pool-lost}
-
-#### Scenarios that can result in losing of cStor pool(s):
-
-- If the node is lost.
-- If one or more disks participating in the cStor pool are lost. This occurs when the pool configuration is set to stripe.
-- If all the disks participating in any raid group are lost. This occurs when the pool configuration is set to mirror.
-- If the cStor pool configuration is raidz and more than 1 disk in any raid group is lost.
-- If the cStor pool configuration is raidz2 and more than 2 disks in any raid group are lost.
-
-This situation is often encountered in Kubernetes clusters that have autoscale feature enabled and nodes scale down and scale-up.
-
-If the volume replica that resided on the lost pool was configured in high availability mode then the volume replica can be migrated to a new cStor pool.
-
-**NOTE**:The CStorVolume associated to the volume replicas have to be migrated should be in Healthy state.
-
-**STEP 1:**
-
-**Remove the cStorVolumeReplicas from the lost pool**:
-
-To remove the pool the `CStorVolumeConfig` needs to updated. The `poolName` for the corresponding pool needs to be removed from `replicaPoolInfo`. This ensures that the admission server accepts the scale down request.
-
-**NOTE**: Ensure that the cstorvolume and target pods are in running state.
-
-A sample CVC resource(corresponding to the volume) that has 3 pools.
-
-```shell hideCopy
-...
-...
- policy:
- provision:
- replicaAffinity: false
- replica: {}
- replicaPoolInfo:
- - poolName: cstor-cspc-4tr5 // This pool needs to be removed
- - poolName: cstor-cspc-xnxx
- - poolName: cstor-cspc-zdvk
-...
-...
-
-```
-
-Now edit the CVC and remove the desired poolName.
-
-```
-$ kubectl edit cvc pvc-81746e7a-a29d-423b-a048-76edab0b0826 -n openebs
-```
-
-```shell hideCopy
-...
-...
- policy:
- provision:
- replicaAffinity: false
- replica: {}
- replicaPoolInfo:
- - poolName: cstor-cspc-xnxx
- - poolName: cstor-cspc-zdvk
-...
-...
-```
-
-From the above spec, `cstor-cspc-4tr5` CSPI entry is removed. This needs to be repeated for all the volumes which have cStor volume replicas on the lost pool. To get the list of volume replicas in lost pool, execute:
-
-```
-$ kubectl get cvr -n openebs -l cstorpoolinstance.openebs.io/name=
-```
-
-```
-NAME USED ALLOCATED STATUS AGE
-pvc-81746e7a-a29d-423b-a048-76edab0b0826-cstor-cspc-bf9h 6K 6K Healthy 4m7s
-```
-
-**STEP 2:**
-
-**Remove the finalizer from cStor volume replicas**
-
-The CVRs need to be deleted from the etcd, this requires the `finalizer` under `cstorvolumereplica.openebs.io/finalizer` to be removed from the CVRs which were present on the lost cStor pool.
-
-Usually, the finalizer is removed by pool-manager pod but as in this case the pod is not in running state hence manual intervention is required.
-
-To get the list of CVRs, execute:
-
-```
-$ kubectl get cvr -n openebs
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME USED ALLOCATED STATUS AGE
-pvc-81746e7a-a29d-423b-a048-76edab0b0826-cstor-cspc-xnxx 6K 6K Healthy 52m
-pvc-81746e7a-a29d-423b-a048-76edab0b0826-cstor-cspc-zdvk 6K 6K Healthy 52m
-```
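-
-For a CVR on the lost pool (for example `pvc-81746e7a-a29d-423b-a048-76edab0b0826-cstor-cspc-4tr5`, following the example above), the finalizer can be removed with a patch similar to the following sketch:
-
-```
-kubectl patch cvr pvc-81746e7a-a29d-423b-a048-76edab0b0826-cstor-cspc-4tr5 -n openebs \
-  --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
-```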
-
-After this step, CStorVolume will scale down. To verify, execute:
-
-```
-$ kubectl describe cvc -n openebs
-```
-
-Sample Output:
-
-```shell hideCopy
-Events:
-Type Reason Age From Message
----- ------ ---- ---- -------
-Normal ScalingVolumeReplicas 6m10s cstorvolumeclaim-controller successfully scaled volume replicas to 2
-```
-
-**STEP 3:**
-
-**Remove the pool spec of the lost node from the CSPC**
-
-Next, the corresponding CSPC needs to be edited and the pool spec that belongs to the node, which no longer exists, needs to be removed. To edit the CSPC, execute:
-
-```
-kubectl edit cspc -n openebs
-```
-
-This updates the number of desired instances.
-
-To verify, execute:
-
-```
-$ kubectl get cspc -n openebs
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME HEALTHYINSTANCES PROVISIONEDINSTANCES DESIREDINSTANCES AGE
-cstor-cspc 2 3 2 56m
-```
-
-Since the CSPI has the pool protection finalizer, i.e. `openebs.io/pool-protection`, the CSPC operator was unable to delete the CSPI. For this reason, the count for provisioned instances still remains 3.
-
-To fix this, the `openebs.io/pool-protection` finalizer must be removed from the CSPI that was present on the lost node.
-
-To edit, execute:
-
-```
-kubectl edit cspi
-```
-
-After the finalizer is removed, the CSPI count goes down to the desired number.
-
-```shell hideCopy
-$ kubectl get cspc -n openebs
-NAME HEALTHYINSTANCES PROVISIONEDINSTANCES DESIREDINSTANCES AGE
-cstor-cspc 2 2 2 68m
-```
-
-**STEP 4:**
-
-**Scale the cStorVolumeReplicas back to the original number**
-
-Scale the CStorVolumeReplicas back to the desired number on a new or existing cStor pool where a volume replica of the same volume does not already exist.
-
-NOTE: A CStorVolume is a collection of 1 or more volume replicas and no two replicas of a CStorVolume should reside on the same CStorPoolInstance. CStorVolume is a custom resource and a logical aggregated representation of all the underlying cStor volume replicas for this particular volume.
-
-To get the list of CSPIs, execute:
-
-```
-$ kubectl get cspi -n openebs
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME HOSTNAME ALLOCATED FREE CAPACITY READONLY PROVISIONEDREPLICAS HEALTHYREPLICAS TYPE STATUS AGE
-cstor-cspc-bf9h ip-192-168-49-174 230k 9630M 9630230k false 0 0 stripe ONLINE 66s
-```
-
-Next, add the newly created CStorPoolInstance under `CVC.Spec`.
-In this example, we are adding `cstor-cspc-bf9h`.
-
-To edit, execute:
-
-```
-$ kubectl edit cvc pvc-81746e7a-a29d-423b-a048-76edab0b0826 -n openebs
-```
-
-Sample YAML:
-
-```
-...
-...
-spec:
- policy:
- provision:
- replicaAffinity: false
- replica: {}
- replicaPoolInfo:
- - poolName: cstor-cspc-bf9h
- - poolName: cstor-cspc-xnxx
- - poolName: cstor-cspc-zdvk
-...
-...
-
-```
-
-The same needs to be repeated for all the scaled-down cStor volumes.
-Next, verify the status of the new CStorVolumeReplicas (CVRs) that are provisioned.
-
-To get the list of CVR, execute:
-
-```
-$ kubectl get cvr -n openebs
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME USED ALLOCATED STATUS AGE
-pvc-81746e7a-a29d-423b-a048-76edab0b0826-cstor-cspc-bf9h 6K 6K Healthy 11m
-pvc-81746e7a-a29d-423b-a048-76edab0b0826-cstor-cspc-xnxx 6K 6K Healthy 96m
-pvc-81746e7a-a29d-423b-a048-76edab0b0826-cstor-cspc-zdvk 6K 6K Healthy 96m
-
-```
-
-To get the list of CSPIs, execute:
-
-```
-$ kubectl get cspi -n openebs
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME HOSTNAME ALLOCATED FREE CAPACITY READONLY PROVISIONEDREPLICAS HEALTHYREPLICAS TYPE STATUS AGE
-cstor-cspc-bf9h ip-192-168-49-174 230k 9630M 9630230k false 1 1 stripe ONLINE 66s
-cstor-cspc-xnxx ip-192-168-79-76 101k 9630M 9630101k false 1 1 stripe ONLINE 4m25s
-cstor-cspc-zdvk ip-192-168-29-217 98k 9630M 9630098k false 1 1 stripe ONLINE 4m25s
-
-```
-
-## See Also:
-
-[FAQs](/docs/additional-info/faqs) [Seek support or help](/docs/introduction/community) [Latest release notes](/docs/introduction/releases)
diff --git a/docs/main/troubleshooting/jiva.md b/docs/main/troubleshooting/jiva.md
deleted file mode 100644
index 3729314e4..000000000
--- a/docs/main/troubleshooting/jiva.md
+++ /dev/null
@@ -1,76 +0,0 @@
----
-id: jiva
-title: Troubleshooting OpenEBS - Jiva
-keywords:
- - OpenEBS
- - Jiva
- - Jiva troubleshooting
-description: This page contains a list of Jiva related troubleshooting information.
----
-
-## General guidelines for troubleshooting
-
-- Contact [OpenEBS Community](/docs/introduction/community) for support.
-- Search for similar issues added in this troubleshooting section.
-- Search for any reported issues on [StackOverflow under OpenEBS tag](https://stackoverflow.com/questions/tagged/openebs)
-
-[Jiva replica pod logs showing meta file missing entry](#replica-pod-meta-file-error)
-
-### Jiva replica pod logs showing "Failed to find metadata" {#replica-pod-meta-file-error}
-
-The Jiva target pod may not be syncing data across all replicas when the replica pod logs contain messages like the following:
-
-```shell hideCopy
-level=error msg="Error in request: Failed to find metadata for volume-snap-b72764f0-4ca8-49b1-b9ca-57cb9dfb6fa9.img"
-```
-
-**Troubleshooting**:
-
-Perform the following steps to restore the missing metadata file of internal snapshots.
-
-- Check that all replica pods are in the running state. A faulty replica pod will be in `CrashLoopBackOff` state in OpenEBS version 1.0.0.
-
-- Find the replica in `RW` mode using the mayactl command and consider it as healthy.
-
-- Consider the replica that has the above kind of error messages in its logs as faulty.
-
-- Log in to the nodes of the healthy and faulty replicas and list all the snapshots under `/var/openebs/`.
-
- Example snippet of Healthy replica:
-
- ```shell hideCopy
- revision.counter volume-snap-792e7036-877d-4807-9641-4843c987d0a5.img
- volume-head-005.img volume-snap-792e7036-877d-4807-9641-4843c987d0a5.img.meta
- volume-head-005.img.meta volume-snap-b72764f0-4ca8-49b1-b9ca-57cb9dfb6fa9.img
- volume-snap-15660574-e47d-4217-ac92-1497e5b654a4.img volume-snap-b72764f0-4ca8-49b1-b9ca-57cb9dfb6fa9.img.meta
- volume-snap-15660574-e47d-4217-ac92-1497e5b654a4.img.meta volume-snap-cce9eb61-8f8b-42bd-ba44-8479ada98cee.img
- volume-snap-2ac410ca-2716-4255-94b1-39105b627270.img volume-snap-cce9eb61-8f8b-42bd-ba44-8479ada98cee.img.meta
- volume-snap-2ac410ca-2716-4255-94b1-39105b627270.img.meta volume-snap-d9f8d3db-9434-4f16-a5a7-b1b120ceae94.img
- volume-snap-466d32e7-c443-46dd-afdd-8412e76f348e.img volume-snap-d9f8d3db-9434-4f16-a5a7-b1b120ceae94.img.meta
- volume-snap-466d32e7-c443-46dd-afdd-8412e76f348e.img.meta volume.meta
- ```
-
- Example snippet of faulty replica:
-
- ```shell hideCopy
- revision.counter volume-snap-792e7036-877d-4807-9641-4843c987d0a5.img
- volume-head-005.img volume-snap-792e7036-877d-4807-9641-4843c987d0a5.img.meta
- volume-head-005.img.meta volume-snap-b72764f0-4ca8-49b1-b9ca-57cb9dfb6fa9.img
- volume-snap-15660574-e47d-4217-ac92-1497e5b654a4.img volume-snap-15660574-e47d-4217-ac92-1497e5b654a4.img.meta volume-snap-cce9eb61-8f8b-42bd-ba44-8479ada98cee.img
- volume-snap-2ac410ca-2716-4255-94b1-39105b627270.img volume-snap-cce9eb61-8f8b-42bd-ba44-8479ada98cee.img.meta
- volume-snap-2ac410ca-2716-4255-94b1-39105b627270.img.meta volume-snap-d9f8d3db-9434-4f16-a5a7-b1b120ceae94.img
- volume-snap-466d32e7-c443-46dd-afdd-8412e76f348e.img volume-snap-d9f8d3db-9434-4f16-a5a7-b1b120ceae94.img.meta
- volume-snap-466d32e7-c443-46dd-afdd-8412e76f348e.img.meta volume.meta
- ```
-
- From above snippet of faulty replica, metadata for the `volume-snap-b72764f0-4ca8-49b1-b9ca-57cb9dfb6fa9.img` snapshot is not present.
-
-- If only one meta file is missing, then copy the meta file name and content from one of the healthy replicas to the faulty replica.
-
-  For the above case, copy `volume-snap-b72764f0-4ca8-49b1-b9ca-57cb9dfb6fa9.img.meta` from the healthy replica to the faulty replica and restart the faulty replica (see the sketch after this list). You can then verify the logs of the replica pod to ensure that there are no error messages as mentioned above.
-
-- If multiple meta files are missing, then delete all the files from the faulty replica and restart the faulty replica pod so that it rebuilds from a healthy replica.
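-
-A hypothetical sketch of the single-meta-file case; the node names, PV data directory, and pod name are placeholders:
-
-```
-# Copy the missing .meta file from the healthy replica's node into the
-# faulty replica's data directory, then restart the faulty replica pod.
-scp <healthy-node>:/var/openebs/<pv-dir>/volume-snap-b72764f0-4ca8-49b1-b9ca-57cb9dfb6fa9.img.meta \
-    /var/openebs/<pv-dir>/
-kubectl delete pod <faulty-jiva-replica-pod> -n <namespace>
-```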
-
-## See Also:
-
-[FAQs](/docs/additional-info/faqs) [Seek support or help](/docs/introduction/community) [Latest release notes](/docs/introduction/releases)
diff --git a/docs/main/troubleshooting/ndm.md b/docs/main/troubleshooting/ndm.md
deleted file mode 100644
index 1d7609328..000000000
--- a/docs/main/troubleshooting/ndm.md
+++ /dev/null
@@ -1,112 +0,0 @@
----
-id: ndm
-title: Troubleshooting OpenEBS - NDM
-keywords:
- - OpenEBS
- - NDM
- - NDM troubleshooting
- - OpenEBS NDM
- - OpenEBS NDM troubleshooting
- - Node Disk Manager
-description: This page contains a list of Node Disk Manager (NDM) related troubleshooting information.
----
-
-## General guidelines for troubleshooting
-
-- Contact [OpenEBS Community](/docs/introduction/community) for support.
-- Search for similar issues added in this troubleshooting section.
-- Search for any reported issues on [StackOverflow under OpenEBS tag](https://stackoverflow.com/questions/tagged/openebs)
-
-[Blockdevices are not detected by NDM](#bd-not-detected)
-
-[Unable to claim blockdevices by NDM operator](#unable-to-claim-blockdevices)
-
-### Blockdevices are not detected by NDM {#bd-not-detected}
-
-An additional disk is connected to the node, with multiple partitions on the disk. Some of the partitions have a filesystem and are mounted. `kubectl get bd -n openebs` does not show any blockdevices, although blockdevice resources for the partitions should ideally have been shown.
-
-```shell hideCopy
-NAME FSTYPE MOUNTPOINT SIZE
-sda 1.8T
-├─sda1 500G
-├─sda2 500G
-├─sda3 500G
-└─sda4 ext4 /kubernetes 363G
-sdb 55.9G
-├─sdb1 vfat /boot/efi 512M
-└─sdb2 ext4 / 55.4G
-```
-
-**Troubleshooting:**
-
-Check the output of `lsblk` on the node and check the mountpoints of the partitions. By default, NDM excludes partitions mounted at `/`, `/boot`, and `/etc/hosts` (which is the same partition on which the kubernetes/docker filesystem exists) and the parent disks of those partitions. In the above example, `/dev/sdb` is excluded because of the root partitions on that disk. `/dev/sda4` contains the docker filesystem, and hence `/dev/sda` is also excluded.
-
-**Resolution:**
-
-The `ndm-config-map` needs to be edited.
-
-1. Remove the `/etc/hosts` entry from the os-disk-exclude-filter.
-2. Add the corresponding docker filesystem partition to the exclude section of the path filter, e.g. `/dev/sda4`.
-3. Restart the NDM daemonset pods.
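-
-For example, assuming the default resource names `openebs-ndm-config` and `openebs-ndm` from a standard installation:
-
-```
-kubectl edit configmap openebs-ndm-config -n openebs       # adjust the filter configuration
-kubectl rollout restart daemonset openebs-ndm -n openebs   # restart the NDM pods
-```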
-
-The blockdevices should now be created for the unused partitions.
-
-### Unable to claim blockdevices by NDM operator {#unable-to-claim-blockdevices}
-
-BlockDeviceClaims may remain in a pending state, even if blockdevices are available in the Unclaimed and Active state. The main reason for this is that there are no blockdevices that match the criteria specified in the BlockDeviceClaim. Sometimes, even if the criteria match, the blockdevice may remain in an Unclaimed state.
-
-**Troubleshooting:**
-
-Check if the blockdevice is having any of the following annotations:
-
-1.
-
-```
- metadata:
- annotations:
- internal.openebs.io/partition-uuid:
- internal.openebs.io/uuid-scheme: legacy
-```
-
-or
-
-2.
-
-```
-metadata:
-  annotations:
-    internal.openebs.io/fsuuid:
-    internal.openebs.io/uuid-scheme: legacy
-```
-
-If `1.` is present, it means the blockdevice was previously being used by cStor and it was not properly cleaned up. The cStor pool can be from a previous release, or the disk already contains some ZFS labels.
-If `2.` is present, it means the blockdevice was previously being used by localPV and the cleanup was not done on the device.
-
-**Resolution:**
-
-1. SSH to the node on which the blockdevice is present.
-
-2. If the disk has partitions, run wipefs on all of the partitions.
-
-```
-wipefs -fa /dev/sdb1
-wipefs -fa /dev/sdb9
-```
-
-3. Run wipefs on the disk
-
-```
-wipefs -fa /dev/sdb
-```
-
-4. Restart the NDM pod running on the node.
-
-5. New blockdevices should get created for those disks, and they can be claimed and used. The older blockdevices will go into an Unknown/Inactive state.
-
-## See Also:
-
-[FAQs](/docs/additional-info/faqs) [Seek support or help](/docs/introduction/community) [Latest release notes](/docs/introduction/releases)
diff --git a/docs/main/troubleshooting/troubleshooting-local-engine.md b/docs/main/troubleshooting/troubleshooting-local-engine.md
index 995e5eafb..b317e6d04 100644
--- a/docs/main/troubleshooting/troubleshooting-local-engine.md
+++ b/docs/main/troubleshooting/troubleshooting-local-engine.md
@@ -8,100 +8,241 @@ keywords:
description: This page contains a list of OpenEBS related troubleshooting which contains information like troubleshooting installation, troubleshooting uninstallation, and troubleshooting local engines.
---
-### General guidelines for troubleshooting
+General Troubleshooting
-- Search for similar issues mentioned in this page as well as the following troubleshooting sections.
- - [Troubleshooting Install](/docs/troubleshooting/install).
- - [Troubleshooting Uninstall](/docs/troubleshooting/uninstall).
- - [Troubleshooting NDM](/docs/troubleshooting/ndm).
- - [Troubleshooting Jiva](/docs/troubleshooting/jiva).
- - [Troubleshooting cStor](/docs/troubleshooting/cstor).
- - [Troubleshooting LocalPV](/docs/troubleshooting/localpv).
-- Contact [OpenEBS Community](/docs/introduction/community) for support.
-- Search for similar issues on [OpenEBS GitHub repository](https://github.com/openebs/openebs/issues).
-- Search for any reported issues on [StackOverflow under OpenEBS tag](https://stackoverflow.com/questions/tagged/openebs).
+### PVC in Pending state {#pvc-in-pending-state}
-#### Kubernetes related
+You created a PVC using the localpv-hostpath storage class, but the PV is not created and the PVC remains in the Pending state.
-[Kubernetes node reboots because of increase in memory consumed by Kubelet](#node-reboot-when-kubelet-memory-increases)
+**Troubleshooting:**
+The default localpv storage classes from OpenEBS have `volumeBindingMode: WaitForFirstConsumer`. This means that the provisioner receives the volume provision request and creates the volume only when the application pod that uses the PVC is scheduled to a node.
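+
+For instance, assuming the default `openebs-hostpath` storage class, the binding mode can be confirmed with:
+
+```
+kubectl get sc openebs-hostpath -o jsonpath='{.volumeBindingMode}'
+```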
-[Application and OpenEBS pods terminate/restart under heavy I/O load](#Pods-restart-terminate-when-heavy-load)
+**Resolution:**
+Deploy an application that uses the PVC. The PV will then be created and the application will start using it.
-#### Others
+### Stale BDC in pending state after PVC is deleted {#stale-bdc-after-pvc-deletion}
-[Nodes in the cluster reboots frequently almost everyday in openSUSE CaaS](#reboot-cluster-nodes)
+```
+kubectl get bdc -n openebs
+```
+
+shows stale `Pending` BDCs created by localpv provisioner, even after the corresponding PVC has been deleted.
+
+**Resolution:**
+The LocalPV provisioner currently does not delete BDCs in the Pending state if the corresponding PVCs are deleted. To remove the stale BDC entries:
+
+1. Edit the BDC and remove the `- local.openebs.io/finalizer` finalizer
+
+```
+kubectl edit bdc -n openebs
+```
+
+2. Delete the BDC
+
+```
+kubectl delete bdc -n openebs
+```
+
+### BDC created by localPV in pending state {#bdc-by-localpv-pending-state}
+
+The BDC created by the localpv provisioner (bdc-pvc-xxxx) remains in a pending state and the PVC does not get Bound.
+
+**Troubleshooting:**
+Describe the BDC to check the events recorded on the resource.
-Kubernetes related
+```
+kubectl describe bdc bdc-pvc-xxxx -n openebs
+```
+
+The following are the different types of messages shown when the node on which the localpv application pod is scheduled does not have a blockdevice available.
-## Kubernetes node reboots because of increase in memory consumed by Kubelet {#node-reboot-when-kubelet-memory-increases}
+1. No blockdevices found
+
+```shell hideCopy
+Warning SelectionFailed 14m (x25 over 16m) blockdeviceclaim-operator no blockdevices found
+```
-Sometime it is observed that iscsiadm is continuously fails and repeats rapidly and for some reason this causes the memory consumption of kubelet to grow until the node goes out-of-memory and needs to be rebooted. Following type of error can be observed in journalctl and cstor-istgt container.
+It means that there were no matching blockdevices after listing based on the labels. Check whether a `block-device-tag` is set on the storage class and whether corresponding tags are available on the blockdevices as well.
-**journalctl logs**
+2. No devices with matching criteria
```shell hideCopy
-Feb 06 06:11:38 kubelet[1063]: iscsiadm: failed to send SendTargets PDU
-Feb 06 06:11:38 kubelet[1063]: iscsiadm: connection login retries (reopen_max) 5 exceeded
-Feb 06 06:11:38 kubelet[1063]: iscsiadm: Connection to Discovery Address 10.233.46.76 failed
-Feb 06 06:11:38 kubelet[1063]: iscsiadm: failed to send SendTargets PDU
-Feb 06 06:11:38 kubelet[1063]: iscsiadm: connection login retries (reopen_max) 5 exceeded
-Feb 06 06:11:38 kubelet[1063]: iscsiadm: Connection to Discovery Address 10.233.46.76 failed
-Feb 06 06:11:38 kubelet[1063]: iscsiadm: failed to send SendTargets PDU
-Feb 06 06:11:38 kubelet[1063]: iscsiadm: connection login retries (reopen_max) 5 exceeded
-Feb 06 06:11:38 kubelet[1063]: iscsiadm: Connection to Discovery Address 10.233.46.76 failed
-Feb 06 06:11:38 kubelet[1063]: iscsiadm: failed to send SendTargets PDU
+Warning SelectionFailed 6m25s (x18 over 11m) blockdeviceclaim-operator no devices found matching the criteria
```
-**cstor-istgt container logs**
+It means that there are no devices available for claiming after filtering based on filesystem type and node name. Make sure the blockdevices on the node
+have the correct filesystem as mentioned in the storage class (the default is `ext4`).
+
+3. No devices with matching resource requirements
```shell hideCopy
-2019-02-05/15:43:30.250 worker :6088: c#0.140005771040512.: iscsi_read_pdu() EOF
+Warning SelectionFailed 85s (x74 over 11m) blockdeviceclaim-operator could not find a device with matching resource requirements
+```
+
+It means that there are no devices available on the node with a matching capacity requirement.
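+
+In all of the above cases, it helps to inspect the blockdevices on the node where the application pod is scheduled (the blockdevice name below is a placeholder):
+
+```
+kubectl get bd -n openebs
+kubectl describe bd <blockdevice-name> -n openebs
+```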
+
+**Resolution:**
+
+To schedule the application pod to a node that has blockdevices available, a node selector can be used on the application pod. Here, the node with hostname `svc1` has blockdevices available, so a node selector is used to schedule the pod to that node.
+
+Example:
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+ name: pod1
+spec:
+ volumes:
+ - name: local-storage
+ persistentVolumeClaim:
+ claimName: pvc1
+ containers:
+ - name: hello-container
+ image: busybox
+ command:
+ - sh
+ - -c
+ - 'while true; do echo "`date` [`hostname`] Hello from OpenEBS Local PV." >> /mnt/store/greet.txt; sleep $(($RANDOM % 5 + 300)); done'
+ volumeMounts:
+ - mountPath: /mnt/store
+ name: local-storage
+ nodeSelector:
+ kubernetes.io/hostname: svc1
+```
-2019-02-05/15:43:30.250 sender :5852: s#0.140005666154240.: sender loop ended (0:14:43084)
+Installation Related
-2019-02-05/15:43:30.251 worker :6292: c#0.140005771040512.: worker 0/-1/43084 end (c#0.140005771040512/s#0.140005666154240)
-2019-02-05/15:43:30.264 worker :5885: c#1.140005666154240.: con:1/16 [8d614b93:43088->10.233.45.100:3260,1]
-2019-02-05/15:43:30.531 istgt_iscsi_op_log:1923: c#1.140005666154240.: login failed, target not ready
+### Installation failed because of insufficient user rights {#install-failed-user-rights}
-2019-02-05/15:43:30.782 worker :6088: c#1.140005666154240.: iscsi_read_pdu() EOF
+OpenEBS installation can fail on some cloud platforms with the following errors.
-2019-02-05/15:43:30.782 sender :5852: s#1.140005649413888.: sender loop ended (1:16:43088)
+```shell hideCopy
+namespace "openebs" created
+serviceaccount "openebs-maya-operator" created
+clusterrolebinding.rbac.authorization.k8s.io "openebs-maya-operator" created
+deployment.apps "maya-apiserver" created
+service "maya-apiserver-service" created
+deployment.apps "openebs-provisioner" created
+deployment.apps "openebs-snapshot-operator" created
+configmap "openebs-ndm-config" created
+daemonset.extensions "openebs-ndm" created
+Error from server (Forbidden): error when creating "https://raw.githubusercontent.com/openebs/openebs/v0.8.x/k8s/openebs-operator.yaml": clusterroles.rbac.authorization.k8s.io "openebs-maya-operator" is forbidden: attempt to grant extra privileges: [{[*] [*] [nodes] [] []} {[*] [*] [nodes/proxy] [] []} {[*] [*] [namespaces] [] []} {[*] [*] [services] [] []} {[*] [*] [pods] [] []} {[*] [*] [deployments] [] []} {[*] [*] [events] [] []} {[*] [*] [endpoints] [] []} {[*] [*] [configmaps] [] []} {[*] [*] [jobs] [] []} {[*] [*] [storageclasses] [] []} {[*] [*] [persistentvolumeclaims] [] []} {[*] [*] [persistentvolumes] [] []} {[get] [volumesnapshot.external-storage.k8s.io] [volumesnapshots] [] []} {[list] [volumesnapshot.external-storage.k8s.io] [volumesnapshots] [] []} {[watch] [volumesnapshot.external-storage.k8s.io] [volumesnapshots] [] []} {[create] [volumesnapshot.external-storage.k8s.io] [volumesnapshots] [] []} {[update] [volumesnapshot.external-storage.k8s.io] [volumesnapshots] [] []} {[patch] [volumesnapshot.external-storage.k8s.io] [volumesnapshots] [] []} {[delete] [volumesnapshot.external-storage.k8s.io] [volumesnapshots] [] []} {[get] [volumesnapshot.external-storage.k8s.io] [volumesnapshotdatas] [] []} {[list] [volumesnapshot.external-storage.k8s.io] [volumesnapshotdatas] [] []} {[watch] [volumesnapshot.external-storage.k8s.io] [volumesnapshotdatas] [] []} {[create] [volumesnapshot.external-storage.k8s.io] [volumesnapshotdatas] [] []} {[update] [volumesnapshot.external-storage.k8s.io] [volumesnapshotdatas] [] []} {[patch] [volumesnapshot.external-storage.k8s.io] [volumesnapshotdatas] [] []} {[delete] [volumesnapshot.external-storage.k8s.io] [volumesnapshotdatas] [] []} {[get] [apiextensions.k8s.io] [customresourcedefinitions] [] []} {[list] [apiextensions.k8s.io] [customresourcedefinitions] [] []} {[create] [apiextensions.k8s.io] [customresourcedefinitions] [] []} {[update] [apiextensions.k8s.io] [customresourcedefinitions] [] []} {[delete] [apiextensions.k8s.io] [customresourcedefinitions] [] []} {[*] [*] [disks] [] []} {[*] [*] [storagepoolclaims] [] []} {[*] [*] [storagepools] [] []} {[*] [*] [castemplates] [] []} {[*] [*] [runtasks] [] []} {[*] [*] [cstorpools] [] []} {[*] [*] [cstorvolumereplicas] [] []} {[*] [*] [cstorvolumes] [] []} {[get] [] [] [] [/metrics]}] user=&{user.name@mayadata.io [system:authenticated] map[user-assertion.cloud.google.com:[AKUJVpmzjjLCED3Vk2Q7wSjXV1gJs/pA3V9ZW53TOjO5bHOExEps6b2IZRjnru9YBKvaj3pgVu+34A0fKIlmLXLHOQdL/uFA4WbKbKfMdi1XC52CcL8gGTXn0/G509L844+OiM+mDJUftls7uIgOIRFAyk2QBixnYv22ybLtO2n8kcpou+ZcNFEVAD6z8Xy3ZLEp9pMd9WdQuttS506x5HIQSpDggWFf9T96yPc0CYmVEmkJm+O7uw==]]} ownerrules=[{[create] [authorization.k8s.io] [selfsubjectaccessreviews selfsubjectrulesreviews] [] []} {[get] [] [] [] [/api /api/* /apis /apis/* /healthz /openapi /openapi/* /swagger-2.0.0.pb-v1 /swagger.json /swaggerapi /swaggerapi/* /version /version/]}] ruleResolutionErrors=[]
+```
+
+**Troubleshooting**
+
+You must enable RBAC before OpenEBS installation. This can be done from the Kubernetes master console by executing the following command.
-2019-02-05/15:43:30.783 worker :6292: c#1.140005666154240.: worker 1/-1/43088 end (c#1.140005666154240/s#1.140005649413888)
-2019-02-05/15:43:33.285 worker :5885: c#2.140005649413888.: con:2/18 [8d614b93:43092->10.233.45.100:3260,1]
-2019-02-05/15:43:33.536 istgt_iscsi_op_log:1923: c#2.140005649413888.: login failed, target not ready
+```
+kubectl create clusterrolebinding -admin-binding --clusterrole=cluster-admin --user=
+```
-2019-02-05/15:43:33.787 worker :6088: c#2.140005649413888.: iscsi_read_pdu() EOF
+### Why does the OpenEBS provisioner pod restart continuously? {#openebs-provisioner-restart-continuously}
-2019-02-05/15:43:33.787 sender :5852: s#2.140005632636672.: sender loop ended (2:18:43092)
+The following output displays the pod status across all namespaces, where the OpenEBS provisioner is restarting continuously.
-2019-02-05/15:43:33.788 worker :6292: c#2.140005649413888.: worker 2/-1/43092 end (c#2.140005649413888/s#2.140005632636672)
-2019-02-05/15:43:35.251 istgt_remove_conn :7039: c#0.140005771040512.: remove_conn->initiator:147.75.97.141(iqn.2019-02.net.packet:device.7c8ad781) Target: 10.233.109.82(dummy LU0) conn:0x7f55a4c18000:0 tsih:1 connections:0 IOPending=0
-2019-02-05/15:43:36.291 worker :5885: c#0.140005666154240.: con:0/14 [8d614b93:43094->10.233.45.100:3260,1]
-2019-02-05/15:43:36.540 istgt_iscsi_op_log:1923: c#0.140005666154240.: login failed, target not ready
+```
+NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE
+default percona 0/1 Pending 0 36m
+kube-system calico-etcd-tl4td 1/1 Running 0 1h 192.168.56.65 master
+kube-system calico-kube-controllers-84fd4db7cd-jz9wt 1/1 Running 0 1h 192.168.56.65 master
+kube-system calico-node-node1 2/2 Running 0 1h 192.168.56.65 master
+kube-system calico-node-zt95x 2/2 Running 0 1h 192.168.56.66 node
+kube-system coredns-78fcdf6894-2test 1/1 Running 0 1h 192.168.219.65 master
+kube-system coredns-78fcdf6894-test7 1/1 Running 0 1h 192.168.219.66 master
+kube-system etcd-master 1/1 Running 0 1h 192.168.56.65 master
+kube-system kube-apiserver-master 1/1 Running 0 1h 192.168.56.65 master
+kube-system kube-controller-manager-master 1/1 Running 0 1h 192.168.56.65 master
+kube-system kube-proxy-9t98s 1/1 Running 0 1h 192.168.56.65 master
+kube-system kube-proxy-mwk9f 1/1 Running 0 1h 192.168.56.66 node
+kube-system kube-scheduler-master 1/1 Running 0 1h 192.168.56.65 master
+openebs maya-apiserver-5598cf68ff-pod17 1/1 Running 0 1h 192.168.167.131 node
+openebs openebs-provisioner-776846bbff-pod19 0/1 CrashLoopBackOff 16 1h 192.168.167.129 node
+openebs openebs-snapshot-operator-5b5f97dd7f-np79k 0/2 CrashLoopBackOff 32 1h 192.168.167.130 node
```
**Troubleshooting**
-The cause of high memory consumption of kubelet is mainly due to the following.
+Perform the following steps to verify whether the issue is due to a misconfiguration while installing the network component (example commands follow this list).
+
+1. Check if your network related pods are running fine.
+
+2. Check if the OpenEBS provisioner HTTPS requests are reaching the apiserver.
-There are 3 modules are involved - cstor-istgt, kubelet and iscsiInitiator(iscsiadm). kubelet runs iscsiadm command to do discovery on cstor-istgt. If there is any delay in receiving response of discovery opcode (either due to network or delay in processing on target side), iscsiadm retries few times, and, gets into infinite loop dumping error messages as below:
+3. Use the latest version of network provider images.
+
+4. Try other network components such as Calico or kube-router, if you are not already using one of these.
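+
+Example checks (the deployment name assumes a default installation):
+
+```
+kubectl get pods -n kube-system
+kubectl logs -n openebs deploy/openebs-provisioner --tail=50
+```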
+
+### OpenEBS installation fails on Azure {#install-failed-azure-no-rbac-set}
+
+On AKS, while installing OpenEBS using Helm, you may see the following error.
+
+```
+$ helm install openebs/openebs --name openebs --namespace openebs
+```
```shell hideCopy
-iscsiadm: Connection to Discovery Address 127.0.0.1 failed
-iscsiadm: failed to send SendTargets PDU
-iscsiadm: connection login retries (reopen_max) 5 exceeded
-iscsiadm: Connection to Discovery Address 127.0.0.1 failed
-iscsiadm: failed to send SendTargets PDU
+Error: release openebs failed: clusterroles.rbac.authorization.k8s.io "openebs" isforbidden: attempt to grant extra privileges:[PolicyRule{Resources:["nodes"], APIGroups:["*"],Verbs:["get"]} PolicyRule{Resources:["nodes"],APIGroups:["*"], Verbs:["list"]}PolicyRule{Resources:["nodes"], APIGroups:["*"],Verbs:["watch"]} PolicyRule{Resources:["nodes/proxy"],APIGroups:["*"], Verbs:["get"]}PolicyRule{Resources:["nodes/proxy"], APIGroups:["*"],Verbs:["list"]} PolicyRule{Resources:["nodes/proxy"],APIGroups:["*"], Verbs:["watch"]}PolicyRule{Resources:["namespaces"], APIGroups:["*"],Verbs:["*"]} PolicyRule{Resources:["services"],APIGroups:["*"], Verbs:["*"]} PolicyRule{Resources:["pods"],APIGroups:["*"], Verbs:["*"]}PolicyRule{Resources:["deployments"], APIGroups:["*"],Verbs:["*"]} PolicyRule{Resources:["events"],APIGroups:["*"], Verbs:["*"]}PolicyRule{Resources:["endpoints"], APIGroups:["*"],Verbs:["*"]} PolicyRule{Resources:["persistentvolumes"],APIGroups:["*"], Verbs:["*"]} PolicyRule{Resources:["persistentvolumeclaims"],APIGroups:["*"], Verbs:["*"]}PolicyRule{Resources:["storageclasses"],APIGroups:["storage.k8s.io"], Verbs:["*"]}PolicyRule{Resources:["storagepools"], APIGroups:["*"],Verbs:["get"]} PolicyRule{Resources:["storagepools"], APIGroups:["*"],Verbs:["list"]} PolicyRule{NonResourceURLs:["/metrics"],Verbs:["get"]}] user=&{system:serviceaccount:kube-system:tiller6f3172cc-4a08-11e8-9af5-0a58ac1f1729 [system:serviceaccounts system:serviceaccounts:kube-systemsystem:authenticated] map[]} ownerrules=[]ruleResolutionErrors=[clusterroles.rbac.authorization.k8s.io"cluster-admin" not found]
```
-kubelet keeps taking this response and accumulates the memory. More details can be seen [here](https://github.com/openebs/openebs/issues/2382).
+**Troubleshooting**
+
+You must enable RBAC on Azure before OpenEBS installation. For more details, see [Prerequisites](../user-guides/local-engine-user-guide/prerequisites.mdx).
+
+### A multipath.conf file claims all SCSI devices in OpenShift {#multipath-conf-claims-all-scsi-devices-openshift}
+
+A multipath.conf file without either find_multipaths or a manual blacklist claims all SCSI devices.
-**Workaround**
+#### Workaround
-Restart the corresponding istgt pod to avoid memory consumption.
+1. Add the `find_multipaths` line to the `/etc/multipath.conf` file, similar to the following snippet.
-## Application and OpenEBS pods terminate/restart under heavy I/O load {#Pods-restart-terminate-when-heavy-load}
+ ```
+ defaults {
+ user_friendly_names yes
+ find_multipaths yes
+ }
+ ```
+
+2. Run `multipath -w /dev/sdc` command (replace the devname with your persistent devname).
+
+### Set Cluster-admin User Context
+
+For installation of OpenEBS, cluster-admin user context is a must. OpenEBS installs service accounts and custom resource definitions that are only allowed for cluster administrators.
+
+Use the `kubectl auth can-i` commands to verify that you have the cluster-admin context. For example, the following commands check whether you have the required access:
+
+```
+kubectl auth can-i 'create' 'namespace' -A
+kubectl auth can-i 'create' 'crd' -A
+kubectl auth can-i 'create' 'sa' -A
+kubectl auth can-i 'create' 'clusterrole' -A
+```
+
+If there is no cluster-admin user context already present, create one and use it. Use the following command to create the new context.
+
+```
+kubectl config set-context NAME [--cluster=cluster_nickname] [--user=user_nickname] [--namespace=namespace]
+```
+
+Example:
+
+```
+kubectl config set-context admin-ctx --cluster=gke_strong-eon-153112_us-central1-a_rocket-test2 --user=cluster-admin
+```
+
+Set the existing cluster-admin user context or the newly created context by using the following command.
+
+Example:
+
+```
+kubectl config use-context admin-ctx
+```
+
+Kubernetes Related
+
+### Application and OpenEBS pods terminate/restart under heavy I/O load {#Pods-restart-terminate-when-heavy-load}
This is caused due to lack of resources on the Kubernetes nodes, which causes the pods to evict under loaded conditions as the node becomes _unresponsive_. The pods transition from _Running_ state to _unknown_ state followed by _Terminating_ before restarting again.
@@ -115,9 +256,9 @@ You can resolve this issue by upgrading the Kubernetes cluster infrastructure re
Others
-## Nodes in the cluster reboots frequently almost everyday in openSUSE CaaS {#reboot-cluster-nodes}
+### Nodes in the cluster reboots frequently almost everyday in openSUSE CaaS {#reboot-cluster-nodes}
-Setup the cluster using RKE with openSUSE CaaS MicroOS using CNI Plugin Cilium. Install OpenEBS, create a PVC and allocate to a fio job/ busybox. Run FIO test on the same. Observed nodes in the cluster getting restarted on a schedule basis.
+Set up the cluster using RKE with openSUSE CaaS MicroOS using the CNI plugin Cilium. Install OpenEBS, create a PVC, and allocate it to a fio job/busybox. Run a FIO test on the same. Nodes in the cluster were observed getting restarted on a scheduled basis.
**Troubleshooting**
@@ -176,7 +317,7 @@ There are 2 possible solutions.
Approach1:
-DO the following on each nodes to stop the transactional update.
+Do the following on each node to stop the transactional update.
```
systemctl disable --now rebootmgr.service
@@ -189,6 +330,20 @@ Approach2:
 Set the reboot timer schedule at a different time, i.e. staggered at various intervals of the day, so that only one node gets rebooted at a time.
-## See Also:
+### How to fetch the OpenEBS Dynamic Local Provisioner logs?
+
+**Workaround:**
+
+Review the logs of the OpenEBS Local PV provisioner. The OpenEBS Dynamic Local Provisioner logs can be fetched using the following command:
+
+```
+kubectl logs -n openebs -l openebs.io/component-name=openebs-localpv-provisioner
+```
+
+## See Also
-[Troubleshooting Install](/docs/troubleshooting/install) [Troubleshooting Uninstall](/docs/troubleshooting/uninstall) [Troubleshooting NDM](/docs/troubleshooting/ndm) [Troubleshooting Jiva](/docs/troubleshooting/jiva) [Troubleshooting cStor](/docs/troubleshooting/cstor) [Troubleshooting Local PV](/docs/troubleshooting/localpv) [Troubleshooting Mayastor](/docs/troubleshooting/mayastor) [FAQs](/docs/additional-info/faqs) [Seek support or help](/docs/introduction/community) [Latest release notes](/docs/introduction/releases)
+[FAQs](../faqs/faqs.md)
+[Latest Release Notes](../releases.md)
+[OpenEBS Community](../community.md)
+[OpenEBS GitHub repository](https://github.com/openebs/openebs/issues)
+[StackOverflow under OpenEBS tag](https://stackoverflow.com/questions/tagged/openebs)
diff --git a/docs/main/troubleshooting/troubleshooting-replicated-engine.md b/docs/main/troubleshooting/troubleshooting-replicated-engine.md
index a560f70de..f0c0a54a7 100644
--- a/docs/main/troubleshooting/troubleshooting-replicated-engine.md
+++ b/docs/main/troubleshooting/troubleshooting-replicated-engine.md
@@ -1,13 +1,13 @@
---
-id: troubleshooting-re
+id: troubleshootingre
title: Troubleshooting - Replicated Engine
-slug: /troubleshooting
+slug: /troubleshootingre
keywords:
- OpenEBS
- OpenEBS troubleshooting
description: This page contains a list of OpenEBS related troubleshooting which contains information like troubleshooting installation, troubleshooting uninstallation, and troubleshooting replicated engine.
---
-# Basic Troubleshooting
+# Troubleshooting - Replicated Engine
## Logs
diff --git a/docs/main/user-guides/cstor/advanced-topic.md b/docs/main/user-guides/cstor/advanced-topic.md
deleted file mode 100644
index 0ce45decb..000000000
--- a/docs/main/user-guides/cstor/advanced-topic.md
+++ /dev/null
@@ -1,1180 +0,0 @@
----
-id: advanced
-title: cStor User Guide - Advanced
-keywords:
- - cStor csi
- - cStor User Guide
- - Scaling cStor pools
- - Cloning a cStor Snapshot
- - Cleaning up a cStor setup
- - Expanding a cStor volume
- - Tuning cStor Volumes
-description: This user guide of cStor contains advanced level of cStor related topics such as expanding a cStor volume, taking Snapshot and Clone of a cStor volume, scaling up cStor pools, Block Device Tagging, Tuning cStor Pools and Tuning cStor Volumes
----
-
-This cStor user guide covers advanced topics such as expanding a cStor volume, taking a snapshot and clone of a cStor volume, scaling up cStor pools, block device tagging, tuning cStor pools, and tuning cStor volumes.
-
-- [Scaling up cStor pools](#scaling-cstor-pools)
-- [Snapshot and Clone of a cStor volume](#snapshot-and-clone-of-a-cstor-volume)
-- [Expanding a cStor volume](#expanding-a-cstor-volume)
-- [Block Device Tagging](#block-device-tagging)
-- [Tuning cStor Pools](#tuning-cstor-pools)
-- [Tuning cStor Volumes](#tuning-cstor-volumes)
-
-## Scaling cStor pools
-
-Once the cStor storage pools are created, you can scale up your existing cStor pools.
-To scale up the pool size, you need to edit the CSPC YAML that was used for the creation of the CStorPoolCluster.
-
-Scaling up can be done by two methods:
-
-1. [Adding new nodes(with new disks) to the existing CSPC](#adding-disk-new-node)
-2. [Adding new disks to existing nodes](#adding-disk-same-node)
-
-**Note:** The `dataRaidGroupType:` can be set as either stripe or mirror as per your requirement. In the following example, it is configured as stripe.
-
-### Adding new nodes(with new disks) to the existing CSPC
-
-A new node spec needs to be added to the previously deployed YAML:
-
-```
-apiVersion: cstor.openebs.io/v1
-kind: CStorPoolCluster
-metadata:
- name: cstor-disk-pool
- namespace: openebs
-spec:
- pools:
- - nodeSelector:
- kubernetes.io/hostname: "worker-node-1"
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: "blockdevice-10ad9f484c299597ed1e126d7b857967"
- poolConfig:
- dataRaidGroupType: "stripe"
-
- - nodeSelector:
- kubernetes.io/hostname: "worker-node-2"
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: "blockdevice-3ec130dc1aa932eb4c5af1db4d73ea1b"
- poolConfig:
- dataRaidGroupType: "stripe"
-
- - nodeSelector:
- kubernetes.io/hostname: "worker-node-3"
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: "blockdevice-01afcdbe3a9c9e3b281c7133b2af1b68"
- poolConfig:
- dataRaidGroupType: "stripe"
-
-  # New node spec added -- to create a cStor pool on worker-node-4
- - nodeSelector:
- kubernetes.io/hostname: "worker-node-4"
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: "blockdevice-02d9b2dc8954ce0347850b7625375e24"
- poolConfig:
- dataRaidGroupType: "stripe"
-
-```
-
-Now verify the status of CSPC and CSPI(s):
-
-```
-kubectl get cspc -n openebs
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME HEALTHYINSTANCES PROVISIONEDINSTANCES DESIREDINSTANCES AGE
-cspc-disk-pool 4 4 4 8m5s
-```
-
-```
-kubectl get cspi -n openebs
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME HOSTNAME FREE CAPACITY READONLY STATUS AGE
-cspc-disk-pool-d9zf worker-node-1 28800M 28800071k false ONLINE 7m50s
-cspc-disk-pool-lr6z worker-node-2 28800M 28800056k false ONLINE 7m50s
-cspc-disk-pool-x4b4 worker-node-3 28800M 28800056k false ONLINE 7m50s
-cspc-disk-pool-rt4k worker-node-4 28800M 28800056k false ONLINE 15s
-
-```
-
-As a result, we can see that a new pool has been added, increasing the number of pools to 4.
-
-### Adding new disks to existing nodes
-
-A new `blockDeviceName` under `blockDevices` needs to be added to the previously deployed YAML. Execute the following command to edit the CSPC:
-
-```
-kubectl edit cspc -n openebs cstor-disk-pool
-```
-
-Sample YAML:
-
-```
-apiVersion: cstor.openebs.io/v1
-kind: CStorPoolCluster
-metadata:
- name: cstor-disk-pool
- namespace: openebs
-spec:
- pools:
- - nodeSelector:
- kubernetes.io/hostname: "worker-node-1"
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: "blockdevice-10ad9f484c299597ed1e126d7b857967"
- - blockDeviceName: "blockdevice-f036513d98f6c7ce31fd6e1ac3fad2f5" //# New blockdevice added
- poolConfig:
- dataRaidGroupType: "stripe"
-
- - nodeSelector:
- kubernetes.io/hostname: "worker-node-2"
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: "blockdevice-3ec130dc1aa932eb4c5af1db4d73ea1b"
- - blockDeviceName: "blockdevice-fb7c995c4beccd6c872b7b77aad32932" //# New blockdevice added
- poolConfig:
- dataRaidGroupType: "stripe"
-
- - nodeSelector:
- kubernetes.io/hostname: "worker-node-3"
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: "blockdevice-01afcdbe3a9c9e3b281c7133b2af1b68"
- - blockDeviceName: "blockdevice-46ddda7223b35b81415b0a1b12e40bcb" //# New blockdevice added
- poolConfig:
- dataRaidGroupType: "stripe"
-
-```
-
-## Snapshot and Clone of a cStor Volume
-
-An OpenEBS snapshot is a set of reference markers for data at a particular point in time. A snapshot acts as a detailed table of contents, with accessible copies of data that the user can roll back to the required point in time. Snapshots in OpenEBS are instantaneous and are managed through kubectl.
-
-During the installation of OpenEBS, a snapshot-controller and a snapshot-provisioner are set up, which assist in taking the snapshots. During snapshot creation, the snapshot-controller creates VolumeSnapshot and VolumeSnapshotData custom resources. The snapshot-provisioner is used to restore a snapshot as a new Persistent Volume (PV) via dynamic provisioning.
-
-### Creating a cStor volume Snapshot
-
-1. Before proceeding to create a cStor volume snapshot and use it further for restoration, it is necessary to create a `VolumeSnapshotClass`. Copy the following YAML specification into a file called `snapshot_class.yaml`.
-
- ```
- kind: VolumeSnapshotClass
- apiVersion: snapshot.storage.k8s.io/v1
- metadata:
- name: csi-cstor-snapshotclass
- annotations:
- snapshot.storage.kubernetes.io/is-default-class: "true"
- driver: cstor.csi.openebs.io
- deletionPolicy: Delete
- ```
-
- The deletion policy can be set as `Delete or Retain`. When it is set to Retain, the underlying physical snapshot on the storage cluster is retained even when the VolumeSnapshot object is deleted.
- To apply, execute:
-
- ```
- kubectl apply -f snapshot_class.yaml
- ```
-
- **Note:** In clusters that only install `v1beta1` version of VolumeSnapshotClass as the supported version(eg. OpenShift(OCP) 4.5 ), the following error might be encountered.
-
- ```
- no matches for kind "VolumeSnapshotClass" in version "snapshot.storage.k8s.io/v1"
- ```
-
- In such cases, the apiVersion needs to be updated to `apiVersion: snapshot.storage.k8s.io/v1beta1`
-
-2. To create the snapshot, you need to create a YAML specification and provide the required PVC name in it. The only prerequisite check to be performed is to ensure that there are no stale entries of snapshot and snapshot data before creating a new snapshot. Copy the following YAML specification into a file called `snapshot.yaml`.
-
- ```
- apiVersion: snapshot.storage.k8s.io/v1
- kind: VolumeSnapshot
- metadata:
- name: cstor-pvc-snap
- spec:
- volumeSnapshotClassName: csi-cstor-snapshotclass
- source:
- persistentVolumeClaimName: cstor-pvc
- ```
-
- Run the following command to create the snapshot,
-
- ```
- kubectl create -f snapshot.yaml
- ```
-
- To list the snapshots, execute:
-
- ```
- kubectl get volumesnapshots -n default
- ```
-
- Sample Output:
-
- ```shell hideCopy
- NAME AGE
- cstor-pvc-snap 10s
- ```
-
- A VolumeSnapshot is analogous to a PVC and is associated with a `VolumeSnapshotContent` object that represents the actual snapshot. To identify the VolumeSnapshotContent object for the VolumeSnapshot execute:
-
- ```
- kubectl describe volumesnapshots cstor-pvc-snap -n default
- ```
-
- Sample Output:
-
- ```shell hideCopy
- Name: cstor-pvc-snap
- Namespace: default
- .
- .
- .
- Spec:
- Snapshot Class Name: cstor-csi-snapshotclass
- Snapshot Content Name: snapcontent-e8d8a0ca-9826-11e9-9807-525400f3f660
- Source:
- API Group:
- Kind: PersistentVolumeClaim
- Name: cstor-pvc
- Status:
- Creation Time: 2020-06-20T15:27:29Z
- Ready To Use: true
- Restore Size: 5Gi
-
- ```
-
- The `SnapshotContentName` identifies the `VolumeSnapshotContent` object which serves this snapshot. The `Ready To Use` parameter indicates that the Snapshot has been created successfully and can be used to create a new PVC.
-
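-Optionally, the backing `VolumeSnapshotContent` object can also be listed to confirm that it is ready:
-
-```
-kubectl get volumesnapshotcontent
-```
-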
-**Note:** All cStor snapshots should be created in the same namespace as the source PVC.
-
-### Cloning a cStor Snapshot
-
-Once the snapshot is created, you can use it to create a PVC. In order to restore a specific snapshot, you need to create a new PVC that refers to the snapshot. Below is an example of a YAML file that restores and creates a PVC from a snapshot.
-
-```
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
- name: restore-cstor-pvc
-spec:
- storageClassName: cstor-csi-disk
- dataSource:
- name: cstor-pvc-snap
- kind: VolumeSnapshot
- apiGroup: snapshot.storage.k8s.io
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 5Gi
-```
-
-The `dataSource` shows that the PVC must be created using a VolumeSnapshot named `cstor-pvc-snap` as the source of the data. This instructs cStor CSI to create a PVC from the snapshot. Once the PVC is created, it can be attached to a pod and used just like any other PVC.
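-
-Assuming the above specification is saved as `restore-cstor-pvc.yaml` (an illustrative filename), apply it with:
-
-```
-kubectl apply -f restore-cstor-pvc.yaml
-```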
-
-To verify the creation of PVC execute:
-
-```
-kubectl get pvc
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
-restore-cstor-pvc Bound pvc-2f2d65fc-0784-11ea-b887-42010a80006c 5Gi RWO cstor-csi-disk 5s
-```
-
-## Expanding a cStor volume
-
-OpenEBS cStor introduces support for expanding a PersistentVolume using the CSI provisioner. Provided cStor is configured to function as a CSI provisioner, you can expand PVs that have been created by the cStor CSI Driver. This feature is supported with Kubernetes versions 1.16 and above.
-
-For expanding a cStor PV, you must ensure the following items are taken care of:
-
-- The StorageClass must support volume expansion. This can be done by editing the StorageClass definition to set `allowVolumeExpansion: true`.
-- To resize a PV, edit the PVC definition and update `spec.resources.requests.storage` to reflect the newly desired size, which must be greater than the original size.
-- The PV must be attached to a pod for it to be resized. There are two scenarios when resizing a cStor PV:
- - If the PV is attached to a pod, cStor CSI driver expands the volume on the storage backend, re-scans the device and resizes the filesystem.
- - When attempting to resize an unattached PV, cStor CSI driver expands the volume on the storage backend. Once the PVC is bound to a pod, the driver re-scans the device and resizes the filesystem. Kubernetes then updates the PVC size after the expansion operation has successfully completed.
-
-The example below shows how to expand a cStor volume and how it works. For an already existing StorageClass, you can edit the StorageClass to include the `allowVolumeExpansion: true` parameter.
-
-```
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
- name: cstor-csi-disk
-provisioner: cstor.csi.openebs.io
-allowVolumeExpansion: true
-parameters:
- replicaCount: "3"
- cstorPoolCluster: "cspc-disk-pool"
- cas-type: "cstor"
-```
-
-For example, an application busybox pod is using the below PVC associated with a PV. To get the status of the pod, execute:
-
-```
-$ kubectl get pods
-```
-
-The following is a Sample Output:
-
-```shell hideCopy
-NAME READY STATUS RESTARTS AGE
-busybox 1/1 Running 0 38m
-```
-
-To list PVCs, execute:
-
-```
-$ kubectl get pvc
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
-cstor-pvc Bound pvc-849bd646-6d3f-4a87-909e-2416d4e00904 5Gi RWO cstor-csi-disk 1d
-```
-
-To list PVs, execute:
-
-```
-$ kubectl get pv
-```
-
-Sample Output:
-
-```
-NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
-pvc-849bd646-6d3f-4a87-909e-2416d4e00904 5Gi RWO Delete Bound default/cstor-pvc cstor-csi-disk 40m
-```
-
-To resize the PV that has been created from 5Gi to 10Gi, edit the PVC definition and update `spec.resources.requests.storage` to 10Gi. It may take a few seconds to update the actual size in the PVC resource; wait for the updated capacity to reflect in the PVC status (`pvc.status.capacity.storage`). It is internally a two-step process for volumes containing a file system:
-
-- Volume expansion
-- FileSystem expansion
-
-```
-$ kubectl edit pvc cstor-pvc
-```
-
-```
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
- annotations:
- pv.kubernetes.io/bind-completed: "yes"
- pv.kubernetes.io/bound-by-controller: "yes"
- volume.beta.kubernetes.io/storage-provisioner: cstor.csi.openebs.io
- creationTimestamp: "2020-06-24T12:22:24Z"
- finalizers:
- - kubernetes.io/pvc-protection
- name: cstor-pvc
- namespace: default
- resourceVersion: "766"
- selfLink: /api/v1/namespaces/default/persistentvolumeclaims/cstor-pvc
- uid: 849bd646-6d3f-4a87-909e-2416d4e00904
-spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 10Gi
-```
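-
-Alternatively, the same resize can be requested non-interactively with a patch (a sketch equivalent to the edit above):
-
-```
-kubectl patch pvc cstor-pvc -p '{"spec":{"resources":{"requests":{"storage":"10Gi"}}}}'
-```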
-
-Now, we can validate that the resize has worked correctly by checking the size of the PVC or PV, or by describing the PVC to get all the events.
-
-```
-$ kubectl describe pvc cstor-pvc
-```
-
-```shell hideCopy
-Name: cstor-pvc
-Namespace: default
-StorageClass: cstor-csi-disk
-Status: Bound
-Volume: pvc-849bd646-6d3f-4a87-909e-2416d4e00904
-Labels:
-Annotations: pv.kubernetes.io/bind-completed: yes
- pv.kubernetes.io/bound-by-controller: yes
- volume.beta.kubernetes.io/storage-provisioner: cstor.csi.openebs.io
-Finalizers: [kubernetes.io/pvc-protection]
-Capacity: 10Gi
-Access Modes: RWO
-VolumeMode: Filesystem
-Mounted By: busybox-cstor
-Events:
- Type Reason Age From Message
- ---- ------ ---- ---- -------
- Normal ExternalProvisioning 46m (x2 over 46m) persistentvolume-controller waiting for a volume to be created, either by external provisioner "cstor.csi.openebs.io" or manually created by system administrator
- Normal Provisioning 46m cstor.csi.openebs.io_openebs-cstor-csi-controller-0_bcba3893-c1c4-4e86-aee4-de98858ec0b7 External provisioner is provisioning volume for claim "default/claim-csi-123"
- Normal ProvisioningSucceeded 46m cstor.csi.openebs.io_openebs-cstor-csi-controller-0_bcba3893-c1c4-4e86-aee4-de98858ec0b7 Successfully provisioned volume pvc-849bd646-6d3f-4a87-909e-2416d4e00904
- Warning ExternalExpanding 93s volume_expand Ignoring the PVC: didn't find a plugin capable of expanding the volume; waiting for an external controller to process this PVC.
- Normal Resizing 93s external-resizer cstor.csi.openebs.io External resizer is resizing volume pvc-849bd646-6d3f-4a87-909e-2416d4e00904
- Normal FileSystemResizeRequired 88s external-resizer cstor.csi.openebs.io Require file system resize of volume on node
- Normal FileSystemResizeSuccessful 4s kubelet, 127.0.0.1 MountVolume.NodeExpandVolume succeeded for volume "pvc-849bd646-6d3f-4a87-909e-2416d4e00904"
-```
-
-```
-$ kubectl get pvc
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
-cstor-pvc Bound pvc-849bd646-6d3f-4a87-909e-2416d4e00904 10Gi RWO cstor-csi-disk 1d
-```
-
-```
-$ kubectl get pv
-```
-
-Sample Output:
-
-```
-NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
-pvc-849bd646-6d3f-4a87-909e-2416d4e00904 10Gi RWO Delete Bound default/cstor-pvc cstor-csi-disk 40m
-```
-
-## Block Device Tagging
-
-NDM provides the ability to reserve block devices for specific applications by adding tag(s) to your block device(s). This feature can be used by cStor operators to specify the block devices which should be consumed by cStor pools and, conversely, restrict anyone else from using those block devices. This helps protect against manual errors by users when specifying the block devices in the CSPC YAML.
-
-1. Consider the following block devices in a Kubernetes cluster; they will be used to provision a storage pool. List the labels added to these block devices:
-
-```
-kubectl get bd -n openebs --show-labels
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME NODENAME SIZE CLAIMSTATE STATUS AGE LABELS
-blockdevice-00439dc464b785256242113bf0ef64b9 worker-node-3 21473771008 Unclaimed Active 34h kubernetes.io/hostname=worker-node-3,ndm.io/blockdevice-type=blockdevice,ndm.io/managed=true
-blockdevice-022674b5f97f06195fe962a7a61fcb64 worker-node-1 21473771008 Unclaimed Active 34h kubernetes.io/hostname=worker-node-1,ndm.io/blockdevice-type=blockdevice,ndm.io/managed=true
-blockdevice-241fb162b8d0eafc640ed89588a832df worker-node-2 21473771008 Unclaimed Active 34h kubernetes.io/hostname=worker-node-2,ndm.io/blockdevice-type=blockdevice,ndm.io/managed=true
-
-```
-
-2. Now, to understand how block device tagging works, we will add `openebs.io/block-device-tag=fast` to the block device attached to worker-node-3 _(i.e. blockdevice-00439dc464b785256242113bf0ef64b9)_.
-
-```
-kubectl label bd blockdevice-00439dc464b785256242113bf0ef64b9 -n openebs openebs.io/block-device-tag=fast
-```
-
-```
-kubectl get bd -n openebs blockdevice-00439dc464b785256242113bf0ef64b9 --show-labels
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME NODENAME SIZE CLAIMSTATE STATUS AGE LABELS
-blockdevice-00439dc464b785256242113bf0ef64b9 worker-node-3 21473771008 Unclaimed Active 34h kubernetes.io/hostname=worker-node-3,ndm.io/blockdevice-type=blockdevice,ndm.io/managed=true,openebs.io/block-device-tag=fast
-```
-
-Now, provision cStor pools using the following CSPC YAML. Note that `openebs.io/allowed-bd-tags:` is set to `cstor,ssd`, which ensures the CSPC will be created using block devices that either have the label set to cstor or ssd, or have no such label.
-
-```
-apiVersion: cstor.openebs.io/v1
-kind: CStorPoolCluster
-metadata:
-  name: cspc-disk-pool
-  namespace: openebs
-  annotations:
-    # This annotation helps to specify the BD that can be allowed.
-    openebs.io/allowed-bd-tags: cstor,ssd
-spec:
-  pools:
-    - nodeSelector:
-        kubernetes.io/hostname: "worker-node-1"
-      dataRaidGroups:
-        - blockDevices:
-            - blockDeviceName: "blockdevice-022674b5f97f06195fe962a7a61fcb64"
-      poolConfig:
-        dataRaidGroupType: "stripe"
-    - nodeSelector:
-        kubernetes.io/hostname: "worker-node-2"
-      dataRaidGroups:
-        - blockDevices:
-            - blockDeviceName: "blockdevice-241fb162b8d0eafc640ed89588a832df"
-      poolConfig:
-        dataRaidGroupType: "stripe"
-    - nodeSelector:
-        kubernetes.io/hostname: "worker-node-3"
-      dataRaidGroups:
-        - blockDevices:
-            - blockDeviceName: "blockdevice-00439dc464b785256242113bf0ef64b9"
-      poolConfig:
-        dataRaidGroupType: "stripe"
-```
-
-Apply the above CSPC file so that the CSPIs get created, and check the CSPI status.
-
-```
-kubectl apply -f cspc.yaml
-```
-
-```
-kubectl get cspi -n openebs
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME HOSTNAME FREE CAPACITY READONLY PROVISIONEDREPLICAS HEALTHYREPLICAS STATUS AGE
-cspc-stripe-b9f6 worker-node-2 19300M 19300614k false 0 0 ONLINE 89s
-cspc-stripe-q7xn worker-node-1 19300M 19300614k false 0 0 ONLINE 89s
-
-```
-
-Note that CSPI for node **worker-node-3** is not created because:
-
-- The CSPC YAML created above has `openebs.io/allowed-bd-tags: cstor,ssd` in its annotation. This means that the CSPC operator will only consider block devices for provisioning that either do not have the BD tag `openebs.io/block-device-tag` on the block device, or have the tag with the value set as `cstor` or `ssd`.
-- In this case, blockdevice-022674b5f97f06195fe962a7a61fcb64 (on node worker-node-1) and blockdevice-241fb162b8d0eafc640ed89588a832df (on node worker-node-2) do not have the label. Hence, no restrictions are applied on them and they can be used by the CSPC operator for pool provisioning.
-- For blockdevice-00439dc464b785256242113bf0ef64b9 (on node worker-node-3), the label `openebs.io/block-device-tag` has the value fast. But on the CSPC, the annotation openebs.io/allowed-bd-tags has the values cstor and ssd. There is no fast keyword present in the annotation value, and hence this block device cannot be used.
-
-**NOTE:**
-
-1. To allow multiple tag values, the bd tag annotation can be written in the following comma-separated manner:
-
-```
- openebs.io/allowed-bd-tags: fast,ssd,nvme
-```
-
-2. A BD tag can have only one value on the block device CR. For example,
-   - openebs.io/block-device-tag: fast
-     Block devices should not be tagged in a comma-separated format. One reason for this is that the cStor allowed-bd-tags annotation takes comma-separated values, and a value like `fast,ssd` can never be interpreted as a single word in cStor; hence, BDs tagged in the above format cannot be utilised by cStor.
-3. If any block device mentioned in the CSPC has an empty value for the `openebs.io/block-device-tag`, then those block devices will not be considered for pool provisioning and other operations. Block devices with an empty tag value are implicitly not allowed by the CSPC operator.
-
-## Tuning cStor Pools
-
-Users can set the available performance tunings for cStor pools based on their workload. Tuning cStor pool(s) via the CSPC is the recommended way to do it. Below are the tunings that can be applied:
-
-**Resource requests and limits:** This ensures high quality of service when applied for the pool manager containers.
-
-**Toleration for pool manager pod:** This ensures scheduling of pool pods on the tainted nodes.
-
-**Set priority class:** Sets the priority levels as required.
-
-**Compression:** This helps in setting the compression for cStor pools.
-
-**ReadOnly threshold:** Helps in specifying read only thresholds for cStor pools.
-
-**Example configuration for Resource and Limits:**
-
-The following CSPC YAML specifies resources and auxResources that will be applied to all pool manager pods of the CSPC. Resources are applied to the cstor-pool containers and auxResources are applied to the sidecar containers, i.e. cstor-pool-mgmt and pool-exporter.
-
-In the following CSPC YAML there is only one pool spec (@spec.pools). It is also possible to override the resource and limit values for a specific pool.
-
-```
-apiVersion: cstor.openebs.io/v1
-kind: CStorPoolCluster
-metadata:
- name: cstor-disk-pool
- namespace: openebs
-spec:
- resources:
- requests:
- memory: "2Gi"
- cpu: "250m"
- limits:
- memory: "4Gi"
- cpu: "500m"
-
- auxResources:
- requests:
- memory: "500Mi"
- cpu: "100m"
- limits:
- memory: "1Gi"
- cpu: "200m"
- pools:
- - nodeSelector:
- kubernetes.io/hostname: worker-node-1
-
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f36
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f37
-
- poolConfig:
- dataRaidGroupType: mirror
-```
-
-The following CSPC YAML shows how resources and limits can be overridden for a specific pool. In it, there are no resources and auxResources specified at the pool level for worker-node-1 and worker-node-2, but they are specified for worker-node-3. In this case, for worker-node-1 and worker-node-2 the resources and auxResources are applied from @spec.resources and @spec.auxResources respectively, whereas for worker-node-3 they are applied from @spec.pools[2].poolConfig.resources and @spec.pools[2].poolConfig.auxResources respectively.
-
-```
-apiVersion: cstor.openebs.io/v1
-kind: CStorPoolCluster
-metadata:
- name: cstor-disk-pool
- namespace: openebs
-spec:
- resources:
- requests:
- memory: "64Mi"
- cpu: "250m"
- limits:
- memory: "128Mi"
- cpu: "500m"
- auxResources:
- requests:
- memory: "50Mi"
- cpu: "400m"
- limits:
- memory: "100Mi"
- cpu: "400m"
- pools:
- - nodeSelector:
- kubernetes.io/hostname: worker-node-1
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f36
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f37
- poolConfig:
- dataRaidGroupType: mirror
-
- - nodeSelector:
- kubernetes.io/hostname: worker-node-2
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f39
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f40
- poolConfig:
- dataRaidGroupType: mirror
-
- - nodeSelector:
- kubernetes.io/hostname: worker-node-3
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f42
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f43
- poolConfig:
- dataRaidGroupType: mirror
- resources:
- requests:
- memory: 70Mi
- cpu: 300m
- limits:
- memory: 130Mi
- cpu: 600m
- auxResources:
- requests:
- memory: 60Mi
- cpu: 500m
- limits:
- memory: 120Mi
- cpu: 500m
-
-```
-
-**Example configuration for Tolerations:**
-
-Tolerations are applied in a similar manner as resources and auxResources. The following is a sample CSPC YAML that has tolerations specified. For worker-node-1 and worker-node-2 the tolerations are applied from @spec.tolerations, but for worker-node-3 they are applied from @spec.pools[2].poolConfig.tolerations.
-
-```
-apiVersion: cstor.openebs.io/v1
-kind: CStorPoolCluster
-metadata:
- name: cstor-disk-pool
- namespace: openebs
-spec:
-
- tolerations:
- - key: data-plane-node
- operator: Equal
- value: true
- effect: NoSchedule
-
- pools:
- - nodeSelector:
- kubernetes.io/hostname: worker-node-1
-
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f36
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f37
-
- poolConfig:
- dataRaidGroupType: mirror
-
- - nodeSelector:
- kubernetes.io/hostname: worker-node-2
-
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f39
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f40
-
- poolConfig:
- dataRaidGroupType: mirror
-
- - nodeSelector:
- kubernetes.io/hostname: worker-node-3
-
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f42
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f43
-
- poolConfig:
- dataRaidGroupType: mirror
- tolerations:
- - key: data-plane-node
- operator: Equal
- value: true
- effect: NoSchedule
-
- - key: apac-zone
- operator: Equal
- value: true
- effect: NoSchedule
-```
-
-**Example configuration for Priority Class:**
-
-Priority classes are also applied in a similar manner as resources and auxResources. The following is a sample CSPC YAML that has a priority class specified. For worker-node-1 and worker-node-2 the priority class is applied from @spec.priorityClassName, but for worker-node-3 it is applied from @spec.pools[2].poolConfig.priorityClassName. For more information about priority classes, see [PriorityClass](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass).
-
-**Note:**
-
-1. The priority classes need to be created beforehand. In this case, the high-priority and ultra-priority priority classes should exist; a sample manifest is shown after the example below.
-2. The index starts from 0 for @.spec.pools list.
-
- ```
- apiVersion: cstor.openebs.io/v1
- kind: CStorPoolCluster
- metadata:
- name: cstor-disk-pool
- namespace: openebs
- spec:
-
- priorityClassName: high-priority
-
- pools:
- - nodeSelector:
- kubernetes.io/hostname: worker-node-1
-
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f36
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f37
-
- poolConfig:
- dataRaidGroupType: mirror
-
- - nodeSelector:
- kubernetes.io/hostname: worker-node-2
-
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f39
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f40
-
- poolConfig:
- dataRaidGroupType: mirror
-
- - nodeSelector:
- kubernetes.io/hostname: worker-node-3
-
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f42
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f43
-
- poolConfig:
-           dataRaidGroupType: mirror
-           priorityClassName: ultra-priority
- ```
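-
-For reference, the high-priority class used in the example above could be created beforehand with a standard Kubernetes PriorityClass manifest along these lines; the value and description are illustrative:
-
-```
-apiVersion: scheduling.k8s.io/v1
-kind: PriorityClass
-metadata:
-  name: high-priority
-value: 1000000
-globalDefault: false
-description: "Priority class reserved for cStor pool manager pods."
-```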
-
- **Example configuration for Compression:**
-
- Compression values can be set at the **pool level only**. There is no override mechanism as there is for tolerations, resources, auxResources, and priorityClassName. The compression value must be one of:
-
- - on
- - off
- - lzjb
- - gzip
- - gzip-[1-9]
- - zle
- - lz4
-
-**Note:** lz4 is the default compression algorithm used if the compression field is left unspecified on the CSPC. Below is a sample YAML that has compression specified.
-
-```
-apiVersion: cstor.openebs.io/v1
-kind: CStorPoolCluster
-metadata:
- name: cstor-disk-pool
- namespace: openebs
-spec:
- pools:
- - nodeSelector:
- kubernetes.io/hostname: worker-node-1
-
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f36
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f37
-
- poolConfig:
- dataRaidGroupType: mirror
- compression: lz4
-```
-
-**Example configuration for Read Only Threshold:**
-
-The read-only (RO) threshold can be set in a similar manner as compression. ROThresholdLimit is the threshold limit (as a percentage) for pool read-only mode. If the ROThresholdLimit percentage of the pool storage is consumed, the pool is set to read-only. If ROThresholdLimit is set to 100, the entire pool storage can be used. When unspecified on the CSPC, it defaults to 85%. The ROThresholdLimit value must satisfy 0 < ROThresholdLimit <= 100. The following CSPC YAML has the read-only threshold percentage specified.
-
-```
-apiVersion: cstor.openebs.io/v1
-kind: CStorPoolCluster
-metadata:
- name: cstor-csi-disk
- namespace: openebs
-spec:
- pools:
- - nodeSelector:
- kubernetes.io/hostname: worker-node-1
-
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f36
- - blockDeviceName: blockdevice-ada8ef910929513c1ad650c08fbe3f37
-
- poolConfig:
- dataRaidGroupType: mirror
-
- roThresholdLimit : 70
-```
-
-## Tuning cStor Volumes
-
-Similar to tuning of the cStor pool cluster, there are ways to tune cStor volumes. cStor volumes can be provisioned using different policy configurations. However, a `CStorVolumePolicy` needs to be created first. It must be created prior to the creation of the StorageClass, as the `CStorVolumePolicy` name needs to be specified to provision a cStor volume based on the configured policy. A sample StorageClass YAML that utilises `cstorVolumePolicy` is given below for reference:
-
-```
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
- name: cstor-csi-disk
-provisioner: cstor.csi.openebs.io
-allowVolumeExpansion: true
-parameters:
-  replicaCount: "1"
-  cstorPoolCluster: "cstor-disk-pool"
-  cas-type: "cstor"
-  fsType: "xfs" # the default fsType is ext4
-  cstorVolumePolicy: "csi-volume-policy"
-```
-
-If the volume policy is not created before volume provisioning and needs to be modified later,
-it can be changed by editing the CStorVolumeConfig (CVC) resource on a per-volume basis; the CVC controller reconciles the change to the respective volume resources.
-Each PVC creation request creates a CStorVolumeConfig (CVC) resource which can be used to manage the volume, its policies, and any supported operations (like scale up/down), on a per-volume basis.
-To edit, execute:
-
-```
-kubectl edit cvc -n openebs
-```
-
-Sample Output:
-
-```shell hideCopy
-apiVersion: cstor.openebs.io/v1
-kind: CStorVolumeConfig
-metadata:
- annotations:
- openebs.io/persistent-volume-claim: "cstor-pvc"
- openebs.io/volume-policy: csi-volume-policy
- openebs.io/volumeID: pvc-25e79ecb-8357-49d4-83c2-2e63ebd66278
- creationTimestamp: "2020-07-22T11:36:13Z"
- finalizers:
- - cvc.openebs.io/finalizer
- generation: 3
- labels:
- cstor.openebs.io/template-hash: "3278395555"
- openebs.io/cstor-pool-cluster: cstor-disk-pool
- name: pvc-25e79ecb-8357-49d4-83c2-2e63ebd66278
- namespace: openebs
- resourceVersion: "1283"
- selfLink: /apis/cstor.openebs.io/v1/namespaces/openebs/cstorvolumeconfigs/pvc-25e79ecb-8357-49d4-83c2-2e63ebd66278
- uid: 389320d8-5f0b-439d-8ef2-59f4d01b393a
-publish:
- nodeId: 127.0.0.1
-spec:
- capacity:
- storage: 1Gi
- cstorVolumeRef:
- apiVersion: cstor.openebs.io/v1
- kind: CStorVolume
- name: pvc-25e79ecb-8357-49d4-83c2-2e63ebd66278
- namespace: openebs
- resourceVersion: "1260"
- uid: ea6e09f2-1e65-41ab-820a-ed1ecd14873c
- policy:
- provision:
- replicaAffinity: true
- replica:
- zvolWorkers: "1"
- replicaPoolInfo:
- - poolName: cstor-disk-pool-vn92
- target:
- affinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: openebs.io/target-affinity
- operator: In
- values:
- - percona
- namespaces:
- - default
- topologyKey: kubernetes.io/hostname
- auxResources:
- limits:
- cpu: 500m
- memory: 128Mi
- requests:
- cpu: 250m
- memory: 64Mi
- luWorkers: 8
- priorityClassName: system-cluster-critical
- queueDepth: "16"
- resources:
- limits:
- cpu: 500m
- memory: 128Mi
- requests:
- .
- .
- .
-```
-
-The list of policies that can be configured is as follows:
-
-- [Replica Affinity to create a volume replica on specific pool](#replica-affinity)
-
-- [Volume Target Pod Affinity](#volume-target-pod-affinity)
-
-- [Volume Tunable](#volume-tunable)
-
-- [Memory and CPU Resources QoS](#memory-and-cpu-qos)
-
-- [Toleration for target pod to ensure scheduling of target pods on tainted nodes](#toleration-for-target-pod)
-
-- [Priority class for volume target deployment](#priority-class-for-volume-target-deployment)
-
-### Replica Affinity to create a volume replica on specific pool {#replica-affinity}
-
-For StatefulSet applications, replicaAffinity-enabled scheduling can be used to place a single replica volume on a specific cStor pool. This feature should be used with delayed volume binding, i.e. `volumeBindingMode: WaitForFirstConsumer` in the StorageClass. When `volumeBindingMode` is set to `WaitForFirstConsumer`, the csi-provisioner waits for the scheduler to select a node. The topology of the selected node is then set as the first entry in the preferred list and is used by the volume controller to create the volume replica on the cStor pool scheduled on the preferred node.
-
-```
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
- name: cstor-csi-disk
-provisioner: cstor.csi.openebs.io
-allowVolumeExpansion: true
-volumeBindingMode: WaitForFirstConsumer
-parameters:
-  replicaCount: "1"
-  cstorPoolCluster: "cstor-disk-pool"
-  cas-type: "cstor"
-  cstorVolumePolicy: "csi-volume-policy" # policy created with replicaAffinity set to true
-```
-
-The `replicaAffinity` spec needs to be enabled via the volume policy before provisioning the volume:
-
-```
-apiVersion: cstor.openebs.io/v1
-kind: CStorVolumePolicy
-metadata:
- name: csi-volume-policy
- namespace: openebs
-spec:
- provision:
- replicaAffinity: true
-```
-
-### Volume Target Pod Affinity
-
-Stateful workloads access the OpenEBS storage volume by connecting to the volume target pod. The target pod affinity policy can be used to co-locate the volume target pod on the same node as the workload. This feature makes use of the Kubernetes pod affinity feature, which depends on pod labels.
-For this, labels need to be added to both the application and the volume policy.
-Given below is a sample YAML of `CStorVolumePolicy` having a target-affinity label, using `kubernetes.io/hostname` as the topologyKey in the CStorVolumePolicy:
-
-```
-apiVersion: cstor.openebs.io/v1
-kind: CStorVolumePolicy
-metadata:
- name: csi-volume-policy
- namespace: openebs
-spec:
-  target:
-    affinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-      - labelSelector:
-          matchExpressions:
-          - key: openebs.io/target-affinity
-            operator: In
-            values:
-            - fio-cstor # application-unique-label
-        topologyKey: kubernetes.io/hostname
-        namespaces: ["default"] # application namespace
-```
-
-Set the label configured in the volume policy, `openebs.io/target-affinity: fio-cstor`, on the app pod. It will be used to find pods, by label, within the domain defined by the topologyKey.
-
-```
-apiVersion: v1
-kind: Pod
-metadata:
- name: fio-cstor
- namespace: default
- labels:
- name: fio-cstor
- openebs.io/target-affinity: fio-cstor
-```
-
-### Volume Tunable
-
-Performance tunings based on the workload can be set using Volume Policy. The list of tunings that can be configured are given below:
-
-- **queueDepth:** This limits the ongoing IO count from the iSCSI client on the node to the cStor target pod. The default value for this parameter is 32.
-- **luWorkers:** cStor target IO worker threads, i.e. the number of threads working on the queueDepth queue. The default value for this parameter is 6. On nodes with a higher number of cores and more RAM, this value can be set to 16, which means 16 threads will be running for each volume.
-- **zvolWorkers:** cStor volume replica IO worker threads, which defaults to the number of cores on the machine. On nodes with a higher number of cores and more RAM, this value can be set to 16.
-
-Given below is a sample YAML that has the above parameters configured.
-
-```
-apiVersion: cstor.openebs.io/v1
-kind: CStorVolumePolicy
-metadata:
- name: csi-volume-policy
- namespace: openebs
-spec:
- replica:
- zvolWorkers: "4"
- target:
- luWorkers: 6
- queueDepth: "32"
-```
-
-**Note:** These policy tunables can be changed for already provisioned volumes by editing the corresponding CStorVolumeConfig resource.
-
-### Memory and CPU Resources QoS
-
-CStorVolumePolicy can also be used to configure the volume Target pod resource requests and limits to ensure QoS. Given below is a sample YAML that configures the target container's resource requests and limits, and auxResources configuration for the sidecar containers.
-
-_To know more about Resource configuration in Kubernetes, [click here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/)_.
-
-```
-apiVersion: cstor.openebs.io/v1
-kind: CStorVolumePolicy
-metadata:
- name: csi-volume-policy
- namespace: openebs
-spec:
- target:
- resources:
- requests:
- memory: "64Mi"
- cpu: "250m"
- limits:
- memory: "128Mi"
- cpu: "500m"
- auxResources:
- requests:
- memory: "64Mi"
- cpu: "250m"
- limits:
- memory: "128Mi"
- cpu: "500m"
-```
-
-**Note:** These resource configurations can be changed for provisioned volumes by editing the CStorVolumeConfig resource on a per-volume basis.
-
-An example of patching an already existing `CStorVolumeConfig` resource is given below.
-Create a file, say patch-resources-cvc.yaml, that contains the changes, and apply the patch on the resource.
-
-```
-spec:
- policy:
- target:
- resources:
- limits:
- cpu: 500m
- memory: 128Mi
- requests:
- cpu: 250m
- memory: 64Mi
- auxResources:
- limits:
- cpu: 500m
- memory: 128Mi
- requests:
- cpu: 250m
- memory: 64Mi
-```
-
-To apply the patch,
-
-```
-kubectl patch cvc -n openebs -p "$(cat patch-resources-cvc.yaml)" pvc-0478b13d-b1ef-4cff-813e-8d2d13bcb316 --type merge
-```
-
-### Toleration for target pod to ensure scheduling of target pods on tainted nodes {#toleration-for-target-pod}
-
-This Kubernetes feature allows users to taint a node, which ensures that no pods are scheduled onto it unless a pod explicitly tolerates the taint. It can be used to reserve nodes for specific pods by adding taints to the desired node(s).
-
-One scenario where this tunable can be used: for all the volume-specific pods to operate flawlessly, they have to be scheduled on nodes that are reserved for storage.
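-
-As an illustration, a node could be tainted with a standard kubectl command like the one below; the node name, key, value, and effect are placeholders that should match the toleration configured in the policy:
-
-```
-kubectl taint nodes <storage-node-name> key1=value1:NoSchedule
-```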
-
-Sample YAML:
-
-```
-apiVersion: cstor.openebs.io/v1
-kind: CStorVolumePolicy
-metadata:
- name: csi-volume-policy
- namespace: openebs
-spec:
- replica: {}
- target:
- tolerations:
- - key: "key1"
- operator: "Equal"
- value: "value1"
- effect: "NoSchedule"
-```
-
-### Priority class for volume target deployment
-
-Priority classes can help in controlling the Kubernetes scheduler's decisions to favor higher priority pods over lower priority pods. The Kubernetes scheduler can even preempt lower priority pods that are running so that pending higher priority pods can be scheduled. Setting pod priority also prevents lower priority workloads from impacting critical workloads in the cluster, especially in cases where the cluster starts to reach its resource capacity.
-_To know more about PriorityClasses in Kubernetes, [click here](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass)_.
-
-**Note:** Priority class needs to be created before volume provisioning.
-
-Given below is a sample CStorVolumePolicy YAML which utilises priority class.
-
-```
-apiVersion: cstor.openebs.io/v1
-kind: CStorVolumePolicy
-metadata:
- name: csi-volume-policy
- namespace: openebs
-spec:
- provision:
- replicaAffinity: true
- target:
- priorityClassName: "storage-critical"
-```
diff --git a/docs/main/user-guides/cstor/clean-up.md b/docs/main/user-guides/cstor/clean-up.md
deleted file mode 100644
index 717a01681..000000000
--- a/docs/main/user-guides/cstor/clean-up.md
+++ /dev/null
@@ -1,155 +0,0 @@
----
-id: clean-up
-title: cStor User Guide - Clean Up
-keywords:
- - cStor csi
- - cStor User Guide
- - Cleaning up a cStor setup
-description: This user guide will help you in cleaning up your cStor setup.
----
-
-This user guide will help you in cleaning up your cStor setup.
-
-### Clean up
-
-- [Cleaning up a cStor setup](#cleaning-up-a-cstor-setup)
-
-## Cleaning up a cStor setup
-
-Follow the steps below to clean up a cStor setup. On successful cleanup, you can reuse the cluster's disks/block devices for other storage engines.
-
-1. Delete the application or deployment that uses the CSI based cStor CAS engine. In this example, we are going to delete the busybox application that was deployed previously. To delete, execute:
-
- ```
- kubectl delete pod
- ```
-
- Example command:
-
-```
- kubectl delete pod busybox
-```
-
-Verify that the application pod has been deleted
-
-```
-kubectl get pods
-```
-
-Sample Output:
-
-```shell hideCopy
-No resources found in default namespace.
-```
-
-2. Next, delete the corresponding PVC attached to the application. To delete PVC, execute:
-
- ```
- kubectl delete pvc
- ```
-
- Example command:
-
- ```
- kubectl delete pvc cstor-pvc
- ```
-
- Verify that the application-PVC has been deleted.
-
- ```
- kubectl get pvc
- ```
-
- Sample Output:
-
- ```shell hideCopy
- No resources found in default namespace.
- ```
-
-3. Delete the corresponding StorageClass used by the application PVC.
-
- ```
- kubectl delete sc
- ```
-
- Example command:
-
- ```
- kubectl delete sc cstor-csi-disk
- ```
-
- To verify that the StorageClass has been deleted, execute:
-
- ```
- kubectl get sc
- ```
-
- Sample Output:
-
- ```shell hideCopy
- No resources found
- ```
-
-4. The blockdevices used to create CSPCs will currently be in claimed state. To get the blockdevice details, execute:
-
- ```
- kubectl get bd -n openebs
- ```
-
- Sample Output:
-
- ```shell hideCopy
- NAME NODENAME SIZE CLAIMSTATE STATUS AGE
- blockdevice-01afcdbe3a9c9e3b281c7133b2af1b68 worker-node-3 21474836480 Claimed Active 2m10s
- blockdevice-10ad9f484c299597ed1e126d7b857967 worker-node-1 21474836480 Claimed Active 2m17s
- blockdevice-3ec130dc1aa932eb4c5af1db4d73ea1b worker-node-2 21474836480 Claimed Active 2m12s
- ```
-
- To get these blockdevices to unclaimed state delete the associated CSPC. To delete, execute:
-
- ```
- kubectl delete cspc -n openebs
- ```
-
- Example command:
-
- ```
- kubectl delete cspc cstor-disk-pool -n openebs
- ```
-
- Verify that the CSPC and CSPIs have been deleted.
-
- ```
- kubectl get cspc -n openebs
- ```
-
- Sample Output:
-
- ```shell hideCopy
- No resources found in openebs namespace.
- ```
-
- ```
- kubectl get cspi -n openebs
- ```
-
- Sample Output:
-
- ```shell hideCopy
- No resources found in openebs namespace.
- ```
-
-   Now, the blockdevices must be in the Unclaimed state. To verify, execute:
-
- ```
- kubectl get bd -n openebs
- ```
-
- Sample output:
-
- ```shell hideCopy
- NAME NODENAME SIZE CLAIMSTATE STATUS AGE
- blockdevice-01afcdbe3a9c9e3b281c7133b2af1b68 worker-node-3 21474836480 Unclaimed Active 21m10s
- blockdevice-10ad9f484c299597ed1e126d7b857967 worker-node-1 21474836480 Unclaimed Active 21m17s
- blockdevice-3ec130dc1aa932eb4c5af1db4d73ea1b worker-node-2 21474836480 Unclaimed Active 21m12s
- ```
diff --git a/docs/main/user-guides/cstor/install-and-setup.md b/docs/main/user-guides/cstor/install-and-setup.md
deleted file mode 100644
index b4f3f5549..000000000
--- a/docs/main/user-guides/cstor/install-and-setup.md
+++ /dev/null
@@ -1,267 +0,0 @@
----
-id: install-and-setup
-title: cStor User Guide - install and setup
-slug: /user-guides/cstor
-keywords:
- - cStor csi
- - cStor User Guide
- - Creating cStor storage pools
- - Creating cStor storage classes
-description: This user guide will help you to configure cStor storage and use cStor Volumes for running your stateful workloads.
----
-
-This user guide will help you to configure cStor storage and use cStor Volumes for running your stateful workloads.
-
-:::note
-If you are an existing user of cStor and have [setup cStor storage using StoragePoolClaim(SPC)](/concepts/cstor), we strongly recommend you to migrate to using CStorPoolCluster(CSPC). CSPC based cStor uses Kubernetes CSI Driver, provides additional flexibility in how devices are used by cStor and has better resiliency against node failures. For detailed instructions, refer to the [cStor SPC to CSPC migration guide](https://github.com/openebs/upgrade/blob/master/docs/migration.md).
-:::
-
-### Install and Setup
-
-- [Pre-requisites](#prerequisites)
-- [Creating cStor storage pools](#creating-cstor-storage-pools)
-- [Creating cStor storage classes](#creating-cstor-storage-classes)
-
-## Prerequisites
-
-- cStor uses the raw block devices attached to the Kubernetes worker nodes to create cStor Pools. Applications will connect to cStor volumes using `iSCSI`. This requires you ensure the following:
-
- - There are raw (unformatted) block devices attached to the Kubernetes worker nodes. The devices can be either direct attached devices (SSD/HDD) or cloud volumes (GPD, EBS)
- - `iscsi` utilities are installed on all the worker nodes where Stateful applications will be launched. The steps for setting up the iSCSI utilities might vary depending on your Kubernetes distribution. Please see [prerequisites verification](/user-guides/prerequisites)
-
-- If you are setting up OpenEBS in a new cluster, you can use one of the following steps to install OpenEBS. If OpenEBS is already installed, skip this step.
-
- Using helm,
-
- ```
- helm repo add openebs https://openebs.github.io/charts
- helm repo update
- helm install openebs --namespace openebs openebs/openebs --set cstor.enabled=true --create-namespace
- ```
-
- The above command will install all the default OpenEBS components along with cStor.
-
- Using kubectl,
-
- ```
- kubectl apply -f https://openebs.github.io/charts/cstor-operator.yaml
- ```
-
- The above command will install all the required components for running cStor.
-
-- Enable cStor on already existing OpenEBS
-
- Using helm, you can enable cStor on top of your openebs installation as follows:
-
- ```
- helm ls -n openebs
- # Note the release name used for OpenEBS
- # Upgrade the helm by enabling cStor
- # helm upgrade [helm-release-name] [helm-chart-name] flags
- helm upgrade openebs openebs/openebs --set cstor.enabled=true --reuse-values --namespace openebs
- ```
-
- Using kubectl,
-
- ```
- kubectl apply -f https://openebs.github.io/charts/cstor-operator.yaml
- ```
-
-- Verify cStor and NDM pods are running in your cluster.
-
- To get the status of the pods execute:
-
- ```
- kubectl get pod -n openebs
- ```
-
- Sample Output:
-
- ```shell hideCopy
- NAME READY STATUS RESTARTS AGE
- cspc-operator-5fb7db848f-wgnq8 1/1 Running 0 6d7h
- cvc-operator-7f7d8dc4c5-sn7gv 1/1 Running 0 6d7h
- openebs-cstor-admission-server-7585b9659b-rbkmn 1/1 Running 0 6d7h
- openebs-cstor-csi-controller-0 6/6 Running 0 6d7h
- openebs-cstor-csi-node-dl58c 2/2 Running 0 6d7h
- openebs-cstor-csi-node-jmpzv 2/2 Running 0 6d7h
- openebs-cstor-csi-node-tfv45 2/2 Running 0 6d7h
- openebs-ndm-gctb7 1/1 Running 0 6d7h
- openebs-ndm-operator-7c8759dbb5-58zpl 1/1 Running 0 6d7h
- openebs-ndm-sfczv 1/1 Running 0 6d7h
- openebs-ndm-vgdnv 1/1 Running 0 6d7h
- ```
-
-- Nodes must have disks attached to them. To get the list of attached block devices, execute:
-
- ```
- kubectl get bd -n openebs
- ```
-
- Sample Output:
-
- ```shell hideCopy
- NAME NODENAME SIZE CLAIMSTATE STATUS AGE
- blockdevice-01afcdbe3a9c9e3b281c7133b2af1b68 worker-node-3 21474836480 Unclaimed Active 2m10s
- blockdevice-10ad9f484c299597ed1e126d7b857967 worker-node-1 21474836480 Unclaimed Active 2m17s
- blockdevice-3ec130dc1aa932eb4c5af1db4d73ea1b worker-node-2 21474836480 Unclaimed Active 2m12s
- ```
-
-## Creating cStor storage pools
-
-You will need to create a Kubernetes custom resource called **CStorPoolCluster**, specifying the details of the nodes and the devices on those nodes that must be used to set up cStor pools. You can start by copying the following **sample CSPC YAML** into a file named `cspc.yaml` and modifying it with details from your cluster.
-
-```
-apiVersion: cstor.openebs.io/v1
-kind: CStorPoolCluster
-metadata:
- name: cstor-disk-pool
- namespace: openebs
-spec:
- pools:
- - nodeSelector:
- kubernetes.io/hostname: "worker-node-1"
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: "blockdevice-10ad9f484c299597ed1e126d7b857967"
- poolConfig:
- dataRaidGroupType: "stripe"
-
- - nodeSelector:
- kubernetes.io/hostname: "worker-node-2"
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: "blockdevice-3ec130dc1aa932eb4c5af1db4d73ea1b"
- poolConfig:
- dataRaidGroupType: "stripe"
-
- - nodeSelector:
- kubernetes.io/hostname: "worker-node-3"
- dataRaidGroups:
- - blockDevices:
- - blockDeviceName: "blockdevice-01afcdbe3a9c9e3b281c7133b2af1b68"
- poolConfig:
- dataRaidGroupType: "stripe"
-```
-
-- Get all the node labels present in the cluster with the following command; these node labels will be required to modify the CSPC YAML.
-
- ```
- kubectl get node --show-labels
- ```
-
- Sample Output:
-
- ```shell hideCopy
- NAME STATUS ROLES AGE VERSION LABELS
-
- master Ready master 5d2h v1.20.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master,kubernetes.io/os=linux,node-role.kubernetes.io/master=
-
- worker-node-1 Ready 5d2h v1.20.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node-1,kubernetes.io/os=linux
-
- worker-node-2 Ready 5d2h v1.20.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node-2,kubernetes.io/os=linux
-
- worker-node-3 Ready 5d2h v1.18.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node-3,kubernetes.io/os=linux
- ```
-
-- Modify the CSPC YAML to use the worker nodes. Use the value from the label `kubernetes.io/hostname=<node_name>`. This label value and the node name could be different on some platforms. In this case, the label values and node names are: `kubernetes.io/hostname: "worker-node-1"`, `kubernetes.io/hostname: "worker-node-2"`, and `kubernetes.io/hostname: "worker-node-3"`.
-
-- Modify the CSPC YAML file to specify the block devices attached to the selected nodes where the pool is to be provisioned. You can use the following command to get the available block devices on each of the worker nodes:
-
- ```
- kubectl get bd -n openebs
- ```
-
- Sample Output:
-
- ```
- NAME NODENAME SIZE CLAIMSTATE STATUS AGE
- blockdevice-01afcdbe3a9c9e3b281c7133b2af1b68 worker-node-3 21474836480 Unclaimed Active 2m10s
- blockdevice-10ad9f484c299597ed1e126d7b857967 worker-node-1 21474836480 Unclaimed Active 2m17s
- blockdevice-3ec130dc1aa932eb4c5af1db4d73ea1b worker-node-2 21474836480 Unclaimed Active 2m12s
- ```
-
-- The `dataRaidGroupType:` can either be set as `stripe` or `mirror` as per your requirement. In the following example it is configured as `stripe`.
-
- We have named the configuration YAML file as `cspc.yaml`. Execute the following command for CSPC creation,
-
- ```
- kubectl apply -f cspc.yaml
- ```
-
- To verify the status of created CSPC, execute:
-
- ```
- kubectl get cspc -n openebs
- ```
-
- Sample Output:
-
- ```shell hideCopy
- NAME HEALTHYINSTANCES PROVISIONEDINSTANCES DESIREDINSTANCES AGE
- cstor-disk-pool 3 3 3 2m2s
- ```
-
- Check if the pool instances report their status as **ONLINE** using the below command:
-
- ```
- kubectl get cspi -n openebs
- ```
-
- Sample Output:
-
- ```shell hideCopy
- NAME HOSTNAME ALLOCATED FREE CAPACITY STATUS AGE
- cstor-disk-pool-vn92 worker-node-1 60k 9900M 9900M ONLINE 2m17s
- cstor-disk-pool-al65 worker-node-2 60k 9900M 9900M ONLINE 2m17s
- cstor-disk-pool-y7pn worker-node-3 60k 9900M 9900M ONLINE 2m17s
- ```
-
- Once all the pods are in running state, these pool instances can be used for creation of cStor volumes.
-
-## Creating cStor storage classes
-
-StorageClass definition is an important task in the planning and execution of OpenEBS storage. The real power of CAS architecture is to give an independent or a dedicated storage engine like cStor for each workload, so that granular policies can be applied to that storage engine to tune the behaviour or performance as per the workload's need.
-
-Steps to create a cStor StorageClass
-
-1. Decide the CStorPoolCluster for which you want to create a Storage Class. Let us say you pick up `cstor-disk-pool` that you created in the above step.
-2. Decide the replicaCount based on your requirement/workloads. OpenEBS does not restrict the replica count that can be set, but a **maximum of 5** replicas is allowed. For a volume to be available, **at least (n/2 + 1) replicas** should be up and connected to the target, where n is the replicaCount. The replicaCount should always be less than or equal to the number of cStor Pool Instances (CSPIs). The following are some example cases:
-   - If a user configures a replica count of 2, then both replicas should always be available to perform operations on the volume.
-   - If a user configures a replica count of 3, at least 2 replicas should be available for the volume to be operational.
-   - If a user configures a replica count of 5, at least 3 replicas should be available for the volume to be operational.
-3. Create a YAML spec file `cstor-csi-disk.yaml` using the template given below. Update the pool, replica count, and other policies. Using this sample configuration YAML, a StorageClass will be created with 3 OpenEBS cStor replicas which will configure themselves on the pool instances.
-
- ```
- kind: StorageClass
- apiVersion: storage.k8s.io/v1
- metadata:
- name: cstor-csi-disk
- provisioner: cstor.csi.openebs.io
- allowVolumeExpansion: true
- parameters:
- cas-type: cstor
- # cstorPoolCluster should have the name of the CSPC
- cstorPoolCluster: cstor-disk-pool
- # replicaCount should be <= no. of CSPI created in the selected CSPC
- replicaCount: "3"
- ```
-
- To deploy the YAML, execute:
-
- ```
- kubectl apply -f cstor-csi-disk.yaml
- ```
-
- To verify, execute:
-
- ```
- kubectl get sc
- ```
-
- Sample Output:
-
- ```shell hideCopy
- NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
-   cstor-csi-disk   cstor.csi.openebs.io   Delete          Immediate           true                   4s
- ```
diff --git a/docs/main/user-guides/cstor/launch-sample-application.md b/docs/main/user-guides/cstor/launch-sample-application.md
deleted file mode 100644
index ba6de0951..000000000
--- a/docs/main/user-guides/cstor/launch-sample-application.md
+++ /dev/null
@@ -1,100 +0,0 @@
----
-id: launch-sample-application
-title: cStor User Guide - Deploying a sample application
-keywords:
- - cStor csi
- - cStor User Guide
- - Deploying a sample application
-description: This user guide will guide you in deploying your sample application in a cStor setup.
----
-
-This user guide will guide you in deploying your sample application in a cStor setup.
-
-### Launch Sample Application
-
-- [Deploying a sample application](#deploying-a-sample-application)
-
-## Deploying a sample application
-
-To deploy a sample application using the above created CSPC and StorageClass, a PVC that utilises the created StorageClass needs to be deployed. Given below is an example YAML for a PVC which uses the SC created earlier.
-
-```
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
- name: cstor-pvc
-spec:
- storageClassName: cstor-csi-disk
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 5Gi
-```
-
-Apply the above PVC yaml to dynamically create volume and verify that the PVC has been successfully created and bound to a PersistentVolume (PV).
-
-```
-kubectl get pvc
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
-cstor-pvc Bound pvc-f1383b36-2d4d-4e9f-9082-6728d6c55bd1 5Gi RWO cstor-csi-disk 12s
-```
-
-Now, to deploy an application using the above created PVC specify the `claimName` parameter under `volumes`.
-
-Given below is a sample busybox application YAML that uses the PVC created earlier.
-
-```
-apiVersion: v1
-kind: Pod
-metadata:
- name: busybox
- namespace: default
-spec:
- containers:
- - command:
- - sh
- - -c
- - 'date >> /mnt/openebs-csi/date.txt; hostname >> /mnt/openebs-csi/hostname.txt; sync; sleep 5; sync; tail -f /dev/null;'
- image: busybox
- imagePullPolicy: Always
- name: busybox
- volumeMounts:
- - mountPath: /mnt/openebs-csi
- name: demo-vol
- volumes:
- - name: demo-vol
- persistentVolumeClaim:
- claimName: cstor-pvc
-```
-
-Apply the above YAML.
-Verify that the pod is running and is able to write data to the volume.
-
-```
-kubectl get pods
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME READY STATUS RESTARTS AGE
-busybox 1/1 Running 0 97s
-```
-
-The example busybox application will write the current date into the mounted path, i.e, _/mnt/openebs-csi/date.txt_ when it starts. To verify, exec into the busybox container.
-
-```
-kubectl exec -it busybox -- cat /mnt/openebs-csi/date.txt
-```
-
-Sample Output:
-
-```shell hideCopy
-Fri May 28 05:00:31 UTC 2021
-```
diff --git a/docs/main/user-guides/cstor/troubleshooting.md b/docs/main/user-guides/cstor/troubleshooting.md
deleted file mode 100644
index cb8102386..000000000
--- a/docs/main/user-guides/cstor/troubleshooting.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-id: troubleshooting
-title: cStor User Guide - troubleshooting
-keywords:
- - cStor csi
- - cStor User Guide
- - cStor troubleshooting
-description: This user guide will help you in troubleshooting cStor.
----
-
-This user guide will help you in troubleshooting cStor.
-
-### Troubleshooting
-
-- [Troubleshooting cStor setup](#troubleshooting)
-
-## Troubleshooting
-
-- The volume remains in `Init` state even though the pool pods are running. This can happen when the pool pods fail to connect to the Kubernetes API server. Check the logs of the cStor pool pods; restarting the pool pod can fix this issue. This has been seen in cases where the cStor control plane is deleted and re-installed while the pool pods were running.
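-
-A minimal sketch of these checks, assuming the pool manager pod name is a placeholder; cstor-pool and cstor-pool-mgmt are the container names used by the pool manager pods:
-
-```
-# inspect the pool containers for API server connection errors
-kubectl logs <pool-manager-pod> -n openebs -c cstor-pool
-kubectl logs <pool-manager-pod> -n openebs -c cstor-pool-mgmt
-
-# restart the pool pod; it is recreated automatically by its controller
-kubectl delete pod <pool-manager-pod> -n openebs
-```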
diff --git a/docs/main/user-guides/jiva/jiva-install.md b/docs/main/user-guides/jiva/jiva-install.md
deleted file mode 100644
index d0194b0e5..000000000
--- a/docs/main/user-guides/jiva/jiva-install.md
+++ /dev/null
@@ -1,84 +0,0 @@
----
-id: jiva-install
-title: Install and setup
-keywords:
- - Jiva install and setup
----
-This user guide will help you to configure Jiva and use Jiva Volumes for running your stateful workloads.
-
-### Installing Jiva
-
-- To install the latest Jiva release, execute:
-
- ```
-kubectl apply -f https://openebs.github.io/charts/jiva-operator.yaml
- ```
-
-- Next, verify that the Jiva operator and CSI pods are running on your cluster.
- To get the status of the pods execute:
- ```
- kubectl get pod -n openebs
- ```
-
- Sample Output:
- ```
-NAME READY STATUS RESTARTS AGE
-jiva-operator-7765cbfffd-vt787 1/1 Running 0 10s
-openebs-localpv-provisioner-57b44f4664-klsrw 1/1 Running 0 118s
-openebs-jiva-csi-controller-0 4/4 Running 0 6m14s
-openebs-jiva-csi-node-56t5g 2/2 Running 0 6m13s
-openebs-jiva-csi-node-xtyhu 2/2 Running 0 6m20s
-openebs-jiva-csi-node-h2unk 2/2 Running 0 6m20s
- ```
-
-### Provisioning Jiva volumes
-
-- The Jiva volume policies need to be defined before creation of a Jiva volume. Given below is a sample Jiva volume policy CR.
- ```
- apiVersion: openebs.io/v1alpha1
- kind: JivaVolumePolicy
- metadata:
- name: example-jivavolumepolicy
- namespace: openebs
- spec:
- replicaSC: openebs-hostpath
- target:
- # This sets the number of replicas for high-availability
- # replication factor <= no. of (CSI) nodes
- replicationFactor: 3
- # disableMonitor: false
- # auxResources:
- # tolerations:
- # resources:
- # affinity:
- # nodeSelector:
- # priorityClassName:
- # replica:
- # tolerations:
- # resources:
- # affinity:
- # nodeSelector:
- # priorityClassName:
- ```
-
-
-:::note
-By default, the volume data is stored under /var/openebs/ on the worker nodes. To change this behavior, a new replicaSC needs to be created; a sketch of such a StorageClass is given below.
-:::
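-
-A minimal sketch of such a replica StorageClass, assuming the name and BasePath are illustrative; the resulting StorageClass name would then be set as `replicaSC` in the JivaVolumePolicy:
-
-```
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: openebs-hostpath-jiva
-  annotations:
-    openebs.io/cas-type: local
-    cas.openebs.io/config: |
-      - name: StorageType
-        value: "hostpath"
-      # store Jiva replica data under a custom directory
-      - name: BasePath
-        value: "/mnt/jiva-data/"
-provisioner: openebs.io/local
-volumeBindingMode: WaitForFirstConsumer
-reclaimPolicy: Delete
-```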
-
-### Creating storage classes
-
-A storage class specifying the above `JivaVolumePolicy` needs to be created. This will be used to dynamically provision Jiva volumes.
-
-```
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
- name: openebs-jiva-csi-sc
-provisioner: jiva.csi.openebs.io
-allowVolumeExpansion: true
-parameters:
- cas-type: "jiva"
- policy: "example-jivavolumepolicy"
-```
-
diff --git a/docs/main/user-guides/jiva/jiva-launch.md b/docs/main/user-guides/jiva/jiva-launch.md
deleted file mode 100644
index 372f7c255..000000000
--- a/docs/main/user-guides/jiva/jiva-launch.md
+++ /dev/null
@@ -1,107 +0,0 @@
----
-id: jiva-launch
-title: Launch
-keywords:
- - Jiva Applications
- - Jiva Launch
----
-This user guide will guide you in deploying your sample application in a Jiva setup.
-
-### Deploying a sample application
-
-To deploy a sample application using the previously created StorageClass, a PVC that utilises the created StorageClass needs to be deployed. Given below is an example YAML for a PVC which uses the SC created earlier.
-
-```
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
- name: example-jiva-csi-pvc
-spec:
- storageClassName: openebs-jiva-csi-sc
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 4Gi
-```
-
-Apply the above PVC yaml to dynamically create volume and verify that the PVC has been successfully created and bound to a PersistentVolume (PV).
-
-```
-kubectl get pvc
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
-example-jiva-csi-pvc Bound pvc-ffc1e885-0122-4b5b-9d36-ae131717a77b 4Gi RWO openebs-jiva-csi-sc 1m
-```
-
-Also, verify that the volume is ready to serve IOs.
-
-Sample Command:
-```
-kubectl get jivavolume pvc-ffc1e885-0122-4b5b-9d36-ae131717a77b -n openebs
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME REPLICACOUNT PHASE STATUS
-pvc-ffc1e885-0122-4b5b-9d36-ae131717a77b 1 Ready RW
-```
-
-Now, to deploy an application using the above created PVC specify the `claimName` parameter under volumes.
-
-```
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: busybox
- labels:
- app: busybox
- spec:
- replicas: 1
- strategy:
- type: RollingUpdate
- selector:
- matchLabels:
- app: busybox
- template:
- metadata:
- labels:
- app: busybox
- spec:
- containers:
- - resources:
- limits:
- cpu: 0.5
- name: busybox
- image: busybox
- command: ['sh', '-c', 'echo Container 1 is Running ; sleep 3600']
- imagePullPolicy: IfNotPresent
- ports:
- - containerPort: 3306
- name: busybox
- volumeMounts:
- - mountPath: /var/lib/mysql
- name: demo-vol1
- volumes:
- - name: demo-vol1
- persistentVolumeClaim:
- claimName: example-jiva-csi-pvc
-```
-
-Apply the above YAML. Verify that the pod is running.
-
-```
-kubectl get pods
-```
-
-Sample Output:
-
-```shell hideCopy
-NAME READY STATUS RESTARTS AGE
-busybox 1/1 Running 0 97s
-```
\ No newline at end of file
diff --git a/docs/main/user-guides/jiva/jiva-prerequisites.md b/docs/main/user-guides/jiva/jiva-prerequisites.md
deleted file mode 100644
index 48ce93049..000000000
--- a/docs/main/user-guides/jiva/jiva-prerequisites.md
+++ /dev/null
@@ -1,101 +0,0 @@
----
-id: jiva-prerequisites
-title: Jiva User Guide
-keywords:
- - Jiva User Guide
- - Jiva prerequisites
----
-
-
-For details of how Jiva works, see [Jiva overview page](/concepts/jiva)
-
-Jiva is a lightweight storage engine that is recommended for low capacity workloads. The snapshot and storage management features of the cStor engine are more advanced, and cStor is recommended when snapshots are needed.
-
-## Prerequisites
-
-1. Kubernetes version 1.18 or higher.
-2. iSCSI initiator utils installed on all the worker nodes
-
-
-3. Access to install RBAC components into kube-system namespace.
-4. OpenEBS localpv-hostpath version 2.6.0 or higher.
- ```
- kubectl apply -f https://openebs.github.io/charts/hostpath-operator.yaml
- ```
- Sample hostpath storage class
- ```
- #Sample storage classes for OpenEBS Local PV
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
- name: openebs-hostpath
- annotations:
- openebs.io/cas-type: local
- cas.openebs.io/config: |
- # hostpath type will create a PV by
- # creating a sub-directory under the
- # BASEPATH provided below.
- - name: StorageType
- value: "hostpath"
- # Specify the location (directory) where
- # where PV(volume) data will be saved.
- # A sub-directory with pv-name will be
- # created. When the volume is deleted,
- # the PV sub-directory will be deleted.
- #Default value is /var/openebs/local
- - name: BasePath
- value: "/var/openebs/local/"
-provisioner: openebs.io/local
-volumeBindingMode: WaitForFirstConsumer
-reclaimPolicy: Delete
- ```
-
-
-
-## See Also:
-
-[Understanding Jiva](/concepts/jiva)
-
diff --git a/docs/main/user-guides/local-engine-user-guide/additional-information/alphafeatures.md b/docs/main/user-guides/local-engine-user-guide/additional-information/alphafeatures.md
index aeb896750..1ff0547e5 100644
--- a/docs/main/user-guides/local-engine-user-guide/additional-information/alphafeatures.md
+++ b/docs/main/user-guides/local-engine-user-guide/additional-information/alphafeatures.md
@@ -9,7 +9,7 @@ keywords:
description: This page provides an overview of OpenEBS components and features that are present in the Alpha version and are under active development. These features are not recommended to be used in production.
---
-This page provides an overview of OpenEBS components and features presently in Alpha version and under active development. These features are not recommended to be used in production. We suggest you to familiarize and try these features on test clusters and reach out to [OpenEBS Community](/introduction/community) if you have any queries, feedback or need help on these features.
+This section provides an overview of OpenEBS components and features available in the Alpha version and under active development. These features are not recommended for use in production. We suggest you familiarize yourself with these features, try them on test clusters, and reach out to the [OpenEBS Community](../../../community.md) if you have any queries, feedback, or need help with these features.
The list of alpha features include:
- [CSI Driver for Local PV - Device](#csi-driver-for-local-pv-device)
@@ -22,47 +22,27 @@ The list of alpha features include:
Upgrade is not supported for features in Alpha version.
:::
-## CSI Driver for Local PV - Device
-
-OpenEBS is developing a CSI driver for provisioning Local PVs that are backed by block devices.
-
-For additional details and detailed instructions on how to get started with OpenEBS Local PV - Device CSI Driver please refer this [Quickstart guide](https://github.com/openebs/device-localpv).
-
-
-## Dynamic NFS Provisioner
-
-OpenEBS is developing a dynamic NFS PV provisioner that can setup a new NFS server on top of any block storage.
-
-For additional details and detailed instructions on how to get started with OpenEBS NFS PV provisioner please refer this [Quickstart guide](https://github.com/openebs/dynamic-nfs-provisioner).
-
## OpenEBS CLI
OpenEBS is developing a kubectl plugin for openebs called `openebsctl` that can help perform administrative tasks on OpenEBS volumes and pools.
-For additional details and detailed instructions on how to get started with OpenEBS CLI please refer this [Quickstart guide](https://github.com/openebs/openebsctl).
+For additional details and detailed instructions on how to get started with OpenEBS CLI, see [here](https://github.com/openebs/openebsctl).
## OpenEBS Monitoring Add-on
-OpenEBS is developing a monitoring add-on package that can be installed via helm or kubectl for setting up a default prometheus, grafana and alert manager stack. The package also will include default service monitors, dashboards and alert rules.
+OpenEBS is developing a monitoring add-on package that can be installed via helm for setting up a default Prometheus, Grafana, and Alertmanager stack. The package will also include default service monitors, dashboards, and alert rules.
-For additional details and detailed instructions on how to get started with OpenEBS Monitoring Add-on please refer this [Quickstart guide](https://github.com/openebs/monitoring).
+For additional details and detailed instructions on how to get started with OpenEBS Monitoring Add-on, see [here](https://github.com/openebs/monitoring).
## Data Populator
-The Data populator can be used to load seed data into a Kubernetes persistent volume from another such volume. The data populator internally uses Rsync, which is a volume populator having the ability to create a volume from any rsync source.
+The Data populator can be used to load seed data into a Kubernetes persistent volume from another such volume. The data populator internally uses Rsync, which is a volume populator having the ability to create a volume from any rsync source.
-### Use cases
+### Use Cases
1. Decommissioning of a node in the cluster: In scenarios where a Kubernetes node needs to be decommissioned whether for upgrade or maintenance, a data populator can be used to migrate the data saved in the local storage of the node, that has to be decommissioned.
2. Loading seed data to Kubernetes volumes: Data populator can be used to scale applications without using read-write many operation. The application can be pre-populated with the static content available in an existing PV.
-To get more details about Data Populator, [click here](https://github.com/openebs/data-populator#data-populator).
-
-For instructions on the installation and usage of Data Populator, please refer to this [Quickstart guide](https://github.com/openebs/data-populator#install).
-
-
-
-
-
-
+To get more details about Data Populator, see [here](https://github.com/openebs/data-populator#data-populator).
+For instructions on the installation and usage of Data Populator, see [here](https://github.com/openebs/data-populator#install).
diff --git a/docs/main/user-guides/local-engine-user-guide/additional-information/backupandrestore.md b/docs/main/user-guides/local-engine-user-guide/additional-information/backupandrestore.md
new file mode 100644
index 000000000..3baece4ab
--- /dev/null
+++ b/docs/main/user-guides/local-engine-user-guide/additional-information/backupandrestore.md
@@ -0,0 +1,99 @@
+---
+id: backupandrestore
+title: Backup and Restore
+keywords:
+ - Backup
+ - Restore
+ - Backup and Restore
+description: This section explains how to backup and restore local engines.
+---
+
+## Backup and Restore
+
+OpenEBS Local Volumes can be backed up and restored along with the application using [Velero](https://velero.io).
+
+:::note
+The following steps assume that you already have Velero with Restic integration configured. If not, follow the [Velero Documentation](https://velero.io/docs/) to proceed with the install and setup of Velero. If you encounter any issues or have questions, talk to us on the [#openebs channel on the Kubernetes Slack server](https://kubernetes.slack.com/messages/openebs/).
+:::
+
+### Backup
+
+The following steps will help you to prepare and backup the data from the volume created for the example pod (`hello-local-hostpath-pod`), with the volume mount (`local-storage`).
+
+1. Prepare the application pod for backup. Velero uses Kubernetes labels to select the pods that need to be backed up, and annotations on the pods to determine which volumes need to be backed up. For the example pod launched above, you can inform Velero to back it up by specifying the following label and annotation.
+
+ ```
+ kubectl label pod hello-local-hostpath-pod app=test-velero-backup
+ kubectl annotate pod hello-local-hostpath-pod backup.velero.io/backup-volumes=local-storage
+ ```
+2. Create a Backup using Velero.
+
+ ```
+ velero backup create bbb-01 -l app=test-velero-backup
+ ```
+
+3. Verify that backup is successful.
+
+ ```
+ velero backup describe bbb-01 --details
+ ```
+
+ On successful completion of the backup, the output of the backup describe command will show the following:
+ ```shell hideCopy
+ ...
+ Restic Backups:
+ Completed:
+ default/hello-local-hostpath-pod: local-storage
+ ```
+
+### Restore
+
+1. Install and setup Velero, with the same provider where backups were saved. Verify that backups are accessible.
+
+ ```
+ velero backup get
+ ```
+
+   The output should display the backups that were taken successfully.
+ ```shell hideCopy
+ NAME STATUS CREATED EXPIRES STORAGE LOCATION SELECTOR
+ bbb-01 Completed 2020-04-25 15:49:46 +0000 UTC 29d default app=test-velero-backup
+ ```
+
+2. Restore the application.
+
+ :::note
+ Local PVs are created with node affinity. As the node names will change when a new cluster is created, create the required PVC(s) prior to proceeding with restore.
+ :::
+
+ Replace the path to the PVC yaml in the below commands, with the PVC that you have created.
+ ```
+ kubectl apply -f https://openebs.github.io/charts/examples/local-hostpath/local-hostpath-pvc.yaml
+ velero restore create rbb-01 --from-backup bbb-01 -l app=test-velero-backup
+ ```
+
+3. Verify that application is restored.
+
+ ```
+ velero restore describe rbb-01
+ ```
+
+ Depending on the data, it may take a while to initialize the volume. On successful restore, the output of the above command should show:
+ ```shell hideCopy
+ ...
+ Restic Restores (specify --details for more information):
+ Completed: 1
+ ```
+
+4. Verify that the data has been restored. The application pod used in this example writes periodic messages (greetings) to the volume.
+
+ ```
+ kubectl exec hello-local-hostpath-pod -- cat /mnt/store/greet.txt
+ ```
+
+   The output will show the backed up data as well as the new greetings that started appearing after the application pod was restored.
+ ```shell hideCopy
+ Sat Apr 25 15:41:30 UTC 2020 [hello-local-hostpath-pod] Hello from OpenEBS Local PV.
+ Sat Apr 25 15:46:30 UTC 2020 [hello-local-hostpath-pod] Hello from OpenEBS Local PV.
+ Sat Apr 25 16:11:25 UTC 2020 [hello-local-hostpath-pod] Hello from OpenEBS Local PV.
+ ```
diff --git a/docs/main/user-guides/local-engine-user-guide/additional-information/k8supgrades.md b/docs/main/user-guides/local-engine-user-guide/additional-information/k8supgrades.md
index 2094e49a9..f4e415a4e 100644
--- a/docs/main/user-guides/local-engine-user-guide/additional-information/k8supgrades.md
+++ b/docs/main/user-guides/local-engine-user-guide/additional-information/k8supgrades.md
@@ -9,17 +9,17 @@ keywords:
description: Kubernetes upgrades do need to happen to new features that roll out and to get minimum requirements satisfied for the applications upgrade running on Kubernetes.
---
-There are few reasons why nodes in a Kubernetes cluster get rebooted
+There are few reasons why nodes in a Kubernetes cluster get rebooted:
 - Kubernetes upgrades need to happen to roll out new features and to satisfy the minimum requirements of the applications running on Kubernetes. The upgrade process of a Kubernetes cluster involves upgrading the nodes one by one. This process may involve rebooting the nodes of the cluster.
-- Kubernetes nodes go through hardware changes
+- Kubernetes nodes go through hardware changes.
-### Volume replica quorum requirement
+### Volume Replica Quorum Requirement
 In either case, when the nodes are rebooted, the OpenEBS volume targets lose access to the replicas hosted on that node. OpenEBS volume replicas need to be in quorum for the volume to be online. When a Kubernetes node is rebooted and comes back online, the rebuilding process of the volume replicas may take a few minutes. If another node is rebooted before the volume replicas are completely rebuilt, the volume replicas may lose quorum and the corresponding volumes may be marked read-only, which results in the unavailability of data to the application.
 It is recommended that before a Kubernetes node is rebooted, you make sure all the replicas of all OpenEBS volumes are healthy/online and that no rebuild process is ongoing.
-## See Also:
+## See Also
-[Seeking help](/introduction/community)
+[Seeking help](../../../community.md)
diff --git a/docs/main/user-guides/local-engine-user-guide/additional-information/kb.md b/docs/main/user-guides/local-engine-user-guide/additional-information/kb.md
index bc5136fb4..9817e767d 100644
--- a/docs/main/user-guides/local-engine-user-guide/additional-information/kb.md
+++ b/docs/main/user-guides/local-engine-user-guide/additional-information/kb.md
@@ -10,36 +10,8 @@ description: The knowledge base include summaries, manuals, troubleshooting guid
---
-## Summary
-
-[How do I reuse an existing PV - after re-creating Kubernetes StatefulSet and its PVC](#reuse-pv-after-recreating-sts)
-
-[How to scale up Jiva replica?](#how-to-scale-up-jiva-replica)
-
-[How to install OpenEBS in OpenShift 4.x?](#openshift-install)
-
-[How to enable Admission-Controller in OpenShift environment?](#enable-admission-controller-in-openshift)
-
-[How to setup default PodSecurityPolicy to allow the OpenEBS pods to work with all permissions?](#how-to-setup-default-podsecuritypolicy-to-allow-the-openebs-pods-to-work-with-all-permissions)
-
-[How to prevent container logs from exhausting disk space?](#enable-log-rotation-on-cluster-nodes)
-
-[How to create a BlockDeviceClaim for a particular BlockDevice?](#create-bdc-for-a-blockdevice)
-
-[How to provision Local PV on K3OS?](#provision-localpv-on-k3os)
-
-[How to make cStor volume online if 2 replicas of 3 are lost?](#how-to-make-cstor-volume-online-if-replicas-2-of-are-lost)
-
-[How to reconstruct data from healthy replica to replaced one?](#how-to-reconstruct-data-from-healthy-replica-to-replaced-ones)
-
-[How to verify whether cStor volume is running fine?](#how-to-verify-whether-cstor-volume-is-running-fine)
-
-[Expanding Jiva Storage Volumes](#expanding-jiva-storage-volumes)
-
-
### How do I reuse an existing PV - after re-creating Kubernetes StatefulSet and its PVC {#reuse-pv-after-recreating-sts}
-
There are some cases where you have to delete the StatefulSet and re-install a new one. In the process, you may have to delete the PVCs used by the StatefulSet while retaining the PVs, by ensuring the "Reclaim Policy" is set to Retain. In this case, the following is the procedure for re-using an existing PV in your StatefulSet application.
1. Get the PV name using the following command and use it in Step 2.
@@ -151,84 +123,6 @@ There are some cases where it had to delete the StatefulSet and re-install a new
[Go to top](#top)
-### How to scale up Jiva replica?
-
-
-From 0.9.0 OpenEBS version, Jiva pod deployment are scheduling with nodeAffinity. For scaling up Jiva replica count, the following steps has to be performed.
-
-1. Get the deployment details of replica of corresponding Jiva volume using the following command. If it is deployed in `openebs` namespace, use corresponding namespace appropriately in the following commands.
-
- ```
- kubectl get deploy
- ```
-
- Following is an example output.
-
- ```shell hideCopy
- NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
- percona 1 1 1 1 54s
- pvc-4cfacfdd-76d7-11e9-9319-42010a800230-ctrl 1 1 1 1 53s
- pvc-4cfacfdd-76d7-11e9-9319-42010a800230-rep 1 1 1 1 53s
- ```
-
-2. Edit the corresponding replica deployment of the Jiva volume using the following command.
-
- ```
- kubectl edit deploy
- ```
-
- **Example:**
-
- ```
- kubectl edit deploy pvc-4cfacfdd-76d7-11e9-9319-42010a800230-rep
- ```
-
- Perform Step 3 and 4 and then save the changes. It is required to modify the fields of replica count and hostname details where the replica pods has to be scheduled.
-
-3. Edit `replicas` value under `spec` with the required number. In this example, it was `replicas: 1` during the initial deployment. With following change, replicas count will change to 2.
-
- **Example:**
-
- ```
- replicas: 2
- ```
-
-4. Add corresponding hostnames under value in `spec.template.spec.affinity.nodeAffinity.nodeSelectorTerms.key.values`. The following is the sample snippet for adding the required hostnames. In the following snippet, it is added the hostname of second node in the mentioned path.
-
- ```
- spec:
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: kubernetes.io/hostname
- operator: In
- values:
- - gke-md-jiva-default-pool-15a2475b-bxr5
- - gke-md-jiva-default-pool-15a2475b-gzx3
- ```
-
-5. After modifying the above changes, save the configuration. With this change , new replica pods will be running and following command will get the details of replica pods.
-
- ```
- kubectl get pod -o wide
- ```
-
- The following is an example output.
-
- ```shell hideCopy
- NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
- percona-66b4fd4ddf-xvswn 1/1 Running 0 32m
- pvc-4cfacfdd-76d7-11e9-9319-42010a800230-ctrl-68d94478df-drj6r 2/2 Running 0 32m
- pvc-4cfacfdd-76d7-11e9-9319-42010a800230-rep-f9ff69c6-6lcfz 1/1 Running 0 25s
- pvc-4cfacfdd-76d7-11e9-9319-42010a800230-rep-f9ff69c6-9jbfm 1/1 Running 0 25s
- ```
-
-[Go to top](#top)
-
-
-
### How to install OpenEBS in OpenShift 4.x {#openshift-install}
#### Tested versions
@@ -543,7 +437,7 @@ the node to show up as `Not Ready` until the daemon has restarted successfully.
### How to create a BlockDeviceClaim for a particular BlockDevice? {#create-bdc-for-a-blockdevice}
-There are certain use cases where the user does not need some of the BlockDevices discovered by OpenEBS to be used by any of the storage engines (cStor, LocalPV, etc.). In such scenarios, users can manually create a BlockDeviceClaim to claim that particular BlockDevice, so that it won't be used by cStor or Local PV. The following steps can be used to claim a particular BlockDevice:
+There are certain use cases where the user does not want some of the BlockDevices discovered by OpenEBS to be consumed by any of the storage engines. In such scenarios, users can manually create a BlockDeviceClaim to claim that particular BlockDevice so that it will not be used by Local PV. The following steps can be used to claim a particular BlockDevice:
1. Download the BDC CR YAML from `node-disk-manager` repository.
@@ -667,9 +561,7 @@ The detailed information of each steps are provided below.
```
NAME PROVISIONER AGE
- openebs-device openebs.io/local 57m
openebs-hostpath openebs.io/local 57m
- openebs-jiva-default openebs.io/provisioner-iscsi 57m
openebs-snapshot-promoter volumesnapshot.external-storage.k8s.io/snapshot-promoter 57m
```
@@ -677,1196 +569,4 @@ The detailed information of each steps are provided below.
**Note:** OpenEBS local PV will not be bound until the application pod is scheduled as its **volumeBindingMode** is set to **WaitForFirstConsumer.** Once the application pod is scheduled on a certain node, OpenEBS Local PV will be bound on that node.
-### How to make cStor volume online if 2 replicas of 3 are lost?
-
-Application that is using cStor volume can run IOs, if at least 2 out of 3 replicas (i.e., > 50% of ReplicationFactor) have data with them. If 2 out of 3 replicas lost data, cStor volume goes into RO (read-only) mode, and, application can get into crashed state.
-This section is to provide the steps to bring back volume online by scaling down replicas to 1 with a consideration of ReplicationFactor as 3 in the examples.
-
-Perform the following steps to make the cStor volume online:
-
-1. Check the current state of CVRs of a cStor volume.
-2. Scaling down the replica count to 1 from 3.
-3. Make the volume mount point into RW mode from the Worker node where application is scheduled.
-
-The detailed information of each steps are provided below.
-
-1. **Check the current state of CVRs of a cStor volume**
-
- - Run the following command to get the current state of CVR of a cStor volume.
-
- ```
- kubectl get cvr -n openebs
- ```
-
- Output will be similar to the following:
-
- ```
- NAME USED ALLOCATED STATUS AGE
- pvc-5c52d001-..........-cstor-sparse-pool-1irk 7.07M 4.12M Degraded 12m
- pvc-5c52d001-..........-cstor-sparse-pool-a1ud 6K 6K Degraded 12m
- pvc-5c52d001-..........-cstor-sparse-pool-sb1v 8.15M 4.12M Healthy 12m
- ```
-
- - Check the details of the corresponding cStor volume using the following command:
-
- ```
- kubectl get cstorvolume -n openebs -o yaml
- ```
-
- The following is a snippet of the output of the above command:
-
- ```
- apiVersion: v1
- items:
- - apiVersion: openebs.io/v1alpha1
- kind: CStorVolume
- metadata:
- …..
- name: pvc-5c52d001-c6a1-11e9-be30-42010a800094
- namespace: openebs
- uid: 5c767ae5-c6a1-11e9-be30-42010a800094
- spec:
- …..
- iqn: iqn.2016-09.com.openebs.cstor:pvc-5c52d001-c6a1-11e9-be30-42010a800094
- targetPortal: 10.108.4.158:3260
- status:
- ......
- **phase: Offline**
- replicaStatuses:
- - checkpointedIOSeq: "0"
- ......
- mode: **Healthy**
- quorum: **"1"**
- replicaId: "10135959964398189975"
- - checkpointedIOSeq: "0"
- ......
- mode: **Degraded**
- quorum: **"0"**
- replicaId: "9431770906853612612"
- - checkpointedIOSeq: "0"
- ......
- mode: **Degraded**
- quorum: **"0"**
- replicaId: "3920180363968537568"
- ```
-
- In the above snippet, quorum value of one replica is in `ON` and quorum value of other two are `OFF` mode and cStor volume is in `Offline` state. Running IOs now on this cStor volume will get IO error on Node as below:
-
- ```
- / # touch /mnt/store1/file2
- touch: /mnt/store1/file2: Input/output error
- ```
-
-2. **Scaling down the replica count to 1 from 3.**
-
- At this point, user can make the target pod healthy with the single replica which is in quorum.
-
- **Note:** Target pod is made healthy on the assumption that the replica which is in quorum have the latest data with it. In other words, only the data that is available with the replica which is in quorum will be served.
-
- The following steps will help to make the target pod healthy with a single replica which is in quorum.
-
- 1. Edit CStorVolume CR to set ReplicationFactor and ConsistencyFactor to 1.
-
- - Check the details of the corresponding cStor volume using the following command:
-
- ```
- kubectl get cstorvolume -n openebs -o yaml
- ```
-
- The following is a snippet of the output of the above command:
-
- ```shell hideCopy
- apiVersion: v1
- items:
- - apiVersion: openebs.io/v1alpha1
- kind: CStorVolume
- metadata:
- …..
- name: pvc-5c52d001-c6a1-11e9-be30-42010a800094
- namespace: openebs
- uid: 5c767ae5-c6a1-11e9-be30-42010a800094
- spec:
- …..
- replicationFactor: 1
- consistencyFactor: 1
- iqn: iqn.2016-09.com.openebs.cstor:pvc-5c52d001-c6a1-11e9-be30-42010a800094
- targetPortal: 10.108.4.158:3260
- status:
- ......
- **phase: Healthy**
- replicaStatuses:
- - checkpointedIOSeq: "0"
- ......
- mode: **Healthy**
- quorum: **"1"**
- replicaId: **"10135959964398189975"**
- ```
-
- 2. Restart target pod by running following command
-
- ```
- kubectl delete pod -n openebs
- ```
-
- Now the cStor volume will be running with a single replica.
-
-3. **Make the volume mount point into RW mode from the Worker node where application is scheduled.**
-
- Next, user should make the volume mount point into RW mode using the following steps.
-
- - If mount point of the volume went to RO, restart the application pod to bring back the mount point to RW state.
-
- - If application still remains in `CrashLoopback` state due to RO mode of mount point (`kubectl describe` of application pod will show the reason), follow below steps to convert it into RW:
-
- - Login to node where application pod is running.
-
- Get the node details where the application pod is running using the following command.
-
- ```
- Kubectl get pod -n -o wide
- ```
-
- After identifying the node, ssh into the node.
-
- - Find the iSCSI disk related to this PVC.
-
- Run following command inside the node to get the iSCSI disk related to the PVC.
-
- ```
- sudo iscsiadm -m session -P 3
- ```
-
- The output will be similar to the following:
-
- ```shell hideCopy
- iSCSI Transport Class version 2.0-870
- version 2.0-874
- **Target: iqn.2016-09.com.openebs.cstor:pvc-5c52d001-c6a1-11e9-be30-42010a800094** (non-flash)
- **Current Portal: 10.108.4.158:3260,1**
- ……
- ************************
- Attached SCSI devices:
- ************************
- Host Number: 1 State: running
- scsi1 Channel 00 Id 0 Lun: 0
- **Attached scsi disk sdb State: running**
-
- ```
-
- From the above output, user can obtain the iSCSI disk related to this PV. In this example, SCSI disk related to this PV is `sdb`.
-
- - Unmount the mount points related to this PVC that are in RO.
-
- Next, perform unmount operation on mount points that are related to sdb. The following output will help to get the mount details related to disk sdb:
-
- ```
- sudo mount | grep sdb
- ```
-
- The output will be similar to the following:
-
- ```
- **/dev/sdb** on /var/lib/kubelet/plugins/kubernetes.io/iscsi/iface-default/10.108.4.158:3260-iqn.2016-09.com.openebs.cstor:pvc-5c52d001-c6a1-11e9-be30-42010a800094-lun-0 type ext4
- **/dev/sdb** on /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet/plugins/kubernetes.io/iscsi/iface-default/10.108.4.158:3260-iqn.2016-09.com.openebs.cstor:pvc-5c52d001-c6a1-11e9-be30-42010a800094-lun-0 type ext4 (ro,relatime,stripe=256,data=ordered)
- **/dev/sdb** on /var/lib/kubelet/pods/5cb0af5a-c6a1-11e9-be30-42010a800094/volumes/kubernetes.io~iscsi/pvc-5c52d001-c6a1-11e9-be30-42010a800094 type ext4 (ro,relatime,stripe=256,data=ordered)
- **/dev/sdb** on /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet/pods/5cb0af5a-c6a1-11e9-be30-42010a800094/volumes/kubernetes.io~iscsi/pvc-5c52d001-c6a1-11e9-be30-42010a800094 type ext4 (ro,relatime,stripe=256,data=ordered)
- ```
-
- Perform unmount on the above found mountpoints.
-
- - Perform `iscsiadm logout` and `iscsiadm login` of the iSCSI session related to this PVC.
-
- From the node related to application pod, perform below command to logout:
-
- ```
- sudo iscsiadm -m node -t iqn.2016-09.com.openebs.cstor:pvc-5c52d001-c6a1-11e9-be30-42010a800094 -p 10.108.4.158:3260 -u
- ```
-
- From the node related to application pod, perform below command to login:
-
- ```
- sudo iscsiadm -m node -t iqn.2016-09.com.openebs.cstor:pvc-5c52d001-c6a1-11e9-be30-42010a800094 -p 10.108.4.158:3260 -l
- ```
-
- - Find the new iSCSI disk related to this PVC.
-
- Get the new iSCSI disk name after login using the following command:
-
- ```
- sudo iscsiadm -m session -P 3
- ```
-
- Output will be similar to the following:
-
- ```shell hideCopy
- iSCSI Transport Class version 2.0-870
- version 2.0-874
- **Target: iqn.2016-09.com.openebs.cstor:pvc-5c52d001-c6a1-11e9-be30-42010a800094** (non-flash)
- ……..
- ************************
- Attached SCSI devices:
- ************************
- Host Number: 1 State: running
- scsi1 Channel 00 Id 0 Lun: 0
- **Attached scsi disk sdc State: running**
- ```
-
- - Mount the mount points that are unmounted in 3rd step .
-
- Perform mount of the SCSI disk on the mount points which are unmounted in 3rd step.
- The application may still remain in RO state. If so, restart the application pod.
-
-
-
-[Go to top](#top)
-
-### How to reconstruct data from healthy replica to replaced ones?
-
-Consider the case where cStorVolumes have replication enabled, and one/few of its replicas got replaced, i.e., they are new and lost the data. In this case, cStor volume will be in Offline state and unable to recover data to the replaced replicas from healthy replica automatically.
-
-Reconstructing data from healthy replica to the replaced ones can be done using the following steps. To perform the following steps, cStor volume should be in `Online`. If cStor volume is not in `Online`, make it online using the steps mentioned [here](#how-to-make-cstor-volume-online-if-replicas-2-of-are-lost).
-
-Run the following command to get the current state of CVR of a cStor volume.
-
-```
-kubectl get cvr -n openebs
-```
-
-Output will be similar to the following:
-
-```shell hideCopy
-NAME USED ALLOCATED STATUS AGE
-pvc-5c52d001-..........-cstor-sparse-pool-1irk 7.07M 4.12M Degraded 12m
-pvc-5c52d001-..........-cstor-sparse-pool-a1ud 6K 6K Degraded 12m
-pvc-5c52d001-..........-cstor-sparse-pool-sb1v 8.15M 4.12M Healthy 12m
-```
-
-For easy representation, healthy replica will be referred to as R1, and offline replica that needs to be reconstructed with data will be referred to as R2. User should keep the information of healthy replica and replaced replica and associated pool in handy.
-
-In this example, R1 is `pvc-5c52d001-c6a1-11e9-be30-42010a800094-cstor-sparse-pool-sb1v` and R2 is `pvc-5c52d001-c6a1-11e9-be30-42010a800094-cstor-sparse-pool-a1ud` . Pool name associated to R1 is `cstor-sparse-pool-sb1v-77658f4c85-jcgwc` and pool name related to R2 is `cstor-sparse-pool-a1ud-5dd8bb6fb6-f54md`. Pool name can be found by doing `zpool list` in the pool pod of that particular node.
-
-The following are the steps to reconstruct data from healthy replica to a replaced replica:
-
-1. Take base snapshot on R1.
-2. Copy the base snapshot to R2’s node.
-3. Apply the base snapshot to the pool of R2.
-4. Take incremental snapshot on R1.
-5. Copy the incremental snapshot to R2’s node.
-6. Apply the above incremental snapshot to the pool of R2.
-7. Repeat steps 4, 5 and 6 till incremental snapshot is of lesser size.
-8. Scale down spec.replicas to 0 of client application so that final changes can be transferred.
-9. Scale down spec.replicas of target pod deployment to 0.
-10. Perform steps 4, 5 and 6.
-11. Set TargetIP and Quorum properties on R2.
-12. Edit cStorVolume CR of this PVC to increase `ReplicationFactor` by 1 and to set `ConsistencyFactor` to (ReplicationFactor/2 + 1).
-13. Scale up `spec.replicas` to 1 of target pod deployment.
-14. Scale up `spec.replicas` of client application
-15. Edit cStorVolume CR of this PVC to increase `ReplicationFactor` by 1 and to set `ConsistencyFactor` to (ReplicationFactor/2 + 1).
-16. Restart the cStor target pod deployment.
-
-**Step1:** Take base snapshot on R1.
-
-- Exec into cstor-pool-mgt container of pool pod of R1 to run snapshot command:
-
- ```
- kubectl exec -it -n openebs -c cstor-pool-mgmt -- bash
- ```
-
-- Run the following command inside the pool pod container to take the base snapshot
-
- ```
- zfs snapshot /@
- ```
-
- Example command:
-
- ```
- root@cstor-sparse-pool-sb1v-77658f4c85-jcgwc:/# zfs snapshot cstor-231fca0f-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094@snap_data1
- ```
-
-**Step2:** Copy the base snapshot to R2’s node.
-
-There are multiple ways to do this. In this article, above created snapshot is streamed to a file. This streamed file is copied to node related to R2. As /tmp directory of the pool pod is mounted on the host node at `/var/openebs/sparse/shared-`, streamed file will be created at R1 and copied at R2 to this location.
-
-- Stream snapshot to a file:
-
- Exec into cstor-pool-mgmt container of pool pod of R1 to run the below command:
-
- ```
- zfs send /@ > /tmp/base_snap_file
- ```
-
- Example command:
-
- ```
- root@cstor-sparse-pool-sb1v-77658f4c85-jcgwc:/# zfs send cstor-231fca0f-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094@snap_data1 > /tmp/pvc-5c52d001-c6a1-11e9-be30-42010a800094_snap_data1
- ```
-
-- Copy the streamed file to local machine:
-
- ```
- gcloud compute --project "" scp --zone "us-central1-a" @gke-test-default-pool-0337597c-3b5d:/var/openebs/sparse/shared-cstor-sparse-pool/pvc-5c52d001-c6a1-11e9-be30-42010a800094_snap_data1
- ```
-
-- Copy the local copy of streamed file to another node:
-
- ```
- gcloud beta compute --project "" scp --zone "us-central1-a" pvc-5c52d001-c6a1-11e9-be30-42010a800094_snap_data1 @gke-test-default-pool-0337597c-vdg1:/var/openebs/sparse/shared-cstor-sparse-pool/
- ```
-
-**Step3:** Apply the base snapshot to the pool of R2
-
-Applying base snapshot to pool related to R2 involves setting a few parameters. Below are the steps:
-
-- Exec into `cstor-pool-mgmt` container of pool pod related to R2.
-
- ```
- kubectl exec -it -c cstor-pool-mgmt -n -- bash
- ```
-
- Example command:
-
- ```
- kubectl exec -it -c cstor-pool-mgmt -n openebs cstor-sparse-pool-a1ud-5dd8bb6fb6-f54md -- bash
- ```
-
-- Identify the PV related datasets.
-
- ```
- zfs list -t all
- ```
-
- The output will be similar to the following:
-
- ```shell hideCopy
- NAME USED AVAIL REFER MOUNTPOINT
- cstor-2292c294-c6a1-11e9-be30-42010a800094 9.86M 9.62G 512B /cstor-2292c294-c6a1-11e9-be30-42010a800094
- **cstor-2292c294-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094** 6K 9.62G 6K -
- **cstor-2292c294-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094@rebuild_snap** 0B - 6K -
- **cstor-2292c294-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094_rebuild_clone** 0B 9.62G 6K -
- ```
-
- Look for PV name in the above list output. In this example, datasets that are in bold are related to this PV.
-
- **Note:** Dataset which is `/` is the main replica. Dataset that ends with `@rebuild_snap` is the internally created snapshot, and the one that ends with `_rebuild_clone` is internally created clone.
-
-- Unset target IP on the main volume.
-
- Run the following command to unset target ip on the main volume:
-
- ```
- zfs set io.openebs:targetip= /
- ```
-
- Example command:
-
- ```
- root@cstor-sparse-pool-a1ud-5dd8bb6fb6-f54md:/# zfs set io.openebs:targetip= cstor-2292c294-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094
- ```
-
-- Delete rebuild_snap snapshot.
-
- **Note:** Below are destructive steps and need to be performed on verifying that they are the correct ones to be deleted.
-
- ```
- zfs destroy /
- ```
-
- Example command:
-
- ```
- root@cstor-sparse-pool-a1ud-5dd8bb6fb6-f54md:/# zfs destroy cstor-2292c294-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094_rebuild_clone
- ```
-
-- Delete internally created clone related to this PV with suffix as _rebuild_clone.
-
- **Note:** Below are destructive steps and need to be performed on verifying that they are the correct ones to be deleted.
-
- ```
- zfs destroy /@rebuild_snap
- ```
-
- Example command:
-
- ```
- root@cstor-sparse-pool-a1ud-5dd8bb6fb6-f54md:/# zfs destroy cstor-2292c294-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094@rebuild_snap
- ```
-
-- Apply the streamed file to offline pool:
-
- ```
- cat | zfs recv -F /
- ```
-
- Example command:
-
- ```
- root@cstor-sparse-pool-a1ud-5dd8bb6fb6-f54md:/# cat /tmp/pvc-5c52d001-c6a1-11e9-be30-42010a800094_snap_data1 | zfs recv -F cstor-2292c294-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a8000
- 94
- ```
-
- Capacity “USED” can be verified by doing `zfs list -t all`
-
-**Step4:** Take incremental snapshot on R1
-
-- From `cstor-pool-mgmt` container of pool pod related to R1, perform following command:
-
- ```
- zfs snapshot /@
- ```
-
- Example command:
-
- ```
- root@cstor-sparse-pool-sb1v-77658f4c85-jcgwc:/# zfs snapshot cstor-231fca0f-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094@snap_data1_data2
- ```
-
- Please note that snapshot name which is as mentioned above need to be different for each incremental snapshot.
-
-**Step5:** Copy the data in incremental snapshot to R2’s node
-
-This involves streaming the incremental data to a file, copying it to R2.
-
-- Stream incremental snapshot to a file:
-
- From cstor-pool-mgmt container of pool pod of R1, run below command:
-
- ```
- zfs send -i /@/@ > /tmp/
- ```
-
- Example command:
-
- ```
- root@cstor-sparse-pool-sb1v-77658f4c85-jcgwc:/# zfs send -i cstor-231fca0f-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094@snap_data1 cstor-231fca0f-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094@snap_data1_data2 > /tmp/pvc-5c52d001-c6a1-11e9-be30-42010a800094_snap_data1_data2
- ```
-
- Copy the streamed file to R2 following the steps similar to copying the streamed file related to base snapshot.
-
-**Step6:** Apply the above incremental snapshot to pool of R2
-
-- Exec into `cstor-pool-mgmt` container of pool pod of R2 to run:
-
- ```
- cat /tmp/ | zfs recv /
- ```
-
- Example command to apply incremental snapshot:
-
- ```
- root@cstor-sparse-pool-a1ud-5dd8bb6fb6-f54md:/# cat /tmp/pvc-5c52d001-c6a1-11e9-be30-42010a800094_snap_data1_data2 | zfs recv cstor-2292c294-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094
- ```
-
- `zfs list -t all` should show the dataset related to this PVC with increased "USED" space.
-
- Example command:
-
- ```
- root@cstor-sparse-pool-a1ud-5dd8bb6fb6-f54md:/# zfs list -t all
- ```
-
- Output will be similar to the following:
-
- ```shell hideCopy
- NAME USED AVAIL REFER MOUNTPOINT
- cstor-2292c294-c6a1-11e9-be30-42010a800094 85.7M 9.54G 512B /cstor-2292c294-c6a1-11e9-be30-42010a800094
- **cstor-2292c294-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094** 84.9M 9.54G 84.9M -
- cstor-2292c294-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094@snap_data1 41.5K - 4.12M -
- cstor-2292c294-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094@snap_data1_data2 0B - 84.9M -
- ```
-
-**Step7:** Repeat steps 4, 5 and 6 till incremental snapshot is of lesser size
-
-**Step 8**: Scale down `spec.replicas` to 0 of client application so that final changes can be transferred.
-
-**Step 9**:Scale down `spec.replicas` of target pod deployment to 0.
-
-**Step 10**: Perform steps 4, 5 and 6
-
-**Step 11**: Set TargetIP and Quorum properties on R2
-
-Once **steps 8,9 and 10** are followed, set `targetIP` and quorum properties of newly reconstructed R2.
-
-- Set quorum on the newly reconstructed dataset on R2.
-
- In the cstor-pool-mgmt container of pool pod related to R2, perform the following command:
-
- ```
- zfs set quorum=on /
- ```
-
- Example command:
-
- ```
- root@cstor-sparse-pool-a1ud-5dd8bb6fb6-f54md:/# zfs set quorum=on cstor-2292c294-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094
- ```
-
-- Set `targetIP` on the newly rebuilt dataset on R2
-
- ```
- zfs set io.openebs:targetip=/
- ```
-
- Example command:
-
- ```
- root@cstor-sparse-pool-a1ud-5dd8bb6fb6-f54md:/# zfs set io.openebs:targetip=10.108.4.158 cstor-2292c294-c6a1-11e9-be30-42010a800094/pvc-5c52d001-c6a1-11e9-be30-42010a800094
- ```
-
-**Step 12**: Edit ‘CStorVolume’ CR of this PVC to increase `ReplicationFactor` by 1 and to set `ConsistencyFactor` to (ReplicationFactor/2 + 1)
-
-**Step 13**: Scale up `spec.replicas` to 1 of target pod deployment
-
-On performing step 12 and 13, this newly reconstructed replica gets added to the cStor volume.
-
-Status of CStorVolume and CVRs related to this PV looks like:
-
-```shell hideCopy
-NAME USED ALLOCATED STATUS AGE
-pvc-5c52d001-...........-cstor-sparse-pool-1irk 7.07M 4.12M Offline 3d
-pvc-5c52d001-...........-cstor-sparse-pool-a1ud 90.8M 85.0M Healthy 3d
-pvc-5c52d001-...........-cstor-sparse-pool-sb1v 90.8M 85.0M Healthy 3d
-```
-
-Check the details of the corresponding cStor volume using the following command:
-
-```
-kubectl get cstorvolume -n openebs -o yaml
-```
-
-The following is a snippet of the output of the above command:
-
-```
-apiVersion: v1
-items:
-- apiVersion: openebs.io/v1alpha1
- kind: CStorVolume
- metadata:
-…….
- name: pvc-5c52d001-c6a1-11e9-be30-42010a800094
- uid: 5c767ae5-c6a1-11e9-be30-42010a800094
- spec:
-…….
- consistencyFactor: 2
- iqn: iqn.2016-09.com.openebs.cstor:pvc-5c52d001-c6a1-11e9-be30-42010a800094
- replicationFactor: 2
- targetPortal: 10.108.4.158:3260
- status:
-…….
- phase: **Healthy**
- replicaStatuses:
- - checkpointedIOSeq: "1049425"
-…….
- mode: **Healthy**
- quorum: **"1"**
- replicaId: "10135959964398189975"
- - checkpointedIOSeq: "1049282"
-…….
- mode: **Healthy**
- quorum: **"1"**
- replicaId: "3920180363968537568"
-```
-
-**Step 14**: Scale up `spec.replicas` of client application.
-
-**Step 15**: To make third replica healthy, edit ‘CStorVolume’ CR of this PVC to increase `ReplicationFactor` by 1 and to set `ConsistencyFactor` to (ReplicationFactor/2 + 1). After doing this change, `ReplicationFactor` will be 3 and `ConsistencyFactor` to 2.
-
-**Step 16:** Restart the cStor target pod deployment of the volume using the following command:
-
-```
-kubectl delete pod -n openebs
-```
-
-**Step 17:** After sometime, the third replica will come to healthy state. Status of CVRs related to this PV will looks like below:
-
-```shell hideCopy
-NAME USED ALLOCATED STATUS AGE
-pvc-5c52d001-...........-cstor-sparse-pool-1irk 90.8M 85.0M Healthy 3d
-pvc-5c52d001-...........-cstor-sparse-pool-a1ud 90.8M 85.0M Healthy 3d
-pvc-5c52d001-...........-cstor-sparse-pool-sb1v 90.8M 85.0M Healthy 3d
-```
-
-[Go to top](#top)
-
-### How to verify whether cStor volume is running fine?
-
-### Overview {#overview-cstor-volume}
-
-The following items will be discussed:
-
-1. Verification of cStor Storage Pool(CSP)
-2. Verification of cStor Volume
-3. Verification of cStor Volume Replica(CVR)
-
-
-### Verification of cStor Storage Pool
-
-cStor Storage Pool(CSP) resources are cluster scoped. Status of CSPs can be obtained using the following way.
-
-```
-kubectl get csp
-```
-
-Example output:
-
-```shell hideCopy
-NAME ALLOCATED FREE CAPACITY STATUS TYPE AGE
-cstor-disk-pool-g5go 270K 9.94G 9.94G Healthy striped 2m
-cstor-disk-pool-srj3 270K 9.94G 9.94G Healthy striped 2m
-cstor-disk-pool-tla4 270K 9.94G 9.94G Healthy striped 2m
-```
-
-Status of each cStor pool can be found under `STATUS` column. The following are the different type of `STATUS` information of cStor pools and their meaning.
-
-**Healthy:** This state represents cStor pool is online and running.
-
-**Offline:** cStor pool status is offline due to the following cases:
-- when pool creation or pool import is failed.
-- when a disk is unavailable in case of the pool is created in a striped manner.
-- when tampering happens on CSP resource and invalid values are set then CSP will be updated to offline.
-
-**Degraded:** cStor pool status is degraded due to the following cases:
-- when any one of the disks is unavailable on the node where the pool is created either Mirror, Raidz or Raidz2 manner.
-
-**Error:** This means cstor-pool container in cStor pool pod is not in running state.
-
-**DeletionFailed:** There could be an internal error occurred when CSP is deleted.
-
-**Note:** Status of CSPs are updated only if its corresponding cStor pool pod is Running. If the cStor pool pod of corresponding cStor pool is not running, then the status of cStor pool shown in the above output may be stale.
-
-
-### Verification of cStor Volume
-cStor Volume is namespace scoped resource. You have to provide the same namespace where openebs is installed. Status of cStor Volume can be obtained using the following way.
-
-```
-kubectl get cstorvolume -n
-```
-
-Example command:
-
-```
-kubectl get cstorvolume -n openebs
-```
-
-Example output:
-
-```shell hideCopy
-NAME STATUS AGE CAPACITY
-pvc-4c3baced-c020-11e9-ad45-42010a8001c8 Healthy 1h 5G
-```
-
-Status of each cStor volume can be found under `STATUS` field.
-
-**Note:** If the target pod of corresponding cStor volume is not running, then the status of cStor volume shown in the above output may be stale.
-
-The following are the different type of STATUS information of cStor volumes and their definition.
-
-**Init:** Init status of cStor volume is due to the following cases:
-
-- when the cStor volume is created.
-- when the replicas are not connected to target pod.
-
-**Healthy:** Healthy status of cStor volume represents that 51% of healthy replicas are connected to the target and volume is ready IO operations.
-
-**Degraded:** Minimum 51% of replicas are connected and some of these replicas are in degraded state, then volume will be running as degraded state and IOs are operational in this state.
-
-**Offline:** When number of replicas which is equal to Consistency Factor are not yet connected to the target due to network issues or some other reasons In this case, volume is not ready to perform IOs.
-
-For getting the number of replicas connected to the target pod of the cStor volume, use following command:
-
-```
-kubectl get cstorvolume -n -o yaml.
-```
-
-Example output:
-
-In this case, replicationFactor is 3.
-
-```shell hideCopy
-status:
- capacity: 5G
- lastTransitionTime: "2019-08-16T12:22:21Z"
- lastUpdateTime: "2019-08-16T13:36:51Z"
- phase: Healthy
- replicaStatuses:
- - checkpointedIOSeq: "0"
- inflightRead: "0"
- inflightSync: "0"
- inflightWrite: "0"
- mode: Healthy
- quorum: "1"
- replicaId: "15881113332075879720"
- upTime: 4516
- - checkpointedIOSeq: "0"
- inflightRead: "0"
- inflightSync: "0"
- inflightWrite: "0"
- mode: Healthy
- quorum: "1"
- replicaId: "1928348327271356191"
- upTime: 4515
- - checkpointedIOSeq: "0"
- inflightRead: "0"
- inflightSync: "0"
- inflightWrite: "0"
- mode: Healthy
- quorum: "1"
- replicaId: "3816436440075944405"
- upTime: 4514
-```
-
-
-In the above output, if `quorum: **0**` then data written to that replica is lost(not ready to read). If `quorum: **1**` then that replica is ready for Read and Write operation.
-
-If anything went wrong then the error can be seen in cStor volume events `kubectl describe cstorvolume -n `
-
-### Verification of cStor Volume Replica
-
-
-cStor Volume Replica is namespace scoped resource. You have to provide the same namespace where OpenEBS is installed. Status of cStor Volume Replica can be obtained using the following way.
-
-```
-kubectl get cvr -n
-```
-
-Example command:
-
-```
-kubectl get cvr -n openebs
-```
-
-Example output:
-
-```shell hideCopy
-NAME USED ALLOCATED STATUS AGE
-pvc-4c3baced-c020-11e9-ad45-42010a8001c8-cstor-disk-pool-g5go 6K 6K Offline 21s
-pvc-4c3baced-c020-11e9-ad45-42010a8001c8-cstor-disk-pool-srj3 6K 6K Offline 21s
-pvc-4c3baced-c020-11e9-ad45-42010a8001c8-cstor-disk-pool-tla4 6K 6K Offline 21s
-```
-
-Status of each cStor volume Replica can be found under `STATUS` field.
-
-**Note:** If the pool pod of corresponding cStor volume replica is not running, then the status of CVR shown in the output of the above command may be stale.
-
-The following are the different type of STATUS information of cStor Volumes Replica and their definition.
-
-**Healthy:** Healthy state represents volume is healthy and volume data existing on this replica is up to date.
-
-**Offline:** cStor volume replica status is offline due to the following cases:
-- when the corresponding cStor pool is not available to create volume.
-- when the creation of cStor volume fails.
-- when the replica is not yet connected to the target.
-
-**Degraded:** cStor volume replica status is degraded due to the following case
-- when the cStor volume replica is connected to the target and rebuilding is not yet started on this replica.
-
-**Rebuilding:** cStor volume replica status is rebuilding when the cStor volume replica is undergoing rebuilding, that means, data sync with another replica.
-
-**Error:** cStor volume replica status is in error state due to the following cases:
-- when the volume replica data set is not existing in the pool.
-- when an error occurs while getting the stats of cStor volume.
-- when the unit of size is not mentioned in PVC spec. For example, if the size is 5 instead of 5G.
-
-**DeletionFailed:** cStor volume replica status will be in `deletion failed` when destroying corresponding cStor volume fails.
-
-**Invalid:** cStor volume replica status is invalid when a new cstor-pool-mgmt container in a new pod is communicating with the old cstor-pool container in an old pod.
-
-**Init:** cStor volume replica status init represents the volume is not yet created.
-
-**Recreate:** cStor volume replica status recreate represents an intermediate state before importing the volume(this can happen only when pool pod got restarted) in case of a non-ephemeral disk. If the disk is ephemeral then this status represents volume is going to recreate.
-
-**NewReplicaDegraded:** cStor volume replica is newly created and it make successful connection with the target pod.
-
-**ReconstructingNewReplica:** cStor volume replica is newly created and it started reconstructing entire data from another healthy replica.
-
-**Note:** If cStor Volume Replica `STATUS` is in `NewReplicaDegraded` or `NewReplicaReconstructing`, then the corresponding volume replica will not be part of the quorum decisions.
-
-
-
-[Go to top](#top)
-
-
-### Expanding Jiva Storage Volumes
-
-
-You can resize/expand the OpenEBS volume using the following procedure. Execute the commands from step 2 to 8 as root user on the node where application pod is running.
-
-**Step 1:** Identify the node where application pod is running. Also note down the IP address of corresponding Jiva controller pod. This IP address is needed in step 7. The above details can be obtained by running the following command:
-
-```
-kubectl get pod -n -o wide
-```
-
-Example:
-
-```
-kubectl get pod -n default -o wide
-```
-
-Example output:
-
-```shell hideCopy
-NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
-percona-66db7d9b88-ltdsf 1/1 Running 0 9m47s 10.16.0.8 gke-ranjith-jiva-resize-default-pool-ec5045bf-mzf4
-pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-ctrl-798dcd69d8-k5v29 2/2 Running 0 9m46s 10.16.1.8 gke-ranjith-jiva-resize-default-pool-ec5045bf-rq1b
-pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-rep-56866d8696-5fwxr 1/1 Running 0 9m41s 10.16.1.9 gke-ranjith-jiva-resize-default-pool-ec5045bf-rq1b
-pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-rep-56866d8696-8rclm 1/1 Running 0 9m41s 10.16.0.7 gke-ranjith-jiva-resize-default-pool-ec5045bf-mzf4
-pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-rep-56866d8696-sjvtq 1/1 Running 0 9m41s 10.16.2.10 gke-ranjith-jiva-resize-default-pool-ec5045bf-24f1
-```
-
-In the above sample output, application pod is running on node ` gke-ranjith-jiva-resize-default-pool-ec5045bf-mzf4` and Jiva controller pod IP is `10.16.1.8`.
-
-**Step 2:** SSH to the node where application pod is running and run the following command.
-
-```
-lsblk
-```
-
-Example output:
-
-```shell hideCopy
-NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
-sda 8:0 0 40G 0 disk
-├─sda1 8:1 0 39.9G 0 part /
-├─sda14 8:14 0 4M 0 part
-└─sda15 8:15 0 106M 0 part /boot/efi
-sdb 8:16 0 5G 0 disk /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet/pods/25abb7fa-eb2d-11e9-b8d1-42010a800093/volumes/kubernetes.io~iscsi/pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093
-```
-
-In the above sample output, `sdb` is the target volume with `5G` capacity. Similarly, find out the volume which needs to be resized.
-
-**Step 3:** Obtain the iSCSI target and disk details using the following command.
-
-```
-iscsiadm -m session -P 3
-```
-
-Example output:
-
-```shell hideCopy
-iSCSI Transport Class version 2.0-870
-version 2.0-874
-Target: iqn.2016-09.com.openebs.jiva:pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093 (non-flash)
- Current Portal: 10.20.23.99:3260,1
- Persistent Portal: 10.20.23.99:3260,1
- **********
- Interface:
- **********
- Iface Name: default
- Iface Transport: tcp
- Iface Initiatorname: iqn.1993-08.org.debian:01:f6771fccb5af
- Iface IPaddress: 10.128.0.103
- Iface HWaddress:
- Iface Netdev:
- SID: 1
- iSCSI Connection State: Unknown
- iSCSI Session State: LOGGED_IN
- Internal iscsid Session State: Unknown
- *********
- Timeouts:
- *********
- Recovery Timeout: 120
- Target Reset Timeout: 30
- LUN Reset Timeout: 30
- Abort Timeout: 15
- *****
- CHAP:
- *****
- username:
- password: ********
- username_in:
- password_in: ********
- ************************
- Negotiated iSCSI params:
- ************************
- HeaderDigest: None
- DataDigest: None
- MaxRecvDataSegmentLength: 262144
- MaxXmitDataSegmentLength: 65536
- FirstBurstLength: 65536
- MaxBurstLength: 262144
- ImmediateData: Yes
- InitialR2T: Yes
- MaxOutstandingR2T: 1
- ************************
- Attached SCSI devices:
- ************************
- Host Number: 1 State: running
- scsi1 Channel 00 Id 0 Lun: 0
- Attached scsi disk sdb State: running
-```
-
-In the above sample output, there is only one volume present on the node. So it is easy to get the iSCSI target IP address and the disk details. In this example disk is `sdb`, iSCSI target IP is `10.20.23.99:3260`, and target iqn is `iqn.2016-09.com.openebs.jiva:pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093`. Similarly, find out the target IP address, IQN and the disk name of the volume that has to be resized and note down these information for future use. If there are many volumes attached on the node, then identify the disk using the PV name.
-
-**Step 4** Check the mount path on disk `sdb` using the following command.
-
-```
-mount | grep /dev/sdb | more
-```
-
-Example snippet of Output:
-
-```shell hideCopy
-/dev/sdb on /var/lib/kubelet/plugins/kubernetes.io/iscsi/iface-default/10.20.23.99:3260-iqn.2016-09.com.openebs.jiva:pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-lun-0 type ext4 (rw,relatime,data=ordered)
-/dev/sdb on /var/lib/kubelet/plugins/kubernetes.io/iscsi/iface-default/10.20.23.99:3260-iqn.2016-09.com.openebs.jiva:pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-lun-0 type ext4 (rw,relatime,data=ordered)
-/dev/sdb on /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet/plugins/kubernetes.io/iscsi/iface-default/10.20.23.99:3260-iqn.2016-09.com.openebs.jiva:pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-lun-0 type ext4 (rw,relatime,data=ordered)
-/dev/sdb on /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet/plugins/kubernetes.io/iscsi/iface-default/10.20.23.99:3260-iqn.2016-09.com.openebs.jiva:pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-lun-0 type ext4 (rw,relatime,data=ordered)
-/dev/sdb on /var/lib/kubelet/pods/25abb7fa-eb2d-11e9-b8d1-42010a800093/volumes/kubernetes.io~iscsi/pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093 type ext4 (rw,relatime,data=ordered)
-/dev/sdb on /var/lib/kubelet/pods/25abb7fa-eb2d-11e9-b8d1-42010a800093/volumes/kubernetes.io~iscsi/pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093 type ext4 (rw,relatime,data=ordered)
-/dev/sdb on /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet/pods/25abb7fa-eb2d-11e9-b8d1-42010a800093/volumes/kubernetes.io~iscsi/pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093 type ext4 (rw,relatime,data=ordered)
-/dev/sdb on /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet/pods/25abb7fa-eb2d-11e9-b8d1-42010a800093/volumes/kubernetes.io~iscsi/pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093 type ext4 (rw,relatime,data=ordered)
-```
-
-**Step 5:** Unmount the file system using following command. The following is an example command with respect to the above output. Update correct mount path according to your deployment. Ensure that you are running following commands as super user.
-
-```
-umount /var/lib/kubelet/plugins/kubernetes.io/iscsi/iface-default/10.20.23.99:3260-iqn.2016-09.co
-m.openebs.jiva:pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-lun-0
-umount /var/lib/kubelet/pods/25abb7fa-eb2d-11e9-b8d1-42010a800093/volumes/kubernetes.io~iscsi/pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093
-```
-
-**Step 6:** Logout from the particular iSCSI target using the following command:
-
-```
-iscsiadm -m node -u -T
-```
-
-Example:
-
-```
-iscsiadm -m node -u -T iqn.2016-09.com.openebs.jiva:pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093
-```
-
-Example output:
-
-```shell hideCopy
-Logging out of session [sid: 1, target: iqn.2016-09.com.openebs.jiva:pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093, portal: 10.20.23.99,3260]
-Logout of [sid: 1, target: iqn.2016-09.com.openebs.jiva:pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093, portal: 10.20.23.99,3260] successful
-```
-
-
-**Step 7:** Get the volume ID using the following command:
-
-```
-curl http://:9501/v1/volumes
-```
-
-Example:
-
-```
-curl http://10.16.1.8:9501/v1/volumes
-```
-
-Example output:
-
-```shell hideCopy
-{"data":[{"actions":{"deleteSnapshot":"http://10.16.1.8:9501/v1/volumes/cHZjLTI1ZThmNmYxLWViMmQtMTFlOS1iOGQxLTQyMDEwYTgwMDA5Mw==?action=deleteSnapshot","revert":"http://10.16.1.8:9501/v1/volumes/cHZjLTI1ZThmNmYxLWViMmQtMTFlOS1iOGQxLTQyMDEwYTgwMDA5Mw==?action=revert","shutdown":"http://10.16.1.8:9501/v1/volumes/cHZjLTI1ZThmNmYxLWViMmQtMTFlOS1iOGQxLTQyMDEwYTgwMDA5Mw==?action=shutdown","snapshot":"http://10.16.1.8:9501/v1/volumes/cHZjLTI1ZThmNmYxLWViMmQtMTFlOS1iOGQxLTQyMDEwYTgwMDA5Mw==?action=snapshot"},"id":"cHZjLTI1ZThmNmYxLWViMmQtMTFlOS1iOGQxLTQyMDEwYTgwMDA5Mw==","links":{"self":"http://10.16.1.8:9501/v1/volumes/cHZjLTI1ZThmNmYxLWViMmQtMTFlOS1iOGQxLTQyMDEwYTgwMDA5Mw=="},"name":"pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093","readOnly":"false","replicaCount":3,"type":"volume"}],"links":{"self":"http://10.16.1.8:9501/v1/volumes"},"resourceType":"volume","type":"collection"}
-```
-
-From above example output, You can find volume id is `cHZjLTI1ZThmNmYxLWViMmQtMTFlOS1iOGQxLTQyMDEwYTgwMDA5Mw==`, Jiva target pod IP is `10.16.1.8`, and the Volume name is `pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093`. These parameters are required in the next step.
-
-**Step 8:** Specify desired size of volume and the above parameters in the following command.
-
-```
-curl -H "Content-Type: application/json" -X POST -d '{"name":"pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093","size":"8G"}' http://10.16.1.8:9501/v1/volumes/cHZjLTI1ZThmNmYxLWViMmQtMTFlOS1iOGQxLTQyMDEwYTgwMDA5Mw==?action=resize
-```
-Example output:
-
-```shell hideCopy
-{"actions":{"deleteSnapshot":"http://10.16.1.8:9501/v1/volumes/cHZjLTI1ZThmNmYxLWViMmQtMTFlOS1iOGQxLTQyMDEwYTgwMDA5Mw==?action=deleteSnapshot","revert":"http://10.16.1.8:9501/v1/volumes/cHZjLTI1ZThmNmYxLWViMmQtMTFlOS1iOGQxLTQyMDEwYTgwMDA5Mw==?action=revert","shutdown":"http://10.16.1.8:9501/v1/volumes/cHZjLTI1ZThmNmYxLWViMmQtMTFlOS1iOGQxLTQyMDEwYTgwMDA5Mw==?action=shutdown","snapshot":"http://10.16.1.8:9501/v1/volumes/cHZjLTI1ZThmNmYxLWViMmQtMTFlOS1iOGQxLTQyMDEwYTgwMDA5Mw==?action=snapshot"},"id":"cHZjLTI1ZThmNmYxLWViMmQtMTFlOS1iOGQxLTQyMDEwYTgwMDA5Mw==","links":{"self":"http://10.16.1.8:9501/v1/volumes/cHZjLTI1ZThmNmYxLWViMmQtMTFlOS1iOGQxLTQyMDEwYTgwMDA5Mw=="},"name":"pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093","readOnly":"false","replicaCount":3,"type":"volume"}
-```
-
-From the above output, volume has been expanded to `8G`.
-
-**Step 9:** Run step 9 and step 10 from Kubernetes master node. Get the Jiva pod details using the following command:
-
-```
-kubectl get pods -n
-```
-
-Example output:
-
-```shell hideCopy
-NAME READY STATUS RESTARTS AGE
-percona-66db7d9b88-ltdsf 1/1 Running 0 6h38m
-pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-ctrl-798dcd69d8-k5v29 2/2 Running 0 6h38m
-pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-rep-56866d8696-5fwxr 1/1 Running 0 6h38m
-pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-rep-56866d8696-8rclm 1/1 Running 0 6h38m
-pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-rep-56866d8696-sjvtq 1/1 Running 0 6h38m
-```
-
-**Step 10:** Restart all replica pods of the corresponding volume using the following command. If the replica count of Jiva volume is more than 1, then you must delete all the replica pods of corresponding Jiva volume simultaneously using single command.
-
-```
-kubectl delete pods ... -n
-```
-
-Example:
-
-```
-kubectl delete pod pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-rep-56866d8696-5fwxr pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-rep-56866d8696-8rclm pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-rep-56866d8696-sjvtq -n default
-```
-
-Example output:
-
-```shell hideCopy
-pod "pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-rep-56866d8696-5fwxr" deleted
-pod "pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-rep-56866d8696-8rclm" deleted
-pod "pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-rep-56866d8696-sjvtq" deleted
-```
-
-Verify if new Jiva pods are running successfully using:
-
-```
-kubectl get pods -n
-```
-
-**Step 11:** Perform step 11 to 15 as root user on node where the application pod is running. Perform iSCSI target login using the following commands.
-
-```
-iscsiadm -m discovery -t st -p :3260
-```
-
-Example:
-
-```
-iscsiadm -m discovery -t st -p 10.16.1.8:3260
-```
-
-Example output:
-
-```shell hideCopy
-10.20.23.99:3260,1 iqn.2016-09.com.openebs.jiva:pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093
-```
-
-From above output, iSCSI target IP is `10.20.23.99:3260`.
-
-Now, Login to the iSCSI target using the following command:
-
-```
-iscsiadm -m discovery -t st -p 10.20.23.99:3260 -l
-```
-
-Example output:
-
-```shell hideCopy
-10.20.23.99:3260,1 iqn.2016-09.com.openebs.jiva:pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093
-Logging in to [iface: default, target: iqn.2016-09.com.openebs.jiva:pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093, portal: 10.20.23.99,3260] (multiple)
-Login to [iface: default, target: iqn.2016-09.com.openebs.jiva:pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093, portal: 10.20.23.99,3260] successful.
-```
-
-**Step 12:** Verify the resized disk details using the following command:
-
-```
-lsblk
-```
-
-Example output:
-
-```shell hideCopy
-NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
-sda 8:0 0 40G 0 disk
-├─sda1 8:1 0 39.9G 0 part /
-├─sda14 8:14 0 4M 0 part
-└─sda15 8:15 0 106M 0 part /boot/efi
-sdc 8:32 0 8G 0 disk
-```
-
-From the above output, You can see the resized disk is `sdc` with 8G capacity.
-
-**Step 13:** Check the file system consistency using the following command:
-
-```
-e2fsck -f
-```
-
-In following example, /dev/sdc is the newly expanded disk.
-
-```
-e2fsck -f /dev/sdc
-```
-
-Example output:
-
-```shell hideCopy
-e2fsck 1.44.1 (24-Mar-2018)
-/dev/sdc: recovering journal
-Pass 1: Checking inodes, blocks, and sizes
-Pass 2: Checking directory structure
-Pass 3: Checking directory connectivity
-Pass 4: Checking reference counts
-Pass 5: Checking group summary information
-Free blocks count wrong (1268642, counted=1213915).
-Fix? yes
-Free inodes count wrong (327669, counted=327376).
-Fix? yes
-
-/dev/sdc: ***** FILE SYSTEM WAS MODIFIED *****
-/dev/sdc: 304/327680 files (7.2% non-contiguous), 96805/1310720 blocks
-```
-
-**Step 14:** Expand the file system using the following command. In the following example, `/dev/sdc` is the newly expanded disk.
-
-```
-resize2fs /dev/sdc
-```
-
-Example output:
-
-```shell hideCopy
-resize2fs 1.44.1 (24-Mar-2018)
-Resizing the filesystem on /dev/sdc to 2097152 (4k) blocks.
-The filesystem on /dev/sdc is now 2097152 (4k) blocks long.
-```
-
-**Step 15:** Mount the file system using the following commands as root user:
-
-```
-mount /dev/sdc /var/lib/kubelet/plugins/kubernetes.io/iscsi/iface-default/10.20.23.99:3260-iqn.2016-09.com.openebs.jiva:pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-lun-0
-mount /dev/sdc /var/lib/kubelet/pods/25abb7fa-eb2d-11e9-b8d1-42010a800093/volumes/kubernetes.io~iscsi/pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093
-```
-
-**Step 16:** Execute the below command in Kubernetes master node. Restart the application pod using the following command:
-
-```
-kubectl delete pod
-```
-
-Verify if the application pod is running using the following command. Note down the node where the application pod is running now.
-
-```
-kubectl get pod -n -o wide
-```
-Example output:
-
-```shell hideCopy
-NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
-percona-66db7d9b88-bnr8w 1/1 Running 0 81m 10.16.2.12 gke-ranjith-jiva-resize-default-pool-ec5045bf-24f1
-pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-ctrl-798dcd69d8-k5v29 2/2 Running 0 8h 10.16.1.8 gke-ranjith-jiva-resize-default-pool-ec5045bf-rq1b
-pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-rep-56866d8696-65c8z 1/1 Running 0 94m 10.16.1.10 gke-ranjith-jiva-resize-default-pool-ec5045bf-rq1b
-pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-rep-56866d8696-6znbr 1/1 Running 1 94m 10.16.0.9 gke-ranjith-jiva-resize-default-pool-ec5045bf-mzf4
-pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093-rep-56866d8696-m9lrx 1/1 Running 0 94m 10.16.2.11 gke-ranjith-jiva-resize-default-pool-ec5045bf-24f1
-```
-
-**Step 17:** Identify the node where new application pod is running. Then SSH to the node to verify the expanded size and execute the following command:
-
-```
-lsblk
-```
-
-Example output:
-
-```shell hideCopy
-NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
-sda 8:0 0 40G 0 disk
-├─sda1 8:1 0 39.9G 0 part /
-├─sda14 8:14 0 4M 0 part
-└─sda15 8:15 0 106M 0 part /boot/efi
-sdb 8:16 0 8G 0 disk /home/kubernetes/containerized_mounter/rootfs/var/lib/kubelet/pods/164201d0-ebcc-11e9-b8d1-42010a800093/volumes/kubernetes.io~iscsi/pvc-25e8f6f1-eb2d-11e9-b8d1-42010a800093
-```
-
-**Step 18:** Verify the expanded size from application pod.
-
-**Note:** After the volume expansion, size will not be reflected on `kubectl get pv` for the corresponding volume.
-
[Go to top](#top)
diff --git a/docs/main/user-guides/local-engine-user-guide/additional-information/performance.md b/docs/main/user-guides/local-engine-user-guide/additional-information/performance.md
deleted file mode 100644
index 0974f5318..000000000
--- a/docs/main/user-guides/local-engine-user-guide/additional-information/performance.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-id: performance-testing
-title: Performance testing of OpenEBS
-keywords:
- - Performance testing
- - Testing
- - Setup cStorPool
- - Setup StorageClass
-description: A section to guide the user the steps for performance testing of OpenEBS.
----
-
-## Steps for performance testing
-
-**Setup cStorPool and StorageClass**
-
-Choose the appropriate disks (SSDs or SAS or Cloud disks) and [create pool](/deprecated/spc-based-cstor#creating-cStor-storage-pools) and [create StorageClass](/deprecated/spc-based-cstor#creating-cStor-storage-class). There are some performance tunings available and this configuration can be added in the corresponding StorageClass before provisioning the volume. The tunings are available in the [StorageClass](/deprecated/spc-based-cstor#setting-performance-tunings) section.
-
-For performance testing, performance numbers vary based on the following factors.
-
-- The number of OpenEBS replicas (1 vs 3) (latency between cStor target and cStor replica)
-- Whether all the replicas are in one zone or across multiple zones
-- The network latency between the application pod and iSCSI target (cStor target)
-
-The steps for running FIO based Storage benchmarking and viewing the results are explained in detail [here](https://github.com/openebs/performance-benchmark/tree/master/fio-benchmarks).
-
-## See Also:
-
-[Seeking help](/introduction/community)
\ No newline at end of file
diff --git a/docs/main/user-guides/local-engine-user-guide/localpv-hostpath.md b/docs/main/user-guides/local-engine-user-guide/localpv-hostpath.md
new file mode 100644
index 000000000..c42535ed7
--- /dev/null
+++ b/docs/main/user-guides/local-engine-user-guide/localpv-hostpath.md
@@ -0,0 +1,151 @@
+---
+id: localpv-hostpath
+title: Local PV Hostpath User Guide
+keywords:
+ - OpenEBS Local PV Hostpath
+ - Local PV Hostpath
+ - Prerequisites
+ - Install
+ - Create StorageClass
+ - Support
+description: This guide will help you to set up and use OpenEBS Local Persistent Volumes backed by Hostpath.
+---
+
+# Local PV Hostpath User Guide
+
+This guide will help you to set up and use OpenEBS Local Persistent Volumes backed by Hostpath.
+
+*OpenEBS Dynamic Local PV provisioner* can create Kubernetes Local Persistent Volumes using a unique Hostpath (directory) on the node to persist data, hereafter referred to as *OpenEBS Local PV Hostpath* volumes.
+
+*OpenEBS Local PV Hostpath* volumes have the following advantages compared to native Kubernetes hostpath volumes.
+- OpenEBS Local PV Hostpath allows your applications to access hostpath via StorageClass, PVC, and PV. This provides you the flexibility to change the PV providers without having to redesign your Application YAML.
+- Data protection using the Velero Backup and Restore.
+- Protect against hostpath security vulnerabilities by masking the hostpath completely from the application YAML and pod.
+
+OpenEBS Local PV uses the volume topology-aware pod scheduling enhancements introduced by [Kubernetes Local Volumes](https://kubernetes.io/docs/concepts/storage/volumes/#local).
+
+## Prerequisites
+
+Set up the directory on the nodes where Local PV Hostpaths will be created. This directory will be referred to as `BasePath`. The default location is `/var/openebs/local`.
+
+`BasePath` can be any of the following:
+- A directory on the root disk (or `os disk`). (Example: `/var/openebs/local`.)
+- In the case of bare-metal Kubernetes nodes, a directory mounted from an additional drive or SSD. (Example: An SSD available at `/dev/sdb` can be formatted with ext4 and mounted as `/mnt/openebs-local`; see the example commands after this list.)
+- In the case of cloud or virtual instances, a mounted directory created by attaching an external cloud volume or virtual disk. (Example: In GKE, a Local SSD can be used, which will be available at `/mnt/disk/ssd1`.)
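+
+For the bare-metal case above, preparing the directory typically looks like the following sketch. It assumes the additional disk is `/dev/sdb` (as in the example) and that the commands are run as root on the node; adapt the device name and mount point to your environment.
+
+```
+# Format the additional disk with ext4 (this destroys any existing data on /dev/sdb)
+mkfs.ext4 /dev/sdb
+
+# Create the mount point and mount the disk
+mkdir -p /mnt/openebs-local
+mount /dev/sdb /mnt/openebs-local
+
+# Optionally, add an fstab entry so the mount persists across reboots
+echo '/dev/sdb /mnt/openebs-local ext4 defaults 0 0' >> /etc/fstab
+```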
+
+:::note air-gapped environment
+If you are running your Kubernetes cluster in an air-gapped environment, make sure the following container images are available in your local repository.
+- openebs/localpv-provisioner
+- openebs/linux-utils
+:::
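+
+One way to stage these images, sketched here assuming a private registry reachable at `registry.example.local` (a placeholder for your own registry), is to pull, retag, and push them from a machine with internet access:
+
+```
+# Pull the required images (replace <version> with the OpenEBS release you are installing)
+docker pull openebs/localpv-provisioner:<version>
+docker pull openebs/linux-utils:<version>
+
+# Retag and push them to the registry used by the air-gapped cluster
+docker tag openebs/localpv-provisioner:<version> registry.example.local/openebs/localpv-provisioner:<version>
+docker tag openebs/linux-utils:<version> registry.example.local/openebs/linux-utils:<version>
+docker push registry.example.local/openebs/localpv-provisioner:<version>
+docker push registry.example.local/openebs/linux-utils:<version>
+```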
+
+:::note Rancher RKE cluster
+If you are using a Rancher RKE cluster, you must configure the kubelet service with `extra_binds` for the `BasePath`. If your `BasePath` is the default directory `/var/openebs/local`, the `extra_binds` section should have the following details:
+```
+services:
+ kubelet:
+ extra_binds:
+ - /var/openebs/local:/var/openebs/local
+```
+:::
+
+## Install
+
+For installation instructions, see [here](../../quickstart-guide/installation.md).
+
+## Configuration
+
+This section will help you to configure Local PV Hostpath.
+
+### Create StorageClass
+
+You can skip this section if you would like to use the default OpenEBS Local PV Hostpath StorageClass created by OpenEBS.
+
+The default StorageClass is called `openebs-hostpath` and its `BasePath` is configured as `/var/openebs/local`.
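+
+To confirm that the default StorageClass is available (assuming OpenEBS is already installed), you can list it with:
+
+```
+kubectl get sc openebs-hostpath
+```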
+
+1. To create your own StorageClass with a custom `BasePath`, save the following StorageClass definition as `local-hostpath-sc.yaml`:
+
+ ```
+ apiVersion: storage.k8s.io/v1
+ kind: StorageClass
+ metadata:
+ name: local-hostpath
+ annotations:
+ openebs.io/cas-type: local
+ cas.openebs.io/config: |
+ - name: StorageType
+ value: hostpath
+ - name: BasePath
+ value: /var/local-hostpath
+ provisioner: openebs.io/local
+ reclaimPolicy: Delete
+ volumeBindingMode: WaitForFirstConsumer
+ ```
+ #### (Optional) Custom Node Labelling
+
+ In Kubernetes, Hostpath Local PV identifies nodes using labels such as `kubernetes.io/hostname=`. However, these default labels might not ensure that each node is distinct across the entire cluster. To solve this, you can create custom labels. As an admin, you can define and set these labels when configuring a **StorageClass**. Here is a sample StorageClass:
+
+ ```
+ apiVersion: storage.k8s.io/v1
+ kind: StorageClass
+ metadata:
+ name: local-hostpath
+ annotations:
+ openebs.io/cas-type: local
+ cas.openebs.io/config: |
+ - name: StorageType
+ value: "hostpath"
+ - name: NodeAffinityLabels
+ list:
+ - "openebs.io/custom-node-unique-id"
+ provisioner: openebs.io/local
+ volumeBindingMode: WaitForFirstConsumer
+
+ ```
+ :::note
+ Using NodeAffinityLabels does not influence scheduling of the application Pod. Use the Kubernetes [allowedTopologies](https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/tutorials/hostpath/allowedtopologies.md) feature to configure scheduling options.
+ :::
+
+2. Edit `local-hostpath-sc.yaml` and update with your desired values for `metadata.name` and `cas.openebs.io/config.BasePath`.
+
+ :::note
+ If the `BasePath` does not exist on the node, *OpenEBS Dynamic Local PV Provisioner* will attempt to create the directory, when the first Local Volume is scheduled on to that node. You MUST ensure that the value provided for `BasePath` is a valid absolute path.
+ :::
+
+3. Create OpenEBS Local PV Hostpath Storage Class.
+ ```
+ kubectl apply -f local-hostpath-sc.yaml
+ ```
+
+4. Verify that the StorageClass is successfully created.
+ ```
+ kubectl get sc local-hostpath -o yaml
+ ```
+
+## Deploy an Application
+
+For deployment instructions, see [here](../../quickstart-guide/deploy-a-test-application.md).
+
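+As a minimal sketch (the linked guide covers deployment in more detail), a PVC and Pod similar to the following can consume the custom `local-hostpath` StorageClass. The resource names are examples only and match the names used in the Cleanup section below:
+
+```
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: local-hostpath-pvc
+spec:
+  storageClassName: local-hostpath
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 5G
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: hello-local-hostpath-pod
+spec:
+  volumes:
+  - name: local-storage
+    persistentVolumeClaim:
+      claimName: local-hostpath-pvc
+  containers:
+  - name: hello-container
+    image: busybox
+    # Periodically append a line so the volume has data to inspect
+    command: ["sh", "-c", "while true; do date >> /mnt/store/greet.txt; sleep 300; done"]
+    volumeMounts:
+    - mountPath: /mnt/store
+      name: local-storage
+```
+
+Because the StorageClass uses `volumeBindingMode: WaitForFirstConsumer`, the PVC stays in `Pending` state until the Pod that uses it is scheduled.
+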
+## Cleanup
+
+Delete the Pod, PersistentVolumeClaim, and StorageClass that you might have created.
+
+```
+kubectl delete pod hello-local-hostpath-pod
+kubectl delete pvc local-hostpath-pvc
+kubectl delete sc local-hostpath
+```
+
+Verify that the PV that was dynamically created is also deleted.
+```
+kubectl get pv
+```
+
+## Support
+
+If you encounter issues or have a question, file a [GitHub issue](https://github.com/openebs/openebs/issues/new), or talk to us on the [#openebs channel on the Kubernetes Slack server](https://kubernetes.slack.com/messages/openebs/).
+
+## See Also
+
+- [Installation](../../quickstart-guide/installation.md)
+- [Deploy an Application](../../quickstart-guide/deploy-a-test-application.md)
diff --git a/docs/main/user-guides/local-engine-user-guide/lvm-localpv.md b/docs/main/user-guides/local-engine-user-guide/lvm-localpv.md
new file mode 100644
index 000000000..5353b1bdb
--- /dev/null
+++ b/docs/main/user-guides/local-engine-user-guide/lvm-localpv.md
@@ -0,0 +1,167 @@
+---
+id: lvm-localpv
+title: LVM Local PV User Guide
+keywords:
+ - OpenEBS LVM Local PV
+ - LVM Local PV
+ - Prerequisites
+ - Install
+ - Create StorageClass
+ - Install verification
+ - Create a PersistentVolumeClaim
+description: This guide will help you to set up and use OpenEBS Local Persistent Volumes backed by LVM Local PV.
+---
+
+# LVM Local PV User Guide
+
+This guide will help you to set up and use OpenEBS Local Persistent Volumes backed by LVM Local PV.
+
+## Prerequisites
+
+Before installing the LVM driver, make sure your Kubernetes cluster meets the following prerequisites:
+
+1. All the nodes must have the lvm2 utils installed and the dm-snapshot kernel module loaded.
+2. You have access to install RBAC components into the kube-system namespace. The OpenEBS LVM driver components are installed in the kube-system namespace to allow them to be flagged as system-critical components.
+
+## Setup Volume Group
+
+Find the disk that you want to use for LVM. For testing, you can use a loopback device:
+
+```
+truncate -s 1024G /tmp/disk.img
+sudo losetup -f /tmp/disk.img --show
+```
+
+Create the volume group on all the nodes; it will be used by the LVM driver for provisioning the volumes:
+
+```
+sudo pvcreate /dev/loop0
+sudo vgcreate lvmvg /dev/loop0 ## here lvmvg is the volume group name to be created
+```
+
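+You can verify that the volume group was created on each node, for example with:
+
+```
+sudo vgs lvmvg
+```
+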
+## Installation
+
+For installation instructions, see [here](../../quickstart-guide/installation.md).
+
+## Configuration
+
+This section will help you to configure LVM Local PV.
+
+### Create StorageClass
+
+```
+$ cat sc.yaml
+
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: openebs-lvmpv
+parameters:
+ storage: "lvm"
+ volgroup: "lvmvg"
+provisioner: local.csi.openebs.io
+```
+
+See the [storageclasses](https://github.com/openebs/lvm-localpv/blob/develop/docs/storageclasses.md) documentation for all the supported parameters for LVM-LocalPV.
+
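+Assuming the manifest above is saved as `sc.yaml` (as the `cat` command suggests), create the StorageClass and confirm that it exists:
+
+```
+$ kubectl apply -f sc.yaml
+$ kubectl get sc openebs-lvmpv
+```
+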
+#### VolumeGroup Availability
+
+If the LVM volume group is available only on certain nodes, use topology to specify the list of nodes where the volume group is available. As shown in the StorageClass below, you can use `allowedTopologies` to describe volume group availability on nodes.
+
+```
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: openebs-lvmpv
+allowVolumeExpansion: true
+parameters:
+ storage: "lvm"
+ volgroup: "lvmvg"
+provisioner: local.csi.openebs.io
+allowedTopologies:
+- matchLabelExpressions:
+ - key: kubernetes.io/hostname
+ values:
+ - lvmpv-node1
+ - lvmpv-node2
+```
+
+The above StorageClass specifies that the volume group "lvmvg" is available only on the nodes lvmpv-node1 and lvmpv-node2, so the LVM driver will create volumes only on those nodes.
+
+:::note
+The provisioner name for the LVM driver is "local.csi.openebs.io". It must be used while creating the StorageClass so that volume provisioning and deprovisioning requests are routed to the LVM driver.
+:::
+
+### Create PersistentVolumeClaim
+
+```
+$ cat pvc.yaml
+
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: csi-lvmpv
+spec:
+ storageClassName: openebs-lvmpv
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 4Gi
+```
+
+Create a PVC using the StorageClass created for the LVM driver.
+
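+Assuming the manifest above is saved as `pvc.yaml` (as the `cat` command suggests), apply it and check that the PVC is created:
+
+```
+$ kubectl apply -f pvc.yaml
+$ kubectl get pvc csi-lvmpv
+```
+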
+## Deploy the Application
+
+Create the application deployment YAML using the PVC backed by LVM storage.
+
+```
+$ cat fio.yaml
+
+apiVersion: v1
+kind: Pod
+metadata:
+ name: fio
+spec:
+ restartPolicy: Never
+ containers:
+ - name: perfrunner
+ image: openebs/tests-fio
+ command: ["/bin/bash"]
+ args: ["-c", "while true ;do sleep 50; done"]
+ volumeMounts:
+ - mountPath: /datadir
+ name: fio-vol
+ tty: true
+ volumes:
+ - name: fio-vol
+ persistentVolumeClaim:
+ claimName: csi-lvmpv
+```
+
+After the application is deployed, you can go to the node and see that the LVM volume is being used by the application for reading and writing data, and that space is consumed from the volume group. Note that to check the provisioned volumes on the node, you need to run the `pvscan --cache` command to update the LVM cache; you can then use `lvdisplay` and all other LVM commands on the node.
+
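+For example, assuming the Pod manifest above is saved as `fio.yaml`, you can deploy the application and then inspect the backing LVM volume on the node:
+
+```
+$ kubectl apply -f fio.yaml
+$ sudo pvscan --cache
+$ sudo lvdisplay
+```
+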
+## Deprovisioning
+
+To deprovision the volume, delete the application that is using the volume and then delete the PV. As part of the PV deletion, the volume is also removed from the volume group and the space is freed.
+
+```
+$ kubectl delete -f fio.yaml
+pod "fio" deleted
+$ kubectl delete -f pvc.yaml
+persistentvolumeclaim "csi-lvmpv" deleted
+```
+
+## Limitation
+
+Resizing a volume that has snapshots is not supported.
+
+## Support
+
+If you encounter issues or have a question, file a [GitHub issue](https://github.com/openebs/openebs/issues/new), or talk to us on the [#openebs channel on the Kubernetes Slack server](https://kubernetes.slack.com/messages/openebs/).
+
+## See Also
+
+- [Installation](../../quickstart-guide/installation.md)
+- [Deploy an Application](../../quickstart-guide/deploy-a-test-application.md)
diff --git a/docs/main/user-guides/local-engine-user-guide/prerequisites.mdx b/docs/main/user-guides/local-engine-user-guide/prerequisites.mdx
deleted file mode 100644
index a6f70ea23..000000000
--- a/docs/main/user-guides/local-engine-user-guide/prerequisites.mdx
+++ /dev/null
@@ -1,507 +0,0 @@
----
-id: prerequisites
-title: Local Engine Prerequisites
-keywords:
- - Local Engine Prerequisites
- - prerequisites
- - Linux platforms
- - Managed Kubernetes Services on Public Cloud
- - Kubernetes On-Prem Solutions
-description: This guide will help you to verify that your Kubernetes worker nodes have the required prerequisites to install OpenEBS and use OpenEBS Volumes to run your Kubernetes Stateful Workloads. In addition, you will learn about how to customize the installer according to your managed Kubernetes provider.
----
-
-import ImgCard from "@site/src/components/ImgCard";
-
-This guide will help you to verify that your Kubernetes worker nodes have the required prerequisites to install OpenEBS and use OpenEBS Volumes to run your Kubernetes Stateful Workloads. In addition, you will learn about how to customize the installer according to your managed Kubernetes provider.
-
-OpenEBS provides block volume support through the iSCSI protocol. Therefore,
-the iSCSI client (initiator) presence on all Kubernetes nodes is required.
-Choose the platform below to find the steps to verify if the iSCSI client
-is installed and running or to find the steps to install the iSCSI client.
-
-#### Choose the platform for iSCSI client settings
-
-
-
-
-
-[Provide feedback](https://github.com/openebs/openebs-docs/edit/staging/docs/quickstart.md) if a platform is missing in the above list.
-
-## Linux platforms
-
-Installation of the iSCSI initiator service and tools depends on your host O/S or the kubelet container. You can follow the below steps for installation/verification of the required packages. It is a mandatory step to verify the iSCSI services and make sure that it is running on all the worker nodes. OpenEBS uses iSCSI protocol to connect to the block volumes.
-
-### Ubuntu
-
-**Verify iSCSI services are configured**
-
-If iSCSI initiator is already installed on your node, check that the initiator name is configured and iSCSI service is running using the following commands.
-
-```
-sudo cat /etc/iscsi/initiatorname.iscsi
-systemctl status iscsid
-```
-
-If the service status is shown as `Inactive` , then you may have to enable and start iscsid service using the following command.
-
-```
-sudo systemctl enable --now iscsid
-```
-
-The following is the expected output.
-
-```
-systemctl status iscsid
-● iscsid.service - iSCSI initiator daemon (iscsid)
- Loaded: loaded (/lib/systemd/system/iscsid.service; disabled; vendor preset: enabled)
- Active: active (running) since Mon 2019-02-18 11:00:07 UTC; 1min 51s ago
- Docs: man:iscsid(8)
- Process: 11185 ExecStart=/sbin/iscsid (code=exited, status=0/SUCCESS)
- Process: 11170 ExecStartPre=/lib/open-iscsi/startup-checks.sh (code=exited, status=0/SUCCESS)
- Main PID: 11187 (iscsid)
- Tasks: 2 (limit: 4915)
- CGroup: /system.slice/iscsid.service
- ├─11186 /sbin/iscsid
- └─11187 /sbin/iscsid
-```
-
-**Install iSCSI tools**
-
-If iSCSI initiator is not installed on your node, install
-`open-iscsi` packages using the following commands.
-
-```
-sudo apt-get update
-sudo apt-get install open-iscsi
-sudo systemctl enable --now iscsid
-```
-
-You can verify the iSCSI installation from above section.
-
-
-### Red Hat Enterprise Linux
-
-**Verify iSCSI services are configured**
-
-In Red Hat Enterprise Linux 7, the iSCSI service is lazily started by default: the service starts after running the `iscsiadm` command. If an iSCSI initiator is already installed on the node, check that the initiator name is configured using the following command.
-
-```
- cat /etc/iscsi/initiatorname.iscsi
-```
-
-Check iSCSI service is running using the following command.
-
-```
- systemctl status iscsid
-```
-
-If the status is shown as `Inactive` , then you may have to enable and start the iscsid service using the following command.
-
-```
-sudo systemctl enable --now iscsid
-```
-
-The following is the expected output.
-
-```
-systemctl status iscsid
-● iscsid.service - Open-iSCSI
- Loaded: loaded (/usr/lib/systemd/system/iscsid.service; enabled; vendor preset: disabled)
- Active: active (running) since Tue 2019-02-19 12:19:08 IST; 2h 37min ago
- Docs: man:iscsid(8)
- man:iscsiadm(8)
- Main PID: 2138 (iscsid)
- Tasks: 2
- CGroup: /system.slice/iscsid.service
- ├─2137 /usr/sbin/iscsid
- └─2138 /usr/sbin/iscsid
-
-Feb 19 12:19:08 master-1550555590.mayalab.com systemd[1]: Starting Open-iSCSI...
-Feb 19 12:19:08 master-1550555590.mayalab.com iscsid[2136]: iSCSI logger with pid=2137 started!
-Feb 19 12:19:08 master-1550555590.mayalab.com systemd[1]: Failed to read PID from file /var/run/iscsid.pid: Invalid argument
-Feb 19 12:19:08 master-1550555590.mayalab.com systemd[1]: Started Open-iSCSI.
-Feb 19 12:19:09 master-1550555590.mayalab.com iscsid[2137]: iSCSI daemon with pid=2138 started!
-```
-
-**Install iSCSI tools**
-
-If iSCSI initiator is not installed on your node, install `iscsi-initiator-utils` packages using the following command.
-
-```
-yum install iscsi-initiator-utils -y
-```
-
-You can verify the iSCSI installation from above section.
-
-### CentOS
-
-**Verify iSCSI services are configured**
-
-If iSCSI initiator is already installed on your node, check that the initiator name is configured using the following commands.
-
-```
- cat /etc/iscsi/initiatorname.iscsi
-```
-
-Check iSCSI service is running using the following command.
-
-```
- systemctl status iscsid
-```
-
-If the status is showing as `Inactive` , then you may have to enable and start the iscsid service using the following command.
-
-```
-sudo systemctl enable --now iscsid
-```
-
-**Install iSCSI tools**
-
-If an iSCSI initiator is not installed on your node, install `iscsi-initiator-utils` packages using the following command.
-
-```
-yum install iscsi-initiator-utils -y
-```
-
-You can verify the iSCSI installation from the above section.
-
------
-
-## Managed Kubernetes Services on Public Cloud
-
-Amazon Elastic Container Service for Kubernetes (EKS)
-
-Amazon EKS clusters can be brought up with either an AmazonLinux AMI or an Ubuntu 18.04 AMI.
-
-For clusters running with the AmazonLinux AMI
-
-**Verify iSCSI services are configured**
-
-If iSCSI initiator is already installed on your node, check that the initiator name is configured using the following commands.
-
-```
- cat /etc/iscsi/initiatorname.iscsi
-```
-
-Check the iSCSI service is running using the following command.
-
-```
- systemctl status iscsid
-```
-
-If the status is shown as `Inactive` , then you may have to enable and start the iscsid service using the following command.
-
-```
-sudo systemctl enable --now iscsid
-```
-
-**Install iSCSI tools**
-
-If iSCSI initiator is not installed on your node, install `iscsi-initiator-utils` packages using the following command.
-
-```
-yum install iscsi-initiator-utils -y
-```
-
-You can verify the iSCSI installation from the above section.
-
-#### For clusters running with the Ubuntu 18.04 AMI
-
-For setting up iSCSI clients on Ubuntu nodes, see the instructions [here.](#ubuntu)
-
-### Google Kubernetes Engine (GKE)
-
-GKE Container-Optimized OS does not come with an iSCSI client preinstalled and does not allow installation of iSCSI client. Therefore, OpenEBS does not work on Kubernetes clusters which are running GKE Container-Optimized OS on the worker nodes.
-
-Select Ubuntu as the image version for the node pools in the custom settings. For setting up iSCSI clients on Ubuntu nodes, see the instructions [here.](#ubuntu)
-
-### Azure Kubernetes Service (AKS)
-
-On Azure Kubernetes Service you need to verify that the `open-iscsi`
-
-packages are installed and running the service on the kubelet. This can be checked by connecting to the nodes through SSH using the public IP addresses by running the following command.
-
-```
-ssh azureuser@40.xx.yyy.zzz
-```
-
-**Note**: `azureuser` is a default username.
-
-**Verify iSCSI services are configured**
-
-Obtain the container ID of the hyperkube kubelet on each node using the following command.
-
-```
-sudo docker ps | grep "hyperkube kubelet"
-```
-
-Following is the example output:
-
-```
-3aab0f9a48e2 k8s-gcrio.azureedge.net/hyperkube-amd64:v1.8.7 "/hyperkube kubelet..." 48 minutes ago Up 48 minutes eager_einstein
-```
-
-Once kubelet container ID is obtained, you need to get to the shell of this container using the following command.
-
-```
-sudo docker exec -it bash
-```
-
-Example:
-
-```
-sudo docker exec -it 3aab0f9a48e2 bash
-```
-
-Check the status of the iSCSI service by using the following command.
-
-```
-service open-iscsi status
-```
-
-**Install iSCSI tools**
-
-You have to get the kubelet container ID using the steps mentioned in the above section. Once kubelet container ID is obtained, you need to get into the shell of this container using the following command.
-
-```
-sudo docker exec -it bash
-```
-
-Example:
-
-```
-sudo docker exec -it 3aab0f9a48e2 bash
-```
-
-Run the following commands to install and configure iSCSI service in the kubelet.
-
-```
-apt-get update
-apt install -y open-iscsi
-exit
-```
-
-You can verify the iSCSI installation from the above section.
-
-### DigitalOcean
-
-**Add extra_binds in Kubelet Service**
-
- Add the following lines (volume mounts) to the file `/etc/systemd/system/kubelet.service` on each of the nodes
-
-```
- -v /sbin/iscsiadm:/usr/bin/iscsiadm \
- -v /lib/x86_64-linux-gnu/libisns-nocrypto.so.0:/lib/x86_64-linux-gnu/libisns-nocrypto.so.0 \
- ```
-
-So, the updated Kubelet Service File is as below:
-
-```
-[Unit]
-Description=Kubernetes Kubelet Server
-Documentation=https://kubernetes.io/docs/concepts/overview/components/#kubelet
-After=docker.service sys-fs-bpf.mount
-Requires=docker.service sys-fs-bpf.mount
-[Service]
-OOMScoreAdjust=-999
-ExecStartPre=/bin/mkdir -p /var/lib/kubelet
-ExecStartPre=/bin/mount — bind /var/lib/kubelet /var/lib/kubelet
-ExecStartPre=/bin/mount — make-shared /var/lib/kubelet
-ExecStart=/usr/bin/docker run — rm — net=host — pid=host — privileged — name kubelet \
--v /dev:/dev \
--v /sys:/sys \
--v /var:/var \
--v /var/lib/kubelet:/var/lib/kubelet:shared \
--v /etc:/etc \
--v /run:/run \
--v /opt:/opt \
--v /sbin/iscsiadm:/usr/bin/iscsiadm \
--v /lib/x86_64-linux-gnu/libisns-nocrypto.so.0:/lib/x86_64-linux-gnu/libisns-nocrypto.so.0 \
-gcr.io/google-containers/hyperkube:v1.15.3 \
-/hyperkube kubelet \
-— config=/etc/kubernetes/kubelet.conf \
-— feature-gates=”RuntimeClass=false” \
-— logtostderr=true \
-— image-pull-progress-deadline=2m \
-— kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
-— bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
-— rotate-certificates \
-— register-node=true \
-— node-labels=”doks.digitalocean.com/node-id=32559d91-cc04–4aac-bdc4–0566fa066802,doks.digitalocean.com/node-pool-id=d5714f37–627d-435a-b1c7-f0373ecd7593,doks.digitalocean.com/node-pool=pool-nuyzam6e8,doks.digitalocean.com/version=1.15.3-do.2" \
-— root-dir=/var/lib/kubelet \
-— v=2 \
-— cloud-provider=external \
-— network-plugin=cni \
-— provider-id=”digitalocean://160254521"
-Restart=on-failure
-RestartSec=5
-KillMode=process
-[Install]
-WantedBy=multi-user.target
-```
-
- Next, you need to restart the Kubelet Service on each node using the following commands
-
-```
-systemctl daemon-reload
-service kubelet restart
-```
-
-## Kubernetes On-Prem Solutions
-
-### Red Hat OpenShift
-
-Detailed [installation instructions for OpenEBS on OpenShift](/additional-info/kb#openshift-install) are in the Knowledge
-Base.
-
-### Rancher
-
-You will need to enable iSCSI services on all of the hosts of your Rancher-based Kubernetes cluster.
-
-#### iSCSI services on RKE/Rancher 2.x
-
-**Step 1:** Verify iSCSI initiator is installed and services are running on all of the hosts (control plane/etcd/worker).
-
-| Operating system | iSCSI Package | Commands |
-| --------------------- | --------------------- | ------------------------------------------------------------ |
-| RHEL / CentOS | iscsi-initiator-utils | sudo yum install iscsi-initiator-utils -y sudo systemctl enable --now iscsid |
-| Ubuntu / Debian | open-iscsi | sudo apt install -y open-iscsi sudo systemctl enable --now iscsid |
-
-**Step 2:** Add `extra_binds` under `services.kubelet` in cluster YAML
-After installing the initiator tool on your nodes, edit the YAML for your cluster. Edit the kubelet configuration section to mount the iSCSI binary and configuration, as shown in the sample below.
-
-For an RKE cluster, you can add the `extra_binds` to your cluster.yml file. For a Rancher 2.x cluster, you can edit your cluster's configuration options and add the `extra_binds` there.
-
-```yaml
-services:
- kubelet:
- extra_binds:
- - "/etc/iscsi:/etc/iscsi"
- - "/sbin/iscsiadm:/sbin/iscsiadm"
- - "/var/lib/iscsi:/var/lib/iscsi"
- - "/lib/modules"
- - "/var/openebs/local:/var/openebs/local"
- - "/usr/lib64/libcrypto.so.10:/usr/lib/libcrypto.so.10"
- - "/usr/lib64/libopeniscsiusr.so.0.2.0:/usr/lib/libopeniscsiusr.so.0.2.0"
-```
-
-In the above snippet, default hostpath for Local PV (/var/openebs/local), which will be created on the worker node using `openebs-hostpath` StorageClass, is added under `extra_binds` . This configuration will help to create default hostpath directory on worker node for provisioning `openebs-hostpath` volume.
-
-#### iSCSI services on RancherOS
-
-To run iSCSI services, execute the following commands on each of the cluster hosts or nodes.
-
-```sh
-sudo ros s enable open-iscsi
-sudo ros s up open-iscsi
-```
-
-Run below commands on all of the nodes to make sure the below directories are persistent. By default these directories are ephemeral.
-
-```sh
-ros config set rancher.services.user-volumes.volumes [/home:/home,/opt:/opt,/var/lib/kubelet:/var/lib/kubelet,/etc/kubernetes:/etc/kubernetes,/var/openebs]
-system-docker rm all-volumes
-reboot
-```
-
-Edit the kubelet configuration section of your RKE/Rancher 2.x cluster to mount the OpenEBS Local PV hostpath default directory on to the kubelet container.
-
-```yaml
-services:
- kubelet:
- extra_binds:
- - "/var/openebs/local:/var/openebs/local"
-```
-
-In the above snippet, default hostpath for Local PV (/var/openebs/local), which will be created on the worker node using `openebs-hostpath` StorageClass, is added under `extra_binds` . This configuration will help to create default hostpath directory on worker node for provisioning `openebs-hostpath` volume.
-
-:::note
-The CSI driver based implementation of cStor storage engine is **not compatible** with RancherOS.
-:::
-
-### Konvoy
-
-Konvoy is a managed Kubernetes platform for operation and lifecycle management from D2iQ. CentOS 7.6 is used as the underlying node OS by default. Only prerequisite for setting up OpenEBS in Konvoy is to have iSCSI client on the CentOS nodes. For setting up iSCSI client on CentOS nodes, see the instructions [here](#centos). More details about setting up of OpenEBS in Konvoy can be found [here](https://docs.d2iq.com/ksphere/konvoy/partner-solutions/openebs/).
-
-### IBM Cloud Private (ICP)
-
-OpenEBS can be installed using ICP on the following Operating Systems. Latest tested ICP versions are 2.1.0.3 and 3.1.1.
-
-1. On RHEL 7
-
-2. On Ubuntu
-
-#### On RHEL
-Latest tested RHEL versions are 7.5, 7.6.
-
-For setting up iSCSI clients on RHEL nodes, see the
-instructions [here](#rhel).
-
-#### On Ubuntu
-Latest tested Ubuntu version are Ubuntu 16.04 LTS and 18.04 LTS.
-
-For setting up iSCSI clients on Ubuntu nodes, see the
-instructions [here](#ubuntu).
-
-## See Also:
-
-[OpenEBS Installation](/user-guides/installation) [OpenEBS Architecture](/concepts/architecture)
diff --git a/docs/main/user-guides/local-engine-user-guide/zfs-localpv.md b/docs/main/user-guides/local-engine-user-guide/zfs-localpv.md
new file mode 100644
index 000000000..00ced2df0
--- /dev/null
+++ b/docs/main/user-guides/local-engine-user-guide/zfs-localpv.md
@@ -0,0 +1,364 @@
+---
+id: zfs-localpv
+title: ZFS Local PV User Guide
+keywords:
+ - OpenEBS ZFS Local PV
+ - ZFS Local PV
+ - Prerequisites
+ - Install
+ - Create StorageClass
+ - Install verification
+ - Create a PersistentVolumeClaim
+description: This guide will help you to set up and use OpenEBS Local Persistent Volumes backed by ZFS Local PV.
+---
+
+# ZFS Local PV User Guide
+
+This guide will help you to set up and use OpenEBS Local Persistent Volumes backed by ZFS Local PV.
+
+## Prerequisites
+
+Before installing the ZFS driver, make sure your Kubernetes cluster meets the following prerequisites:
+
+1. All the nodes must have the ZFS utils installed.
+2. A ZFS pool (zpool) has been set up for provisioning the volumes.
+3. You have access to install RBAC components into the kube-system namespace. The OpenEBS ZFS driver components are installed in the kube-system namespace to allow them to be flagged as system-critical components.
+
+## Setup
+
+All the nodes should have `zfsutils-linux` installed. Go to each node of the cluster and install the ZFS utils:
+
+```
+$ apt-get install zfsutils-linux
+```
+
+Go to each node and create the ZFS pool, which will be used for provisioning the volumes. You can create the pool of your choice; it can be a striped, mirrored, or raidz pool.
+
+If you have a disk (say `/dev/sdb`), you can use the command below to create a striped pool:
+
+```
+$ zpool create zfspv-pool /dev/sdb
+```
+
+You can also create a mirror or raidz pool as per your needs. See https://github.com/openzfs/zfs for more information.
+
+If you do not have a disk, you can create the zpool on a loopback device backed by a sparse file. Use this for testing purposes only.
+
+```
+$ truncate -s 100G /tmp/disk.img
+$ zpool create zfspv-pool `losetup -f /tmp/disk.img --show`
+```
+
+Once the ZFS pool is created, verify it via the `zpool status` command. You should see something like this:
+
+```
+$ zpool status
+ pool: zfspv-pool
+ state: ONLINE
+ scan: none requested
+config:
+
+ NAME STATE READ WRITE CKSUM
+ zfspv-pool ONLINE 0 0 0
+ sdb ONLINE 0 0 0
+
+errors: No known data errors
+```
+
+Configure custom topology keys if needed. This can be used for many purposes, for example to create the PV on nodes in a particular zone or building. You can label the nodes accordingly and use that key in the StorageClass for making the scheduling decision (see the example below). For details, refer to https://github.com/openebs/zfs-localpv/blob/HEAD/docs/faq.md#6-how-to-add-custom-topology-key.
+
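+As a sketch, you might label the nodes with a custom key of your own (the key name below is only an illustration) and then reference that key as described in the FAQ linked above:
+
+```
+$ kubectl label node zfspv-node1 openebs.io/rack=rack1
+$ kubectl label node zfspv-node2 openebs.io/rack=rack2
+```
+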
+## Installation
+
+For installation instructions, see [here](../../quickstart-guide/installation.md).
+
+## Configuration
+
+This section will help you to configure ZFS Local PV.
+
+### Create StorageClass
+
+```
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: openebs-zfspv
+parameters:
+ recordsize: "128k"
+ compression: "off"
+ dedup: "off"
+ fstype: "zfs"
+ poolname: "zfspv-pool"
+provisioner: zfs.csi.openebs.io
+```
+
+The StorageClass contains the volume parameters such as recordsize (which should be a power of 2), compression, dedup, and fstype. You can select whichever parameters you want. If the ZFS property parameters are not provided, the volume will inherit the properties from the ZFS pool.
+
+The `poolname` parameter is mandatory. Note that `poolname` can be either the root dataset or a child dataset, for example:
+
+```
+poolname: "zfspv-pool"
+poolname: "zfspv-pool/child"
+```
+
+The dataset provided under `poolname` must also exist on all the nodes with the name given in the StorageClass. Check the [storageclasses doc](https://github.com/openebs/zfs-localpv/blob/HEAD/docs/storageclasses.md) to know all the supported parameters for ZFS-LocalPV.
+
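+If you plan to use a child dataset as the `poolname`, it can be created on each node beforehand, for example:
+
+```
+$ zfs create zfspv-pool/child
+```
+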
+**ext2/3/4 or xfs or btrfs as FsType**
+
+If we provide the fstype as one of ext2/3/4, xfs, or btrfs, the driver will create a ZVOL, which is a block device carved out of the ZFS pool. This block device will be formatted with the corresponding filesystem before it is used by the driver.
+
+:::note
+There will be a filesystem layer on top of the ZFS volume, so applications may not get optimal performance.
+:::
+
+A sample storage class for ext4 fstype is provided below:
+
+```
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: openebs-zfspv
+parameters:
+ volblocksize: "4k"
+ compression: "off"
+ dedup: "off"
+ fstype: "ext4"
+ poolname: "zfspv-pool"
+provisioner: zfs.csi.openebs.io
+```
+
+:::note
+We are providing `volblocksize` instead of `recordsize` since we will create a ZVOL, for which we can select the block size with which we want to create the block device. Also, note that for ZFS, volblocksize should be a power of 2.
+:::
+
+**ZFS as FsType**
+
+If we provide "zfs" as the fstype, the ZFS driver will create a ZFS dataset in the ZFS pool, which is the ZFS filesystem. In this case, there will not be any extra layer between the application and the storage, and applications can get optimal performance.
+
+The sample storage class for ZFS fstype is provided below:
+
+```
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: openebs-zfspv
+parameters:
+ recordsize: "128k"
+ compression: "off"
+ dedup: "off"
+ fstype: "zfs"
+ poolname: "zfspv-pool"
+provisioner: zfs.csi.openebs.io
+```
+
+:::note
+We are providing `recordsize`, which will be used to create the ZFS datasets and specifies the maximum block size for files in the ZFS file system. The recordsize has to be a power of 2 for ZFS datasets.
+:::
+
+**ZPOOL Availability**
+
+If the ZFS pool is available only on certain nodes, use topology to specify the list of nodes where the ZFS pool is available. As shown in the StorageClass below, we can use `allowedTopologies` to describe ZFS pool availability on nodes.
+
+```
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: openebs-zfspv
+allowVolumeExpansion: true
+parameters:
+ recordsize: "128k"
+ compression: "off"
+ dedup: "off"
+ fstype: "zfs"
+ poolname: "zfspv-pool"
+provisioner: zfs.csi.openebs.io
+allowedTopologies:
+- matchLabelExpressions:
+ - key: kubernetes.io/hostname
+ values:
+ - zfspv-node1
+ - zfspv-node2
+```
+
+The above StorageClass specifies that the ZFS pool "zfspv-pool" is available only on the nodes zfspv-node1 and zfspv-node2, so the ZFS driver will create volumes only on those nodes.
+
+:::note
+The provisioner name for the ZFS driver is "zfs.csi.openebs.io". It must be used while creating the StorageClass so that volume provisioning and deprovisioning requests are routed to the ZFS driver.
+:::
+
+**Scheduler**
+
+The ZFS driver has its own scheduler, which tries to distribute PVs across the nodes so that no single node is loaded with all the volumes. Currently, the driver supports two scheduling algorithms: VolumeWeighted and CapacityWeighted. From all the nodes where ZFS pools are available, they pick the pool with, respectively, the lowest number of provisioned volumes or the lowest provisioned capacity. To learn how to select the scheduler via the StorageClass, see [this](https://github.com/openebs/zfs-localpv/blob/HEAD/docs/storageclasses.md#storageclass-with-k8s-scheduler). Once the driver finds a node, it creates a PV for that node and also creates a ZFSVolume custom resource for the volume with the node information. The watcher for this ZFSVolume CR gets all the information from the object and creates a ZFS dataset (or zvol) with the given ZFS properties on the selected node.
+
+The scheduling algorithm currently only accounts for either the number of ZFS volumes or the total capacity occupied from a zpool, and does not account for other factors like available CPU or memory while making scheduling decisions.
+
+So if you want to use node selector/affinity rules on the application pod, or have CPU/memory constraints, the Kubernetes scheduler should be used. To make use of the Kubernetes scheduler, you can set `volumeBindingMode` to `WaitForFirstConsumer` in the StorageClass.
+
+This causes delayed binding, i.e., the Kubernetes scheduler will schedule the application pod first, and then it will ask the ZFS driver to create the PV.
+
+The driver will then create the PV on the node where the pod is scheduled:
+
+```
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: openebs-zfspv
+allowVolumeExpansion: true
+parameters:
+ recordsize: "128k"
+ compression: "off"
+ dedup: "off"
+ fstype: "zfs"
+ poolname: "zfspv-pool"
+provisioner: zfs.csi.openebs.io
+volumeBindingMode: WaitForFirstConsumer
+```
+
+:::note
+Once a PV is created for a node, an application using that PV will always be scheduled on that particular node only, as the PV is sticky to that node.
+:::
+
+The scheduling done by the ZFS driver or by Kubernetes comes into the picture only at deployment time. Once the PV is created, the application cannot move anywhere, as the data resides on the node where the PV is.
+
+### Create PersistentVolumeClaim
+
+```
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: csi-zfspv
+spec:
+ storageClassName: openebs-zfspv
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 4Gi
+```
+
+Create a PVC using the StorageClass created for the ZFS driver. Here, the allocated volume size will be rounded off to the nearest Mi or Gi notation; check the [FAQ section](../../faqs/faqs.md) for more details.
+
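+Assuming the PVC manifest above is saved as `pvc.yaml` (the file name also used in the Deprovisioning section), create it with:
+
+```
+$ kubectl apply -f pvc.yaml
+```
+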
+If we are using immediate binding in the StorageClass, we can check the Kubernetes resource for the corresponding ZFS volume right away; in the late-binding (WaitForFirstConsumer) case, we can check the same after the pod has been scheduled:
+
+```
+$ kubectl get zv -n openebs
+NAME ZPOOL NODE SIZE STATUS FILESYSTEM AGE
+pvc-34133838-0d0d-11ea-96e3-42010a800114 zfspv-pool zfspv-node1 4294967296 Ready zfs 4s
+```
+
+```
+$ kubectl describe zv pvc-34133838-0d0d-11ea-96e3-42010a800114 -n openebs
+Name: pvc-34133838-0d0d-11ea-96e3-42010a800114
+Namespace: openebs
+Labels: kubernetes.io/nodename=zfspv-node1
+Annotations:
+API Version: openebs.io/v1alpha1
+Kind: ZFSVolume
+Metadata:
+ Creation Timestamp: 2019-11-22T09:49:29Z
+ Finalizers:
+ zfs.openebs.io/finalizer
+ Generation: 1
+ Resource Version: 2881
+ Self Link: /apis/openebs.io/v1alpha1/namespaces/openebs/zfsvolumes/pvc-34133838-0d0d-11ea-96e3-42010a800114
+ UID: 60bc4df2-0d0d-11ea-96e3-42010a800114
+Spec:
+ Capacity: 4294967296
+ Compression: off
+ Dedup: off
+ Fs Type: zfs
+ Owner Node ID: zfspv-node1
+ Pool Name: zfspv-pool
+ Recordsize: 4k
+ Volume Type: DATASET
+Status:
+ State: Ready
+Events:
+```
+
+The ZFS driver will create a ZFS dataset (or a zvol, as per the fstype in the StorageClass) on the node zfspv-node1 for the mentioned ZFS pool, and the dataset name will be the same as the PV name.
+
+Go to the node zfspv-node1 and check the volume:
+
+```
+$ zfs list
+NAME USED AVAIL REFER MOUNTPOINT
+zfspv-pool 444K 362G 96K /zfspv-pool
+zfspv-pool/pvc-34133838-0d0d-11ea-96e3-42010a800114 96K 4.00G 96K legacy
+```
+
+## Deploy the Application
+
+Create the deployment yaml using the pvc backed by ZFS-LocalPV storage.
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+ name: fio
+spec:
+ restartPolicy: Never
+ containers:
+ - name: perfrunner
+ image: openebs/tests-fio
+ command: ["/bin/bash"]
+ args: ["-c", "while true ;do sleep 50; done"]
+ volumeMounts:
+ - mountPath: /datadir
+ name: fio-vol
+ tty: true
+ volumes:
+ - name: fio-vol
+ persistentVolumeClaim:
+ claimName: csi-zfspv
+```
+
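+Assuming the manifest above is saved as `fio.yaml` (the file name used in the Deprovisioning section), deploy it with:
+
+```
+$ kubectl apply -f fio.yaml
+```
+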
+After the application is deployed, we can go to the node and see that the ZFS volume is being used by the application for reading and writing data, and that space is consumed from the ZFS pool.
+
+## ZFS Property Change
+
+ZFS volume properties, such as turning compression on or off, can be changed by simply editing the Kubernetes resource for the corresponding ZFS volume using the command below:
+
+```
+$ kubectl edit zv pvc-34133838-0d0d-11ea-96e3-42010a800114 -n openebs
+```
+
+You can edit the relevant property, such as turning compression or dedup on, and save the resource. The property will be applied to the corresponding volume and can be verified using the command below on the node:
+
+```
+$ zfs get all zfspv-pool/pvc-34133838-0d0d-11ea-96e3-42010a800114
+```
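+
+To check only the properties you changed, for instance compression and dedup, you can also run:
+
+```
+$ zfs get compression,dedup zfspv-pool/pvc-34133838-0d0d-11ea-96e3-42010a800114
+```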
+
+## Deprovisioning
+
+To deprovision the volume, delete the application that is using the volume and then delete the PV. As part of the PV deletion, the volume is also removed from the ZFS pool and the space is freed.
+
+```
+$ kubectl delete -f fio.yaml
+pod "fio" deleted
+$ kubectl delete -f pvc.yaml
+persistentvolumeclaim "csi-zfspv" deleted
+```
+
+:::warning
+If you are running kernel ZFS on the same set of nodes, the following two points are recommended:
+
+- Disable the zfs-import-scan.service, which avoids importing all pools by scanning all the available devices in the system; disabling the scan service avoids importing pools that were not created by the kernel.
+
+- Disabling the scan service causes no harm, since zfs-import-cache.service is enabled, and importing pools by looking at the cache file during boot time is the preferred way.
+
+```
+$ systemctl stop zfs-import-scan.service
+$ systemctl disable zfs-import-scan.service
+```
+
+Always keep `/etc/zfs/zpool.cache` up to date while performing operations on ZFS pools (`zpool set cachefile=/etc/zfs/zpool.cache <pool>`).
+:::
+
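+For example, for the pool used in this guide, the cache file can be set with:
+
+```
+$ zpool set cachefile=/etc/zfs/zpool.cache zfspv-pool
+```
+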
+## Support
+
+If you encounter issues or have a question, file a [GitHub issue](https://github.com/openebs/openebs/issues/new), or talk to us on the [#openebs channel on the Kubernetes Slack server](https://kubernetes.slack.com/messages/openebs/).
+
+## See Also
+
+- [Installation](../../quickstart-guide/installation.md)
+- [Deploy an Application](../../quickstart-guide/deploy-a-test-application.md)
\ No newline at end of file
diff --git a/docs/main/user-guides/localpv-hostpath.md b/docs/main/user-guides/localpv-hostpath.md
deleted file mode 100644
index 48792e353..000000000
--- a/docs/main/user-guides/localpv-hostpath.md
+++ /dev/null
@@ -1,522 +0,0 @@
----
-id: localpv-hostpath
-title: OpenEBS Local PV Hostpath User Guide
-keywords:
- - OpenEBS Local PV Hostpath
- - Local PV Hostpath
- - Prerequisites
- - Install
- - Create StorageClass
- - Install verification
- - Create a PersistentVolumeClaim
- - Create Pod to consume OpenEBS Local PV Hostpath Storage
- - Cleanup
- - Backup and Restore
- - Troubleshooting
- - Support
-description: This guide will help you to set up and use OpenEBS Local Persistent Volumes backed by Hostpath.
----
-
-This guide will help you to set up and use OpenEBS Local Persistent Volumes backed by Hostpath.
-
-*OpenEBS Dynamic Local PV provisioner* can create Kubernetes Local Persistent Volumes using a unique Hostpath (directory) on the node to persist data, hereafter referred to as *OpenEBS Local PV Hostpath* volumes.
-
-*OpenEBS Local PV Hostpath* volumes have the following advantages compared to native Kubernetes hostpath volumes.
-- OpenEBS Local PV Hostpath allows your applications to access hostpath via StorageClass, PVC, and PV. This provides you the flexibility to change the PV providers without having to redesign your Application YAML.
-- Data protection using the Velero Backup and Restore.
-- Protect against hostpath security vulnerabilities by masking the hostpath completely from the application YAML and pod.
-
-OpenEBS Local PV uses volume topology aware pod scheduling enhancements introduced by [Kubernetes Local Volumes](https://kubernetes.io/docs/concepts/storage/volumes/#local)
-
-:::tip QUICKSTART
-
-OpenEBS Local PV Hostpath volumes will be created under `/var/openebs/local` directory. You can customize the location by [configuring install parameters](#install) or by creating new [StorageClass](#create-storageclass).
-
-If you have OpenEBS already installed, you can create an example pod that persists data to *OpenEBS Local PV Hostpath* with following kubectl commands.
-```
-kubectl apply -f https://openebs.github.io/charts/examples/local-hostpath/local-hostpath-pvc.yaml
-kubectl apply -f https://openebs.github.io/charts/examples/local-hostpath/local-hostpath-pod.yaml
-```
-
-Verify using below kubectl commands that example pod is running and is using a OpenEBS Local PV Hostpath.
-```
-kubectl get pod hello-local-hostpath-pod
-kubectl get pvc local-hostpath-pvc
-```
-
-For a more detailed walkthrough of the setup, follow along the rest of this document.
-:::
-
-## Minimum Versions
-
-- Kubernetes 1.12 or higher is required
-- OpenEBS 1.0 or higher is required.
-
-## Prerequisites
-
-Setup the directory on the nodes where Local PV Hostpaths will be created. This directory will be referred to as `BasePath`. The default location is `/var/openebs/local`.
-
-`BasePath` can be any of the following:
-- A directory on root disk (or `os disk`). (Example: `/var/openebs/local`).
-- In the case of bare-metal Kubernetes nodes, a mounted directory using the additional drive or SSD. (Example: An SSD available at `/dev/sdb`, can be formatted with Ext4 and mounted as `/mnt/openebs-local`)
-- In the case of cloud or virtual instances, a mounted directory created from attaching an external cloud volume or virtual disk. (Example, in GKE, a Local SSD can be used which will be available at `/mnt/disk/ssd1`.)
-
-:::note air-gapped environment
-If you are running your Kubernetes cluster in an air-gapped environment, make sure the following container images are available in your local repository.
-- openebs/localpv-provisioner
-- openebs/linux-utils
-:::
-
-:::note Rancher RKE cluster
-If you are using the Rancher RKE cluster, you must configure kubelet service with `extra_binds` for `BasePath`. If your `BasePath` is the default directory `/var/openebs/local`, then extra_binds section should have the following details:
-```
-services:
- kubelet:
- extra_binds:
- - /var/openebs/local:/var/openebs/local
-```
-:::
-
-## Install
-
-You can skip this section if you have already installed OpenEBS.
-
-1. Prepare to install OpenEBS by providing custom values for configurable parameters.
-
- *OpenEBS Dynamic Local Provisioner* offers some configurable parameters that can be applied during the OpenEBS Installation. Some key configurable parameters available for OpenEBS Dynamic Local Provisioner are:
-
- - The location of the *OpenEBS Dynamic Local PV provisioner* container image.
- ```shell hideCopy
- Default value: openebs/provisioner-localpv
- YAML specification: spec.image on Deployment(localpv-provisioner)
- Helm key: localprovisioner.image
- ```
-
- - The location of the *Provisioner Helper* container image. *OpenEBS Dynamic Local Provisioner* create a *Provisioner Helper* pod to create and delete hostpath directories on the nodes.
- ```shell hideCopy
- Default value: openebs/linux-utils
- YAML specification: Environment Variable (OPENEBS_IO_HELPER_IMAGE) on Deployment(localpv-provisioner)
- Helm key: helper.image
- ```
-
- - The absolute path on the node where the Hostpath directory of a Local PV Volume will be created.
- ```shell hideCopy
- Default value: /var/openebs/local
- YAML specification: Environment Variable (OPENEBS_IO_LOCALPV_HOSTPATH_DIR) on Deployment(maya-apiserver)
- Helm key: localprovisioner.basePath
- ```
-
-2. You can proceed to install OpenEBS either using kubectl or helm using the steps below.
-
- - Install using kubectl
-
- If you would like to change the default values for any of the configurable parameters mentioned in the previous step, download the `openebs-operator.yaml` and make the necessary changes before applying.
- ```
- kubectl apply -f https://openebs.github.io/charts/openebs-operator.yaml
- ```
-
- :::note
- If you would like to use only Local PV (hostpath and device), you can install a lite version of OpenEBS using the following command.
-
- kubectl apply -f https://openebs.github.io/charts/openebs-operator-lite.yaml
- kubectl apply -f https://openebs.github.io/charts/openebs-lite-sc.yaml
- :::
-
- - Install using OpenEBS helm charts
-
- If you would like to change the default values for any of the configurable parameters mentioned in the previous step, specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
-
- ```
- helm repo add openebs https://openebs.github.io/charts
- helm repo update
- helm install --namespace openebs --name openebs openebs/openebs
- ```
-
-## Create StorageClass
-
-You can skip this section if you would like to use default OpenEBS Local PV Hostpath StorageClass created by OpenEBS.
-
-The default Storage Class is called `openebs-hostpath` and its `BasePath` is configured as `/var/openebs/local`.
-
-1. To create your own StorageClass with custom `BasePath`, save the following StorageClass definition as `local-hostpath-sc.yaml`
-
- ```
- apiVersion: storage.k8s.io/v1
- kind: StorageClass
- metadata:
- name: local-hostpath
- annotations:
- openebs.io/cas-type: local
- cas.openebs.io/config: |
- - name: StorageType
- value: hostpath
- - name: BasePath
- value: /var/local-hostpath
- provisioner: openebs.io/local
- reclaimPolicy: Delete
- volumeBindingMode: WaitForFirstConsumer
- ```
- #### (Optional) Custom Node Labelling
-
- In Kubernetes, Hostpath LocalPV identifies nodes using labels such as `kubernetes.io/hostname=`. However, these default labels might not ensure each node is distinct across the entire cluster. To solve this, you can make custom labels. As an admin, you can define and set these labels when configuring a **StorageClass**. Here's a sample storage class:
-
- ```
- apiVersion: storage.k8s.io/v1
- kind: StorageClass
- metadata:
- name: local-hostpath
- annotations:
- openebs.io/cas-type: local
- cas.openebs.io/config: |
- - name: StorageType
- value: "hostpath"
- - name: NodeAffinityLabels
- list:
- - "openebs.io/custom-node-unique-id"
- provisioner: openebs.io/local
- volumeBindingMode: WaitForFirstConsumer
-
- ```
- :::note
- Using NodeAffinityLabels does not influence scheduling of the application Pod. Use kubernetes [allowedTopologies](https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/tutorials/hostpath/allowedtopologies.md) to configure scheduling options.
- :::
-
-2. Edit `local-hostpath-sc.yaml` and update with your desired values for `metadata.name` and `cas.openebs.io/config.BasePath`.
-
- :::note
- If the `BasePath` does not exist on the node, *OpenEBS Dynamic Local PV Provisioner* will attempt to create the directory, when the first Local Volume is scheduled on to that node. You MUST ensure that the value provided for `BasePath` is a valid absolute path.
- :::
-
-3. Create OpenEBS Local PV Hostpath Storage Class.
- ```
- kubectl apply -f local-hostpath-sc.yaml
- ```
-
-4. Verify that the StorageClass is successfully created.
- ```
- kubectl get sc local-hostpath -o yaml
- ```
-
-
-## Install verification
-
-Once you have installed OpenEBS, verify that *OpenEBS Local PV provisioner* is running and Hostpath StorageClass is created.
-
-1. To verify *OpenEBS Local PV provisioner* is running, execute the following command. Replace `-n openebs` with the namespace where you installed OpenEBS.
-
- ```
- kubectl get pods -n openebs -l openebs.io/component-name=openebs-localpv-provisioner
- ```
-
- The output should indicate `openebs-localpv-provisioner` pod is running.
- ```shell hideCopy
- NAME READY STATUS RESTARTS AGE
- openebs-localpv-provisioner-5ff697f967-nb7f4 1/1 Running 0 2m49s
- ```
-
-2. To verify *OpenEBS Local PV Hostpath* StorageClass is created, execute the following command.
-
- ```
- kubectl get sc
- ```
-
- The output should indicate either the default StorageClass `openebs-hostpath` and/or custom StorageClass `local-hostpath` are displayed.
- ```shell hideCopy
- NAME PROVISIONER AGE
- local-hostpath openebs.io/local 5h26m
- openebs-hostpath openebs.io/local 6h4m
- ```
-
-## Create a PersistentVolumeClaim
-
-The next step is to create a PersistentVolumeClaim. Pods will use PersistentVolumeClaims to request Hostpath Local PV from *OpenEBS Dynamic Local PV provisioner*.
-
-1. Here is the configuration file for the PersistentVolumeClaim. Save the following PersistentVolumeClaim definition as `local-hostpath-pvc.yaml`
-
- ```
- kind: PersistentVolumeClaim
- apiVersion: v1
- metadata:
- name: local-hostpath-pvc
- spec:
- storageClassName: openebs-hostpath
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 5G
- ```
-
-2. Create the PersistentVolumeClaim
-
- ```
- kubectl apply -f local-hostpath-pvc.yaml
- ```
-
-3. Look at the PersistentVolumeClaim:
-
- ```
- kubectl get pvc local-hostpath-pvc
- ```
-
- The output shows that the `STATUS` is `Pending`. This means PVC has not yet been used by an application pod. The next step is to create a Pod that uses your PersistentVolumeClaim as a volume.
-
- ```shell hideCopy
- NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
- local-hostpath-pvc Pending openebs-hostpath 3m7s
- ```
-
-## Create Pod to consume OpenEBS Local PV Hostpath Storage
-
-1. Here is the configuration file for the Pod that uses Local PV. Save the following Pod definition to `local-hostpath-pod.yaml`.
-
- ```
- apiVersion: v1
- kind: Pod
- metadata:
- name: hello-local-hostpath-pod
- spec:
- volumes:
- - name: local-storage
- persistentVolumeClaim:
- claimName: local-hostpath-pvc
- containers:
- - name: hello-container
- image: busybox
- command:
- - sh
- - -c
- - 'while true; do echo "`date` [`hostname`] Hello from OpenEBS Local PV." >> /mnt/store/greet.txt; sleep $(($RANDOM % 5 + 300)); done'
- volumeMounts:
- - mountPath: /mnt/store
- name: local-storage
- ```
-
- :::note
- As the Local PV storage classes use `waitForFirstConsumer`, do not use `nodeName` in the Pod spec to specify node affinity. If `nodeName` is used in the Pod spec, then PVC will remain in `pending` state. For more details refer https://github.com/openebs/openebs/issues/2915.
- :::
-
-2. Create the Pod:
-
- ```
- kubectl apply -f local-hostpath-pod.yaml
- ```
-
-3. Verify that the container in the Pod is running.
-
- ```
- kubectl get pod hello-local-hostpath-pod
- ```
-4. Verify that the data is being written to the volume.
-
- ```
- kubectl exec hello-local-hostpath-pod -- cat /mnt/store/greet.txt
- ```
-
-5. Verify that the container is using the Local PV Hostpath.
- ```
- kubectl describe pod hello-local-hostpath-pod
- ```
-
- The output shows that the Pod is running on `Node: gke-user-helm-default-pool-3a63aff5-1tmf` and using the persistent volume provided by `local-hostpath-pvc`.
-
- ```shell hideCopy
- Name: hello-local-hostpath-pod
- Namespace: default
- Priority: 0
- Node: gke-user-helm-default-pool-3a63aff5-1tmf/10.128.0.28
- Start Time: Thu, 16 Apr 2020 17:56:04 +0000
- ...
- Volumes:
- local-storage:
- Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
- ClaimName: local-hostpath-pvc
- ReadOnly: false
- ...
- ```
-
-6. Look at the PersistentVolumeClaim again to see the details about the dynamically provisioned Local PersistentVolume
- ```
- kubectl get pvc local-hostpath-pvc
- ```
-
- The output shows that the `STATUS` is `Bound`. A new Persistent Volume `pvc-864a5ac8-dd3f-416b-9f4b-ffd7d285b425` has been created.
-
- ```shell hideCopy
- NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
- local-hostpath-pvc Bound pvc-864a5ac8-dd3f-416b-9f4b-ffd7d285b425 5G RWO openebs-hostpath 28m
- ```
-
-7. Look at the PersistentVolume details to see where the data is stored. Replace the PVC name with the one that was displayed in the previous step.
- ```
- kubectl get pv pvc-864a5ac8-dd3f-416b-9f4b-ffd7d285b425 -o yaml
- ```
- The output shows that the PV was provisioned in response to PVC request `spec.claimRef.name: local-hostpath-pvc`.
-
- ```shell hideCopy
- apiVersion: v1
- kind: PersistentVolume
- metadata:
- name: pvc-864a5ac8-dd3f-416b-9f4b-ffd7d285b425
- annotations:
- pv.kubernetes.io/provisioned-by: openebs.io/local
- ...
- spec:
- accessModes:
- - ReadWriteOnce
- capacity:
- storage: 5G
- claimRef:
- apiVersion: v1
- kind: PersistentVolumeClaim
- name: local-hostpath-pvc
- namespace: default
- resourceVersion: "291148"
- uid: 864a5ac8-dd3f-416b-9f4b-ffd7d285b425
- ...
- ...
- local:
- fsType: ""
- path: /var/openebs/local/pvc-864a5ac8-dd3f-416b-9f4b-ffd7d285b425
- nodeAffinity:
- required:
- nodeSelectorTerms:
- - matchExpressions:
- - key: kubernetes.io/hostname
- operator: In
- values:
- - gke-user-helm-default-pool-3a63aff5-1tmf
- persistentVolumeReclaimPolicy: Delete
- storageClassName: openebs-hostpath
- volumeMode: Filesystem
- status:
- phase: Bound
- ```
-
-
-:::note
-A few important characteristics of a *OpenEBS Local PV* can be seen from the above output:
-- `spec.nodeAffinity` specifies the Kubernetes node where the Pod using the Hostpath volume is scheduled.
-- `spec.local.path` specifies the unique subdirectory under the `BasePath (/var/local/openebs)` defined in corresponding StorageClass.
-:::
-
-## Cleanup
-
-Delete the Pod, the PersistentVolumeClaim and StorageClass that you might have created.
-
-```
-kubectl delete pod hello-local-hostpath-pod
-kubectl delete pvc local-hostpath-pvc
-kubectl delete sc local-hostpath
-```
-
-Verify that the PV that was dynamically created is also deleted.
-```
-kubectl get pv
-```
-
-## Backup and Restore
-
-OpenEBS Local Volumes can be backed up and restored along with the application using [Velero](https://velero.io).
-
-:::note
-The following steps assume that you already have Velero with Restic integration is configured. If not, please follow the [Velero Documentation](https://velero.io/docs/) to proceed with install and setup of Velero. If you encounter any issues or have questions, talk to us on the [#openebs channel on the Kubernetes Slack server](https://kubernetes.slack.com/messages/openebs/).
-:::
-
-### Backup
-
-The following steps will help you to prepare and backup the data from the volume created for the example pod (`hello-local-hostpath-pod`), with the volume mount (`local-storage`).
-
-1. Prepare the application pod for backup. Velero uses Kubernetes labels to select the pods that need to be backed up. Velero uses annotation on the pods to determine which volumes need to be backed up. For the example pod launched in this guide, you can inform velero to backup by specifying the following label and annotation.
-
- ```
- kubectl label pod hello-local-hostpath-pod app=test-velero-backup
- kubectl annotate pod hello-local-hostpath-pod backup.velero.io/backup-volumes=local-storage
- ```
-2. Create a Backup using velero.
-
- ```
- velero backup create bbb-01 -l app=test-velero-backup
- ```
-
-3. Verify that backup is successful.
-
- ```
- velero backup describe bbb-01 --details
- ```
-
- On successful completion of the backup, the output of the backup describe command will show the following:
- ```shell hideCopy
- ...
- Restic Backups:
- Completed:
- default/hello-local-hostpath-pod: local-storage
- ```
-
-### Restore
-
-1. Install and Setup Velero, with the same provider where backups were saved. Verify that backups are accessible.
-
- ```
- velero backup get
- ```
-
- The output of should display the backups that were taken successfully.
- ```shell hideCopy
- NAME STATUS CREATED EXPIRES STORAGE LOCATION SELECTOR
- bbb-01 Completed 2020-04-25 15:49:46 +0000 UTC 29d default app=test-velero-backup
- ```
-
-2. Restore the application.
-
- :::note
- Local PVs are created with node affinity. As the node names will change when a new cluster is created, create the required PVC(s) prior to proceeding with restore.
- :::
-
- Replace the path to the PVC yaml in the below commands, with the PVC that you have created.
- ```
- kubectl apply -f https://openebs.github.io/charts/examples/local-hostpath/local-hostpath-pvc.yaml
- velero restore create rbb-01 --from-backup bbb-01 -l app=test-velero-backup
- ```
-
-3. Verify that application is restored.
-
- ```
- velero restore describe rbb-01
- ```
-
- Depending on the data, it may take a while to initialize the volume. On successful restore, the output of the above command should show:
- ```shell hideCopy
- ...
- Restic Restores (specify --details for more information):
- Completed: 1
- ```
-
-4. Verify that data has been restored. The application pod used in this example, write periodic messages (greetings) to the volume.
-
- ```
- kubectl exec hello-local-hostpath-pod -- cat /mnt/store/greet.txt
- ```
-
- The output will show that backed up data as well as new greetings that started appearing after application pod was restored.
- ```shell hideCopy
- Sat Apr 25 15:41:30 UTC 2020 [hello-local-hostpath-pod] Hello from OpenEBS Local PV.
- Sat Apr 25 15:46:30 UTC 2020 [hello-local-hostpath-pod] Hello from OpenEBS Local PV.
- Sat Apr 25 16:11:25 UTC 2020 [hello-local-hostpath-pod] Hello from OpenEBS Local PV.
- ```
-
-## Troubleshooting
-
-Review the logs of the OpenEBS Local PV provisioner. OpenEBS Dynamic Local Provisioner logs can be fetched using.
-
-```
-kubectl logs -n openebs -l openebs.io/component-name=openebs-localpv-provisioner
-```
-
-## Support
-
-If you encounter issues or have a question, file an [Github issue](https://github.com/openebs/openebs/issues/new), or talk to us on the [#openebs channel on the Kubernetes Slack server](https://kubernetes.slack.com/messages/openebs/).
-
-## See Also:
-
-[Understand OpenEBS Local PVs](/concepts/localpv)
diff --git a/docs/main/user-guides/ndm.md b/docs/main/user-guides/ndm.md
deleted file mode 100644
index 037d2fc43..000000000
--- a/docs/main/user-guides/ndm.md
+++ /dev/null
@@ -1,231 +0,0 @@
----
-id: ndm
-title: Node Disk Manager User Guide
-keywords:
- - ndm
- - Node Disk Manager
- - Admin Operations
- - Create blockdevice CRs for unsupported disks
-description: This section provides the operations that need to be performed by the Admin for configuring NDM.
----
-
-[![OpenEBS configuration flow](../assets/2-config-sequence.svg)](../assets/2-config-sequence.svg)
-
-This section provides the operations that need to be performed by the Admin for configuring NDM.
-
-## Admin operations
-
-[Include filters](#include-filters)
-
-[Exclude filters](#exclude-filters)
-
-[Create blockdevice CRs for unsupported disks](#create-blockdevice-crs-for-unsupported-disks)
-
-## Admin Operations
------
-
-### Include filters
-
-Users can include only selected block devices for blockdevice CR creation, and then use only those blockdevice CRs for cStor pool creation or for provisioning device-based Local PVs. To include the selected blockdevices, update the operator YAML file with the required blockdevices under the NDM configuration section so that only these blockdevices are taken up for blockdevice CR creation. Add the blockdevice path in the following configuration to specify particular disks. This configuration must be added in `openebs-ndm-config` under `Configmap` in `openebs-operator.yaml`.
-
-This change must be made in the `openebs-operator.yaml` file that you downloaded before OpenEBS installation. If the change is performed after the OpenEBS installation, then the user must restart the corresponding NDM DaemonSet pods for the new NDM configuration to take effect.
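-
-For example, assuming a default installation where the NDM DaemonSet is named `openebs-ndm` in the `openebs` namespace, the pods can be restarted with a sketch like:
-
-```
-kubectl -n openebs rollout restart daemonset openebs-ndm
-```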
-
-
-```
-filterconfigs:
-- key: path-filter
- name: path filter
- state: true
- include: "/dev/sda"
- exclude: ""
-```
-
-When the above configuration is used, only the block device `/dev/sda` will be used for creating the block device custom resource. All other disks will be excluded.
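-
-After the NDM pods pick up the new configuration, you can verify that blockdevice CRs were created only for the included device, for example:
-
-```
-kubectl get blockdevice -n openebs
-```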
-
-**Note**:
-
-- Regex support is not available for `filterconfigs` in the NDM `Configmap`, and the `Configmap` applies at the cluster level. This means that if a user provides `/dev/sdb` in `filterconfigs` as an include filter, then all `/dev/sdb` blockdevices from all nodes in the cluster will be used for blockdevice CR creation by NDM.
-
-### Exclude filters
-
-NDM does some filtering on the disks to decide which ones to exclude, for example, the boot disk. By default, NDM excludes the following device paths while creating blockdevice CRs. This configuration is added in `openebs-ndm-config` under `Configmap` in `openebs-operator.yaml`.
-
-```
-/dev/loop - loop devices.
-/dev/fd - file descriptors.
-/dev/sr - CD-ROM devices.
-/dev/ram - ramdisks.
-/dev/dm - LVM devices.
-/dev/md - multiple devices (software RAID devices).
-/dev/rbd - Ceph RBD devices.
-/dev/zd - ZFS volumes.
-```
-
-The following is a snippet of the NDM configuration file from the OpenEBS operator YAML which excludes the provided disks/paths.
-
-```
-filterconfigs:
- - key: os-disk-exclude-filter
- name: os disk exclude filter
- state: true
- exclude: "/,/etc/hosts,/boot"
- - key: vendor-filter
- name: vendor filter
- state: true
- include: ""
- exclude: "CLOUDBYT,OpenEBS"
- - key: path-filter
- name: path filter
- state: true
- include: ""
- exclude: "loop,/dev/fd0,/dev/sr0,/dev/ram,/dev/dm-,/dev/md,/dev/rbd"
-```
-
-It is also possible to customize the filter by adding more disk types associated with your nodes, for example, used disks, unwanted disks, and so on.
-
-This change must be made in the `openebs-operator.yaml` file that you downloaded before OpenEBS installation. If the change is performed after the OpenEBS installation, then the user must restart the corresponding NDM DaemonSet pods for the new NDM configuration to take effect.
-
-```
-filterconfigs:
- - key: path-filter
- name: path filter
- state: true
- include: ""
- exclude: "loop,/dev/fd0,/dev/sr0,/dev/ram,/dev/dm-,/dev/md,/dev/sdb"
-```
-
-**Note:**
-
-- Regex support is not available for `filterconfigs` in the NDM `Configmap`, and the `Configmap` applies at the cluster level. This means that if a user provides `/dev/sdb` in the configmap as an exclude filter, then all `/dev/sdb` blockdevices from all nodes in the cluster will be excluded by NDM.
-
-- It is recommended to use the OpenEBS provisioner alone in the cluster. If you are using another storage provisioner, such as `gce-pd`, along with OpenEBS, use exclude filters to prevent those disks from being consumed by OpenEBS. For example, if you are using the `standard` storage class in GKE with the storage provisioner **kubernetes.io/gce-pd**, a GPD is attached to the node when it creates a PVC. This GPD will be detected by NDM and may be used by OpenEBS for provisioning a volume. To avoid this scenario, it is recommended to put the associated device path created on the node in the **exclude** field under **path-filter**. If the GPD is attached as `/dev/sdc`, then add `/dev/sdc` to the above mentioned field. You can confirm the device path on the node itself; see the command after the example snippet.
-
- **Example snippet:**
-
-  In the downloaded openebs-operator.yaml, find the *openebs-ndm-config* configmap and update the values for **path-filter** and any other filters if required.
-
- ```
- ---
- # This is the node-disk-manager related config.
- # It can be used to customize the disks probes and filters
- apiVersion: v1
- kind: ConfigMap
- metadata:
- name: openebs-ndm-config
- namespace: openebs
- data:
- # udev-probe is default or primary probe which should be enabled to run ndm
-   # filterconfigs contains configs of filters - in the form of include
- # and exclude comma separated strings
- node-disk-manager.config: |
- probeconfigs:
- - key: udev-probe
- name: udev probe
- state: true
- - key: seachest-probe
- name: seachest probe
- state: false
- - key: smart-probe
- name: smart probe
- state: true
- filterconfigs:
- - key: os-disk-exclude-filter
- name: os disk exclude filter
- state: true
- exclude: "/,/etc/hosts,/boot"
- - key: vendor-filter
- name: vendor filter
- state: true
- include: ""
- exclude: "CLOUDBYT,OpenEBS"
- - key: path-filter
- name: path filter
- state: true
- include: ""
- exclude: "loop,/dev/fd0,/dev/sr0,/dev/ram,/dev/dm-,/dev/md,/dev/sdc"
- ---
- ```
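-
-  To confirm which device path the attached disk received on the node before adding it to the filter, a standard `lsblk` listing can be run on the worker node (output will vary by environment):
-
-  ```
-  lsblk -o NAME,SIZE,TYPE,MOUNTPOINT
-  ```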
-
-### Create blockdevice CRs for unsupported disks
-
-Currently, NDM manages disks, partitions, lvm, crypt, and other dm devices out of the box. If the user needs a blockdevice for other device types, such as an md array or any other unsupported device type, the blockdevice resource can be created manually using the following steps:
-
-1. Create the blockdevice CR YAML using the following sample spec:
-
- ```
- apiVersion: openebs.io/v1alpha1
- kind: BlockDevice
- metadata:
- name: example-blockdevice-1
- labels:
- kubernetes.io/hostname: # like gke-openebs-user-default-pool-044afcb8-bmc0
- ndm.io/managed: "false" # for manual blockdevice creation put false
- ndm.io/blockdevice-type: blockdevice
- status:
- claimState: Unclaimed
- state: Active
- spec:
- capacity:
- logicalSectorSize: 512
- storage: #like 53687091200
- details:
- deviceType: # like disk, partition, lvm, crypt, md
- firmwareRevision:
- model: # like PersistentDisk
- serial: # like google-disk-2
- compliance: #like "SPC-4"
- vendor: #like Google
- devlinks:
- - kind: by-id
- links:
- - # like /dev/disk/by-id/scsi-0Google_PersistentDisk_disk-2
- - # like /dev/disk/by-id/google-disk-2
- - kind: by-path
- links:
- - # like /dev/disk/by-path/virtio-pci-0000:00:03.0-scsi-0:0:2:0
- nodeAttributes:
- nodeName: # output of `kubectl get nodes` can be used
- path: # like /dev/md0
-
- ```
-
-2. Modify the created blockdevice CR sample YAML with the disk information. In the above blockdevice CR sample spec, the following fields must be filled in before applying the YAML (a hypothetical filled-in example follows this list).
-
-  - name
-    - Provide a unique name for the blockdevice CR. In the above YAML spec, the name given to the blockdevice CR is `example-blockdevice-1`.
-  - kubernetes.io/hostname
-    - Hostname of the node where the blockdevice is attached.
-  - storage
-    - Provide the storage capacity in bytes, for example, `53687091200`.
-  - logicalSectorSize
-    - Logical sector size of the blockdevice, for example, 512 or 4096. The above example snippet uses 512. This value can be changed as per the logical sector size of the device.
-  - deviceType
-    - Type of the device. This can be obtained from the `lsblk` output, for example, lvm, crypt, nbd, or md.
-  - links
-    - This field should be filled in for by-id and by-path. These details can be obtained from the worker node by running `udevadm info -q property -n <device-path>`.
-  - nodeName
-    - Name of the node where the blockdevice is attached. The output of `kubectl get nodes` can be used to obtain this value.
-  - path
-    - The value should be like `/dev/dm-0` or `/dev/md0`.
-
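-   For illustration, a hypothetical filled-in spec for a software RAID device attached as `/dev/md0` on a node named `worker-node-1` might look like the following; all values are placeholders and must be replaced with your device's details:
-
-   ```
-   apiVersion: openebs.io/v1alpha1
-   kind: BlockDevice
-   metadata:
-     name: example-blockdevice-md0           # unique name for this CR
-     labels:
-       kubernetes.io/hostname: worker-node-1 # hostname of the node with the device
-       ndm.io/managed: "false"               # manual blockdevice creation
-       ndm.io/blockdevice-type: blockdevice
-   status:
-     claimState: Unclaimed
-     state: Active
-   spec:
-     capacity:
-       logicalSectorSize: 512
-       storage: 53687091200                  # capacity in bytes
-     details:
-       deviceType: md
-     devlinks:
-       - kind: by-id
-         links:
-           - /dev/disk/by-id/md-name-example # placeholder devlink
-     nodeAttributes:
-       nodeName: worker-node-1
-     path: /dev/md0
-   ```
-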
-3. Apply the modified YAML file to create the blockdevice CR for the provided device path.
-
- ```
-   kubectl apply -f <blockdevice-cr.yaml> -n <openebs-namespace>
- ```
-
-   **Note:** The blockdevice CR should be created in the same namespace where OpenEBS is installed.
-
-4. Repeat the same steps for each unsupported device and create a blockdevice CR for each such device.
-
-5. Verify that the blockdevice is created by running the `kubectl get blockdevice -n openebs` command.
-
-**Note:** If you are creating a blockdevice CR for an unsupported device, you must add the corresponding disk under the **exclude** filter so that NDM does not select that disk for BD creation. See [Exclude filters](#exclude-filters) for customizing the exclude filter in the NDM configuration.
-
-
-## See Also:
-
-[Understanding Node Disk Manager](/user-guides/ndm)
-
-
diff --git a/docs/main/user-guides/replicated-engine-user-guide/additional-information/i-o-path-description.md b/docs/main/user-guides/replicated-engine-user-guide/additional-information/i-o-path-description.md
index 8fa33b89e..ed4166e13 100644
--- a/docs/main/user-guides/replicated-engine-user-guide/additional-information/i-o-path-description.md
+++ b/docs/main/user-guides/replicated-engine-user-guide/additional-information/i-o-path-description.md
@@ -164,7 +164,7 @@ The disk devices' response to the I/O request is returns back along the same pat
If the StorageClass on which a volume is based specifies a replication factor of greater than one, then a synchronous mirroring scheme is employed to maintain multiple redundant data copies. For a replicated volume, creation and configuration of the volume's nexus requires additional orchestration steps. Prior to creating the nexus, not only must a local replica be created and exported as for the non-replicated case, but the requisite count of additional remote replicas required to meet the replication factor must be created and exported from Mayastor instances other than that hosting the nexus itself. The control plane core-agent component will select appropriate pool candidates, which includes ensuring sufficient available capacity and that no two replicas are sited on the same Mayastor instance \(which would compromise availability during co-incident failures\). Once suitable replicas have been successfully exported, the control plane completes the creation and configuration of the volume's nexus, with the replicas as its children. In contrast to their local counterparts, remote replicas are exported, and so connected to by the nexus, over NVMe-F using a user-mode initiator and target implementation from the SPDK.
-Write I/O requests to the nexus are handled synchronously; the I/O is dispatched to all \(healthy\) children and only when completion is acknowledged by all is the I/O acknowledged to the calling initiator via the nexus front-end. Read I/O requests are similarly issued to all children, with just the first response returned to the caller.
+Write I/O requests to the nexus are handled synchronously; the I/O is dispatched to all \(healthy\) children and only when completion is acknowledged by all is the I/O acknowledged to the calling initiator via the nexus front-end. Reads are round-robin distributed across healthy children.
diff --git a/docs/main/user-guides/replicated-engine-user-guide/advanced-operations/HA.md b/docs/main/user-guides/replicated-engine-user-guide/advanced-operations/HA.md
index 74b036a65..2490d4582 100644
--- a/docs/main/user-guides/replicated-engine-user-guide/advanced-operations/HA.md
+++ b/docs/main/user-guides/replicated-engine-user-guide/advanced-operations/HA.md
@@ -8,18 +8,21 @@ description: This guide will help you to enhance High Availability (HA) of the v
---
## High Availability
-Mayastor 2.0 enhances High Availability (HA) of the volume target with the nexus switch-over feature. In the event of the target failure, the switch-over feature quickly detects the failure and spawns a new nexus to ensure I/O continuity.
-The HA feature consists of two components: the HA node agent (which runs in each csi- node) and the cluster agent (which runs alongside the agent-core). The HA node agent looks for io-path failures from applications to their corresponding targets. If any such broken path is encountered, the HA node agent informs the cluster agent. The cluster-agent then creates a new target on a different (live) node. Once the target is created, the `node-agent` establishes a new path between the application and its corresponding target. The HA feature restores the broken path within seconds, ensuring negligible downtime.
+Replicated Engine 2.0 enhances High Availability (HA) of the volume target with the nexus switch-over feature. In the event of the target failure, the switch-over feature quickly detects the failure and spawns a new nexus to ensure I/O continuity.
+The HA feature consists of two components:
+- The HA node agent, which runs in each csi-node pod.
+- The cluster agent, which runs alongside the agent-core.
-{% hint style="warning" %}
-The volume's replica count must be higher than 1 for a new target to be established as part of switch-over.
-{% endhint %}
+The HA node agent looks for I/O-path failures from applications to their corresponding targets. If any such broken path is encountered, the HA node agent informs the cluster agent. The cluster-agent then creates a new target on a different (live) node. Once the target is created, the `node-agent` establishes a new path between the application and its corresponding target. The HA feature restores the broken path within seconds, ensuring negligible downtime.
+:::warning
+The volume's replica count must be higher than one for a new target to be established as part of switch-over.
+:::
### How do I disable this feature?
-{% hint style="info" %}
+:::info
We strongly recommend keeping this feature enabled.
-{% endhint %}
+:::
The HA feature is enabled by default; to disable it, pass the parameter `--set=agents.ha.enabled=false` with the helm install command.
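+
+For example, assuming Replicated Engine is installed via its Helm chart into the `mayastor` namespace (the repository, chart, and release names below are illustrative; follow the installation guide for the exact chart reference), the flag can be appended as follows:
+
+```
+helm install mayastor mayastor/mayastor -n mayastor --create-namespace \
+  --set=agents.ha.enabled=false
+```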
\ No newline at end of file
diff --git a/docs/main/user-guides/replicated-engine-user-guide/advanced-operations/monitoring.md b/docs/main/user-guides/replicated-engine-user-guide/advanced-operations/monitoring.md
index ca7655151..b9fc9aca2 100644
--- a/docs/main/user-guides/replicated-engine-user-guide/advanced-operations/monitoring.md
+++ b/docs/main/user-guides/replicated-engine-user-guide/advanced-operations/monitoring.md
@@ -8,11 +8,11 @@ description: This guide explains about the replicated engine pool metrics export
---
# Monitoring
-## Pool metrics exporter
+## Pool Metrics Exporter
-The Mayastor pool metrics exporter runs as a sidecar container within every io-engine pod and exposes pool usage metrics in Prometheus format. These metrics are exposed on port 9502 using an HTTP endpoint /metrics and are refreshed every five minutes.
+The Replicated Engine pool metrics exporter runs as a sidecar container within every io-engine pod and exposes pool usage metrics in Prometheus format. These metrics are exposed on port 9502 at the HTTP endpoint `/metrics` and are refreshed every five minutes.
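+
+To take a quick look at the raw metrics, you can port-forward an io-engine pod and query the endpoint directly; the pod name below is a placeholder, and the `mayastor` namespace is assumed:
+
+```
+kubectl -n mayastor port-forward pod/<io-engine-pod-name> 9502:9502
+curl http://localhost:9502/metrics
+```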
-### Supported pool metrics
+### Supported Pool Metrics
| Name | Type | Unit | Description |
| :--- | :--- | :--- | :--- |
@@ -21,8 +21,8 @@ The Mayastor pool metrics exporter runs as a sidecar container within every io-e
| disk_pool_status | Gauge | Integer | Status of the pool (0, 1, 2, 3) = {"Unknown", "Online", "Degraded", "Faulted"} |
| disk_pool_committed_size | Gauge | Integer | Committed size of the pool in bytes |
-{% tab title="Example metrics" %}
-```text
+**Example Metrics**
+```
# HELP disk_pool_status disk-pool status
# TYPE disk_pool_status gauge
disk_pool_status{node="worker-0",name="mayastor-disk-pool"} 1
@@ -36,18 +36,12 @@ disk_pool_used_size_bytes{node="worker-0",name="mayastor-disk-pool"} 2.147483648
# TYPE disk_pool_committed_size_bytes gauge
disk_pool_committed_size_bytes{node="worker-0", name="mayastor-disk-pool"} 9663676416
```
-{% endtab %}
-
-
---------
-
-## Stats exporter metrics
+## Stats Exporter Metrics
When [eventing](../additional-information/call-home.md) is activated, the stats exporter operates within the **obs-callhome-stats** container, located in the **callhome** pod. The statistics are made accessible through an HTTP endpoint at port `9090`, specifically using the `/stats` route.
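+
+As with the pool metrics, these statistics can be inspected by port-forwarding the callhome pod and querying the `/stats` route; the pod name is a placeholder, and the `mayastor` namespace is assumed:
+
+```
+kubectl -n mayastor port-forward pod/<callhome-pod-name> 9090:9090
+curl http://localhost:9090/stats
+```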
-
-### Supported stats metrics
+### Supported Stats Metrics
| Name | Type | Unit | Description |
| :--- | :--- | :--- | :--- |
@@ -56,32 +50,27 @@ When [eventing](../additional-information/call-home.md) is activated, the stats
| volumes_created | Gauge | Integer | Total successful volume creation attempts |
| volumes_deleted | Gauge | Integer | Total successful volume deletion attempts |
-
-----
-
-## Integrating exporter with Prometheus monitoring stack
+## Integrating Exporter with Prometheus Monitoring Stack
1. To install, add the Prometheus-stack helm chart and update the repo.
-{% tab title="Command" %}
-```text
+**Command**
+```
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
```
-{% endtab %}
-Then, install the Prometheus monitoring stack and set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues to false. This enables Prometheus to discover custom ServiceMonitor for Mayastor.
+Then, install the Prometheus monitoring stack and set `prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues` to false. This enables Prometheus to discover the custom ServiceMonitor for Replicated Engine.
-{% tab title="Command" %}
-```text
+**Command**
+```
helm install mayastor prometheus-community/kube-prometheus-stack -n mayastor --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false
```
-{% endtab %}
-2. Next, install the ServiceMonitor resource to select services and specify their underlying endpoint objects.
+2. Install the ServiceMonitor resource to select services and specify their underlying endpoint objects.
-{% tab title="ServiceMonitor YAML" %}
-```text
+**ServiceMonitor YAML**
+```
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@@ -95,15 +84,12 @@ spec:
endpoints:
- port: metrics
```
-{% endtab %}
-{% hint style="info" %}
+:::info
Upon successful integration of the exporter with the Prometheus stack, the metrics will be available on port 9090 at the HTTP endpoint /metrics.
-{% endhint %}
-
----
+:::
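+
+A quick way to confirm that the ServiceMonitor was created is to list it in the namespace where it was applied (assumed here to be `mayastor`):
+
+```
+kubectl get servicemonitor -n mayastor
+```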
-## CSI metrics exporter
+## CSI Metrics Exporter
| Name | Type | Unit | Description |
| :--- | :--- | :--- | :--- |
diff --git a/docs/main/user-guides/replicated-engine-user-guide/advanced-operations/node-cordon.md b/docs/main/user-guides/replicated-engine-user-guide/advanced-operations/node-cordon.md
index 2f34e18bc..0ee8d1c71 100644
--- a/docs/main/user-guides/replicated-engine-user-guide/advanced-operations/node-cordon.md
+++ b/docs/main/user-guides/replicated-engine-user-guide/advanced-operations/node-cordon.md
@@ -12,34 +12,31 @@ Cordoning a node marks or taints the node as unschedulable. This prevents the sc
This feature is in line with the node-cordon functionality of Kubernetes.
To add a label and cordon a node, execute:
-{% tab title="Command" %}
-```text
+**Command**
+```
kubectl-mayastor cordon node