From 2bf74b0adc45a81e60813b82667d8c7d36e8ba95 Mon Sep 17 00:00:00 2001
From: yuanyuan zhang
Date: Mon, 30 Dec 2024 16:08:43 +0800
Subject: [PATCH 1/6] docs: update kbcli cluster create, yaml, and monitoring
docs in release-1.0
---
...e-and-connect-an-apecloud-mysql-cluster.md | 92 ++--
.../delete-mysql-cluster.md | 7 +-
.../kubeblocks-for-apecloud-mysql.md | 2 +-
.../manage-elasticsearch.md | 94 ++--
.../create-a-kafka-cluster.md | 316 ++++++-------
.../delete-kafka-cluster.md | 7 +-
.../kubeblocks-for-milvus/manage-milvus.md | 422 ++++++++++--------
...create-and-connect-to-a-mongodb-cluster.md | 75 ++--
.../delete-mongodb-cluster.md | 7 +-
.../kubeblocks-for-mongodb.md | 2 +-
.../create-and-connect-a-mysql-cluster.md | 89 ++--
.../delete-mysql-cluster.md | 7 +-
.../kubeblocks-for-mysql-community-edition.md | 2 +-
...create-and-connect-a-postgresql-cluster.md | 90 ++--
.../delete-a-postgresql-cluster.md | 7 +-
.../kubeblocks-for-postgresql.md | 2 +-
.../create-pulsar-cluster-on-kubeblocks.md | 214 +++++----
.../delete-a-pulsar-cluster.md | 7 +-
.../kubeblocks-for-pulsar.md | 2 +-
.../kubeblocks-for-qdrant/manage-qdrant.md | 92 ++--
.../manage-rabbitmq.md | 95 ++--
.../create-and-connect-a-redis-cluster.md | 106 +++--
.../delete-a-redis-cluster.md | 7 +-
.../kubeblocks-for-redis.md | 2 +-
.../manage-starrocks.md | 66 ++-
.../observability/monitor-database.md | 135 +++++-
...e-and-connect-an-apecloud-mysql-cluster.md | 92 ++--
.../delete-mysql-cluster.md | 3 +-
.../kubeblocks-for-apecloud-mysql.md | 2 +-
.../manage-elasticsearch.md | 84 ++--
.../create-a-kafka-cluster.md | 272 +++++------
.../delete-kafka-cluster.md | 3 +-
.../kubeblocks-for-kafka.md | 2 +-
.../kubeblocks-for-milvus/manage-milvus.md | 416 +++++++++--------
...create-and-connect-to-a-mongodb-cluster.md | 73 +--
.../delete-a-mongodb-cluster.md | 3 +-
.../kubeblocks-for-mongodb.md | 2 +-
.../create-and-connect-a-mysql-cluster.md | 93 ++--
.../delete-mysql-cluster.md | 3 +-
.../kubeblocks-for-mysql-community-edition.md | 2 +-
...create-and-connect-a-postgresql-cluster.md | 86 ++--
.../delete-a-postgresql-cluster.md | 3 +-
.../kubeblocks-for-postgresql.md | 2 +-
.../create-pulsar-cluster-on-kb.md | 210 +++++----
.../delete-pulsar-cluster.md | 3 +-
.../kubeblocks-for-pulsar.md | 2 +-
.../kubeblocks-for-qdrant/manage-qdrant.md | 90 ++--
.../manage-rabbitmq.md | 79 +++-
.../create-and-connect-to-a-redis-cluster.md | 100 +++--
.../delete-a-redis-cluster.md | 3 +-
.../kubeblocks-for-redis.md | 2 +-
.../manage-starrocks.md | 59 +--
.../observability/monitor-database.md | 133 +++++-
53 files changed, 1988 insertions(+), 1781 deletions(-)
diff --git a/docs/user_docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md b/docs/user_docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md
index fa35d59de86..8768cd5f446 100644
--- a/docs/user_docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md
+++ b/docs/user_docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md
@@ -109,67 +109,55 @@ KubeBlocks supports creating two types of ApeCloud MySQL clusters: Standalone an
```yaml
cat <
-KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating an Elasticsearch cluster.
+KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating an Elasticsearch cluster with a single node. For more examples, refer to [the GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/elasticsearch).
```yaml
cat <
-1. Create a Kafka cluster. If you only have one node for deploying a cluster with multiple replicas, set `spec.affinity.topologyKeys` as `null`. But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
+1. Create a Kafka cluster. If you only have one node for deploying a cluster with multiple replicas, set `spec.componentSpecs.affinity.topologyKeys` as `null`. But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
+
+ For more cluster examples, refer to [the GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/kafka).
* Create a Kafka cluster in combined mode.
- ```yaml
- # create kafka in combined mode
- kubectl apply -f - <-postgresql`. For example, if your cluster name is `mycluster`, the value would be `mycluster-postgresql`. Replace `mycluster` with your actual cluster name as needed. |
+ | `spec.componentSpecs.replicas` | It specifies the number of replicas of the component. |
+ | `spec.componentSpecs.resources` | It specifies the resources required by the Component. |
+ | `spec.componentSpecs.volumeClaimTemplates` | It specifies a list of PersistentVolumeClaim templates that define the storage requirements for the Component. |
+ | `spec.componentSpecs.volumeClaimTemplates.name` | It refers to the name of a volumeMount defined in `componentDefinition.spec.runtime.containers[*].volumeMounts`. |
+ | `spec.componentSpecs.volumeClaimTemplates.spec.storageClassName` | It is the name of the StorageClass required by the claim. If not specified, the StorageClass annotated with `storageclass.kubernetes.io/is-default-class=true` will be used by default. |
+ | `spec.componentSpecs.volumeClaimTemplates.spec.resources.storage` | You can set the storage size as needed. |
+
+ For more API fields and descriptions, refer to the [API Reference](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster).
KubeBlocks operator watches for the `Cluster` CRD and creates the cluster and all dependent resources. You can get all the resources created by the cluster with `kubectl get all,secret,rolebinding,serviceaccount -l app.kubernetes.io/instance=mycluster -n demo`.
@@ -218,7 +214,7 @@ KubeBlocks supports creating two types of PostgreSQL clusters: Standalone and Re
If you only have one node for deploying a Replication Cluster, set the `--topology-keys` as `null` when creating a Replication Cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
```bash
- kbcli cluster create postgresql mycluster --replicas=2 --availability-policy='none' -n demo
+ kbcli cluster create postgresql mycluster --replicas=2 --topology-keys=null -n demo
```
2. Verify whether this cluster is created successfully.
diff --git a/docs/user_docs/kubeblocks-for-postgresql/cluster-management/delete-a-postgresql-cluster.md b/docs/user_docs/kubeblocks-for-postgresql/cluster-management/delete-a-postgresql-cluster.md
index fc852418627..cbf896fb860 100644
--- a/docs/user_docs/kubeblocks-for-postgresql/cluster-management/delete-a-postgresql-cluster.md
+++ b/docs/user_docs/kubeblocks-for-postgresql/cluster-management/delete-a-postgresql-cluster.md
@@ -21,10 +21,9 @@ The termination policy determines how a cluster is deleted.
| **terminationPolicy** | **Deleting Operation** |
|:----------------------|:-------------------------------------------------|
-| `DoNotTerminate` | `DoNotTerminate` blocks delete operation. |
-| `Halt` | `Halt` deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), allowing for data preservation while stopping other operations. Halt policy is deprecated in v0.9.1 and will have same meaning as DoNotTerminate. |
-| `Delete` | `Delete` extends the Halt policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. |
-| `WipeOut` | `WipeOut` deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, especially in non-production environments, to avoid irreversible data loss. |
+| `DoNotTerminate` | `DoNotTerminate` prevents deletion of the Cluster. This policy ensures that all resources remain intact. |
+| `Delete` | `Delete` deletes Cluster resources like Pods, Services, and Persistent Volume Claims (PVCs), leading to a thorough cleanup while removing all persistent data. |
+| `WipeOut` | `WipeOut` is an aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. |
To check the termination policy, execute the following command.
diff --git a/docs/user_docs/kubeblocks-for-postgresql/kubeblocks-for-postgresql.md b/docs/user_docs/kubeblocks-for-postgresql/kubeblocks-for-postgresql.md
index e96e3a02960..4030a15dce3 100644
--- a/docs/user_docs/kubeblocks-for-postgresql/kubeblocks-for-postgresql.md
+++ b/docs/user_docs/kubeblocks-for-postgresql/kubeblocks-for-postgresql.md
@@ -7,7 +7,7 @@ sidebar_position: 1
# KubeBlocks for PostgreSQL
-This tutorial illustrates how to create and manage a PostgreSQL cluster by `kbcli`, `kubectl` or a YAML file. You can find the YAML examples in [the GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/release-0.9/examples/postgresql).
+This tutorial illustrates how to create and manage a PostgreSQL cluster by `kbcli`, `kubectl` or a YAML file. You can find the YAML examples in [the GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/postgresql).
* [Introduction](./introduction/introduction.md)
* [Cluster Management](./cluster-management/create-and-connect-a-postgresql-cluster.md)
diff --git a/docs/user_docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kubeblocks.md b/docs/user_docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kubeblocks.md
index d69106df377..f7f4b0a076b 100644
--- a/docs/user_docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kubeblocks.md
+++ b/docs/user_docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kubeblocks.md
@@ -78,107 +78,123 @@ Refer to the [Pulsar official document](https://pulsar.apache.org/docs/3.1.x/) f
## Create Pulsar cluster
-1. Create the Pulsar cluster template file `values-production.yaml` for `helm` locally.
-
- Copy the following information to the local file `values-production.yaml`.
-
- ```bash
- ## Bookies configuration
- bookies:
- resources:
- limits:
- memory: 8Gi
- requests:
- cpu: 2
- memory: 8Gi
-
- persistence:
- data:
- storageClassName: kb-default-sc
- size: 128Gi
- log:
- storageClassName: kb-default-sc
- size: 64Gi
-
- ## Zookeeper configuration
- zookeeper:
- resources:
- limits:
- memory: 2Gi
- requests:
- cpu: 1
- memory: 2Gi
-
- persistence:
- data:
- storageClassName: kb-default-sc
- size: 20Gi
- log:
- storageClassName: kb-default-sc
- size: 20Gi
-
- broker:
- replicaCount: 3
- resources:
- limits:
- memory: 8Gi
- requests:
- cpu: 2
- memory: 8Gi
+1. Create a Pulsar cluster in basic mode. For other cluster modes, check out the examples provided in [the GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/pulsar).
+
+ ```yaml
+ cat <,bookies.persistence.log.storageClassName=,zookeeper.persistence.data.storageClassName=,zookeeper.persistence.log.storageClassName= --namespace=demo
- ```
-
- You can specify the storage name ``.
-
-3. Verify the cluster created.
+ | Field | Definition |
+ |---------------------------------------|--------------------------------------|
+ | `spec.terminationPolicy` | It is the policy of cluster termination. Valid values are `DoNotTerminate`, `Delete`, `WipeOut`. For the detailed definition, you can refer to [Termination Policy](./delete-a-pulsar-cluster.md#termination-policy). |
+ | `spec.clusterDef` | It specifies the name of the ClusterDefinition to use when creating a Cluster. **Note: DO NOT UPDATE THIS FIELD**. The value must be `pulsar` to create a Pulsar Cluster. |
+ | `spec.topology` | It specifies the name of the ClusterTopology to be used when creating the Cluster. |
+ | `spec.services` | It defines a list of additional Services that are exposed by a Cluster. |
+ | `spec.componentSpecs` | It is the list of ClusterComponentSpec objects that define the individual Components that make up a Cluster. This field allows customized configuration of each component within a cluster. |
+ | `spec.componentSpecs.serviceVersion` | It specifies the version of the Service expected to be provisioned by this Component. Valid options are [2.11.2,3.0.2]. |
+ | `spec.componentSpecs.disableExporter` | It determines whether metrics exporter information is annotated on the Component's headless Service. Valid options are [true, false]. |
+ | `spec.componentSpecs.replicas` | It specifies the number of replicas of the component. |
+ | `spec.componentSpecs.resources` | It specifies the resources required by the Component. |
+ | `spec.componentSpecs.volumeClaimTemplates` | It specifies a list of PersistentVolumeClaim templates that define the storage requirements for the Component. |
+ | `spec.componentSpecs.volumeClaimTemplates.name` | It refers to the name of a volumeMount defined in `componentDefinition.spec.runtime.containers[*].volumeMounts`. |
+ | `spec.componentSpecs.volumeClaimTemplates.spec.storageClassName` | It is the name of the StorageClass required by the claim. If not specified, the StorageClass annotated with `storageclass.kubernetes.io/is-default-class=true` will be used by default. |
+ | `spec.componentSpecs.volumeClaimTemplates.spec.resources.storage` | You can set the storage size as needed. |
+
+ For more API fields and descriptions, refer to the [API Reference](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster).
+
+2. Verify the cluster created.
```bash
kubectl get cluster mycluster -n demo
diff --git a/docs/user_docs/kubeblocks-for-pulsar/cluster-management/delete-a-pulsar-cluster.md b/docs/user_docs/kubeblocks-for-pulsar/cluster-management/delete-a-pulsar-cluster.md
index da885db967d..51e4df68fdf 100644
--- a/docs/user_docs/kubeblocks-for-pulsar/cluster-management/delete-a-pulsar-cluster.md
+++ b/docs/user_docs/kubeblocks-for-pulsar/cluster-management/delete-a-pulsar-cluster.md
@@ -21,10 +21,9 @@ The termination policy determines how a cluster is deleted.
| **terminationPolicy** | **Deleting Operation** |
|:----------------------|:-------------------------------------------------|
-| `DoNotTerminate` | `DoNotTerminate` blocks delete operation. |
-| `Halt` | `Halt` deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), allowing for data preservation while stopping other operations. Halt policy is deprecated in v0.9.1 and will have same meaning as DoNotTerminate. |
-| `Delete` | `Delete` extends the Halt policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. |
-| `WipeOut` | `WipeOut` deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, especially in non-production environments, to avoid irreversible data loss. |
+| `DoNotTerminate` | `DoNotTerminate` prevents deletion of the Cluster. This policy ensures that all resources remain intact. |
+| `Delete` | `Delete` deletes Cluster resources like Pods, Services, and Persistent Volume Claims (PVCs), leading to a thorough cleanup while removing all persistent data. |
+| `WipeOut` | `WipeOut` is an aggressive policy that deletes all Cluster resources, including volume snapshots and backups in external storage. This results in complete data removal and should be used cautiously, primarily in non-production environments to avoid irreversible data loss. |
To check the termination policy, execute the following command.
diff --git a/docs/user_docs/kubeblocks-for-pulsar/kubeblocks-for-pulsar.md b/docs/user_docs/kubeblocks-for-pulsar/kubeblocks-for-pulsar.md
index cb19cd2ec36..8822b4abac8 100644
--- a/docs/user_docs/kubeblocks-for-pulsar/kubeblocks-for-pulsar.md
+++ b/docs/user_docs/kubeblocks-for-pulsar/kubeblocks-for-pulsar.md
@@ -7,7 +7,7 @@ sidebar_position: 1
# KubeBlocks for Pulsar
-This tutorial illustrates how to create and manage a Pulsar cluster by `kbcli`, `kubectl` or a YAML file. You can find the YAML examples in [the GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/release-0.9/examples/pulsar).
+This tutorial illustrates how to create and manage a Pulsar cluster by `kbcli`, `kubectl` or a YAML file. You can find the YAML examples in [the GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/pulsar).
* [Cluster Management](./cluster-management/create-pulsar-cluster-on-kubeblocks.md)
* [Configuration](./configuration/configuration.md)
diff --git a/docs/user_docs/kubeblocks-for-qdrant/manage-qdrant.md b/docs/user_docs/kubeblocks-for-qdrant/manage-qdrant.md
index 11b76d2ce83..ca6221238aa 100644
--- a/docs/user_docs/kubeblocks-for-qdrant/manage-qdrant.md
+++ b/docs/user_docs/kubeblocks-for-qdrant/manage-qdrant.md
@@ -13,7 +13,7 @@ import TabItem from '@theme/TabItem';
The popularity of generative AI (Generative AI) has aroused widespread attention and completely ignited the vector database (Vector Database) market. Qdrant (read: quadrant) is a vector similarity search engine and vector database. It provides a production-ready service with a convenient API to store, search, and manage points—vectors with an additional payload Qdrant is tailored to extended filtering support. It makes it useful for all sorts of neural-network or semantic-based matching, faceted search, and other applications.
-KubeBlocks supports the management of Qdrant. This tutorial illustrates how to create and manage a Qdrant cluster by `kbcli`, `kubectl` or a YAML file. You can find the YAML examples in [the GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/release-0.9/examples/qdrant).
+KubeBlocks supports the management of Qdrant. This tutorial illustrates how to create and manage a Qdrant cluster by `kbcli`, `kubectl` or a YAML file. You can find the YAML examples in [the GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/qdrant).
## Before you start
@@ -38,63 +38,56 @@ KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of
```yaml
cat <
-KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating a Standalone.
+KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating a Replication. KubeBlocks also supports creating a Redis cluster in other modes. You can refer to the examples provided in the [GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/redis).
```yaml
cat <
+
+
+
+ You can also find the latest example YAML file in the [KubeBlocks Addons repo](https://github.com/apecloud/kubeblocks-addons/blob/main/examples/apecloud-mysql/pod-monitor.yaml).
+
+ ```yaml
+ kubectl apply -f - <
+
+
+
+ You can also find the latest example YAML file in the [KubeBlocks Addons repo](https://github.com/apecloud/kubeblocks-addons/blob/main/examples/mysql/pod-monitor.yaml).
+
+ ```yaml
+ kubectl apply -f - <
+
+
+
+ You can also find the latest example YAML file in the [KubeBlocks Addons repo](https://github.com/apecloud/kubeblocks-addons/blob/main/examples/postgresql/pod-monitor.yaml).
+
+ ```yaml
+ kubectl apply -f - <
+
+
+
+ You can also find the latest example YAML file in the [KubeBlocks Addons repo](https://github.com/apecloud/kubeblocks-addons/blob/main/examples/redis/pod-monitor.yaml).
```yaml
kubectl apply -f - <
+
+
+
3. Access the Grafana dashboard.
Log in to the Grafana dashboard and import the dashboard.
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md b/i18n/zh-cn/user-docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md
index 6878db39579..345294de0af 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md
@@ -109,67 +109,55 @@ KubeBlocks 支持创建两种类型的 ApeCloud MySQL 集群:单机版(Stand
```yaml
cat < - `DoNotTerminate` 会阻止删除操作。
- `Halt` 会删除工作负载资源,如 statefulset 和 deployment 等,但是保留了 PVC 。
- `Delete` 在 `Halt` 的基础上进一步删除了 PVC。
- `WipeOut` 在 `Delete` 的基础上从备份存储的位置完全删除所有卷快照和快照数据。
|
- | `spec.affinity` | 为集群的 Pods 定义了一组节点亲和性调度规则。该字段可控制 Pods 在集群中节点上的分布。 |
- | `spec.affinity.podAntiAffinity` | 定义了不在同一 component 中的 Pods 的反亲和性水平。该字段决定了 Pods 以何种方式跨节点分布,以提升可用性和性能。 |
- | `spec.affinity.topologyKeys` | 用于定义 Pod 反亲和性和 Pod 分布约束的拓扑域的节点标签值。 |
- | `spec.tolerations` | 该字段为数组,用于定义集群中 Pods 的容忍,确保 Pod 可被调度到具有匹配污点的节点上。 |
- | `spec.componentSpecs` | 集群 components 列表,定义了集群 components。该字段允许对集群中的每个 component 进行自定义配置。 |
- | `spec.componentSpecs.componentDefRef` | 表示 cluster definition 中定义的 component definition 的名称,可通过执行 `kubectl get clusterdefinition postgresql -o json \| jq '.spec.componentDefs[].name'` 命令获取 component definition 名称。 |
- | `spec.componentSpecs.name` | 定义了 component 的名称。 |
- | `spec.componentSpecs.disableExporter` | 定义了是否开启监控功能。 |
+ | `spec.terminationPolicy` | 集群终止策略,有效值为 `DoNotTerminate`、`Delete` 和 `WipeOut`。具体定义可参考 [终止策略](./delete-a-postgresql-cluster.md#终止策略)。 |
+ | `spec.clusterDef` | 指定了创建集群时要使用的 ClusterDefinition 的名称。**注意**:**请勿更新此字段**。创建 PostgreSQL 集群时,该值必须为 `postgresql`。 |
+ | `spec.topology` | 指定了在创建集群时要使用的 ClusterTopology 的名称。建议值为 [replication]。 |
+ | `spec.componentSpecs` | 集群 component 列表,定义了集群 components。该字段支持自定义配置集群中每个 component。 |
+ | `spec.componentSpecs.serviceVersion` | 定义了 component 部署的服务版本。有效值为 [12.14.0,12.14.1,12.15.0,14.7.2,14.8.0,15.7.0,16.4.0] |
+ | `spec.componentSpecs.disableExporter` | 定义了是否在 component 无头服务(headless service)上标注指标 exporter 信息,是否开启监控 exporter。有效值为 [true, false]。 |
+ | `spec.componentSpecs.labels` | 指定了要覆盖或添加的标签,这些标签将应用于 component 所拥有的底层 Pod、PVC、账号和 TLS 密钥以及服务。 |
+ | `spec.componentSpecs.labels.apps.kubeblocks.postgres.patroni/scope` | PostgreSQL 的 `ComponentDefinition` 指定了环境变量 `KUBERNETES_SCOPE_LABEL=apps.kubeblocks.postgres.patroni/scope`。该变量定义了 Patroni 用于标记 Kubernetes 资源的标签键,帮助 Patroni 识别哪些资源属于指定的范围(或集群)。**注意**:**不要删除此标签**。该值必须遵循 `-postgresql` 格式。例如,如果你的集群名称是 `mycluster`,则该值应为 `mycluster-postgresql`。可按需将 `mycluster` 替换为你的实际集群名称。 |
| `spec.componentSpecs.replicas` | 定义了 component 中 replicas 的数量。 |
| `spec.componentSpecs.resources` | 定义了 component 的资源要求。 |
+ | `spec.componentSpecs.volumeClaimTemplates` | PersistentVolumeClaim 模板列表,定义 component 的存储需求。 |
+ | `spec.componentSpecs.volumeClaimTemplates.name` | 引用了在 `componentDefinition.spec.runtime.containers[*].volumeMounts` 中定义的 volumeMount 名称。 |
+ | `spec.componentSpecs.volumeClaimTemplates.spec.storageClassName` | 定义了 StorageClass 的名称。如果未指定,系统将默认使用带有 `storageclass.kubernetes.io/is-default-class=true` 注释的 StorageClass。 |
+ | `spec.componentSpecs.volumeClaimTemplates.spec.resources.storage` | 可按需配置存储容量。 |
+
+ 您可参考 [API 文档](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster),查看更多 API 字段及说明。
KubeBlocks operator 监控 `Cluster` CRD 并创建集群和全部依赖资源。您可执行以下命令获取集群创建的所有资源信息。
@@ -221,7 +217,7 @@ KubeBlocks 支持创建两种 PostgreSQL 集群:单机版(Standalone)和
如果您只有一个节点用于部署三节点集群,可在创建集群时将 `topology-keys` 设为 `null`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
```bash
- kbcli cluster create postgresql mycluster --replicas=2 --availability-policy='none' -n demo
+ kbcli cluster create postgresql mycluster --replicas=2 --topology-keys=null -n demo
```
2. 验证集群是否创建成功。
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-postgresql/cluster-management/delete-a-postgresql-cluster.md b/i18n/zh-cn/user-docs/kubeblocks-for-postgresql/cluster-management/delete-a-postgresql-cluster.md
index 0df42bf62d6..b5fc09cc540 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-postgresql/cluster-management/delete-a-postgresql-cluster.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-postgresql/cluster-management/delete-a-postgresql-cluster.md
@@ -22,8 +22,7 @@ import TabItem from '@theme/TabItem';
| **终止策略** | **删除操作** |
|:----------------------|:-------------------------------------------------------------------------------------------|
| `DoNotTerminate` | `DoNotTerminate` 禁止删除操作。 |
-| `Halt` | `Halt` 删除集群资源(如 Pods、Services 等),但保留 PVC。停止其他运维操作的同时,保留了数据。但 `Halt` 策略在 v0.9.1 中已启用,设置为 `Halt` 的效果与 `DoNotTerminate` 相同。 |
-| `Delete` | `Delete` 在 `Halt` 的基础上,删除 PVC 及所有持久数据。 |
+| `Delete` | `Delete` 删除 Pod、服务、PVC 等集群资源,删除所有持久数据。 |
| `WipeOut` | `WipeOut` 删除所有集群资源,包括外部存储中的卷快照和备份。使用该策略将会删除全部数据,特别是在非生产环境,该策略将会带来不可逆的数据丢失。请谨慎使用。 |
执行以下命令查看终止策略。
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-postgresql/kubeblocks-for-postgresql.md b/i18n/zh-cn/user-docs/kubeblocks-for-postgresql/kubeblocks-for-postgresql.md
index 4044866e0ab..8051a640ad6 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-postgresql/kubeblocks-for-postgresql.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-postgresql/kubeblocks-for-postgresql.md
@@ -7,7 +7,7 @@ sidebar_position: 1
# 用 KubeBlocks 管理 PostgreSQL
-本文档展示了如何通过 kbcli、kubectl 或 YAML 文件等当时创建和管理 PostgreSQL 集群。您可以在 [GitHub 仓库](https://github.com/apecloud/kubeblocks-addons/tree/release-0.9/examples/postgresql)查看 YAML 示例。
+本文档展示了如何通过 kbcli、kubectl 或 YAML 文件等方式创建和管理 PostgreSQL 集群。您可以在 [GitHub 仓库](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/postgresql)查看 YAML 示例。
* [简介](./introduction/introduction.md)
* [集群管理](./cluster-management/create-and-connect-a-postgresql-cluster.md)
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kb.md b/i18n/zh-cn/user-docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kb.md
index 42869116574..00ce785f594 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kb.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kb.md
@@ -79,103 +79,123 @@ KubeBlocks 可以通过良好的抽象快速集成新引擎,并支持 Pulsar
## 创建 Pulsar 集群
-1. 在本地创建 `helm` 使用的 Pulsar 集群模板文件 `values-production.yaml`。
-
- 将以下信息复制到本地文件 `values-production.yaml` 中。
-
- ```bash
- ## 配置 Bookies
- bookies:
- resources:
- limits:
- memory: 8Gi
- requests:
- cpu: 2
- memory: 8Gi
-
- persistence:
- data:
- storageClassName: kb-default-sc
- size: 128Gi
- log:
- storageClassName: kb-default-sc
- size: 64Gi
-
- ## 配置 Zookeeper
- zookeeper:
- resources:
- limits:
- memory: 2Gi
- requests:
- cpu: 1
- memory: 2Gi
-
- persistence:
- data:
- storageClassName: kb-default-sc
- size: 20Gi
- log:
- storageClassName: kb-default-sc
- size: 20Gi
-
- broker:
- replicaCount: 3
- resources:
- limits:
- memory: 8Gi
- requests:
- cpu: 2
- memory: 8Gi
+1. 创建基础模式的 Pulsar 集群。如需创建其他集群模式,您可查看 [GitHub 仓库中的示例](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/pulsar)。
+
+ ```yaml
+ cat <,bookies.persistence.log.storageClassName=,zookeeper.persistence.data.storageClassName=,zookeeper.persistence.log.storageClassName= --namespace demo
- ```
-
- 您可以指定存储名称 ``。
-
-3. 验证已创建的集群。
+ | 字段 | 定义 |
+ |---------------------------------------|--------------------------------------|
+ | `spec.terminationPolicy` | 集群终止策略,有效值为 `DoNotTerminate`、`Delete` 和 `WipeOut`。具体定义可参考 [终止策略](./delete-pulsar-cluster.md#终止策略)。 |
+ | `spec.clusterDef` | 指定了创建集群时要使用的 ClusterDefinition 的名称。**注意**:**请勿更新此字段**。创建 Pulsar 集群时,该值必须为 `pulsar`。 |
+ | `spec.topology` | 指定了在创建集群时要使用的 ClusterTopology 的名称。 |
+ | `spec.services` | 定义了集群暴露的额外服务列表。 |
+ | `spec.componentSpecs` | 集群 component 列表,定义了集群 components。该字段支持自定义配置集群中每个 component。 |
+ | `spec.componentSpecs.serviceVersion` | 定义了 component 部署的服务版本。有效值为 [2.11.2,3.0.2]。 |
+ | `spec.componentSpecs.disableExporter` | 定义了是否在 component 无头服务(headless service)上标注指标 exporter 信息,是否开启监控 exporter。有效值为 [true, false]。 |
+ | `spec.componentSpecs.replicas` | 定义了 component 中 replicas 的数量。 |
+ | `spec.componentSpecs.resources` | 定义了 component 的资源要求。 |
+ | `spec.componentSpecs.volumeClaimTemplates` | PersistentVolumeClaim 模板列表,定义 component 的存储需求。 |
+ | `spec.componentSpecs.volumeClaimTemplates.name` | 引用了在 `componentDefinition.spec.runtime.containers[*].volumeMounts` 中定义的 volumeMount 名称。 |
+ | `spec.componentSpecs.volumeClaimTemplates.spec.storageClassName` | 定义了 StorageClass 的名称。如果未指定,系统将默认使用带有 `storageclass.kubernetes.io/is-default-class=true` 注释的 StorageClass。 |
+ | `spec.componentSpecs.volumeClaimTemplates.spec.resources.storage` | 可按需配置存储容量。 |
+
+ 您可参考 [API 文档](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster),查看更多 API 字段及说明。
+
+2. 验证已创建的集群。
```bash
kubectl get cluster mycluster -n demo
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-pulsar/cluster-management/delete-pulsar-cluster.md b/i18n/zh-cn/user-docs/kubeblocks-for-pulsar/cluster-management/delete-pulsar-cluster.md
index ff2fc29cc34..9c7369aac3a 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-pulsar/cluster-management/delete-pulsar-cluster.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-pulsar/cluster-management/delete-pulsar-cluster.md
@@ -22,8 +22,7 @@ import TabItem from '@theme/TabItem';
| **终止策略** | **删除操作** |
|:----------------------|:-------------------------------------------------------------------------------------------|
| `DoNotTerminate` | `DoNotTerminate` 禁止删除操作。 |
-| `Halt` | `Halt` 删除集群资源(如 Pods、Services 等),但保留 PVC。停止其他运维操作的同时,保留了数据。但 `Halt` 策略在 v0.9.1 中已删除,设置为 `Halt` 的效果与 `DoNotTerminate` 相同。 |
-| `Delete` | `Delete` 在 `Halt` 的基础上,删除 PVC 及所有持久数据。 |
+| `Delete` | `Delete` 删除 Pod、服务、PVC 等集群资源,删除所有持久数据。 |
| `WipeOut` | `WipeOut` 删除所有集群资源,包括外部存储中的卷快照和备份。使用该策略将会删除全部数据,特别是在非生产环境,该策略将会带来不可逆的数据丢失。请谨慎使用。 |
执行以下命令查看终止策略。
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-pulsar/kubeblocks-for-pulsar.md b/i18n/zh-cn/user-docs/kubeblocks-for-pulsar/kubeblocks-for-pulsar.md
index d0c9862f207..d30f0f523ae 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-pulsar/kubeblocks-for-pulsar.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-pulsar/kubeblocks-for-pulsar.md
@@ -7,7 +7,7 @@ sidebar_position: 1
# 用 KubeBlocks 管理 Pulsar
-本文档展示了如何通过 kbcli、kubectl 或 YAML 文件等当时创建和管理 Pulsar 集群。您可以在 [GitHub 仓库](https://github.com/apecloud/kubeblocks-addons/tree/release-0.9/examples/pulsar)查看 YAML 示例。
+本文档展示了如何通过 kbcli、kubectl 或 YAML 文件等方式创建和管理 Pulsar 集群。您可以在 [GitHub 仓库](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/pulsar)查看 YAML 示例。
* [集群管理](./cluster-management/create-pulsar-cluster-on-kb.md)
* [配置](./configuration/configuration.md)
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-qdrant/manage-qdrant.md b/i18n/zh-cn/user-docs/kubeblocks-for-qdrant/manage-qdrant.md
index bcfce4fec33..2ed40bad67c 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-qdrant/manage-qdrant.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-qdrant/manage-qdrant.md
@@ -15,7 +15,7 @@ import TabItem from '@theme/TabItem';
Qdrant(读作:quadrant)是向量相似性搜索引擎和向量数据库。它提供了生产可用的服务和便捷的 API,用于存储、搜索和管理点(即带有额外负载的向量)。Qdrant 专门针对扩展过滤功能进行了优化,使其在各种神经网络或基于语义的匹配、分面搜索以及其他应用中充分发挥作用。
-目前,KubeBlocks 支持 Qdrant 的管理和运维。本文档展示了如何通过 kbcli、kubectl 或 YAML 文件等当时创建和管理 Qdrant 集群。您可以在 [GitHub 仓库](https://github.com/apecloud/kubeblocks-addons/tree/release-0.9/examples/qdrant)查看 YAML 示例。
+目前,KubeBlocks 支持 Qdrant 的管理和运维。本文档展示了如何通过 kbcli、kubectl 或 YAML 文件等方式创建和管理 Qdrant 集群。您可以在 [GitHub 仓库](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/qdrant)查看 YAML 示例。
## 开始之前
@@ -48,63 +48,56 @@ KubeBlocks 通过 `Cluster` 定义集群。以下是创建 Qdrant 集群的示
```yaml
cat <
+
+
+
+ 您也可以在 [KubeBlocks Addons 仓库](https://github.com/apecloud/kubeblocks-addons/blob/main/examples/apecloud-mysql/pod-monitor.yaml)中查看最新版本示例 YAML 文件。
+
+ ```yaml
+ kubectl apply -f - <
+
+
+
+ 您也可以在 [KubeBlocks Addons 仓库](https://github.com/apecloud/kubeblocks-addons/blob/main/examples/mysql/pod-monitor.yaml)中查看最新版本示例 YAML 文件。
+
+ ```yaml
+ kubectl apply -f - <
+
+
+
+ 您也可以在 [KubeBlocks Addons 仓库](https://github.com/apecloud/kubeblocks-addons/blob/main/examples/postgresql/pod-monitor.yaml)中查看最新版本示例 YAML 文件。
```yaml
kubectl apply -f - <
+
+
+
+ 您也可以在 [KubeBlocks Addons 仓库](https://github.com/apecloud/kubeblocks-addons/blob/main/examples/redis/pod-monitor.yaml)中查看最新版本示例 YAML 文件。
+
+ ```yaml
+ kubectl apply -f - <
+
+
+
3. 连接 Grafana 大盘.
登录 Grafana 大盘,并导入大盘。
From 1770f678821726682962cde54de2b47e0a36c2f0 Mon Sep 17 00:00:00 2001
From: yuanyuan zhang
Date: Mon, 30 Dec 2024 16:15:56 +0800
Subject: [PATCH 2/6] docs: update docs
---
.../cluster-management/create-a-kafka-cluster.md | 2 +-
docs/user_docs/observability/monitor-database.md | 6 ------
.../cluster-management/create-a-kafka-cluster.md | 2 +-
i18n/zh-cn/user-docs/kubeblocks-for-milvus/manage-milvus.md | 2 +-
i18n/zh-cn/user-docs/observability/monitor-database.md | 6 ------
5 files changed, 3 insertions(+), 15 deletions(-)
diff --git a/docs/user_docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md b/docs/user_docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md
index 0bce2fdab82..8e841acc8a0 100644
--- a/docs/user_docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md
+++ b/docs/user_docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md
@@ -90,7 +90,7 @@ This document shows how to create a Kafka cluster.
env:
- name: KB_KAFKA_BROKER_HEAP # use this ENV to set BROKER HEAP
value: "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64"
- - name: KB_KAFKA_CONTROLLER_HEAP # use this ENV to set CONTOLLER_HEAP
+ - name: KB_KAFKA_CONTROLLER_HEAP # use this ENV to set CONTROLLER_HEAP
value: "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64"
- name: KB_BROKER_DIRECT_POD_ACCESS # set to FALSE for node-port
value: "true"
diff --git a/docs/user_docs/observability/monitor-database.md b/docs/user_docs/observability/monitor-database.md
index c9c03aaaea5..b5de408532e 100644
--- a/docs/user_docs/observability/monitor-database.md
+++ b/docs/user_docs/observability/monitor-database.md
@@ -135,8 +135,6 @@ spec:
componentSpecs:
- name: postgresql
componentDefRef: postgresql
- enabledLogs:
- - running
disableExporter: true # Set to `false` to enable exporter
replicas: 2
resources:
@@ -208,10 +206,6 @@ Edit the value of `disableExporter`.
componentSpecs:
- name: mysql
componentDefRef: mysql
- enabledLogs:
- - error
- - general
- - slow
disableExporter: true # Set to `false` to enable exporter
...
```
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md b/i18n/zh-cn/user-docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md
index eb179e35f07..b2cfc921d71 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md
@@ -92,7 +92,7 @@ import TabItem from '@theme/TabItem';
env:
- name: KB_KAFKA_BROKER_HEAP # use this ENV to set BROKER HEAP
value: "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64"
- - name: KB_KAFKA_CONTROLLER_HEAP # use this ENV to set CONTOLLER_HEAP
+ - name: KB_KAFKA_CONTROLLER_HEAP # use this ENV to set CONTROLLER_HEAP
value: "-XshowSettings:vm -XX:MaxRAMPercentage=100 -Ddepth=64"
- name: KB_BROKER_DIRECT_POD_ACCESS # set to FALSE for node-port
value: "true"
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-milvus/manage-milvus.md b/i18n/zh-cn/user-docs/kubeblocks-for-milvus/manage-milvus.md
index 473d8843f87..0b0deef45cc 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-milvus/manage-milvus.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-milvus/manage-milvus.md
@@ -270,7 +270,7 @@ EOF
|---------------------------------------|--------------------------------------|
| `spec.terminationPolicy` | 集群终止策略,有效值为 `DoNotTerminate`、`Delete` 和 `WipeOut`。具体定义可参考 [终止策略](#终止策略)。 |
| `spec.clusterDef` | 指定了创建集群时要使用的 ClusterDefinition 的名称。**注意**:**请勿更新此字段**。创建 Milvus 集群时,该值必须为 `milvus`。 |
-| `spec.topology` | 指定了在创建集群时要使用的 ClusterTopology 的名称。可选值为[stanalone, cluster]。 |
+| `spec.topology` | 指定了在创建集群时要使用的 ClusterTopology 的名称。可选值为[standalone, cluster]。 |
| `spec.componentSpecs` | 集群 component 列表,定义了集群 components。该字段支持自定义配置集群中每个 component。 |
| `spec.componentSpecs.serviceRefs` | 定义了 component 的 ServiceRef 列表。 |
| `spec.componentSpecs.serviceRefs.name` | 指定了服务引用声明的标识符,该标识符在 `componentDefinition.spec.serviceRefDeclarations[*].name` 中定义。 |
diff --git a/i18n/zh-cn/user-docs/observability/monitor-database.md b/i18n/zh-cn/user-docs/observability/monitor-database.md
index a09925e4ea7..2b6d99b26ad 100644
--- a/i18n/zh-cn/user-docs/observability/monitor-database.md
+++ b/i18n/zh-cn/user-docs/observability/monitor-database.md
@@ -135,8 +135,6 @@ spec:
componentSpecs:
- name: postgresql
componentDefRef: postgresql
- enabledLogs:
- - running
disableExporter: true # 将参数值设为 `false`,开启 exporter
replicas: 2
resources:
@@ -212,10 +210,6 @@ kubectl edit cluster mycluster -n demo
componentSpecs:
- name: mysql
componentDefRef: mysql
- enabledLogs:
- - error
- - general
- - slow
disableExporter: true # 将参数值设为 `false`,开启 exporter
```
From dccaf07cda255fb4fc79c2fc8de5949ee79d02b8 Mon Sep 17 00:00:00 2001
From: yuanyuan zhang
Date: Tue, 31 Dec 2024 18:40:15 +0800
Subject: [PATCH 3/6] docs: fix bugs
---
...e-and-connect-an-apecloud-mysql-cluster.md | 12 +-
.../manage-elasticsearch.md | 118 +++++++++++++-----
.../create-a-kafka-cluster.md | 4 +-
.../kubeblocks-for-milvus/manage-milvus.md | 17 ++-
...create-and-connect-to-a-mongodb-cluster.md | 4 +-
.../create-and-connect-a-mysql-cluster.md | 8 +-
...create-and-connect-a-postgresql-cluster.md | 8 +-
.../create-pulsar-cluster-on-kubeblocks.md | 2 +-
.../kubeblocks-for-qdrant/manage-qdrant.md | 6 +-
.../manage-rabbitmq.md | 2 +-
.../create-and-connect-a-redis-cluster.md | 6 +-
.../manage-starrocks.md | 3 +-
.../observability/monitor-database.md | 58 ++++-----
...e-and-connect-an-apecloud-mysql-cluster.md | 12 +-
.../manage-elasticsearch.md | 104 +++++++++++----
.../create-a-kafka-cluster.md | 4 +-
.../kubeblocks-for-milvus/manage-milvus.md | 10 +-
...create-and-connect-to-a-mongodb-cluster.md | 8 +-
.../create-and-connect-a-mysql-cluster.md | 10 +-
...create-and-connect-a-postgresql-cluster.md | 8 +-
.../kubeblocks-for-qdrant/manage-qdrant.md | 12 +-
.../manage-rabbitmq.md | 8 +-
.../create-and-connect-to-a-redis-cluster.md | 4 +-
.../manage-starrocks.md | 10 +-
.../observability/monitor-database.md | 58 ++++-----
25 files changed, 276 insertions(+), 220 deletions(-)
diff --git a/docs/user_docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md b/docs/user_docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md
index 8768cd5f446..ae574556099 100644
--- a/docs/user_docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md
+++ b/docs/user_docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md
@@ -105,7 +105,7 @@ KubeBlocks supports creating two types of ApeCloud MySQL clusters: Standalone an
KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating a RaftGroup Cluster.
- If you only have one node for deploying a RaftGroup Cluster, set `spec.affinity.topologyKeys` as `null`. But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
+ If you only have one node for deploying a RaftGroup Cluster, configure the cluster affinity by setting `spec.schedulingPolicy` or `spec.componentSpecs.schedulingPolicy`. For details, you can refer to the [API docs](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy). But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
```yaml
cat <
-KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating an Elasticsearch cluster with single node. For more examples, refer to [the GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/elasticsearch).
+KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating an Elasticsearch cluster with multiple nodes. For more examples, refer to [the GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/elasticsearch).
+
+If you only have one node for deploying a cluster with multiple nodes, configure the cluster affinity by setting `spec.schedulingPolicy` or `spec.componentSpecs.schedulingPolicy`. For details, you can refer to the [API docs](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy). But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
```yaml
cat <
-1. Create a Kafka cluster. If you only have one node for deploying a cluster with multiple replicas, set `spec.componentSpecs.affinity.topologyKeys` as `null`. But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
+1. Create a Kafka cluster. If you only have one node for deploying a cluster with multiple replicas, configure the cluster affinity by setting `spec.schedulingPolicy` or `spec.componentSpecs.schedulingPolicy`. For details, you can refer to the [API docs](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy). But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
For more cluster examples, refer to [the GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/kafka).
@@ -253,6 +253,8 @@ This document shows how to create a Kafka cluster.
kbcli cluster create kafka -h
```
+ If you only have one node for deploying a cluster with multiple replicas, you can configure the cluster affinity by setting `--pod-anti-afffinity`, `--tolerations`, and `--topology-keys` when creating a cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
+
2. Verify whether this cluster is created successfully.
```bash
diff --git a/docs/user_docs/kubeblocks-for-milvus/manage-milvus.md b/docs/user_docs/kubeblocks-for-milvus/manage-milvus.md
index b426f73d6da..027af1af04b 100644
--- a/docs/user_docs/kubeblocks-for-milvus/manage-milvus.md
+++ b/docs/user_docs/kubeblocks-for-milvus/manage-milvus.md
@@ -34,7 +34,8 @@ This tutorial illustrates how to create and manage a Milvus cluster by `kbcli`,
-KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating a Milvus cluster.
+KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating a Milvus cluster. If you only have one node for deploying a cluster with multiple replicas, configure the cluster affinity by setting `spec.schedulingPolicy` or `spec.componentSpecs.schedulingPolicy`. For details, you can refer to the [API docs](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy). But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
+
```yaml
cat <
-1. Create a PostgreSQL cluster.
+1. Create a PostgreSQL cluster. If you only have one node for deploying a Replication Cluster, configure the cluster affinity by setting `spec.schedulingPolicy` or `spec.componentSpecs.schedulingPolicy`. For details, you can refer to the [API docs](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy). But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating a Replication.
@@ -211,11 +211,7 @@ KubeBlocks supports creating two types of PostgreSQL clusters: Standalone and Re
kbcli cluster create postgresql mycluster --replicas=2 -n demo
```
- If you only have one node for deploying a Replication Cluster, set the `--topology-keys` as `null` when creating a Replication Cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
-
- ```bash
- kbcli cluster create postgresql mycluster --replicas=2 --topology-keys=null -n demo
- ```
+ If you only have one node for deploying a RaftGroup Cluster, you can configure the cluster affinity by setting `--pod-anti-afffinity`, `--tolerations`, and `--topology-keys` when creating a Replication Cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
2. Verify whether this cluster is created successfully.
diff --git a/docs/user_docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kubeblocks.md b/docs/user_docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kubeblocks.md
index f7f4b0a076b..fd6ea875021 100644
--- a/docs/user_docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kubeblocks.md
+++ b/docs/user_docs/kubeblocks-for-pulsar/cluster-management/create-pulsar-cluster-on-kubeblocks.md
@@ -78,7 +78,7 @@ Refer to the [Pulsar official document](https://pulsar.apache.org/docs/3.1.x/) f
## Create Pulsar cluster
-1. Create a Pulsar cluster in basic mode. For other cluster modes, check out the examples provided in [the GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/pulsar).
+1. Create a Pulsar cluster in basic mode. For other cluster modes, check out the examples provided in [the GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/pulsar). If you only have one node for deploying a Pulsar Cluster, configure the cluster affinity by setting `spec.schedulingPolicy` or `spec.componentSpecs.schedulingPolicy`. For details, you can refer to the [API docs](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy). But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
```yaml
cat <
-KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating a Qdrant Replication cluster. Primary and Secondary are distributed on different nodes by default. But if you only have one node for deploying a Replication Cluster, set `spec.affinity.topologyKeys` as `null`.
+KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating a Qdrant Replication cluster. Primary and Secondary are distributed on different nodes by default. But if you only have one node for deploying a Replication Cluster, configure the cluster affinity by setting `spec.schedulingPolicy` or `spec.componentSpecs.schedulingPolicy`. For details, you can refer to the [API docs](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy). But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
```yaml
cat <
-KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating a Replcation. KubeBlocks also supports creating a Redis cluster in other modes. You can refer to the examples provided in the [GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/redis).
+KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating a Replication cluster. KubeBlocks also supports creating a Redis cluster in other modes. You can refer to the examples provided in the [GitHub repository](https://github.com/apecloud/kubeblocks-addons/tree/main/examples/redis).
+
+If you only have one node for deploying a Replication Cluster, configure the cluster affinity by setting `spec.schedulingPolicy` or `spec.componentSpecs.schedulingPolicy`. For details, you can refer to the [API docs](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy). But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
```yaml
cat <
-KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating a StarRocks cluster.
+KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating a StarRocks cluster. If you only have one node for deploying a cluster with multiple replicas, configure the cluster affinity by setting `spec.schedulingPolicy` or `spec.componentSpecs.schedulingPolicy`. For details, you can refer to the [API docs](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy). But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
+
```yaml
cat <
-KubeBlocks 通过 `Cluster` 定义集群。以下是创建 Elasticsearch 集群的示例。Pod 默认分布在不同节点。但如果您只有一个节点可用于部署集群,可将 `spec.affinity.topologyKeys` 设置为 `null`。
-
-:::note
-
-生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
-
-:::
+KubeBlocks 通过 `Cluster` 定义集群。以下是创建 Elasticsearch 集群的示例。Pod 默认分布在不同节点。如果您只有一个节点可用于部署集群,可设置 `spec.schedulingPolicy` 或 `spec.componentSpecs.schedulingPolicy`,具体可参考 [API 文档](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy)。但生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
```yaml
cat <
-KubeBlocks 通过 `Cluster` 定义集群。以下是创建 Milvus 集群的示例。Pod 默认分布在不同节点。但如果您只有一个节点可用于部署集群,可将 `spec.affinity.topologyKeys` 设置为 `null`。
-
-:::note
-
-生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
-
-:::
+KubeBlocks 通过 `Cluster` 定义集群。以下是创建 Milvus 集群的示例。Pod 默认分布在不同节点。如果您只有一个节点可用于部署多副本集群,可设置 `spec.schedulingPolicy` 或 `spec.componentSpecs.schedulingPolicy`,具体可参考 [API 文档](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy)。但生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
```yaml
cat <
@@ -192,11 +192,7 @@ KubeBlocks 支持创建两种 MongoDB 集群:单机版(Standalone)和主
kbcli cluster create mongodb mycluster --mode replicaset -n demo
```
- 如果只有一个节点用于部署主备版集群,请在创建集群时将 `topology-keys` 设置为 `null`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
-
- ```bash
- kbcli cluster create mongodb mycluster --mode replicaset --topology-keys null -n demo
- ```
+ 如果您只有一个节点用于部署主备版集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-afffinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
2. 验证集群是否创建成功。
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-mysql-community-edition/cluster-management/create-and-connect-a-mysql-cluster.md b/i18n/zh-cn/user-docs/kubeblocks-for-mysql-community-edition/cluster-management/create-and-connect-a-mysql-cluster.md
index 7db741d2322..9f40690a307 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-mysql-community-edition/cluster-management/create-and-connect-a-mysql-cluster.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-mysql-community-edition/cluster-management/create-and-connect-a-mysql-cluster.md
@@ -103,10 +103,10 @@ KubeBlocks 支持创建两种类型的 MySQL 集群:单机版(Standalone)
1. 创建 MySQL 集群。
-
+
KubeBlocks 通过 `Cluster` 定义集群。以下是创建 MySQL 主备版的示例。
- 如果您只有一个节点可用于部署集群版,可将 `spec.affinity.topologyKeys` 设置为 `null`。但生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
+ 如果您只有一个节点可用于部署主备版集群,可设置 `spec.schedulingPolicy` 或 `spec.componentSpecs.schedulingPolicy`,具体可参考 [API 文档](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy)。但生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
```yaml
cat <
-KubeBlocks 通过 `Cluster` 定义集群。以下是创建 Qdrant 集群的示例。Pod 默认分布在不同节点。但如果您只有一个节点可用于部署集群,可将 `spec.affinity.topologyKeys` 设置为 `null`。
-
-:::note
-
-生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
-
-:::
+KubeBlocks 通过 `Cluster` 定义集群。以下是创建 Qdrant 集群的示例。Pod 默认分布在不同节点。如果您只有一个节点可用于部署多副本集群,可设置 `spec.schedulingPolicy` 或 `spec.componentSpecs.schedulingPolicy`,具体可参考 [API 文档](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy)。但生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
```yaml
cat <
@@ -217,6 +217,8 @@ KubeBlocks 支持创建两种 Redis 集群:单机版(Standalone)和主备
kbcli cluster create redis -h
```
+ 如果您只有一个节点用于部署多副本集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-afffinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
+
2. 验证集群是否创建成功。
```bash
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-starrocks/manage-starrocks.md b/i18n/zh-cn/user-docs/kubeblocks-for-starrocks/manage-starrocks.md
index 05a90326fe9..1a07a38b58e 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-starrocks/manage-starrocks.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-starrocks/manage-starrocks.md
@@ -29,13 +29,7 @@ StarRocks 是一款高性能分析型数据仓库,使用向量化、MPP 架构
-KubeBlocks 通过 `Cluster` 定义集群。以下是创建 StarRocks 集群的示例。Pod 默认分布在不同节点。但如果您只有一个节点可用于部署集群,可将 `spec.affinity.topologyKeys` 设置为 `null`。
-
-:::note
-
-生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
-
-:::
+KubeBlocks 通过 `Cluster` 定义集群。以下是创建 StarRocks 集群的示例。Pod 默认分布在不同节点。如果您只有一个节点可用于部署多副本集群,可设置 `spec.schedulingPolicy` 或 `spec.componentSpecs.schedulingPolicy`,具体可参考 [API 文档](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy)。但生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
```yaml
cat <
Date: Mon, 6 Jan 2025 15:36:58 +0800
Subject: [PATCH 4/6] docs: fix bugs
---
...e-and-connect-an-apecloud-mysql-cluster.md | 15 ++-
.../manage-elasticsearch.md | 10 +-
.../create-a-kafka-cluster.md | 16 +++-
.../kubeblocks-for-milvus/manage-milvus.md | 92 -------------------
...create-and-connect-to-a-mongodb-cluster.md | 11 ++-
.../create-and-connect-a-mysql-cluster.md | 11 ++-
...create-and-connect-a-postgresql-cluster.md | 11 ++-
.../kubeblocks-for-qdrant/manage-qdrant.md | 11 ++-
.../create-and-connect-a-redis-cluster.md | 10 +-
.../manage-starrocks.md | 78 ----------------
...e-and-connect-an-apecloud-mysql-cluster.md | 15 ++-
.../manage-elasticsearch.md | 10 +-
.../create-a-kafka-cluster.md | 14 ++-
.../kubeblocks-for-milvus/manage-milvus.md | 89 ------------------
...create-and-connect-to-a-mongodb-cluster.md | 11 ++-
.../create-and-connect-a-mysql-cluster.md | 11 ++-
...create-and-connect-a-postgresql-cluster.md | 11 ++-
.../kubeblocks-for-qdrant/manage-qdrant.md | 11 ++-
.../create-and-connect-to-a-redis-cluster.md | 10 +-
.../manage-starrocks.md | 67 --------------
20 files changed, 162 insertions(+), 352 deletions(-)
diff --git a/docs/user_docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md b/docs/user_docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md
index ae574556099..6ce4662b8a0 100644
--- a/docs/user_docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md
+++ b/docs/user_docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md
@@ -197,16 +197,25 @@ KubeBlocks supports creating two types of ApeCloud MySQL clusters: Standalone an
Create a Standalone.
```bash
- kbcli cluster create apecloud-mysql mycluster --set mode='standalone' --namespace demo
+ kbcli cluster create apecloud-mysql mycluster --mode='standalone' --namespace demo
```
Create a RaftGroup Cluster.
```bash
- kbcli cluster create apecloud-mysql mycluster --set mode='raftGroup' --namespace demo
+ kbcli cluster create apecloud-mysql mycluster --mode='raftGroup' --namespace demo
```
- If you only have one node for deploying a RaftGroup Cluster, you can configure the cluster affinity by setting `--pod-anti-afffinity`, `--tolerations`, and `--topology-keys` when creating a RaftGroup Cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
+ If you only have one node for deploying a RaftGroup Cluster, you can configure the cluster affinity by setting `--pod-anti-affinity`, `--tolerations`, and `--topology-keys` when creating a RaftGroup Cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability. For example,
+
+ ```bash
+ kbcli cluster create apecloud-mysql mycluster \
+ --mode='raftGroup' \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
+ ```
2. Verify whether this cluster is created successfully.
diff --git a/docs/user_docs/kubeblocks-for-elasticsearch/manage-elasticsearch.md b/docs/user_docs/kubeblocks-for-elasticsearch/manage-elasticsearch.md
index 66acac2943b..ad88ceb4706 100644
--- a/docs/user_docs/kubeblocks-for-elasticsearch/manage-elasticsearch.md
+++ b/docs/user_docs/kubeblocks-for-elasticsearch/manage-elasticsearch.md
@@ -171,7 +171,15 @@ kubectl get cluster mycluster -n demo -o yaml
kbcli cluster create elasticsearch -h
```
- If you only have one node for deploying a cluster with multiple nodes, you can configure the cluster affinity by setting `--pod-anti-afffinity`, `--tolerations`, and `--topology-keys` when creating a cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
+ If you only have one node for deploying a cluster with multiple nodes and replicas, you can configure the cluster affinity by setting `--pod-anti-affinity`, `--tolerations`, and `--topology-keys` when creating a cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability. For example,
+
+ ```bash
+ kbcli cluster create elasticsearch mycluster \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
+ ```
2. Check whether the cluster is created.
diff --git a/docs/user_docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md b/docs/user_docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md
index cde6aa6048d..ed074c1a12e 100644
--- a/docs/user_docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md
+++ b/docs/user_docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md
@@ -212,7 +212,7 @@ This document shows how to create a Kafka cluster.
| Field | Definition |
|---------------------------------------|--------------------------------------|
| `spec.terminationPolicy` | It is the policy of cluster termination. Valid values are `DoNotTerminate`, `Delete`, `WipeOut`. For the detailed definition, you can refer to [Termination Policy](./delete-kafka-cluster.md#termination-policy). |
- | `spec.clusterDef` | It specifies the name of the ClusterDefinition to use when creating a Cluster. **Note: DO NOT UPDATE THIS FIELD**. The value must be must be `kafaka` to create a Kafka Cluster. |
+ | `spec.clusterDef` | It specifies the name of the ClusterDefinition to use when creating a Cluster. **Note: DO NOT UPDATE THIS FIELD**. The value must be `kafka` to create a Kafka Cluster. |
| `spec.topology` | It specifies the name of the ClusterTopology to be used when creating the Cluster. Valid options are: [combined,combined_monitor,separated,separated_monitor]. |
| `spec.componentSpecs` | It is the list of ClusterComponentSpec objects that define the individual Components that make up a Cluster. This field allows customized configuration of each component within a cluster. |
| `spec.componentSpecs.replicas` | It specifies the amount of replicas of the component. |
@@ -239,7 +239,7 @@ This document shows how to create a Kafka cluster.
1. Create a Kafka cluster.
- The cluster creation command is simply `kbcli cluster create`. Further, you can customize your cluster resources as demanded by using the `--set` flag.
+ The cluster creation command is simply `kbcli cluster create`.
```bash
kbcli cluster create kafka mycluster -n demo
@@ -253,7 +253,17 @@ This document shows how to create a Kafka cluster.
kbcli cluster create kafka -h
```
- If you only have one node for deploying a cluster with multiple replicas, you can configure the cluster affinity by setting `--pod-anti-afffinity`, `--tolerations`, and `--topology-keys` when creating a cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
+ If you only have one node for deploying a cluster with multiple replicas, you can configure the cluster affinity by setting `--pod-anti-affinity`, `--tolerations`, and `--topology-keys` when creating a cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability. For example,
+
+ ```bash
+ kbcli cluster create kafka mycluster \
+ --mode='combined' \
+ --replicas=3 \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
+ ```
2. Verify whether this cluster is created successfully.
diff --git a/docs/user_docs/kubeblocks-for-milvus/manage-milvus.md b/docs/user_docs/kubeblocks-for-milvus/manage-milvus.md
index 027af1af04b..f463ed3f40e 100644
--- a/docs/user_docs/kubeblocks-for-milvus/manage-milvus.md
+++ b/docs/user_docs/kubeblocks-for-milvus/manage-milvus.md
@@ -30,13 +30,8 @@ This tutorial illustrates how to create and manage a Milvus cluster by `kbcli`,
## Create a cluster
-
-
-
-
KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating a Milvus cluster. If you only have one node for deploying a cluster with multiple replicas, configure the cluster affinity by setting `spec.schedulingPolicy` or `spec.componentSpecs.schedulingPolicy`. For details, you can refer to the [API docs](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy). But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
-
```yaml
cat <
-
-
-
-***Steps***
-
-1. Execute the following command to create a Milvus cluster.
-
- ```bash
- kbcli cluster create mycluster --cluster-definition=milvus-2.3.2 -n demo
- ```
-
- If you want to customize your cluster specifications, `kbcli` provides various options, such as setting cluster version, termination policy, CPU, and memory. You can view these options by adding `--help` or `-h` flag.
-
- ```bash
- kbcli cluster create milvus --help
-
- kbcli cluster create milvus -h
- ```
-
- If you only have one node for deploying a cluster with multiple replicas, you can configure the cluster affinity by setting `--pod-anti-afffinity`, `--tolerations`, and `--topology-keys` when creating a cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
-
-2. Check whether the cluster is created successfully.
-
- ```bash
- kbcli cluster list -n demo
- >
- NAME NAMESPACE CLUSTER-DEFINITION VERSION TERMINATION-POLICY STATUS CREATED-TIME
- mycluster demo milvus-2.3.2 Delete Running Jul 05,2024 17:35 UTC+0800
- ```
-
-3. Check the cluster information.
-
- ```bash
- kbcli cluster describe mycluster -n demo
- >
- Name: milvus Created Time: Jul 05,2024 17:35 UTC+0800
- NAMESPACE CLUSTER-DEFINITION VERSION STATUS TERMINATION-POLICY
- demo milvus-2.3.2 Running Delete
-
- Endpoints:
- COMPONENT MODE INTERNAL EXTERNAL
- milvus ReadWrite milvus-milvus.default.svc.cluster.local:19530
- minio ReadWrite milvus-minio.default.svc.cluster.local:9000
- proxy ReadWrite milvus-proxy.default.svc.cluster.local:19530
- milvus-proxy.default.svc.cluster.local:9091
-
- Topology:
- COMPONENT INSTANCE ROLE STATUS AZ NODE CREATED-TIME
- etcd milvus-etcd-0 Running Jul 05,2024 17:35 UTC+0800
- minio milvus-minio-0 Running Jul 05,2024 17:35 UTC+0800
- milvus milvus-milvus-0 Running Jul 05,2024 17:35 UTC+0800
- indexnode milvus-indexnode-0 Running Jul 05,2024 17:35 UTC+0800
- mixcoord milvus-mixcoord-0 Running Jul 05,2024 17:35 UTC+0800
- querynode milvus-querynode-0 Running Jul 05,2024 17:35 UTC+0800
- datanode milvus-datanode-0 Running Jul 05,2024 17:35 UTC+0800
- proxy milvus-proxy-0 Running Jul 05,2024 17:35 UTC+0800
-
- Resources Allocation:
- COMPONENT DEDICATED CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS
- milvus false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
- etcd false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
- minio false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
- proxy false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
- mixcoord false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
- datanode false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
- indexnode false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
- querynode false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
-
- Images:
- COMPONENT TYPE IMAGE
- milvus milvus milvusdb/milvus:v2.3.2
- etcd etcd docker.io/milvusdb/etcd:3.5.5-r2
- minio minio docker.io/minio/minio:RELEASE.2022-03-17T06-34-49Z
- proxy proxy milvusdb/milvus:v2.3.2
- mixcoord mixcoord milvusdb/milvus:v2.3.2
- datanode datanode milvusdb/milvus:v2.3.2
- indexnode indexnode milvusdb/milvus:v2.3.2
- querynode querynode milvusdb/milvus:v2.3.2
-
- Show cluster events: kbcli cluster list-events -n demo milvus
- ```
-
-
-
-
-
## Scale
Currently, KubeBlocks supports vertically scaling a Milvus cluster.
diff --git a/docs/user_docs/kubeblocks-for-mongodb/cluster-management/create-and-connect-to-a-mongodb-cluster.md b/docs/user_docs/kubeblocks-for-mongodb/cluster-management/create-and-connect-to-a-mongodb-cluster.md
index fe66027fdc2..d85d6d3dcb5 100644
--- a/docs/user_docs/kubeblocks-for-mongodb/cluster-management/create-and-connect-to-a-mongodb-cluster.md
+++ b/docs/user_docs/kubeblocks-for-mongodb/cluster-management/create-and-connect-to-a-mongodb-cluster.md
@@ -191,7 +191,16 @@ KubeBlocks supports creating two types of MongoDB clusters: Standalone and Repli
kbcli cluster create mongodb -h
```
- If you only have one node for deploying a cluster with multiple replicas, you can configure the cluster affinity by setting `--pod-anti-afffinity`, `--tolerations`, and `--topology-keys` when creating a cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
+ If you only have one node for deploying a cluster with multiple replicas, you can configure the cluster affinity by setting `--pod-anti-affinity`, `--tolerations`, and `--topology-keys` when creating a cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability. For example,
+
+ ```bash
+ kbcli cluster create mongodb mycluster \
+ --mode='replicaset' \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
+ ```
2. Verify whether this cluster is created successfully.
diff --git a/docs/user_docs/kubeblocks-for-mysql-community-edition/cluster-management/create-and-connect-a-mysql-cluster.md b/docs/user_docs/kubeblocks-for-mysql-community-edition/cluster-management/create-and-connect-a-mysql-cluster.md
index 6da43dec54f..58b2c9ede7f 100644
--- a/docs/user_docs/kubeblocks-for-mysql-community-edition/cluster-management/create-and-connect-a-mysql-cluster.md
+++ b/docs/user_docs/kubeblocks-for-mysql-community-edition/cluster-management/create-and-connect-a-mysql-cluster.md
@@ -190,7 +190,16 @@ KubeBlocks supports creating two types of MySQL clusters: Standalone and Replica
kbcli cluster create mysql -h
```
- If you only have one node for deploying a Replication Cluster, you can configure the cluster affinity by setting `--pod-anti-afffinity`, `--tolerations`, and `--topology-keys` when creating a Replication Cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
+ If you only have one node for deploying a Replication Cluster, you can configure the cluster affinity by setting `--pod-anti-affinity`, `--tolerations`, and `--topology-keys` when creating a Replication Cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability. For example,
+
+ ```bash
+ kbcli cluster create mysql mycluster \
+ --mode='replication' \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
+ ```
2. Verify whether this cluster is created successfully.
diff --git a/docs/user_docs/kubeblocks-for-postgresql/cluster-management/create-and-connect-a-postgresql-cluster.md b/docs/user_docs/kubeblocks-for-postgresql/cluster-management/create-and-connect-a-postgresql-cluster.md
index 6e51bc25d02..cb6029c9fb8 100644
--- a/docs/user_docs/kubeblocks-for-postgresql/cluster-management/create-and-connect-a-postgresql-cluster.md
+++ b/docs/user_docs/kubeblocks-for-postgresql/cluster-management/create-and-connect-a-postgresql-cluster.md
@@ -205,14 +205,17 @@ KubeBlocks supports creating two types of PostgreSQL clusters: Standalone and Re
kbcli cluster create postgresql -h
```
- For example, you can create a Replication Cluster with the `--replicas` flag.
+ If you only have one node for deploying a Replication Cluster, you can configure the cluster affinity by setting `--pod-anti-affinity`, `--tolerations`, and `--topology-keys` when creating a Replication Cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability. For example,
```bash
- kbcli cluster create postgresql mycluster --replicas=2 -n demo
+ kbcli cluster create postgresql mycluster \
+ --mode='replication' \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
```
- If you only have one node for deploying a RaftGroup Cluster, you can configure the cluster affinity by setting `--pod-anti-afffinity`, `--tolerations`, and `--topology-keys` when creating a Replication Cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
-
2. Verify whether this cluster is created successfully.
```bash
diff --git a/docs/user_docs/kubeblocks-for-qdrant/manage-qdrant.md b/docs/user_docs/kubeblocks-for-qdrant/manage-qdrant.md
index 4a8029e145b..d4b03b14647 100644
--- a/docs/user_docs/kubeblocks-for-qdrant/manage-qdrant.md
+++ b/docs/user_docs/kubeblocks-for-qdrant/manage-qdrant.md
@@ -117,7 +117,16 @@ kubectl get cluster mycluster -n demo -o yaml
kbcli cluster create qdrant -h
```
- If you only have one node for deploying a cluster with multiple replicas, you can configure the cluster affinity by setting `--pod-anti-afffinity`, `--tolerations`, and `--topology-keys` when creating a cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
+ If you only have one node for deploying a cluster with multiple replicas, you can configure the cluster affinity by setting `--pod-anti-affinity`, `--tolerations`, and `--topology-keys` when creating a cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability. For example,
+
+ ```bash
+ kbcli cluster create qdrant mycluster \
+ --replicas=3 \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
+ ```
2. Check whether the cluster is created.
diff --git a/docs/user_docs/kubeblocks-for-redis/cluster-management/create-and-connect-a-redis-cluster.md b/docs/user_docs/kubeblocks-for-redis/cluster-management/create-and-connect-a-redis-cluster.md
index ca8205e4a33..39d39bd0313 100644
--- a/docs/user_docs/kubeblocks-for-redis/cluster-management/create-and-connect-a-redis-cluster.md
+++ b/docs/user_docs/kubeblocks-for-redis/cluster-management/create-and-connect-a-redis-cluster.md
@@ -205,7 +205,15 @@ kubectl get cluster mycluster -n demo -o yaml
kbcli cluster create redis -h
```
- If you only have one node for deploying a cluster with multiple replicas, you can configure the cluster affinity by setting `--pod-anti-afffinity`, `--tolerations`, and `--topology-keys` when creating a cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
+ If you only have one node for deploying a cluster with multiple replicas, you can configure the cluster affinity by setting `--pod-anti-affinity`, `--tolerations`, and `--topology-keys` when creating a cluster. But you should note that for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability. For example,
+
+ ```bash
+ kbcli cluster create redis mycluster \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
+ ```
2. Verify whether this cluster is created successfully.
diff --git a/docs/user_docs/kubeblocks-for-starrocks/manage-starrocks.md b/docs/user_docs/kubeblocks-for-starrocks/manage-starrocks.md
index d0d085dec6b..207afb25f09 100644
--- a/docs/user_docs/kubeblocks-for-starrocks/manage-starrocks.md
+++ b/docs/user_docs/kubeblocks-for-starrocks/manage-starrocks.md
@@ -28,13 +28,8 @@ This tutorial illustrates how to create and manage a StarRocks cluster by `kbcli
## Create a cluster
-
-
-
-
KubeBlocks implements a `Cluster` CRD to define a cluster. Here is an example of creating a StarRocks cluster. If you only have one node for deploying a cluster with multiple replicas, configure the cluster affinity by setting `spec.schedulingPolicy` or `spec.componentSpecs.schedulingPolicy`. For details, you can refer to the [API docs](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy). But for a production environment, it is not recommended to deploy all replicas on one node, which may decrease the cluster availability.
-
```yaml
cat <
-
-
-
-***Steps***
-
-1. Execute the following command to create a StarRocks cluster.
-
- ```bash
- kbcli cluster create mycluster --cluster-definition=starrocks -n demo
- ```
-
- You can also create a cluster with specified CPU, memory and storage values.
-
- ```bash
- kbcli cluster create mycluster --cluster-definition=starrocks --set cpu=1,memory=2Gi,storage=10Gi -n demo
- ```
-
-:::note
-
-If you want to customize your cluster specifications, `kbcli` provides various options, such as setting cluster version, termination policy, CPU, and memory. You can view these options by adding `--help` or `-h` flag.
-
-```bash
-kbcli cluster create --help
-kbcli cluster create -h
-```
-
-:::
-
-2. Check whether the cluster is created successfully.
-
- ```bash
- kbcli cluster list -n demo
- >
- NAME NAMESPACE CLUSTER-DEFINITION VERSION TERMINATION-POLICY STATUS CREATED-TIME
- mycluster demo starrocks starrocks-3.1.1 Delete Running Jul 17,2024 19:06 UTC+0800
- ```
-
-3. Check the cluster information.
-
- ```bash
- kbcli cluster describe mycluster -n demo
- >
- Name: mycluster Created Time: Jul 17,2024 19:06 UTC+0800
- NAMESPACE CLUSTER-DEFINITION VERSION STATUS TERMINATION-POLICY
- demo starrocks starrocks-3.1.1 Running Delete
-
- Endpoints:
- COMPONENT MODE INTERNAL EXTERNAL
- fe ReadWrite mycluster-fe.default.svc.cluster.local:9030
-
- Topology:
- COMPONENT INSTANCE ROLE STATUS AZ NODE CREATED-TIME
- be mycluster-be-0 Running minikube/192.168.49.2 Jul 17,2024 19:06 UTC+0800
- fe mycluster-fe-0 Running minikube/192.168.49.2 Jul 17,2024 19:06 UTC+0800
-
- Resources Allocation:
- COMPONENT DEDICATED CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS
- fe false 1 / 1 1Gi / 1Gi data:20Gi standard
- be false 1 / 1 1Gi / 1Gi data:20Gi standard
-
- Images:
- COMPONENT TYPE IMAGE
- fe fe apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/fe-ubuntu:2.5.4
- be be apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/fe-ubuntu:2.5.4
-
- Show cluster events: kbcli cluster list-events -n demo mycluster
- ```
-
-
-
-
-
## Scale
### Scale vertically
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md b/i18n/zh-cn/user-docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md
index d472503b26b..d1d36fd1f69 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-apecloud-mysql/cluster-management/create-and-connect-an-apecloud-mysql-cluster.md
@@ -200,13 +200,22 @@ KubeBlocks 支持创建两种类型的 ApeCloud MySQL 集群:单机版(Stand
kbcli cluster create apecloud-mysql -h
```
- 例如,您可以使用 `--set mode` 指定集群形态为 `raftGroup`,创建集群版集群。
+ 例如,您可以使用 `--mode` 指定集群形态为 `raftGroup`,创建集群版集群。
```bash
- kbcli cluster create apecloud-mysql mycluster --set mode='raftGroup' --namespace demo
+ kbcli cluster create apecloud-mysql mycluster --mode='raftGroup' --namespace demo
```
- 如果您只有一个节点用于部署集群版集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-afffinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
+ 如果您只有一个节点用于部署集群版集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-affinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。例如,
+
+ ```bash
+ kbcli cluster create apecloud-mysql mycluster \
+ --mode='raftGroup' \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
+ ```
2. 验证集群是否创建成功。
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-elasticsearch/manage-elasticsearch.md b/i18n/zh-cn/user-docs/kubeblocks-for-elasticsearch/manage-elasticsearch.md
index 22bd8d937f2..7d32c962d16 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-elasticsearch/manage-elasticsearch.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-elasticsearch/manage-elasticsearch.md
@@ -163,7 +163,15 @@ kubectl get cluster mycluster -n demo -o yaml
kbcli cluster create elasticsearch -h
```
- 如果您只有一个节点用于部署多副本集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-afffinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
+ 如果您只有一个节点用于部署多副本集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-affinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。例如,
+
+ ```bash
+ kbcli cluster create elasticsearch mycluster \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
+ ```
2. 查看集群是否已创建。
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md b/i18n/zh-cn/user-docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md
index 159efa95142..b3cf28ccf5b 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-kafka/cluster-management/create-a-kafka-cluster.md
@@ -241,7 +241,7 @@ import TabItem from '@theme/TabItem';
1. 创建 Kafka 集群。
- 使用 `kbcli cluster create` 命令创建集群。您还可以使用 `--set` 参数自定义集群资源。
+ 使用 `kbcli cluster create` 命令创建集群。
```bash
kbcli cluster create kafka mycluster -n demo
@@ -254,7 +254,17 @@ import TabItem from '@theme/TabItem';
kbcli cluster create kafka -h
```
- 如果您只有一个节点用于部署多副本集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-afffinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
+ 如果您只有一个节点用于部署多副本集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-affinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。例如,
+
+ ```bash
+ kbcli cluster create kafka mycluster \
+ --mode='combined' \
+ --replicas=3 \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
+ ```
2. 验证集群是否创建成功。
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-milvus/manage-milvus.md b/i18n/zh-cn/user-docs/kubeblocks-for-milvus/manage-milvus.md
index bb38bce571c..9921d10de8d 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-milvus/manage-milvus.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-milvus/manage-milvus.md
@@ -34,10 +34,6 @@ Milvus 是高度灵活、可靠且速度极快的云原生开源矢量数据库
***步骤:***
-
-
-
-
KubeBlocks 通过 `Cluster` 定义集群。以下是创建 Milvus 集群的示例。Pod 默认分布在不同节点。如果您只有一个节点可用于部署多副本集群,可设置 `spec.schedulingPolicy` 或 `spec.componentSpecs.schedulingPolicy`,具体可参考 [API 文档](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy)。但生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
```yaml
@@ -293,91 +289,6 @@ kubectl get all,secret,rolebinding,serviceaccount -l app.kubernetes.io/instance=
kubectl get cluster mycluster -n demo -o yaml
```
-
-
-
-
-1. 创建一个 Milvus 集群。
-
- ```bash
- kbcli cluster create mycluster --cluster-definition=milvus-2.3.2 -n demo
- ```
-
- 如果您需要自定义集群规格,kbcli 也提供了诸多参数,如支持设置引擎版本、终止策略、CPU、内存规格。您可通过在命令结尾添加 `--help` 或 `-h` 来查看具体说明。比如,
-
- ```bash
- kbcli cluster create milvus --help
-
- kbcli cluster create milvus -h
- ```
-
- 如果您只有一个节点用于部署集群版集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-afffinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
-
-2. 检查集群是否已创建。
-
- ```bash
- kbcli cluster list -n demo
- >
- NAME NAMESPACE CLUSTER-DEFINITION VERSION TERMINATION-POLICY STATUS CREATED-TIME
- mycluster demo milvus-2.3.2 Delete Running Jul 05,2024 17:35 UTC+0800
- ```
-
-3. 查看集群信息。
-
- ```bash
- kbcli cluster describe mycluster -n demo
- >
- Name: milvus Created Time: Jul 05,2024 17:35 UTC+0800
- NAMESPACE CLUSTER-DEFINITION VERSION STATUS TERMINATION-POLICY
- demo milvus-2.3.2 Running Delete
-
- Endpoints:
- COMPONENT MODE INTERNAL EXTERNAL
- milvus ReadWrite milvus-milvus.default.svc.cluster.local:19530
- minio ReadWrite milvus-minio.default.svc.cluster.local:9000
- proxy ReadWrite milvus-proxy.default.svc.cluster.local:19530
- milvus-proxy.default.svc.cluster.local:9091
-
- Topology:
- COMPONENT INSTANCE ROLE STATUS AZ NODE CREATED-TIME
- etcd milvus-etcd-0 Running Jul 05,2024 17:35 UTC+0800
- minio milvus-minio-0 Running Jul 05,2024 17:35 UTC+0800
- milvus milvus-milvus-0 Running Jul 05,2024 17:35 UTC+0800
- indexnode milvus-indexnode-0 Running Jul 05,2024 17:35 UTC+0800
- mixcoord milvus-mixcoord-0 Running Jul 05,2024 17:35 UTC+0800
- querynode milvus-querynode-0 Running Jul 05,2024 17:35 UTC+0800
- datanode milvus-datanode-0 Running Jul 05,2024 17:35 UTC+0800
- proxy milvus-proxy-0 Running Jul 05,2024 17:35 UTC+0800
-
- Resources Allocation:
- COMPONENT DEDICATED CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS
- milvus false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
- etcd false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
- minio false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
- proxy false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
- mixcoord false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
- datanode false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
- indexnode false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
- querynode false 1 / 1 1Gi / 1Gi data:20Gi csi-hostpath-sc
-
- Images:
- COMPONENT TYPE IMAGE
- milvus milvus milvusdb/milvus:v2.3.2
- etcd etcd docker.io/milvusdb/etcd:3.5.5-r2
- minio minio docker.io/minio/minio:RELEASE.2022-03-17T06-34-49Z
- proxy proxy milvusdb/milvus:v2.3.2
- mixcoord mixcoord milvusdb/milvus:v2.3.2
- datanode datanode milvusdb/milvus:v2.3.2
- indexnode indexnode milvusdb/milvus:v2.3.2
- querynode querynode milvusdb/milvus:v2.3.2
-
- Show cluster events: kbcli cluster list-events -n demo milvus
- ```
-
-
-
-
-
## 扩缩容
当前,KubeBlocks 支持垂直扩缩容 Milvus 集群。
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-mongodb/cluster-management/create-and-connect-to-a-mongodb-cluster.md b/i18n/zh-cn/user-docs/kubeblocks-for-mongodb/cluster-management/create-and-connect-to-a-mongodb-cluster.md
index 4af7b4154cc..ca08104dbd0 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-mongodb/cluster-management/create-and-connect-to-a-mongodb-cluster.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-mongodb/cluster-management/create-and-connect-to-a-mongodb-cluster.md
@@ -192,7 +192,16 @@ KubeBlocks 支持创建两种 MongoDB 集群:单机版(Standalone)和主
kbcli cluster create mongodb mycluster --mode replicaset -n demo
```
- 如果您只有一个节点用于部署主备版集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-afffinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
+ 如果您只有一个节点用于部署多副本集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-affinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。例如,
+
+ ```bash
+ kbcli cluster create mongodb mycluster \
+ --mode='replicaset' \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
+ ```
2. 验证集群是否创建成功。
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-mysql-community-edition/cluster-management/create-and-connect-a-mysql-cluster.md b/i18n/zh-cn/user-docs/kubeblocks-for-mysql-community-edition/cluster-management/create-and-connect-a-mysql-cluster.md
index 9f40690a307..9552012f56e 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-mysql-community-edition/cluster-management/create-and-connect-a-mysql-cluster.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-mysql-community-edition/cluster-management/create-and-connect-a-mysql-cluster.md
@@ -196,7 +196,16 @@ KubeBlocks 支持创建两种类型的 MySQL 集群:单机版(Standalone)
kbcli cluster create mysql -h
```
- 如果您只有一个节点用于部署主备版集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-afffinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
+ 如果您只有一个节点用于部署主备版集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-affinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。例如,
+
+ ```bash
+ kbcli cluster create mysql mycluster \
+ --mode='replication' \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
+ ```
2. 验证集群是否创建成功。
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-postgresql/cluster-management/create-and-connect-a-postgresql-cluster.md b/i18n/zh-cn/user-docs/kubeblocks-for-postgresql/cluster-management/create-and-connect-a-postgresql-cluster.md
index 4d88b655de3..48fe8fec0ef 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-postgresql/cluster-management/create-and-connect-a-postgresql-cluster.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-postgresql/cluster-management/create-and-connect-a-postgresql-cluster.md
@@ -214,7 +214,16 @@ KubeBlocks 支持创建两种 PostgreSQL 集群:单机版(Standalone)和
kbcli cluster create postgresql mycluster --replicas=2 -n demo
```
- 如果您只有一个节点用于部署主备版集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-afffinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
+ 如果您只有一个节点用于部署主备版集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-affinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。例如,
+
+ ```bash
+ kbcli cluster create postgresql mycluster \
+ --mode='replication' \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
+ ```
2. 验证集群是否创建成功。
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-qdrant/manage-qdrant.md b/i18n/zh-cn/user-docs/kubeblocks-for-qdrant/manage-qdrant.md
index 0d0f225ddd3..7967361c017 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-qdrant/manage-qdrant.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-qdrant/manage-qdrant.md
@@ -120,7 +120,16 @@ kubectl get cluster mycluster -n demo -o yaml
kbcli cluster create qdrant -h
```
- 如果您只有一个节点用于部署多副本集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-afffinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
+ 如果您只有一个节点用于部署多副本集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-affinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。例如,
+
+ ```bash
+ kbcli cluster create qdrant mycluster \
+ --replicas=3 \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
+ ```
2. 检查集群是否已创建。
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-redis/cluster-management/create-and-connect-to-a-redis-cluster.md b/i18n/zh-cn/user-docs/kubeblocks-for-redis/cluster-management/create-and-connect-to-a-redis-cluster.md
index 375e770d661..55f33b21cd9 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-redis/cluster-management/create-and-connect-to-a-redis-cluster.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-redis/cluster-management/create-and-connect-to-a-redis-cluster.md
@@ -217,7 +217,15 @@ KubeBlocks 支持创建两种 Redis 集群:单机版(Standalone)和主备
kbcli cluster create redis -h
```
- 如果您只有一个节点用于部署多副本集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-afffinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
+ 如果您只有一个节点用于部署多副本集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-affinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。例如,
+
+ ```bash
+ kbcli cluster create redis mycluster \
+ --pod-anti-affinity='Preferred' \
+ --tolerations='node-role.kubeblocks.io/data-plane:NoSchedule' \
+ --topology-keys='null' \
+ --namespace demo
+ ```
2. 验证集群是否创建成功。
diff --git a/i18n/zh-cn/user-docs/kubeblocks-for-starrocks/manage-starrocks.md b/i18n/zh-cn/user-docs/kubeblocks-for-starrocks/manage-starrocks.md
index 1a07a38b58e..f2dfe2f3bc5 100644
--- a/i18n/zh-cn/user-docs/kubeblocks-for-starrocks/manage-starrocks.md
+++ b/i18n/zh-cn/user-docs/kubeblocks-for-starrocks/manage-starrocks.md
@@ -25,10 +25,6 @@ StarRocks 是一款高性能分析型数据仓库,使用向量化、MPP 架构
***步骤:***
-
-
-
-
KubeBlocks 通过 `Cluster` 定义集群。以下是创建 StarRocks 集群的示例。Pod 默认分布在不同节点。如果您只有一个节点可用于部署多副本集群,可设置 `spec.schedulingPolicy` 或 `spec.componentSpecs.schedulingPolicy`,具体可参考 [API 文档](https://kubeblocks.io/docs/preview/developer_docs/api-reference/cluster#apps.kubeblocks.io/v1.SchedulingPolicy)。但生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
```yaml
@@ -98,69 +94,6 @@ kubectl get all,secret,rolebinding,serviceaccount -l app.kubernetes.io/instance=
kubectl get cluster mycluster -n demo -o yaml
```
-
-
-
-
-1. 执行以下命令,创建 StarRocks 集群。
-
- ```bash
- kbcli cluster create mycluster --cluster-definition=starrocks -n demo
- ```
-
- 如果您需要自定义集群规格,kbcli 也提供了诸多参数,如支持设置引擎版本、终止策略、CPU、内存规格。您可通过在命令结尾添加 --help 或 -h 来查看具体说明。比如,
-
- ```bash
- kbcli cluster create --help
- kbcli cluster create -h
- ```
-
- 如果您只有一个节点用于部署多副本集群,可在创建集群时配置集群亲和性,配置 `--pod-anti-afffinity`, `--tolerations` 和 `--topology-keys`。但需要注意的是,生产环境中,不建议将所有副本部署在同一个节点上,因为这可能会降低集群的可用性。
-
-2. 验证集群是否创建成功。
-
- ```bash
- kbcli cluster list -n demo
- >
- NAME NAMESPACE CLUSTER-DEFINITION VERSION TERMINATION-POLICY STATUS CREATED-TIME
- mycluster demo starrocks starrocks-3.1.1 Delete Running Jul 17,2024 19:06 UTC+0800
- ```
-
-3. 查看集群信息。
-
- ```bash
- kbcli cluster describe mycluster -n demo
- >
- Name: mycluster Created Time: Jul 17,2024 19:06 UTC+0800
- NAMESPACE CLUSTER-DEFINITION VERSION STATUS TERMINATION-POLICY
- demo starrocks starrocks-3.1.1 Running Delete
-
- Endpoints:
- COMPONENT MODE INTERNAL EXTERNAL
- fe ReadWrite mycluster-fe.default.svc.cluster.local:9030
-
- Topology:
- COMPONENT INSTANCE ROLE STATUS AZ NODE CREATED-TIME
- be mycluster-be-0 Running minikube/192.168.49.2 Jul 17,2024 19:06 UTC+0800
- fe mycluster-fe-0 Running minikube/192.168.49.2 Jul 17,2024 19:06 UTC+0800
-
- Resources Allocation:
- COMPONENT DEDICATED CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE-SIZE STORAGE-CLASS
- fe false 1 / 1 1Gi / 1Gi data:20Gi standard
- be false 1 / 1 1Gi / 1Gi data:20Gi standard
-
- Images:
- COMPONENT TYPE IMAGE
- fe fe docker.io/starrocks/fe-ubuntu:2.5.4
- be be docker.io/starrocks/be-ubuntu:2.5.4
-
- Show cluster events: kbcli cluster list-events -n demo mycluster
- ```
-
-
-
-
-
## 扩缩容
### 垂直扩缩容
From 9089d06689f30541e34459f4710c45c8885b1351 Mon Sep 17 00:00:00 2001
From: yuanyuan zhang
Date: Tue, 7 Jan 2025 17:08:59 +0800
Subject: [PATCH 5/6] docs: fix componentDef bugs
---
docs/user_docs/observability/monitor-database.md | 3 ++-
i18n/zh-cn/user-docs/observability/monitor-database.md | 3 ++-
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/docs/user_docs/observability/monitor-database.md b/docs/user_docs/observability/monitor-database.md
index 70595351a72..df700f72d6b 100644
--- a/docs/user_docs/observability/monitor-database.md
+++ b/docs/user_docs/observability/monitor-database.md
@@ -124,6 +124,7 @@ spec:
topology: replication
componentSpecs:
- name: postgresql
+ componentDef: postgresql
serviceVersion: "14.7.2"
disableExporter: false
labels:
@@ -199,7 +200,7 @@ Edit the value of `disableExporter`.
...
componentSpecs:
- name: mysql
- componentDefRef: mysql
+ componentDef: mysql
disableExporter: true # Set to `false` to enable exporter
...
```
diff --git a/i18n/zh-cn/user-docs/observability/monitor-database.md b/i18n/zh-cn/user-docs/observability/monitor-database.md
index 50ddb6d11bf..074bf5fd865 100644
--- a/i18n/zh-cn/user-docs/observability/monitor-database.md
+++ b/i18n/zh-cn/user-docs/observability/monitor-database.md
@@ -124,6 +124,7 @@ spec:
topology: replication
componentSpecs:
- name: postgresql
+ componentDef: postgresql
serviceVersion: "14.7.2"
disableExporter: false
labels:
@@ -203,7 +204,7 @@ kubectl edit cluster mycluster -n demo
...
componentSpecs:
- name: mysql
- componentDefRef: mysql
+ componentDef: mysql
disableExporter: true # 将参数值设为 `false`,开启 exporter
```
From b5e536819543a9829751db05c86f089187efcdad Mon Sep 17 00:00:00 2001
From: yuanyuan zhang
Date: Tue, 7 Jan 2025 17:12:39 +0800
Subject: [PATCH 6/6] docs: update docs
---
docs/user_docs/observability/monitor-database.md | 3 +--
i18n/zh-cn/user-docs/observability/monitor-database.md | 3 +--
2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/docs/user_docs/observability/monitor-database.md b/docs/user_docs/observability/monitor-database.md
index df700f72d6b..22c21eae63c 100644
--- a/docs/user_docs/observability/monitor-database.md
+++ b/docs/user_docs/observability/monitor-database.md
@@ -199,8 +199,7 @@ Edit the value of `disableExporter`.
```yaml
...
componentSpecs:
- - name: mysql
- componentDef: mysql
+...
disableExporter: true # Set to `false` to enable exporter
...
```
diff --git a/i18n/zh-cn/user-docs/observability/monitor-database.md b/i18n/zh-cn/user-docs/observability/monitor-database.md
index 074bf5fd865..c1058cb9e82 100644
--- a/i18n/zh-cn/user-docs/observability/monitor-database.md
+++ b/i18n/zh-cn/user-docs/observability/monitor-database.md
@@ -203,8 +203,7 @@ kubectl edit cluster mycluster -n demo
```yaml
...
componentSpecs:
- - name: mysql
- componentDef: mysql
+...
disableExporter: true # 将参数值设为 `false`,开启 exporter
```