diff --git a/api/v1/storagecluster_types.go b/api/v1/storagecluster_types.go index 7f0a34545e..b63338cab7 100644 --- a/api/v1/storagecluster_types.go +++ b/api/v1/storagecluster_types.go @@ -246,6 +246,8 @@ type ManageCephBlockPools struct { // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ VirtualizationStorageClassName string `json:"virtualizationStorageClassName,omitempty"` + // PoolSpec specifies the pool specification for the default cephBlockPool + PoolSpec rookCephv1.PoolSpec `json:"poolSpec,omitempty"` } // ManageCephNonResilientPools defines how to reconcile ceph non-resilient pools @@ -294,6 +296,8 @@ type ManageCephObjectStores struct { // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ StorageClassName string `json:"storageClassName,omitempty"` + // DataPoolSpec specifies the pool specification for the default cephObjectStore data pool + DataPoolSpec rookCephv1.PoolSpec `json:"dataPoolSpec,omitempty"` } // ManageCephObjectStoreUsers defines how to reconcile CephObjectStoreUsers diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 25007bf27e..0709f4ca77 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -259,6 +259,7 @@ func (in *KeyRotationSpec) DeepCopy() *KeyRotationSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManageCephBlockPools) DeepCopyInto(out *ManageCephBlockPools) { *out = *in + in.PoolSpec.DeepCopyInto(&out.PoolSpec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManageCephBlockPools. @@ -399,6 +400,7 @@ func (in *ManageCephObjectStores) DeepCopyInto(out *ManageCephObjectStores) { *out = new(bool) **out = **in } + in.DataPoolSpec.DeepCopyInto(&out.DataPoolSpec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManageCephObjectStores. @@ -447,7 +449,7 @@ func (in *ManagedResourcesSpec) DeepCopyInto(out *ManagedResourcesSpec) { in.CephCluster.DeepCopyInto(&out.CephCluster) out.CephConfig = in.CephConfig out.CephDashboard = in.CephDashboard - out.CephBlockPools = in.CephBlockPools + in.CephBlockPools.DeepCopyInto(&out.CephBlockPools) in.CephNonResilientPools.DeepCopyInto(&out.CephNonResilientPools) in.CephFilesystems.DeepCopyInto(&out.CephFilesystems) in.CephObjectStores.DeepCopyInto(&out.CephObjectStores) diff --git a/config/crd/bases/ocs.openshift.io_storageclusters.yaml b/config/crd/bases/ocs.openshift.io_storageclusters.yaml index 85d94e7a06..7c065e70ae 100644 --- a/config/crd/bases/ocs.openshift.io_storageclusters.yaml +++ b/config/crd/bases/ocs.openshift.io_storageclusters.yaml @@ -722,6 +722,218 @@ spec: type: boolean disableStorageClass: type: boolean + poolSpec: + description: PoolSpec specifies the pool specification for + the default cephBlockPool + properties: + application: + description: The application name to set on the pool. + Only expected to be set for rgw pools. 
+ type: string + compressionMode: + description: |- + DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" + The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) + Do NOT set a default value for kubebuilder as this will override the Parameters + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized + by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for + use in the pool + nullable: true + type: string + enableCrushUpdates: + description: Allow rook operator to change the pool CRUSH + tunables once the pool is created + type: boolean + enableRBDStats: + description: EnableRBDStats is used to enable gathering + of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: |- + Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + This is the number of OSDs that can be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: |- + Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + The number of chunks required to recover an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or + zone if available) - technically also any type in the + crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored + or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool + or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes + Secret names to add rbd-mirror or cephfs-mirror + peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of + snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the + snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity + of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only + valid for CephFS + type: string + startTime: + description: StartTime indicates when to start + the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable + on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: |- + MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes + as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage + tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high + performance tier (for example SSD or NVME) for + Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low + performance tier (for example HDDs) for remaining + OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of + replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows + you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in + a replicated storage pool, including the object + itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure + domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph + in terms of expected consumption of the total cluster + capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health + check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second + or minute for the health check to run like 60s + for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object reconcileStrategy: type: string storageClassName: @@ -1812,6 +2024,218 @@ spec: cephObjectStores: description: ManageCephObjectStores defines how to reconcile CephObjectStores properties: + dataPoolSpec: + description: DataPoolSpec specifies the pool specification + for the default cephObjectStore data pool + properties: + application: + description: The application name to set on the pool. + Only expected to be set for rgw pools. 
+ type: string + compressionMode: + description: |- + DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" + The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) + Do NOT set a default value for kubebuilder as this will override the Parameters + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized + by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for + use in the pool + nullable: true + type: string + enableCrushUpdates: + description: Allow rook operator to change the pool CRUSH + tunables once the pool is created + type: boolean + enableRBDStats: + description: EnableRBDStats is used to enable gathering + of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: |- + Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + This is the number of OSDs that can be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: |- + Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + The number of chunks required to recover an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or + zone if available) - technically also any type in the + crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored + or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool + or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes + Secret names to add rbd-mirror or cephfs-mirror + peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of + snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the + snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity + of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only + valid for CephFS + type: string + startTime: + description: StartTime indicates when to start + the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable + on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: |- + MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes + as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage + tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high + performance tier (for example SSD or NVME) for + Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low + performance tier (for example HDDs) for remaining + OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of + replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows + you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in + a replicated storage pool, including the object + itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure + domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph + in terms of expected consumption of the total cluster + capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health + check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second + or minute for the health check to run like 60s + for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object disableRoute: type: boolean disableStorageClass: diff --git a/controllers/storagecluster/cephblockpools.go b/controllers/storagecluster/cephblockpools.go index 066e79684f..55a8140719 100644 --- a/controllers/storagecluster/cephblockpools.go +++ b/controllers/storagecluster/cephblockpools.go @@ -88,10 +88,12 @@ func (o *ocsCephBlockPools) reconcileCephBlockPool(r *StorageClusterReconciler, } _, err = ctrl.CreateOrUpdate(r.ctx, r.Client, cephBlockPool, func() error { - cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass - cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true - cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster) - cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data") + // Pass 
the poolSpec from the storageCluster CR + + cephBlockPool.Spec.PoolSpec = storageCluster.Spec.ManagedResources.CephBlockPools.PoolSpec + + // Set default values in the poolSpec as necessary + setDefaultDataPoolSpec(&cephBlockPool.Spec.PoolSpec, storageCluster) cephBlockPool.Spec.PoolSpec.EnableRBDStats = true // Since provider mode handles mirroring, we only need to handle for internal mode @@ -151,7 +153,7 @@ func (o *ocsCephBlockPools) reconcileMgrCephBlockPool(r *StorageClusterReconcile cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster) - cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "metadata") + cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, poolTypeMetadata) util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true") return controllerutil.SetControllerReference(storageCluster, cephBlockPool, r.Scheme) @@ -199,7 +201,7 @@ func (o *ocsCephBlockPools) reconcileNFSCephBlockPool(r *StorageClusterReconcile cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass cephBlockPool.Spec.EnableCrushUpdates = true cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster) - cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data") + cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, poolTypeMetadata) cephBlockPool.Spec.PoolSpec.EnableRBDStats = true util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true") diff --git a/controllers/storagecluster/cephblockpools_test.go b/controllers/storagecluster/cephblockpools_test.go index b24d2908a0..49fea46067 100644 --- a/controllers/storagecluster/cephblockpools_test.go +++ b/controllers/storagecluster/cephblockpools_test.go @@ -157,7 +157,7 @@ func assertCephBlockPools(t *testing.T, reconciler StorageClusterReconciler, cr DeviceClass: cr.Status.DefaultCephDeviceClass, EnableCrushUpdates: true, FailureDomain: getFailureDomain(cr), - Replicated: generateCephReplicatedSpec(cr, "data"), + Replicated: generateCephReplicatedSpec(cr, poolTypeData), EnableRBDStats: true, }, }, @@ -204,7 +204,7 @@ func assertCephNFSBlockPool(t *testing.T, reconciler StorageClusterReconciler, c DeviceClass: cr.Status.DefaultCephDeviceClass, EnableCrushUpdates: true, FailureDomain: getFailureDomain(cr), - Replicated: generateCephReplicatedSpec(cr, "data"), + Replicated: generateCephReplicatedSpec(cr, poolTypeMetadata), EnableRBDStats: true, }, Name: ".nfs", diff --git a/controllers/storagecluster/cephcluster.go b/controllers/storagecluster/cephcluster.go index 03575296aa..930676879c 100644 --- a/controllers/storagecluster/cephcluster.go +++ b/controllers/storagecluster/cephcluster.go @@ -48,6 +48,11 @@ const ( diskSpeedFast diskSpeed = "fast" ) +const ( + poolTypeData = "data" + poolTypeMetadata = "metadata" +) + type knownDiskType struct { speed diskSpeed provisioner StorageClassProvisionerType @@ -1418,3 +1423,26 @@ func isEncrptionSettingUpdated(clusterWideEncrytion bool, existingDeviceSet []ro } return false } + +// setDefaultDataPoolSpec sets the common pool spec for all data pools as necessary +func setDefaultDataPoolSpec(poolSpec *rookCephv1.PoolSpec, sc *ocsv1.StorageCluster) { + poolSpec.EnableCrushUpdates = true + if poolSpec.DeviceClass == "" { + poolSpec.DeviceClass = sc.Status.DefaultCephDeviceClass + } + if 
poolSpec.FailureDomain == "" { + poolSpec.FailureDomain = getFailureDomain(sc) + } + // Set default replication settings if necessary + // Always set the default Size & ReplicasPerFailureDomain in arbiter mode + defaultReplicatedSpec := generateCephReplicatedSpec(sc, poolTypeData) + if poolSpec.Replicated.Size == 0 || arbiterEnabled(sc) { + poolSpec.Replicated.Size = defaultReplicatedSpec.Size + } + if poolSpec.Replicated.ReplicasPerFailureDomain == 0 || arbiterEnabled(sc) { + poolSpec.Replicated.ReplicasPerFailureDomain = defaultReplicatedSpec.ReplicasPerFailureDomain + } + if poolSpec.Replicated.TargetSizeRatio == 0.0 { + poolSpec.Replicated.TargetSizeRatio = defaultReplicatedSpec.TargetSizeRatio + } +} diff --git a/controllers/storagecluster/cephfilesystem.go b/controllers/storagecluster/cephfilesystem.go index 5c451c47b0..0d73013a0a 100644 --- a/controllers/storagecluster/cephfilesystem.go +++ b/controllers/storagecluster/cephfilesystem.go @@ -31,7 +31,7 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster Spec: cephv1.FilesystemSpec{ MetadataPool: cephv1.NamedPoolSpec{ PoolSpec: cephv1.PoolSpec{ - Replicated: generateCephReplicatedSpec(initStorageCluster, "metadata"), + Replicated: generateCephReplicatedSpec(initStorageCluster, poolTypeMetadata), FailureDomain: initStorageCluster.Status.FailureDomain, }}, MetadataServer: cephv1.MetadataServerSpec{ @@ -56,30 +56,10 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster // Append additional pools from specified additional data pools ret.Spec.DataPools = append(ret.Spec.DataPools, initStorageCluster.Spec.ManagedResources.CephFilesystems.AdditionalDataPools...) - // Iterate over each pool and set default values if necessary - defaultPoolSpec := generateDefaultPoolSpec(initStorageCluster) for i := range ret.Spec.DataPools { - pool := &ret.Spec.DataPools[i] - // Set default device class if not specified - if pool.PoolSpec.DeviceClass == "" { - pool.PoolSpec.DeviceClass = defaultPoolSpec.DeviceClass - } - // Set EnableCrushUpdates to always be true - pool.PoolSpec.EnableCrushUpdates = true - // Set default replication settings if not specified - if pool.PoolSpec.Replicated.Size == 0 { - pool.PoolSpec.Replicated.Size = defaultPoolSpec.Replicated.Size - } - if pool.PoolSpec.Replicated.ReplicasPerFailureDomain == 0 { - pool.PoolSpec.Replicated.ReplicasPerFailureDomain = defaultPoolSpec.Replicated.ReplicasPerFailureDomain - } - if pool.PoolSpec.Replicated.TargetSizeRatio == 0 { - pool.PoolSpec.Replicated.TargetSizeRatio = defaultPoolSpec.Replicated.TargetSizeRatio - } - // Set default failure domain if not specified - if pool.PoolSpec.FailureDomain == "" { - pool.PoolSpec.FailureDomain = defaultPoolSpec.FailureDomain - } + poolSpec := &ret.Spec.DataPools[i].PoolSpec + // Set default values in the poolSpec as necessary + setDefaultDataPoolSpec(poolSpec, initStorageCluster) } // set device class for metadata pool from the default data pool @@ -282,13 +262,3 @@ func getActiveMetadataServers(sc *ocsv1.StorageCluster) int { return defaults.CephFSActiveMetadataServers } - -// Define a function to generate default pool specifications -func generateDefaultPoolSpec(sc *ocsv1.StorageCluster) cephv1.PoolSpec { - return cephv1.PoolSpec{ - DeviceClass: sc.Status.DefaultCephDeviceClass, - EnableCrushUpdates: true, - Replicated: generateCephReplicatedSpec(sc, "data"), - FailureDomain: sc.Status.FailureDomain, - } -} diff --git a/controllers/storagecluster/cephfilesystem_test.go 
b/controllers/storagecluster/cephfilesystem_test.go index 41b48e96dc..7ed87a592b 100644 --- a/controllers/storagecluster/cephfilesystem_test.go +++ b/controllers/storagecluster/cephfilesystem_test.go @@ -139,7 +139,13 @@ func TestCephFileSystemDataPools(t *testing.T) { mocksc := &api.StorageCluster{} mockStorageCluster.DeepCopyInto(mocksc) mocksc.Status.FailureDomain = "zone" - defaultPoolSpec := generateDefaultPoolSpec(mocksc) + defaultPoolSpec := cephv1.PoolSpec{ + EnableCrushUpdates: true, + DeviceClass: mocksc.Status.DefaultCephDeviceClass, + FailureDomain: getFailureDomain(mocksc), + Replicated: generateCephReplicatedSpec(mocksc, poolTypeData), + } + var cases = []struct { label string sc *api.StorageCluster diff --git a/controllers/storagecluster/cephobjectstores.go b/controllers/storagecluster/cephobjectstores.go index b63b24a296..f178182704 100644 --- a/controllers/storagecluster/cephobjectstores.go +++ b/controllers/storagecluster/cephobjectstores.go @@ -168,17 +168,12 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S }, Spec: cephv1.ObjectStoreSpec{ PreservePoolsOnDelete: false, - DataPool: cephv1.PoolSpec{ - DeviceClass: initData.Status.DefaultCephDeviceClass, - EnableCrushUpdates: true, - FailureDomain: initData.Status.FailureDomain, - Replicated: generateCephReplicatedSpec(initData, "data"), - }, + DataPool: initData.Spec.ManagedResources.CephObjectStores.DataPoolSpec, // Pass the poolSpec from the storageCluster CR MetadataPool: cephv1.PoolSpec{ DeviceClass: initData.Status.DefaultCephDeviceClass, EnableCrushUpdates: true, FailureDomain: initData.Status.FailureDomain, - Replicated: generateCephReplicatedSpec(initData, "metadata"), + Replicated: generateCephReplicatedSpec(initData, poolTypeMetadata), }, Gateway: cephv1.GatewaySpec{ Port: 80, @@ -209,6 +204,9 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S obj.Spec.Gateway.HostNetwork = initData.Spec.ManagedResources.CephObjectStores.HostNetwork } + // Set default values in the poolSpec as necessary + setDefaultDataPoolSpec(&obj.Spec.DataPool, initData) + // if kmsConfig is not 'nil', add the KMS details to ObjectStore spec if kmsConfigMap != nil { diff --git a/controllers/storagecluster/generate.go b/controllers/storagecluster/generate.go index d273768618..b15fd1a5ae 100644 --- a/controllers/storagecluster/generate.go +++ b/controllers/storagecluster/generate.go @@ -141,8 +141,7 @@ func generateCephReplicatedSpec(initData *ocsv1.StorageCluster, poolType string) crs.Size = getCephPoolReplicatedSize(initData) crs.ReplicasPerFailureDomain = uint(getReplicasPerFailureDomain(initData)) - //lint:ignore ST1017 required to compare it directly - if "data" == poolType { + if poolType == poolTypeData { crs.TargetSizeRatio = .49 } diff --git a/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageclusters.yaml b/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageclusters.yaml index 85d94e7a06..7c065e70ae 100644 --- a/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageclusters.yaml +++ b/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageclusters.yaml @@ -722,6 +722,218 @@ spec: type: boolean disableStorageClass: type: boolean + poolSpec: + description: PoolSpec specifies the pool specification for + the default cephBlockPool + properties: + application: + description: The application name to set on the pool. + Only expected to be set for rgw pools. 
+ type: string + compressionMode: + description: |- + DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" + The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) + Do NOT set a default value for kubebuilder as this will override the Parameters + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized + by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for + use in the pool + nullable: true + type: string + enableCrushUpdates: + description: Allow rook operator to change the pool CRUSH + tunables once the pool is created + type: boolean + enableRBDStats: + description: EnableRBDStats is used to enable gathering + of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: |- + Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + This is the number of OSDs that can be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: |- + Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + The number of chunks required to recover an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or + zone if available) - technically also any type in the + crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored + or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool + or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes + Secret names to add rbd-mirror or cephfs-mirror + peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of + snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the + snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity + of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only + valid for CephFS + type: string + startTime: + description: StartTime indicates when to start + the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable + on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: |- + MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes + as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage + tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high + performance tier (for example SSD or NVME) for + Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low + performance tier (for example HDDs) for remaining + OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of + replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows + you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in + a replicated storage pool, including the object + itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure + domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph + in terms of expected consumption of the total cluster + capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health + check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second + or minute for the health check to run like 60s + for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object reconcileStrategy: type: string storageClassName: @@ -1812,6 +2024,218 @@ spec: cephObjectStores: description: ManageCephObjectStores defines how to reconcile CephObjectStores properties: + dataPoolSpec: + description: DataPoolSpec specifies the pool specification + for the default cephObjectStore data pool + properties: + application: + description: The application name to set on the pool. + Only expected to be set for rgw pools. 
+ type: string + compressionMode: + description: |- + DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" + The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) + Do NOT set a default value for kubebuilder as this will override the Parameters + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized + by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for + use in the pool + nullable: true + type: string + enableCrushUpdates: + description: Allow rook operator to change the pool CRUSH + tunables once the pool is created + type: boolean + enableRBDStats: + description: EnableRBDStats is used to enable gathering + of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: |- + Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + This is the number of OSDs that can be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: |- + Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + The number of chunks required to recover an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or + zone if available) - technically also any type in the + crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored + or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool + or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes + Secret names to add rbd-mirror or cephfs-mirror + peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of + snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the + snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity + of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only + valid for CephFS + type: string + startTime: + description: StartTime indicates when to start + the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable + on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: |- + MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes + as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage + tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high + performance tier (for example SSD or NVME) for + Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low + performance tier (for example HDDs) for remaining + OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of + replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows + you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in + a replicated storage pool, including the object + itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure + domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph + in terms of expected consumption of the total cluster + capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health + check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second + or minute for the health check to run like 60s + for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object disableRoute: type: boolean disableStorageClass: diff --git a/deploy/ocs-operator/manifests/storagecluster.crd.yaml b/deploy/ocs-operator/manifests/storagecluster.crd.yaml index 85d94e7a06..7c065e70ae 100644 --- a/deploy/ocs-operator/manifests/storagecluster.crd.yaml +++ b/deploy/ocs-operator/manifests/storagecluster.crd.yaml @@ -722,6 +722,218 @@ spec: type: boolean disableStorageClass: type: boolean + poolSpec: + description: PoolSpec specifies the pool specification for + the default cephBlockPool + properties: + application: + description: The application name to set on the pool. + Only expected to be set for rgw pools. 
+ type: string + compressionMode: + description: |- + DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" + The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) + Do NOT set a default value for kubebuilder as this will override the Parameters + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized + by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for + use in the pool + nullable: true + type: string + enableCrushUpdates: + description: Allow rook operator to change the pool CRUSH + tunables once the pool is created + type: boolean + enableRBDStats: + description: EnableRBDStats is used to enable gathering + of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: |- + Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + This is the number of OSDs that can be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: |- + Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + The number of chunks required to recover an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or + zone if available) - technically also any type in the + crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored + or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool + or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes + Secret names to add rbd-mirror or cephfs-mirror + peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of + snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the + snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity + of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only + valid for CephFS + type: string + startTime: + description: StartTime indicates when to start + the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable + on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: |- + MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes + as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage + tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high + performance tier (for example SSD or NVME) for + Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low + performance tier (for example HDDs) for remaining + OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of + replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows + you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in + a replicated storage pool, including the object + itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure + domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph + in terms of expected consumption of the total cluster + capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health + check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second + or minute for the health check to run like 60s + for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object reconcileStrategy: type: string storageClassName: @@ -1812,6 +2024,218 @@ spec: cephObjectStores: description: ManageCephObjectStores defines how to reconcile CephObjectStores properties: + dataPoolSpec: + description: DataPoolSpec specifies the pool specification + for the default cephObjectStore data pool + properties: + application: + description: The application name to set on the pool. + Only expected to be set for rgw pools. 
+ type: string + compressionMode: + description: |- + DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" + The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) + Do NOT set a default value for kubebuilder as this will override the Parameters + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized + by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for + use in the pool + nullable: true + type: string + enableCrushUpdates: + description: Allow rook operator to change the pool CRUSH + tunables once the pool is created + type: boolean + enableRBDStats: + description: EnableRBDStats is used to enable gathering + of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: |- + Number of coding chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + This is the number of OSDs that can be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: |- + Number of data chunks per object in an erasure coded storage pool (required for erasure-coded pool type). + The number of chunks required to recover an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number of data chunks, the higher the cost of recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or + zone if available) - technically also any type in the + crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored + or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool + or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes + Secret names to add rbd-mirror or cephfs-mirror + peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of + snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the + snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity + of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only + valid for CephFS + type: string + startTime: + description: StartTime indicates when to start + the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable + on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: |- + MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes + as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage + tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high + performance tier (for example SSD or NVME) for + Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low + performance tier (for example HDDs) for remaining + OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of + replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows + you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in + a replicated storage pool, including the object + itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure + domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph + in terms of expected consumption of the total cluster + capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health + check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second + or minute for the health check to run like 60s + for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object disableRoute: type: boolean disableStorageClass: diff --git a/metrics/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/storagecluster_types.go b/metrics/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/storagecluster_types.go index 7f0a34545e..b63338cab7 100644 --- a/metrics/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/storagecluster_types.go +++ b/metrics/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/storagecluster_types.go @@ -246,6 +246,8 @@ type ManageCephBlockPools struct { // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ VirtualizationStorageClassName string `json:"virtualizationStorageClassName,omitempty"` + // PoolSpec specifies the pool 
specification for the default cephBlockPool + PoolSpec rookCephv1.PoolSpec `json:"poolSpec,omitempty"` } // ManageCephNonResilientPools defines how to reconcile ceph non-resilient pools @@ -294,6 +296,8 @@ type ManageCephObjectStores struct { // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ StorageClassName string `json:"storageClassName,omitempty"` + // DataPoolSpec specifies the pool specification for the default cephObjectStore data pool + DataPoolSpec rookCephv1.PoolSpec `json:"dataPoolSpec,omitempty"` } // ManageCephObjectStoreUsers defines how to reconcile CephObjectStoreUsers diff --git a/metrics/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/zz_generated.deepcopy.go b/metrics/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/zz_generated.deepcopy.go index 25007bf27e..0709f4ca77 100644 --- a/metrics/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/zz_generated.deepcopy.go +++ b/metrics/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/zz_generated.deepcopy.go @@ -259,6 +259,7 @@ func (in *KeyRotationSpec) DeepCopy() *KeyRotationSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManageCephBlockPools) DeepCopyInto(out *ManageCephBlockPools) { *out = *in + in.PoolSpec.DeepCopyInto(&out.PoolSpec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManageCephBlockPools. @@ -399,6 +400,7 @@ func (in *ManageCephObjectStores) DeepCopyInto(out *ManageCephObjectStores) { *out = new(bool) **out = **in } + in.DataPoolSpec.DeepCopyInto(&out.DataPoolSpec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManageCephObjectStores. 
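
Note on the hunk below (the metrics-vendored copy of the same `api/v1/zz_generated.deepcopy.go` change): it switches `CephBlockPools` from a plain struct assignment to `DeepCopyInto`. That becomes necessary once `ManageCephBlockPools` embeds a `PoolSpec`, because `PoolSpec` carries reference types (the `Parameters` map, mirroring peer secret names, snapshot schedules) that a shallow struct copy would alias between source and copy. A minimal runnable sketch of the difference, using only the vendored Rook API types; the parameter value is illustrative:

```go
package main

import (
	"fmt"

	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
)

func main() {
	in := cephv1.PoolSpec{
		Parameters: map[string]string{"compression_mode": "force"},
	}

	shallow := in          // struct assignment: copies the map header only
	deep := *in.DeepCopy() // generated deepcopy: clones the map contents

	// Mutating the original leaks into the shallow copy but not the deep one.
	in.Parameters["compression_mode"] = "none"
	fmt.Println(shallow.Parameters["compression_mode"]) // "none" (aliased)
	fmt.Println(deep.Parameters["compression_mode"])    // "force" (independent)
}
```
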
@@ -447,7 +449,7 @@ func (in *ManagedResourcesSpec) DeepCopyInto(out *ManagedResourcesSpec) { in.CephCluster.DeepCopyInto(&out.CephCluster) out.CephConfig = in.CephConfig out.CephDashboard = in.CephDashboard - out.CephBlockPools = in.CephBlockPools + in.CephBlockPools.DeepCopyInto(&out.CephBlockPools) in.CephNonResilientPools.DeepCopyInto(&out.CephNonResilientPools) in.CephFilesystems.DeepCopyInto(&out.CephFilesystems) in.CephObjectStores.DeepCopyInto(&out.CephObjectStores) diff --git a/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/storagecluster_types.go b/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/storagecluster_types.go index 7f0a34545e..b63338cab7 100644 --- a/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/storagecluster_types.go +++ b/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/storagecluster_types.go @@ -246,6 +246,8 @@ type ManageCephBlockPools struct { // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ VirtualizationStorageClassName string `json:"virtualizationStorageClassName,omitempty"` + // PoolSpec specifies the pool specification for the default cephBlockPool + PoolSpec rookCephv1.PoolSpec `json:"poolSpec,omitempty"` } // ManageCephNonResilientPools defines how to reconcile ceph non-resilient pools @@ -294,6 +296,8 @@ type ManageCephObjectStores struct { // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ StorageClassName string `json:"storageClassName,omitempty"` + // DataPoolSpec specifies the pool specification for the default cephObjectStore data pool + DataPoolSpec rookCephv1.PoolSpec `json:"dataPoolSpec,omitempty"` } // ManageCephObjectStoreUsers defines how to reconcile CephObjectStoreUsers diff --git a/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/zz_generated.deepcopy.go b/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/zz_generated.deepcopy.go index 25007bf27e..0709f4ca77 100644 --- a/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/zz_generated.deepcopy.go @@ -259,6 +259,7 @@ func (in *KeyRotationSpec) DeepCopy() *KeyRotationSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManageCephBlockPools) DeepCopyInto(out *ManageCephBlockPools) { *out = *in + in.PoolSpec.DeepCopyInto(&out.PoolSpec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManageCephBlockPools. @@ -399,6 +400,7 @@ func (in *ManageCephObjectStores) DeepCopyInto(out *ManageCephObjectStores) { *out = new(bool) **out = **in } + in.DataPoolSpec.DeepCopyInto(&out.DataPoolSpec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManageCephObjectStores. @@ -447,7 +449,7 @@ func (in *ManagedResourcesSpec) DeepCopyInto(out *ManagedResourcesSpec) { in.CephCluster.DeepCopyInto(&out.CephCluster) out.CephConfig = in.CephConfig out.CephDashboard = in.CephDashboard - out.CephBlockPools = in.CephBlockPools + in.CephBlockPools.DeepCopyInto(&out.CephBlockPools) in.CephNonResilientPools.DeepCopyInto(&out.CephNonResilientPools) in.CephFilesystems.DeepCopyInto(&out.CephFilesystems) in.CephObjectStores.DeepCopyInto(&out.CephObjectStores)
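
To make the merge semantics of the new fields concrete: a user sets only the fields they care about on `spec.managedResources.cephBlockPools.poolSpec` (or `cephObjectStores.dataPoolSpec`), and the operator's `setDefaultDataPoolSpec` fills in the rest from cluster state. The sketch below is a hedged stand-in for that flow: `defaultPoolSpec` mirrors the defaulting order of `setDefaultDataPoolSpec`/`generateCephReplicatedSpec` from the patch, but the arbiter-mode override is omitted, and the cluster-derived values (`"ssd"`, `"host"`, size `3`, one replica per failure domain) are assumptions, not what the operator will actually compute; only the `0.49` target size ratio for data pools is taken directly from `generate.go`.

```go
package main

import (
	"fmt"

	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
)

// defaultPoolSpec is a hypothetical stand-in for setDefaultDataPoolSpec:
// EnableCrushUpdates is always forced on, while device class, failure
// domain, and replication settings are filled in only when the user left
// them empty. Hard-coded defaults replace the StorageCluster-derived ones.
func defaultPoolSpec(p *cephv1.PoolSpec) {
	p.EnableCrushUpdates = true
	if p.DeviceClass == "" {
		p.DeviceClass = "ssd" // assumed; really sc.Status.DefaultCephDeviceClass
	}
	if p.FailureDomain == "" {
		p.FailureDomain = "host" // assumed; really getFailureDomain(sc)
	}
	if p.Replicated.Size == 0 {
		p.Replicated.Size = 3 // assumed; really getCephPoolReplicatedSize(sc)
	}
	if p.Replicated.ReplicasPerFailureDomain == 0 {
		p.Replicated.ReplicasPerFailureDomain = 1 // assumed
	}
	if p.Replicated.TargetSizeRatio == 0.0 {
		p.Replicated.TargetSizeRatio = 0.49 // data-pool ratio from generate.go
	}
}

func main() {
	// The user supplies only a replica size, as they might via
	// spec.managedResources.cephBlockPools.poolSpec in the CR.
	userSpec := cephv1.PoolSpec{
		Replicated: cephv1.ReplicatedSpec{Size: 2},
	}

	defaultPoolSpec(&userSpec)
	fmt.Printf("%+v\n", userSpec.Replicated) // Size:2 kept; the rest defaulted
	fmt.Println(userSpec.DeviceClass, userSpec.FailureDomain)
}
```

The design point this illustrates is that user-specified values always win: defaulting only touches zero-valued fields, so an explicit `size: 2` or a custom `deviceClass` in the CR survives reconciliation, while omitted fields keep behaving exactly as before the patch.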