diff --git a/apis/apps/v1/cluster_types.go b/apis/apps/v1/cluster_types.go index e7e316e1f66..fe8b5eb5236 100644 --- a/apis/apps/v1/cluster_types.go +++ b/apis/apps/v1/cluster_types.go @@ -146,7 +146,7 @@ type ClusterSpec struct { // Specifies a list of ClusterComponentSpec objects used to define the individual Components that make up a Cluster. // This field allows for detailed configuration of each Component within the Cluster. // - // Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. + // Note: `shardings` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. // // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=128 @@ -155,14 +155,14 @@ type ClusterSpec struct { // +optional ComponentSpecs []ClusterComponentSpec `json:"componentSpecs,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name"` - // Specifies a list of ShardingSpec objects that manage the sharding topology for Cluster Components. - // Each ShardingSpec organizes components into shards, with each shard corresponding to a Component. + // Specifies a list of ClusterSharding objects that manage the sharding topology for Cluster Components. + // Each ClusterSharding organizes components into shards, with each shard corresponding to a Component. // Components within a shard are all based on a common ClusterComponentSpec template, ensuring uniform configurations. // // This field supports dynamic resharding by facilitating the addition or removal of shards - // through the `shards` field in ShardingSpec. + // through the `shards` field in ClusterSharding. // - // Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. + // Note: `shardings` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. // // +patchMergeKey=name // +patchStrategy=merge,retainKeys @@ -171,7 +171,7 @@ type ClusterSpec struct { // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=128 // +optional - ShardingSpecs []ShardingSpec `json:"shardingSpecs,omitempty"` + Shardings []ClusterSharding `json:"shardings,omitempty"` // Specifies runtimeClassName for all Pods managed by this Cluster. // @@ -184,7 +184,7 @@ type ClusterSpec struct { SchedulingPolicy *SchedulingPolicy `json:"schedulingPolicy,omitempty"` // Defines a list of additional Services that are exposed by a Cluster. - // This field allows Services of selected Components, either from `componentSpecs` or `shardingSpecs` to be exposed, + // This field allows Services of selected Components, either from `componentSpecs` or `shardings` to be exposed, // alongside Services defined with ComponentService. // // Services defined here can be referenced by other clusters using the ServiceRefClusterSelector. @@ -222,10 +222,10 @@ type ClusterStatus struct { // +optional Components map[string]ClusterComponentStatus `json:"components,omitempty"` - // Represents the generation number of the referenced ClusterDefinition. + // Records the current status information of all shardings within the Cluster. // // +optional - ClusterDefGeneration int64 `json:"clusterDefGeneration,omitempty"` + Shardings map[string]ClusterComponentStatus `json:"shardings,omitempty"` // Represents a list of detailed status of the Cluster object. // Each condition in the list provides real-time information about certain aspect of the Cluster object. 
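For illustration only (not part of this diff), a minimal sketch of how a client could consume the renamed `spec.shardings` list together with the new `status.shardings` map introduced above; the import path assumes the repository's standard module name, and the function itself is hypothetical:

package example

import (
	"fmt"

	appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
)

// reportShardingPhases walks the renamed spec.shardings list and looks up the
// aggregated status of each sharding in the new status.shardings map.
func reportShardingPhases(cluster *appsv1.Cluster) {
	for _, sharding := range cluster.Spec.Shardings {
		status, ok := cluster.Status.Shardings[sharding.Name]
		if !ok {
			fmt.Printf("sharding %s: no status reported yet\n", sharding.Name)
			continue
		}
		fmt.Printf("sharding %s: %d shard(s) desired, phase %s\n", sharding.Name, sharding.Shards, status.Phase)
	}
}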
@@ -259,7 +259,7 @@ const ( type ClusterComponentSpec struct { // Specifies the Component's name. // It's part of the Service DNS name and must comply with the IANA service naming rule. - // The name is optional when ClusterComponentSpec is used as a template (e.g., in `shardingSpec`), + // The name is optional when ClusterComponentSpec is used as a template (e.g., in `clusterSharding`), // but required otherwise. // // +kubebuilder:validation:MaxLength=22 @@ -542,22 +542,23 @@ type ClusterComponentService struct { PodService *bool `json:"podService,omitempty"` } -// ShardingSpec defines how KubeBlocks manage dynamic provisioned shards. +// ClusterSharding defines how KubeBlocks manage dynamic provisioned shards. // A typical design pattern for distributed databases is to distribute data across multiple shards, // with each shard consisting of multiple replicas. // Therefore, KubeBlocks supports representing a shard with a Component and dynamically instantiating Components // using a template when shards are added. // When shards are removed, the corresponding Components are also deleted. -type ShardingSpec struct { +type ClusterSharding struct { // Represents the common parent part of all shard names. + // // This identifier is included as part of the Service DNS name and must comply with IANA service naming rules. - // It is used to generate the names of underlying Components following the pattern `$(shardingSpec.name)-$(ShardID)`. + // It is used to generate the names of underlying Components following the pattern `$(clusterSharding.name)-$(ShardID)`. // ShardID is a random string that is appended to the Name to generate unique identifiers for each shard. // For example, if the sharding specification name is "my-shard" and the ShardID is "abc", the resulting Component name // would be "my-shard-abc". // - // Note that the name defined in Component template(`shardingSpec.template.name`) will be disregarded - // when generating the Component names of the shards. The `shardingSpec.name` field takes precedence. + // Note that the name defined in Component template(`clusterSharding.template.name`) will be disregarded + // when generating the Component names of the shards. The `clusterSharding.name` field takes precedence. // // +kubebuilder:validation:Required // +kubebuilder:validation:MaxLength=15 @@ -565,10 +566,19 @@ type ShardingSpec struct { // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable" Name string `json:"name"` + // Specifies the ShardingDefinition custom resource (CR) that defines the sharding's characteristics and behavior. + // + // The full name or regular expression is supported to match the ShardingDefinition. + // + // +kubebuilder:validation:MaxLength=64 + // +optional + ShardingDef string `json:"shardingDef,omitempty"` + // The template for generating Components for shards, where each shard consists of one Component. + // // This field is of type ClusterComponentSpec, which encapsulates all the required details and // definitions for creating and managing the Components. - // KubeBlocks uses this template to generate a set of identical Components or shards. + // KubeBlocks uses this template to generate a set of identical Components of shards. // All the generated Components will have the same specifications and definitions as specified in the `template` field. 
// // This allows for the creation of multiple Components with consistent configurations, @@ -578,20 +588,21 @@ type ShardingSpec struct { Template ClusterComponentSpec `json:"template"` // Specifies the desired number of shards. + // // Users can declare the desired number of shards through this field. // KubeBlocks dynamically creates and deletes Components based on the difference // between the desired and actual number of shards. // KubeBlocks provides lifecycle management for sharding, including: // - // - Executing the postProvision Action defined in the ComponentDefinition when the number of shards increases. + // - Executing the shardProvision Action defined in the ShardingDefinition when the number of shards increases. // This allows for custom actions to be performed after a new shard is provisioned. - // - Executing the preTerminate Action defined in the ComponentDefinition when the number of shards decreases. + // - Executing the shardTerminate Action defined in the ShardingDefinition when the number of shards decreases. // This enables custom cleanup or data migration tasks to be executed before a shard is terminated. // Resources and data associated with the corresponding Component will also be deleted. // - // +kubebuilder:validation:Required // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=2048 + // +kubebuilder:validation:Required Shards int32 `json:"shards,omitempty"` } diff --git a/apis/apps/v1/clusterdefinition_types.go b/apis/apps/v1/clusterdefinition_types.go index 398a8b395e1..da4732c6f72 100644 --- a/apis/apps/v1/clusterdefinition_types.go +++ b/apis/apps/v1/clusterdefinition_types.go @@ -37,12 +37,13 @@ import ( // ClusterDefinition defines the topology for databases or storage systems, // offering a variety of topological configurations to meet diverse deployment needs and scenarios. // -// It includes a list of Components, each linked to a ComponentDefinition, which enhances reusability and reduce redundancy. +// It includes a list of Components and/or Shardings, each linked to a ComponentDefinition or a ShardingDefinition, +// which enhances reusability and reduce redundancy. // For example, widely used components such as etcd and Zookeeper can be defined once and reused across multiple ClusterDefinitions, // simplifying the setup of new systems. // -// Additionally, ClusterDefinition also specifies the sequence of startup, upgrade, and shutdown for Components, -// ensuring a controlled and predictable management of component lifecycles. +// Additionally, ClusterDefinition also specifies the sequence of startup, upgrade, and shutdown between Components and/or Shardings, +// ensuring a controlled and predictable management of cluster lifecycles. type ClusterDefinition struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -107,10 +108,15 @@ type ClusterTopology struct { // Components specifies the components in the topology. // - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=128 - Components []ClusterTopologyComponent `json:"components"` + // +optional + Components []ClusterTopologyComponent `json:"components,omitempty"` + + // Shardings specifies the shardings in the topology. + // + // +kubebuilder:validation:MaxItems=128 + // +optional + Shardings []ClusterTopologySharding `json:"shardings,omitempty"` // Specifies the sequence in which components within a cluster topology are // started, stopped, and upgraded. 
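As an illustrative sketch of the renamed API (assumptions: the standard module path, that `componentDef`/`replicas` are the usual `ClusterComponentSpec` template fields, and that the definition names are placeholders), a ClusterSpec relying on `shardings` instead of the removed `shardingSpecs` might be built like this:

package example

import (
	appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
)

// newShardedClusterSpec sketches a ClusterSpec that uses the renamed `shardings`
// field in place of the removed `shardingSpecs`.
func newShardedClusterSpec() appsv1.ClusterSpec {
	return appsv1.ClusterSpec{
		Shardings: []appsv1.ClusterSharding{
			{
				// Shard Components are named "<name>-<ShardID>", e.g. "shard-abc".
				Name: "shard",
				// Full name or regular expression matching a ShardingDefinition CR.
				ShardingDef: "example-shardingdef", // hypothetical definition name
				// Desired number of shards; resharding is done by changing this value.
				Shards: 3,
				// Every shard is generated from this single Component template.
				Template: appsv1.ClusterComponentSpec{
					ComponentDef: "example-compdef", // hypothetical definition name
					Replicas:     2,
				},
			},
		},
	}
}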
@@ -157,34 +163,64 @@ type ClusterTopologyComponent struct { CompDef string `json:"compDef"` } +// ClusterTopologySharding defines a sharding within a ClusterTopology. +type ClusterTopologySharding struct { + // Defines the unique identifier of the sharding within the cluster topology. + // It follows IANA Service naming rules and is used as part of the Service's DNS name. + // The name must start with a lowercase letter, can contain lowercase letters, numbers, + // and hyphens, and must end with a lowercase letter or number. + // + // Cannot be updated once set. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=16 + // +kubebuilder:validation:Pattern:=`^[a-z]([a-z0-9\-]*[a-z0-9])?$` + Name string `json:"name"` + + // Specifies the sharding definition that defines the characteristics and behavior of the sharding. + // + // The system selects the ShardingDefinition CR with the latest version that matches the pattern. + // This approach allows: + // + // 1. Precise selection by providing the exact name of a ShardingDefinition CR. + // 2. Flexible and automatic selection of the most up-to-date ShardingDefinition CR + // by specifying a regular expression pattern. + // + // Once set, this field cannot be updated. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=64 + ShardingDef string `json:"shardingDef"` +} + // ClusterTopologyOrders manages the lifecycle of components within a cluster by defining their provisioning, // terminating, and updating sequences. // It organizes components into stages or groups, where each group indicates a set of components // that can be managed concurrently. // These groups are processed sequentially, allowing precise control based on component dependencies and requirements. type ClusterTopologyOrders struct { - // Specifies the order for creating and initializing components. - // This is designed for components that depend on one another. Components without dependencies can be grouped together. + // Specifies the order for creating and initializing entities. + // This is designed for entities that depend on one another. Entities without dependencies can be grouped together. // - // Components that can be provisioned independently or have no dependencies can be listed together in the same stage, + // Entities that can be provisioned independently or have no dependencies can be listed together in the same stage, // separated by commas. // // +optional Provision []string `json:"provision,omitempty"` - // Outlines the order for stopping and deleting components. - // This sequence is designed for components that require a graceful shutdown or have interdependencies. + // Outlines the order for stopping and deleting entities. + // This sequence is designed for entities that require a graceful shutdown or have interdependencies. // - // Components that can be terminated independently or have no dependencies can be listed together in the same stage, + // Entities that can be terminated independently or have no dependencies can be listed together in the same stage, // separated by commas. // // +optional Terminate []string `json:"terminate,omitempty"` - // Update determines the order for updating components' specifications, such as image upgrades or resource scaling. - // This sequence is designed for components that have dependencies or require specific update procedures. + // Update determines the order for updating entities' specifications, such as image upgrades or resource scaling. 
+ // This sequence is designed for entities that have dependencies or require specific update procedures. // - // Components that can be updated independently or have no dependencies can be listed together in the same stage, + // Entities that can be updated independently or have no dependencies can be listed together in the same stage, // separated by commas. // // +optional diff --git a/apis/apps/v1/deprecated.go b/apis/apps/v1/deprecated.go index 681be954a4c..912d21e356d 100644 --- a/apis/apps/v1/deprecated.go +++ b/apis/apps/v1/deprecated.go @@ -117,8 +117,8 @@ func (r *ClusterSpec) GetComponentByName(componentName string) *ClusterComponent return nil } -func (r *ClusterSpec) GetShardingByName(shardingName string) *ShardingSpec { - for _, v := range r.ShardingSpecs { +func (r *ClusterSpec) GetShardingByName(shardingName string) *ClusterSharding { + for _, v := range r.Shardings { if v.Name == shardingName { return &v } diff --git a/apis/apps/v1/types.go b/apis/apps/v1/types.go index fe559f01cd6..fc725cb390b 100644 --- a/apis/apps/v1/types.go +++ b/apis/apps/v1/types.go @@ -80,8 +80,7 @@ const ( const ( ConditionTypeProvisioningStarted = "ProvisioningStarted" // ConditionTypeProvisioningStarted the operator starts resource provisioning to create or change the cluster ConditionTypeApplyResources = "ApplyResources" // ConditionTypeApplyResources the operator start to apply resources to create or change the cluster - ConditionTypeReplicasReady = "ReplicasReady" // ConditionTypeReplicasReady all pods of components are ready - ConditionTypeReady = "Ready" // ConditionTypeReady all components are running + ConditionTypeReady = "Ready" // ConditionTypeReady all components and shardings are running ) type ServiceRef struct { diff --git a/apis/apps/v1/zz_generated.deepcopy.go b/apis/apps/v1/zz_generated.deepcopy.go index 5c3f87365ae..bea8c83623b 100644 --- a/apis/apps/v1/zz_generated.deepcopy.go +++ b/apis/apps/v1/zz_generated.deepcopy.go @@ -515,6 +515,22 @@ func (in *ClusterService) DeepCopy() *ClusterService { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSharding) DeepCopyInto(out *ClusterSharding) { + *out = *in + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSharding. +func (in *ClusterSharding) DeepCopy() *ClusterSharding { + if in == nil { + return nil + } + out := new(ClusterSharding) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = *in @@ -525,9 +541,9 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.ShardingSpecs != nil { - in, out := &in.ShardingSpecs, &out.ShardingSpecs - *out = make([]ShardingSpec, len(*in)) + if in.Shardings != nil { + in, out := &in.Shardings, &out.Shardings + *out = make([]ClusterSharding, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -576,6 +592,13 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { (*out)[key] = *val.DeepCopy() } } + if in.Shardings != nil { + in, out := &in.Shardings, &out.Shardings + *out = make(map[string]ClusterComponentStatus, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]metav1.Condition, len(*in)) @@ -603,6 +626,11 @@ func (in *ClusterTopology) DeepCopyInto(out *ClusterTopology) { *out = make([]ClusterTopologyComponent, len(*in)) copy(*out, *in) } + if in.Shardings != nil { + in, out := &in.Shardings, &out.Shardings + *out = make([]ClusterTopologySharding, len(*in)) + copy(*out, *in) + } if in.Orders != nil { in, out := &in.Orders, &out.Orders *out = new(ClusterTopologyOrders) @@ -665,6 +693,21 @@ func (in *ClusterTopologyOrders) DeepCopy() *ClusterTopologyOrders { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTopologySharding) DeepCopyInto(out *ClusterTopologySharding) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTopologySharding. +func (in *ClusterTopologySharding) DeepCopy() *ClusterTopologySharding { + if in == nil { + return nil + } + out := new(ClusterTopologySharding) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterVarSelector) DeepCopyInto(out *ClusterVarSelector) { *out = *in @@ -2745,22 +2788,6 @@ func (in *ShardingLifecycleActions) DeepCopy() *ShardingLifecycleActions { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ShardingSpec) DeepCopyInto(out *ShardingSpec) { - *out = *in - in.Template.DeepCopyInto(&out.Template) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardingSpec. -func (in *ShardingSpec) DeepCopy() *ShardingSpec { - if in == nil { - return nil - } - out := new(ShardingSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ShardingSystemAccount) DeepCopyInto(out *ShardingSystemAccount) { *out = *in diff --git a/apis/operations/v1alpha1/opsrequest_validation.go b/apis/operations/v1alpha1/opsrequest_validation.go index e5003bdf0b7..ee3eaada1bb 100644 --- a/apis/operations/v1alpha1/opsrequest_validation.go +++ b/apis/operations/v1alpha1/opsrequest_validation.go @@ -344,9 +344,9 @@ func (r *OpsRequest) validateHorizontalScaling(_ context.Context, _ client.Clien } } } - for _, shardingSpec := range cluster.Spec.ShardingSpecs { - if hScale, ok := hScaleMap[shardingSpec.Name]; ok { - if err := r.validateHorizontalScalingSpec(hScale, shardingSpec.Template, cluster.Name, true); err != nil { + for _, spec := range cluster.Spec.Shardings { + if hScale, ok := hScaleMap[spec.Name]; ok { + if err := r.validateHorizontalScalingSpec(hScale, spec.Template, cluster.Name, true); err != nil { return err } } @@ -503,11 +503,11 @@ func (r *OpsRequest) checkInstanceTemplate(cluster *appsv1.Cluster, componentOps instanceNameMap[instances[i].Name] = sets.Empty{} } } - for _, shardingSpec := range cluster.Spec.ShardingSpecs { - if shardingSpec.Name != componentOps.ComponentName { + for _, spec := range cluster.Spec.Shardings { + if spec.Name != componentOps.ComponentName { continue } - setInstanceMap(shardingSpec.Template.Instances) + setInstanceMap(spec.Template.Instances) } for _, compSpec := range cluster.Spec.ComponentSpecs { if compSpec.Name != componentOps.ComponentName { @@ -533,8 +533,8 @@ func (r *OpsRequest) checkComponentExistence(cluster *appsv1.Cluster, compOpsLis for _, compSpec := range cluster.Spec.ComponentSpecs { compNameMap[compSpec.Name] = sets.Empty{} } - for _, shardingSpec := range cluster.Spec.ShardingSpecs { - compNameMap[shardingSpec.Name] = sets.Empty{} + for _, spec := range cluster.Spec.Shardings { + compNameMap[spec.Name] = sets.Empty{} } var ( notFoundCompNames []string @@ -615,7 +615,7 @@ func (r *OpsRequest) checkVolumesAllowExpansion(ctx context.Context, cli client. for _, comp := range cluster.Spec.ComponentSpecs { fillCompVols(comp, comp.Name, false) } - for _, sharding := range cluster.Spec.ShardingSpecs { + for _, sharding := range cluster.Spec.Shardings { fillCompVols(sharding.Template, sharding.Name, true) } diff --git a/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml b/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml index c0d7783c79b..749cf133be5 100644 --- a/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml +++ b/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml @@ -39,13 +39,14 @@ spec: offering a variety of topological configurations to meet diverse deployment needs and scenarios. - It includes a list of Components, each linked to a ComponentDefinition, which enhances reusability and reduce redundancy. + It includes a list of Components and/or Shardings, each linked to a ComponentDefinition or a ShardingDefinition, + which enhances reusability and reduce redundancy. For example, widely used components such as etcd and Zookeeper can be defined once and reused across multiple ClusterDefinitions, simplifying the setup of new systems. - Additionally, ClusterDefinition also specifies the sequence of startup, upgrade, and shutdown for Components, - ensuring a controlled and predictable management of component lifecycles. + Additionally, ClusterDefinition also specifies the sequence of startup, upgrade, and shutdown between Components and/or Shardings, + ensuring a controlled and predictable management of cluster lifecycles. 
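To make the topology ordering semantics concrete, here is a sketch (not taken from the diff; topology and definition names are placeholders) of a ClusterTopology that mixes components and shardings and groups independent entities into one stage using the comma syntax described above:

package example

import appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"

// shardedTopology groups "shard" and "proxy" into one stage; "storage" is
// provisioned before them and terminated after them.
func shardedTopology() appsv1.ClusterTopology {
	return appsv1.ClusterTopology{
		Name: "sharded", // hypothetical topology name
		Components: []appsv1.ClusterTopologyComponent{
			{Name: "proxy", CompDef: "example-compdef"},
			{Name: "storage", CompDef: "example-compdef"},
		},
		Shardings: []appsv1.ClusterTopologySharding{
			{Name: "shard", ShardingDef: "example-shardingdef"},
		},
		Orders: &appsv1.ClusterTopologyOrders{
			// Every component and sharding name must appear exactly once per order.
			Provision: []string{"storage", "shard,proxy"},
			Terminate: []string{"shard,proxy", "storage"},
		},
	}
}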
properties: apiVersion: description: |- @@ -111,7 +112,6 @@ spec: - name type: object maxItems: 128 - minItems: 1 type: array default: description: |- @@ -132,40 +132,80 @@ spec: properties: provision: description: |- - Specifies the order for creating and initializing components. - This is designed for components that depend on one another. Components without dependencies can be grouped together. + Specifies the order for creating and initializing entities. + This is designed for entities that depend on one another. Entities without dependencies can be grouped together. - Components that can be provisioned independently or have no dependencies can be listed together in the same stage, + Entities that can be provisioned independently or have no dependencies can be listed together in the same stage, separated by commas. items: type: string type: array terminate: description: |- - Outlines the order for stopping and deleting components. - This sequence is designed for components that require a graceful shutdown or have interdependencies. + Outlines the order for stopping and deleting entities. + This sequence is designed for entities that require a graceful shutdown or have interdependencies. - Components that can be terminated independently or have no dependencies can be listed together in the same stage, + Entities that can be terminated independently or have no dependencies can be listed together in the same stage, separated by commas. items: type: string type: array update: description: |- - Update determines the order for updating components' specifications, such as image upgrades or resource scaling. - This sequence is designed for components that have dependencies or require specific update procedures. + Update determines the order for updating entities' specifications, such as image upgrades or resource scaling. + This sequence is designed for entities that have dependencies or require specific update procedures. - Components that can be updated independently or have no dependencies can be listed together in the same stage, + Entities that can be updated independently or have no dependencies can be listed together in the same stage, separated by commas. items: type: string type: array type: object + shardings: + description: Shardings specifies the shardings in the topology. + items: + description: ClusterTopologySharding defines a sharding within + a ClusterTopology. + properties: + name: + description: |- + Defines the unique identifier of the sharding within the cluster topology. + It follows IANA Service naming rules and is used as part of the Service's DNS name. + The name must start with a lowercase letter, can contain lowercase letters, numbers, + and hyphens, and must end with a lowercase letter or number. + + + Cannot be updated once set. + maxLength: 16 + pattern: ^[a-z]([a-z0-9\-]*[a-z0-9])?$ + type: string + shardingDef: + description: |- + Specifies the sharding definition that defines the characteristics and behavior of the sharding. + + + The system selects the ShardingDefinition CR with the latest version that matches the pattern. + This approach allows: + + + 1. Precise selection by providing the exact name of a ShardingDefinition CR. + 2. Flexible and automatic selection of the most up-to-date ShardingDefinition CR + by specifying a regular expression pattern. + + + Once set, this field cannot be updated. 
+ maxLength: 64 + type: string + required: + - name + - shardingDef + type: object + maxItems: 128 + type: array required: - - components - name type: object maxItems: 128 diff --git a/config/crd/bases/apps.kubeblocks.io_clusters.yaml b/config/crd/bases/apps.kubeblocks.io_clusters.yaml index f81831b088c..f6ff2897e8e 100644 --- a/config/crd/bases/apps.kubeblocks.io_clusters.yaml +++ b/config/crd/bases/apps.kubeblocks.io_clusters.yaml @@ -174,7 +174,7 @@ spec: This field allows for detailed configuration of each Component within the Cluster. - Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. + Note: `shardings` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. items: description: ClusterComponentSpec defines the specification of a Component within a Cluster. @@ -3752,7 +3752,7 @@ spec: description: |- Specifies the Component's name. It's part of the Service DNS name and must comply with the IANA service naming rule. - The name is optional when ClusterComponentSpec is used as a template (e.g., in `shardingSpec`), + The name is optional when ClusterComponentSpec is used as a template (e.g., in `clusterSharding`), but required otherwise. maxLength: 22 pattern: ^[a-z]([a-z0-9\-]*[a-z0-9])?$ @@ -8324,7 +8324,7 @@ spec: services: description: |- Defines a list of additional Services that are exposed by a Cluster. - This field allows Services of selected Components, either from `componentSpecs` or `shardingSpecs` to be exposed, + This field allows Services of selected Components, either from `componentSpecs` or `shardings` to be exposed, alongside Services defined with ComponentService. @@ -8736,21 +8736,21 @@ spec: type: object type: array x-kubernetes-preserve-unknown-fields: true - shardingSpecs: + shardings: description: |- - Specifies a list of ShardingSpec objects that manage the sharding topology for Cluster Components. - Each ShardingSpec organizes components into shards, with each shard corresponding to a Component. + Specifies a list of ClusterSharding objects that manage the sharding topology for Cluster Components. + Each ClusterSharding organizes components into shards, with each shard corresponding to a Component. Components within a shard are all based on a common ClusterComponentSpec template, ensuring uniform configurations. This field supports dynamic resharding by facilitating the addition or removal of shards - through the `shards` field in ShardingSpec. + through the `shards` field in ClusterSharding. - Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. + Note: `shardings` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. items: description: |- - ShardingSpec defines how KubeBlocks manage dynamic provisioned shards. + ClusterSharding defines how KubeBlocks manage dynamic provisioned shards. A typical design pattern for distributed databases is to distribute data across multiple shards, with each shard consisting of multiple replicas. Therefore, KubeBlocks supports representing a shard with a Component and dynamically instantiating Components @@ -8760,33 +8760,45 @@ spec: name: description: |- Represents the common parent part of all shard names. + + This identifier is included as part of the Service DNS name and must comply with IANA service naming rules. 
- It is used to generate the names of underlying Components following the pattern `$(shardingSpec.name)-$(ShardID)`. + It is used to generate the names of underlying Components following the pattern `$(clusterSharding.name)-$(ShardID)`. ShardID is a random string that is appended to the Name to generate unique identifiers for each shard. For example, if the sharding specification name is "my-shard" and the ShardID is "abc", the resulting Component name would be "my-shard-abc". - Note that the name defined in Component template(`shardingSpec.template.name`) will be disregarded - when generating the Component names of the shards. The `shardingSpec.name` field takes precedence. + Note that the name defined in Component template(`clusterSharding.template.name`) will be disregarded + when generating the Component names of the shards. The `clusterSharding.name` field takes precedence. maxLength: 15 pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ type: string x-kubernetes-validations: - message: name is immutable rule: self == oldSelf + shardingDef: + description: |- + Specifies the ShardingDefinition custom resource (CR) that defines the sharding's characteristics and behavior. + + + The full name or regular expression is supported to match the ShardingDefinition. + maxLength: 64 + type: string shards: description: |- Specifies the desired number of shards. + + Users can declare the desired number of shards through this field. KubeBlocks dynamically creates and deletes Components based on the difference between the desired and actual number of shards. KubeBlocks provides lifecycle management for sharding, including: - - Executing the postProvision Action defined in the ComponentDefinition when the number of shards increases. + - Executing the shardProvision Action defined in the ShardingDefinition when the number of shards increases. This allows for custom actions to be performed after a new shard is provisioned. - - Executing the preTerminate Action defined in the ComponentDefinition when the number of shards decreases. + - Executing the shardTerminate Action defined in the ShardingDefinition when the number of shards decreases. This enables custom cleanup or data migration tasks to be executed before a shard is terminated. Resources and data associated with the corresponding Component will also be deleted. format: int32 @@ -8796,9 +8808,11 @@ spec: template: description: |- The template for generating Components for shards, where each shard consists of one Component. + + This field is of type ClusterComponentSpec, which encapsulates all the required details and definitions for creating and managing the Components. - KubeBlocks uses this template to generate a set of identical Components or shards. + KubeBlocks uses this template to generate a set of identical Components of shards. All the generated Components will have the same specifications and definitions as specified in the `template` field. @@ -12429,7 +12443,7 @@ spec: description: |- Specifies the Component's name. It's part of the Service DNS name and must comply with the IANA service naming rule. - The name is optional when ClusterComponentSpec is used as a template (e.g., in `shardingSpec`), + The name is optional when ClusterComponentSpec is used as a template (e.g., in `clusterSharding`), but required otherwise. maxLength: 22 pattern: ^[a-z]([a-z0-9\-]*[a-z0-9])?$ @@ -15918,10 +15932,6 @@ spec: status: description: ClusterStatus defines the observed state of the Cluster. 
properties: - clusterDefGeneration: - description: Represents the generation number of the referenced ClusterDefinition. - format: int64 - type: integer components: additionalProperties: description: ClusterComponentStatus records Component status. @@ -16048,6 +16058,33 @@ spec: - Failed - Abnormal type: string + shardings: + additionalProperties: + description: ClusterComponentStatus records Component status. + properties: + message: + additionalProperties: + type: string + description: |- + Records detailed information about the Component in its current phase. + The keys are either podName, deployName, or statefulSetName, formatted as 'ObjectKind/Name'. + type: object + phase: + description: Specifies the current state of the Component. + enum: + - Creating + - Running + - Updating + - Stopping + - Stopped + - Deleting + - Failed + - Abnormal + type: string + type: object + description: Records the current status information of all shardings + within the Cluster. + type: object type: object type: object served: true diff --git a/controllers/apps/cluster_controller.go b/controllers/apps/cluster_controller.go index 01b3c927df8..7a081ecb3b5 100644 --- a/controllers/apps/cluster_controller.go +++ b/controllers/apps/cluster_controller.go @@ -129,14 +129,14 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct AddTransformer( // handle cluster deletion &clusterDeletionTransformer{}, - // update finalizer and cd&cv labels - &clusterAssureMetaTransformer{}, - // validate cd & cv's existence and availability - &clusterLoadRefResourcesTransformer{}, + // update finalizer and definition labels + &clusterMetaTransformer{}, + // validate the cluster spec + &clusterValidationTransformer{}, // handle cluster shared account - &clusterSharedAccountTransformer{}, - // normalize the cluster and component API - &ClusterAPINormalizationTransformer{}, + &clusterShardingAccountTransformer{}, + // normalize the cluster spec + &clusterNormalizationTransformer{}, // placement replicas across data-plane k8s clusters &clusterPlacementTransformer{multiClusterMgr: r.MultiClusterMgr}, // handle cluster services diff --git a/controllers/apps/cluster_controller_test.go b/controllers/apps/cluster_controller_test.go index ce0138c1dde..c124273c27f 100644 --- a/controllers/apps/cluster_controller_test.go +++ b/controllers/apps/cluster_controller_test.go @@ -235,7 +235,8 @@ var _ = Describe("Cluster Controller", func() { shardingComponentProcessorWrapper := func(compName, compDefName string, processor ...func(*testapps.MockClusterFactory)) func(f *testapps.MockClusterFactory) { return func(f *testapps.MockClusterFactory) { - f.AddShardingSpec(compName, compDefName).SetShards(defaultShardCount) + f.AddSharding(compName, "", compDefName). 
+ SetShards(defaultShardCount) for _, p := range processor { if p != nil { p(f) @@ -450,9 +451,9 @@ var _ = Describe("Cluster Controller", func() { By("scale in the sharding component") Expect(testapps.GetAndChangeObj(&testCtx, clusterKey, func(cluster *appsv1.Cluster) { - for i := range cluster.Spec.ShardingSpecs { - if cluster.Spec.ShardingSpecs[i].Name == compName { - cluster.Spec.ShardingSpecs[i].Shards = int32(shards - 1) + for i := range cluster.Spec.Shardings { + if cluster.Spec.Shardings[i].Name == compName { + cluster.Spec.Shardings[i].Shards = int32(shards - 1) } } })()).ShouldNot(HaveOccurred()) diff --git a/controllers/apps/cluster_plan_builder.go b/controllers/apps/cluster_plan_builder.go index 94a0216bb62..f6ef2b1a670 100644 --- a/controllers/apps/cluster_plan_builder.go +++ b/controllers/apps/cluster_plan_builder.go @@ -54,16 +54,22 @@ type clusterTransformContext struct { Client client.Reader record.EventRecorder logr.Logger - Cluster *appsv1.Cluster - OrigCluster *appsv1.Cluster - ClusterDef *appsv1.ClusterDefinition - ComponentDefs map[string]*appsv1.ComponentDefinition - // ComponentSpecs includes all cluster component specs generated from ComponentSpecs and ShardingSpecs - ComponentSpecs []*appsv1.ClusterComponentSpec - // ShardingComponentSpecs includes all sharding component specs generated from ShardingSpecs - ShardingComponentSpecs map[string][]*appsv1.ClusterComponentSpec - // Annotations to be added to components, mapping with ComponentSpecs. - Annotations map[string]map[string]string + + Cluster *appsv1.Cluster + OrigCluster *appsv1.Cluster + + clusterDef *appsv1.ClusterDefinition + shardingDefs map[string]*appsv1.ShardingDefinition + componentDefs map[string]*appsv1.ComponentDefinition + + // consolidated components and shardings from topology and/or user-specified + components []*appsv1.ClusterComponentSpec + shardings []*appsv1.ClusterSharding + + shardingComps map[string][]*appsv1.ClusterComponentSpec // comp specs for each sharding + + // TODO: remove this, annotations to be added to components for sharding, mapping with @allComps. 
+ annotations map[string]map[string]string } // clusterPlanBuilder a graph.PlanBuilder implementation for Cluster reconciliation @@ -104,6 +110,33 @@ func (c *clusterTransformContext) GetLogger() logr.Logger { return c.Logger } +func (c *clusterTransformContext) sharding(name string) bool { + // hack: use shardingComps to determine if the entity is sharding or component + _, ok := c.shardingComps[name] + return ok +} + +func (c *clusterTransformContext) total() int { + cnt := len(c.components) + for _, comps := range c.shardingComps { + cnt += len(comps) + } + return cnt +} + +func (c *clusterTransformContext) traverse(f func(spec *appsv1.ClusterComponentSpec)) { + if f != nil { + for _, comp := range c.components { + f(comp) + } + for _, comps := range c.shardingComps { + for _, comp := range comps { + f(comp) + } + } + } +} + func init() { model.AddScheme(appsv1alpha1.AddToScheme) model.AddScheme(appsv1beta1.AddToScheme) diff --git a/controllers/apps/cluster_status_conditions.go b/controllers/apps/cluster_status_conditions.go index 55159be0f40..a016853a744 100644 --- a/controllers/apps/cluster_status_conditions.go +++ b/controllers/apps/cluster_status_conditions.go @@ -22,8 +22,8 @@ package apps import ( "fmt" "slices" + "strings" - "golang.org/x/exp/maps" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -36,10 +36,8 @@ const ( ReasonPreCheckFailed = "PreCheckFailed" // ReasonPreCheckFailed preChecks failed for provisioning started ReasonApplyResourcesFailed = "ApplyResourcesFailed" // ReasonApplyResourcesFailed applies resources failed to create or change the cluster ReasonApplyResourcesSucceed = "ApplyResourcesSucceed" // ReasonApplyResourcesSucceed applies resources succeeded to create or change the cluster - ReasonReplicasNotReady = "ReplicasNotReady" // ReasonReplicasNotReady the pods of components are not ready - ReasonAllReplicasReady = "AllReplicasReady" // ReasonAllReplicasReady the pods of components are ready - ReasonComponentsNotReady = "ComponentsNotReady" // ReasonComponentsNotReady the components of cluster are not ready ReasonClusterReady = "ClusterReady" // ReasonClusterReady the components of cluster are ready, the component phase is running + ReasonComponentsNotReady = "ComponentsNotReady" // ReasonComponentsNotReady the components of cluster are not ready ) func setProvisioningStartedCondition(conditions *[]metav1.Condition, clusterName string, clusterGeneration int64, err error) { @@ -93,7 +91,6 @@ func setApplyResourceCondition(conditions *[]metav1.Condition, clusterGeneration meta.SetStatusCondition(conditions, condition) } -// newApplyResourcesCondition creates a condition when applied resources succeed. func newApplyResourcesCondition(clusterGeneration int64) metav1.Condition { return metav1.Condition{ Type: appsv1.ConditionTypeApplyResources, @@ -104,7 +101,6 @@ func newApplyResourcesCondition(clusterGeneration int64) metav1.Condition { } } -// newApplyResourcesCondition creates a condition when applied resources succeed. 
func newFailedApplyResourcesCondition(err error) metav1.Condition { return metav1.Condition{ Type: appsv1.ConditionTypeApplyResources, @@ -114,46 +110,27 @@ func newFailedApplyResourcesCondition(err error) metav1.Condition { } } -// newAllReplicasPodsReadyConditions creates a condition when all pods of components are ready -func newAllReplicasPodsReadyConditions() metav1.Condition { - return metav1.Condition{ - Type: appsv1.ConditionTypeReplicasReady, - Status: metav1.ConditionTrue, - Message: "all pods of components are ready, waiting for the probe detection successful", - Reason: ReasonAllReplicasReady, - } -} - -// newReplicasNotReadyCondition creates a condition when pods of components are not ready -func newReplicasNotReadyCondition(notReadyComponentNames map[string]struct{}) metav1.Condition { - cNameSlice := maps.Keys(notReadyComponentNames) - slices.Sort(cNameSlice) - return metav1.Condition{ - Type: appsv1.ConditionTypeReplicasReady, - Status: metav1.ConditionFalse, - Message: fmt.Sprintf("pods are not ready in Components: %v, refer to related component message in Cluster.status.components", cNameSlice), - Reason: ReasonReplicasNotReady, - } -} - -// newClusterReadyCondition creates a condition when all components of cluster are running func newClusterReadyCondition(clusterName string) metav1.Condition { return metav1.Condition{ Type: appsv1.ConditionTypeReady, Status: metav1.ConditionTrue, - Message: fmt.Sprintf("Cluster: %s is ready, current phase is Running", clusterName), + Message: fmt.Sprintf("cluster %s is ready", clusterName), Reason: ReasonClusterReady, } } -// newComponentsNotReadyCondition creates a condition when components of cluster are not ready -func newComponentsNotReadyCondition(notReadyComponentNames map[string]struct{}) metav1.Condition { - cNameSlice := maps.Keys(notReadyComponentNames) - slices.Sort(cNameSlice) +func newClusterNotReadyCondition(clusterName string, kindNames map[string][]string) metav1.Condition { + messages := make([]string, 0) + for kind, names := range kindNames { + if len(names) > 0 { + slices.Sort(names) + messages = append(messages, fmt.Sprintf("unavailable %ss: %s", kind, strings.Join(names, ","))) + } + } return metav1.Condition{ Type: appsv1.ConditionTypeReady, Status: metav1.ConditionFalse, - Message: fmt.Sprintf("pods are unavailable in Components: %v, refer to related component message in Cluster.status.components", cNameSlice), + Message: fmt.Sprintf("cluster %s is NOT ready, %s", clusterName, strings.Join(messages, ",")), Reason: ReasonComponentsNotReady, } } diff --git a/controllers/apps/clusterdefinition_controller.go b/controllers/apps/clusterdefinition_controller.go index 935a5e4cf24..4b95cac6bef 100644 --- a/controllers/apps/clusterdefinition_controller.go +++ b/controllers/apps/clusterdefinition_controller.go @@ -27,6 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" k8sruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -178,19 +179,32 @@ func (r *ClusterDefinitionReconciler) reconcileTopologies(rctx intctrlutil.Reque } func (r *ClusterDefinitionReconciler) validateTopology(rctx intctrlutil.RequestCtx, topology appsv1.ClusterTopology) error { - if !checkUniqueItemWithValue(topology.Components, "Name", nil) { - return fmt.Errorf("duplicate topology component names") + if err := r.validateTopologyComponents(rctx, topology); err != nil { + return err + } + if err := 
r.validateTopologyShardings(rctx, topology); err != nil { + return err + } + if err := r.globalUniqueNameCheck(topology); err != nil { + return err } if topology.Orders != nil { if err := r.validateTopologyOrders(topology); err != nil { return err } } + return nil +} + +func (r *ClusterDefinitionReconciler) validateTopologyComponents(rctx intctrlutil.RequestCtx, topology appsv1.ClusterTopology) error { + if !checkUniqueItemWithValue(topology.Components, "Name", nil) { + return fmt.Errorf("duplicate topology component names") + } // validate topology reference component definitions name pattern for _, comp := range topology.Components { - if err := component.ValidateCompDefRegexp(comp.CompDef); err != nil { - return fmt.Errorf("invalid component definition reference pattern: %s", comp.CompDef) + if err := component.ValidateDefNameRegexp(comp.CompDef); err != nil { + return fmt.Errorf("invalid component definition reference: %s", comp.CompDef) } } @@ -206,34 +220,6 @@ func (r *ClusterDefinitionReconciler) validateTopology(rctx intctrlutil.RequestC return nil } -func (r *ClusterDefinitionReconciler) validateTopologyOrders(topology appsv1.ClusterTopology) error { - comps := make([]string, 0) - for _, comp := range topology.Components { - comps = append(comps, comp.Name) - } - slices.Sort(comps) - - validate := func(order []string) bool { - if len(order) == 0 { - return true - } - items := strings.Split(strings.Join(order, ","), ",") - slices.Sort(items) - return slices.Equal(items, comps) - } - - if !validate(topology.Orders.Provision) { - return fmt.Errorf("the components in provision orders are different from those in definition") - } - if !validate(topology.Orders.Terminate) { - return fmt.Errorf("the components in terminate orders are different from those in definition") - } - if !validate(topology.Orders.Update) { - return fmt.Errorf("the components in update orders are different from those in definition") - } - return nil -} - func (r *ClusterDefinitionReconciler) loadTopologyCompDefs(ctx context.Context, topology appsv1.ClusterTopology) (map[string][]*appsv1.ComponentDefinition, error) { compDefList := &appsv1.ComponentDefinitionList{} @@ -263,7 +249,112 @@ func (r *ClusterDefinitionReconciler) validateTopologyComponent(compDefs map[str comp appsv1.ClusterTopologyComponent) error { defs, ok := compDefs[comp.Name] if !ok || len(defs) == 0 { - return fmt.Errorf("there is no matched definitions found for the topology component %s", comp.Name) + return fmt.Errorf("there is no matched definitions found for the component %s", comp.Name) + } + return nil +} + +func (r *ClusterDefinitionReconciler) validateTopologyShardings(rctx intctrlutil.RequestCtx, topology appsv1.ClusterTopology) error { + if !checkUniqueItemWithValue(topology.Shardings, "Name", nil) { + return fmt.Errorf("duplicate topology sharding names") + } + + for _, sharding := range topology.Shardings { + if err := component.ValidateDefNameRegexp(sharding.ShardingDef); err != nil { + return fmt.Errorf("invalid sharding definition reference: %s", sharding.ShardingDef) + } + } + + shardingDefs, err := r.loadTopologyShardingDefs(rctx.Ctx, topology) + if err != nil { + return err + } + for _, sharding := range topology.Shardings { + if err := r.validateTopologySharding(shardingDefs, sharding); err != nil { + return err + } + } + return nil +} + +func (r *ClusterDefinitionReconciler) loadTopologyShardingDefs(ctx context.Context, + topology appsv1.ClusterTopology) (map[string][]*appsv1.ShardingDefinition, error) { + shardingDefList := 
&appsv1.ShardingDefinitionList{} + if err := r.Client.List(ctx, shardingDefList); err != nil { + return nil, err + } + + shardingDefs := map[string]*appsv1.ShardingDefinition{} + for i, item := range shardingDefList.Items { + shardingDefs[item.Name] = &shardingDefList.Items[i] + } + + result := make(map[string][]*appsv1.ShardingDefinition) + for _, sharding := range topology.Shardings { + defs := make([]*appsv1.ShardingDefinition, 0) + for shardingDefName := range shardingDefs { + if component.PrefixOrRegexMatched(shardingDefName, sharding.ShardingDef) { + defs = append(defs, shardingDefs[shardingDefName]) + } + } + result[sharding.Name] = defs + } + return result, nil +} + +func (r *ClusterDefinitionReconciler) validateTopologySharding(shardingDefs map[string][]*appsv1.ShardingDefinition, + sharding appsv1.ClusterTopologySharding) error { + defs, ok := shardingDefs[sharding.Name] + if !ok || len(defs) == 0 { + return fmt.Errorf("there is no matched definitions found for the sharding %s", sharding.Name) + } + return nil +} + +func (r *ClusterDefinitionReconciler) globalUniqueNameCheck(topology appsv1.ClusterTopology) error { + if len(topology.Components) == 0 || len(topology.Shardings) == 0 { + return nil + } + names := sets.New[string]() + for _, comp := range topology.Components { + names.Insert(comp.Name) + } + for _, sharding := range topology.Shardings { + if names.Has(sharding.Name) { + return fmt.Errorf("duplicate topology component and sharding names: %s", sharding.Name) + } + names.Insert(sharding.Name) + } + return nil +} + +func (r *ClusterDefinitionReconciler) validateTopologyOrders(topology appsv1.ClusterTopology) error { + entities := make([]string, 0) + for _, comp := range topology.Components { + entities = append(entities, comp.Name) + } + for _, sharding := range topology.Shardings { + entities = append(entities, sharding.Name) + } + slices.Sort(entities) + + validate := func(order []string) bool { + if len(order) == 0 { + return true + } + items := strings.Split(strings.Join(order, ","), ",") + slices.Sort(items) + return slices.Equal(items, entities) + } + + if !validate(topology.Orders.Provision) { + return fmt.Errorf("the components and shardings in provision orders are different from those in definition") + } + if !validate(topology.Orders.Terminate) { + return fmt.Errorf("the components and shardings in terminate orders are different from those in definition") + } + if !validate(topology.Orders.Update) { + return fmt.Errorf("the components and shardings in update orders are different from those in definition") } return nil } diff --git a/controllers/apps/clusterdefinition_controller_test.go b/controllers/apps/clusterdefinition_controller_test.go index e9e09dc1fc3..241bed304da 100644 --- a/controllers/apps/clusterdefinition_controller_test.go +++ b/controllers/apps/clusterdefinition_controller_test.go @@ -34,8 +34,9 @@ import ( var _ = Describe("ClusterDefinition Controller", func() { const ( - clusterDefName = "test-clusterdef" - compDefinitionName = "test-component-definition" + clusterDefName = "test-clusterdef" + compDefinitionName = "test-component-definition" + shardingDefinitionName = "test-sharding-definition" ) var ( @@ -55,6 +56,7 @@ var _ = Describe("ClusterDefinition Controller", func() { // resources should be released in following order // non-namespaced testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, intctrlutil.ClusterDefinitionSignature, true, ml) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, 
intctrlutil.ShardingDefinitionSignature, true, ml) testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, intctrlutil.ComponentDefinitionSignature, true, ml) testapps.ClearResources(&testCtx, intctrlutil.ConfigConstraintSignature, ml) @@ -105,6 +107,38 @@ var _ = Describe("ClusterDefinition Controller", func() { Update: []string{"storage", "server", "proxy"}, }, } + multipleCompsNShardingTopology = appsv1.ClusterTopology{ + Name: "topo3", + Default: false, + Components: []appsv1.ClusterTopologyComponent{ + { + Name: "proxy", + CompDef: compDefinitionName, + }, + { + Name: "server", + CompDef: compDefinitionName, + }, + { + Name: "storage", + CompDef: compDefinitionName, + }, + }, + Shardings: []appsv1.ClusterTopologySharding{ + { + Name: "sharding-1", + ShardingDef: shardingDefinitionName, + }, + { + Name: "sharding-2", + ShardingDef: shardingDefinitionName, + }, + }, + Orders: &appsv1.ClusterTopologyOrders{ + Provision: []string{"storage", "server", "proxy", "sharding-2", "sharding-1"}, + Update: []string{"storage", "server", "proxy", "sharding-2", "sharding-1"}, + }, + } ) BeforeEach(func() { @@ -120,10 +154,20 @@ var _ = Describe("ClusterDefinition Controller", func() { g.Expect(compDef.Status.Phase).Should(Equal(appsv1.AvailablePhase)) })).Should(Succeed()) + By("create a ShardingDefinition obj") + shardingDefObj := testapps.NewShardingDefinitionFactory(shardingDefinitionName, compDefinitionName). + Create(&testCtx). + GetObject() + Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(shardingDefObj), func(g Gomega, shardingDef *appsv1.ShardingDefinition) { + g.Expect(shardingDef.Status.ObservedGeneration).Should(Equal(shardingDef.Generation)) + g.Expect(shardingDef.Status.Phase).Should(Equal(appsv1.AvailablePhase)) + })).Should(Succeed()) + By("Create a ClusterDefinition obj") clusterDefObj = testapps.NewClusterDefFactory(clusterDefName). AddClusterTopology(singleCompTopology). AddClusterTopology(multipleCompsTopology). + AddClusterTopology(multipleCompsNShardingTopology). Create(&testCtx). 
GetObject() Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(clusterDefObj), func(g Gomega, cd *appsv1.ClusterDefinition) { @@ -189,8 +233,42 @@ var _ = Describe("ClusterDefinition Controller", func() { })).Should(Succeed()) }) - It("different components in topology orders", func() { - By("update cd to add/remove components in orders") + It("duplicate topology sharding", func() { + By("update cd to set all sharding names as same") + Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(clusterDefObj), func(cd *appsv1.ClusterDefinition) { + for i, topology := range cd.Spec.Topologies { + if len(topology.Shardings) == 0 { + continue + } + name := topology.Shardings[0].Name + for j := range topology.Shardings { + cd.Spec.Topologies[i].Shardings[j].Name = name + } + } + })()).Should(Succeed()) + + Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(clusterDefObj), func(g Gomega, cd *appsv1.ClusterDefinition) { + g.Expect(cd.Status.ObservedGeneration).Should(Equal(cd.Generation)) + g.Expect(cd.Status.Phase).Should(Equal(appsv1.UnavailablePhase)) + g.Expect(cd.Status.Message).Should(ContainSubstring("duplicate topology sharding")) + })).Should(Succeed()) + }) + + It("duplicate topology component and sharding", func() { + By("update cd to set the name of one component and sharding as same") + Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(clusterDefObj), func(cd *appsv1.ClusterDefinition) { + cd.Spec.Topologies[2].Shardings[0].Name = cd.Spec.Topologies[2].Components[0].Name + })()).Should(Succeed()) + + Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(clusterDefObj), func(g Gomega, cd *appsv1.ClusterDefinition) { + g.Expect(cd.Status.ObservedGeneration).Should(Equal(cd.Generation)) + g.Expect(cd.Status.Phase).Should(Equal(appsv1.UnavailablePhase)) + g.Expect(cd.Status.Message).Should(ContainSubstring("duplicate topology component and sharding")) + })).Should(Succeed()) + }) + + It("different entities in topology orders", func() { + By("update cd to add/remove entities in orders") Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(clusterDefObj), func(cd *appsv1.ClusterDefinition) { for i := range cd.Spec.Topologies { update := func(orders []string) []string { @@ -200,7 +278,7 @@ var _ = Describe("ClusterDefinition Controller", func() { rand.Shuffle(len(orders), func(m, n int) { orders[m], orders[n] = orders[n], orders[m] }) - return append(orders[1:], "comp-non-exist") + return append(orders[1:], "entities-non-exist") } topology := cd.Spec.Topologies[i] if topology.Orders != nil { @@ -214,7 +292,7 @@ var _ = Describe("ClusterDefinition Controller", func() { Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(clusterDefObj), func(g Gomega, cd *appsv1.ClusterDefinition) { g.Expect(cd.Status.ObservedGeneration).Should(Equal(cd.Generation)) g.Expect(cd.Status.Phase).Should(Equal(appsv1.UnavailablePhase)) - g.Expect(cd.Status.Message).Should(MatchRegexp("the components in provision|terminate|update orders are different from those in definition")) + g.Expect(cd.Status.Message).Should(MatchRegexp("the components and shardings in provision|terminate|update orders are different from those in definition")) })).Should(Succeed()) }) @@ -227,7 +305,20 @@ var _ = Describe("ClusterDefinition Controller", func() { Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(clusterDefObj), func(g Gomega, cd *appsv1.ClusterDefinition) { 
g.Expect(cd.Status.ObservedGeneration).Should(Equal(cd.Generation)) g.Expect(cd.Status.Phase).Should(Equal(appsv1.UnavailablePhase)) - g.Expect(cd.Status.Message).Should(ContainSubstring("there is no matched definitions found for the topology component")) + g.Expect(cd.Status.Message).Should(ContainSubstring("there is no matched definitions found for the component")) + })).Should(Succeed()) + }) + + It("topology sharding has no matched definitions", func() { + By("update cd to set a non-exist shardingDef") + Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(clusterDefObj), func(cd *appsv1.ClusterDefinition) { + cd.Spec.Topologies[2].Shardings[0].ShardingDef = "shardingdef-non-exist" + })()).Should(Succeed()) + + Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(clusterDefObj), func(g Gomega, cd *appsv1.ClusterDefinition) { + g.Expect(cd.Status.ObservedGeneration).Should(Equal(cd.Generation)) + g.Expect(cd.Status.Phase).Should(Equal(appsv1.UnavailablePhase)) + g.Expect(cd.Status.Message).Should(ContainSubstring("there is no matched definitions found for the sharding")) })).Should(Succeed()) }) }) diff --git a/controllers/apps/componentdefinition_controller.go b/controllers/apps/componentdefinition_controller.go index 45ec237401b..4b6f8f903b9 100644 --- a/controllers/apps/componentdefinition_controller.go +++ b/controllers/apps/componentdefinition_controller.go @@ -241,7 +241,7 @@ func (r *ComponentDefinitionReconciler) validateVars(cli client.Client, rctx int if len(compDef) == 0 { continue } - if err := component.ValidateCompDefRegexp(compDef); err != nil { + if err := component.ValidateDefNameRegexp(compDef); err != nil { return errors.Wrapf(err, "invalid reference to component definition name pattern: %s", compDef) } } diff --git a/controllers/apps/componentversion_controller.go b/controllers/apps/componentversion_controller.go index 978db976609..57a57890a69 100644 --- a/controllers/apps/componentversion_controller.go +++ b/controllers/apps/componentversion_controller.go @@ -379,7 +379,7 @@ func (r *ComponentVersionReconciler) imageDefinedInActions(_ appsv1.ComponentDef func validateCompatibilityRulesCompDef(compVersion *appsv1.ComponentVersion) error { for _, rule := range compVersion.Spec.CompatibilityRules { for _, compDefName := range rule.CompDefs { - if err := component.ValidateCompDefRegexp(compDefName); err != nil { + if err := component.ValidateDefNameRegexp(compDefName); err != nil { return errors.Wrapf(err, "invalid reference to component definition name pattern: %s in compatibility rules", compDefName) } } diff --git a/controllers/apps/shardingdefinition_controller.go b/controllers/apps/shardingdefinition_controller.go index d220765c437..c847bf22479 100644 --- a/controllers/apps/shardingdefinition_controller.go +++ b/controllers/apps/shardingdefinition_controller.go @@ -24,8 +24,10 @@ import ( "encoding/json" "fmt" "hash/fnv" + "slices" "strings" + "golang.org/x/exp/maps" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/rand" @@ -162,7 +164,7 @@ func (r *ShardingDefinitionReconciler) validateTemplate(ctx context.Context, cli shardingDef *appsv1.ShardingDefinition) error { template := shardingDef.Spec.Template - if err := component.ValidateCompDefRegexp(template.CompDef); err != nil { + if err := component.ValidateDefNameRegexp(template.CompDef); err != nil { return err } @@ -292,3 +294,62 @@ func (r *ShardingDefinitionReconciler) immutableHash(cli client.Client, rctx int 
 	shardingDef.Annotations[immutableHashAnnotationKey], _ = r.specHash(shardingDef)
 	return cli.Patch(rctx.Ctx, shardingDef, patch)
 }
+
+// resolveShardingDefinition resolves the given name or name pattern and returns the matched ShardingDefinition object; if multiple definitions match, the latest one (the largest name after sorting) is chosen.
+func resolveShardingDefinition(ctx context.Context, cli client.Reader, shardingDefName string) (*appsv1.ShardingDefinition, error) {
+	shardingDefs, err := listShardingDefinitionsWithPattern(ctx, cli, shardingDefName)
+	if err != nil {
+		return nil, err
+	}
+	if len(shardingDefs) == 0 {
+		return nil, fmt.Errorf("no sharding definition found for the specified name: %s", shardingDefName)
+	}
+
+	m := make(map[string]int)
+	for i, def := range shardingDefs {
+		m[def.Name] = i
+	}
+	// choose the latest one
+	names := maps.Keys(m)
+	slices.Sort(names)
+	latestName := names[len(names)-1]
+
+	return shardingDefs[m[latestName]], nil
+}
+
+// listShardingDefinitionsWithPattern returns the sharding definitions whose names fully match the given name, or, if none do, those matching it as a prefix or regular expression.
+func listShardingDefinitionsWithPattern(ctx context.Context, cli client.Reader, name string) ([]*appsv1.ShardingDefinition, error) {
+	shardingDefList := &appsv1.ShardingDefinitionList{}
+	if err := cli.List(ctx, shardingDefList); err != nil {
+		return nil, err
+	}
+	fullyMatched := make([]*appsv1.ShardingDefinition, 0)
+	patternMatched := make([]*appsv1.ShardingDefinition, 0)
+	for i, item := range shardingDefList.Items {
+		if item.Name == name {
+			fullyMatched = append(fullyMatched, &shardingDefList.Items[i])
+		}
+		if component.PrefixOrRegexMatched(item.Name, name) {
+			patternMatched = append(patternMatched, &shardingDefList.Items[i])
+		}
+	}
+	if len(fullyMatched) > 0 {
+		return fullyMatched, nil
+	}
+	return patternMatched, nil
+}
+
+func validateShardingShards(shardingDef *appsv1.ShardingDefinition, sharding *appsv1.ClusterSharding) error {
+	var (
+		limit  = shardingDef.Spec.ShardsLimit
+		shards = sharding.Shards
+	)
+	if limit == nil || (shards >= limit.MinShards && shards <= limit.MaxShards) {
+		return nil
+	}
+	return shardsOutOfLimitError(sharding.Name, shards, *limit)
+}
+
+func shardsOutOfLimitError(shardingName string, shards int32, limit appsv1.ShardsLimit) error {
+	return fmt.Errorf("shards %d out-of-limit [%d, %d], sharding: %s", shards, limit.MinShards, limit.MaxShards, shardingName)
+}
diff --git a/controllers/apps/transformer_cluster_api_normalization.go b/controllers/apps/transformer_cluster_api_normalization.go
deleted file mode 100644
index 84f8f7aa07f..00000000000
--- a/controllers/apps/transformer_cluster_api_normalization.go
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see .
-*/ - -package apps - -import ( - "fmt" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - - appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" - "github.com/apecloud/kubeblocks/pkg/controller/component" - "github.com/apecloud/kubeblocks/pkg/controller/graph" - "github.com/apecloud/kubeblocks/pkg/controller/model" - "github.com/apecloud/kubeblocks/pkg/controllerutil" -) - -// ClusterAPINormalizationTransformer handles cluster and component API conversion. -type ClusterAPINormalizationTransformer struct{} - -var _ graph.Transformer = &ClusterAPINormalizationTransformer{} - -func (t *ClusterAPINormalizationTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { - transCtx, _ := ctx.(*clusterTransformContext) - cluster := transCtx.Cluster - if model.IsObjectDeleting(transCtx.OrigCluster) { - return nil - } - - var err error - defer func() { - setProvisioningStartedCondition(&cluster.Status.Conditions, cluster.Name, cluster.Generation, err) - }() - - if err = t.validateSpec(cluster); err != nil { - return err - } - - // build all component specs - transCtx.ComponentSpecs, err = t.buildCompSpecs(transCtx, cluster) - if err != nil { - return err - } - - // resolve all component definitions referenced - if err = t.resolveCompDefinitions(transCtx); err != nil { - return err - } - - // update the resolved component definitions and service versions to cluster spec. - t.updateCompSpecs(transCtx) - - return nil -} - -func (t *ClusterAPINormalizationTransformer) validateSpec(cluster *appsv1.Cluster) error { - if len(cluster.Spec.ShardingSpecs) == 0 { - return nil - } - shardCompNameMap := map[string]sets.Empty{} - for _, v := range cluster.Spec.ShardingSpecs { - shardCompNameMap[v.Name] = sets.Empty{} - } - for _, v := range cluster.Spec.ComponentSpecs { - if _, ok := shardCompNameMap[v.Name]; ok { - return fmt.Errorf(`duplicate component name "%s" in spec.shardingSpec`, v.Name) - } - } - return nil -} - -func (t *ClusterAPINormalizationTransformer) buildCompSpecs(transCtx *clusterTransformContext, - cluster *appsv1.Cluster) ([]*appsv1.ClusterComponentSpec, error) { - if withClusterTopology(cluster) { - return t.buildCompSpecs4Topology(transCtx.ClusterDef, cluster) - } - if withClusterUserDefined(cluster) { - return t.buildCompSpecs4Specified(transCtx, cluster) - } - return nil, nil -} - -func (t *ClusterAPINormalizationTransformer) buildCompSpecs4Topology(clusterDef *appsv1.ClusterDefinition, - cluster *appsv1.Cluster) ([]*appsv1.ClusterComponentSpec, error) { - newCompSpec := func(comp appsv1.ClusterTopologyComponent) *appsv1.ClusterComponentSpec { - return &appsv1.ClusterComponentSpec{ - Name: comp.Name, - ComponentDef: comp.CompDef, - } - } - - mergeCompSpec := func(comp appsv1.ClusterTopologyComponent, compSpec *appsv1.ClusterComponentSpec) *appsv1.ClusterComponentSpec { - if len(compSpec.ComponentDef) == 0 { - compSpec.ComponentDef = comp.CompDef - } - return compSpec - } - - clusterTopology := referredClusterTopology(clusterDef, cluster.Spec.Topology) - if clusterTopology == nil { - return nil, fmt.Errorf("referred cluster topology not found : %s", cluster.Spec.Topology) - } - - specifiedCompSpecs := make(map[string]*appsv1.ClusterComponentSpec) - for i, compSpec := range cluster.Spec.ComponentSpecs { - specifiedCompSpecs[compSpec.Name] = cluster.Spec.ComponentSpecs[i].DeepCopy() - } - - compSpecs := make([]*appsv1.ClusterComponentSpec, 0) - for i := range clusterTopology.Components { - comp := 
clusterTopology.Components[i] - if _, ok := specifiedCompSpecs[comp.Name]; ok { - compSpecs = append(compSpecs, mergeCompSpec(comp, specifiedCompSpecs[comp.Name])) - } else { - compSpecs = append(compSpecs, newCompSpec(comp)) - } - } - return compSpecs, nil -} - -func (t *ClusterAPINormalizationTransformer) buildCompSpecs4Specified(transCtx *clusterTransformContext, - cluster *appsv1.Cluster) ([]*appsv1.ClusterComponentSpec, error) { - compSpecs := make([]*appsv1.ClusterComponentSpec, 0) - for i := range cluster.Spec.ComponentSpecs { - compSpecs = append(compSpecs, cluster.Spec.ComponentSpecs[i].DeepCopy()) - } - if cluster.Spec.ShardingSpecs != nil { - shardingCompSpecs, err := t.buildCompSpecs4Sharding(transCtx, cluster) - if err != nil { - return nil, err - } - compSpecs = append(compSpecs, shardingCompSpecs...) - } - return compSpecs, nil -} - -func (t *ClusterAPINormalizationTransformer) buildCompSpecs4Sharding(transCtx *clusterTransformContext, - cluster *appsv1.Cluster) ([]*appsv1.ClusterComponentSpec, error) { - compSpecs := make([]*appsv1.ClusterComponentSpec, 0) - if transCtx.ShardingComponentSpecs == nil { - transCtx.ShardingComponentSpecs = make(map[string][]*appsv1.ClusterComponentSpec, 0) - } - for i, sharding := range cluster.Spec.ShardingSpecs { - shardingComps, err := controllerutil.GenShardingCompSpecList(transCtx.Context, transCtx.Client, cluster, &cluster.Spec.ShardingSpecs[i]) - if err != nil { - return nil, err - } - compSpecs = append(compSpecs, shardingComps...) - transCtx.ShardingComponentSpecs[sharding.Name] = shardingComps - } - return compSpecs, nil -} - -func (t *ClusterAPINormalizationTransformer) resolveCompDefinitions(transCtx *clusterTransformContext) error { - if transCtx.ComponentDefs == nil { - transCtx.ComponentDefs = make(map[string]*appsv1.ComponentDefinition) - } - for i, compSpec := range transCtx.ComponentSpecs { - compDef, serviceVersion, err := t.resolveCompDefinitionNServiceVersion(transCtx, compSpec) - if err != nil { - return err - } - transCtx.ComponentDefs[compDef.Name] = compDef - // set the componentDef and serviceVersion as resolved - transCtx.ComponentSpecs[i].ComponentDef = compDef.Name - transCtx.ComponentSpecs[i].ServiceVersion = serviceVersion - } - return nil -} - -func (t *ClusterAPINormalizationTransformer) resolveCompDefinitionNServiceVersion(transCtx *clusterTransformContext, - compSpec *appsv1.ClusterComponentSpec) (*appsv1.ComponentDefinition, string, error) { - var ( - ctx = transCtx.Context - cli = transCtx.Client - cluster = transCtx.Cluster - ) - comp := &appsv1.Component{} - err := cli.Get(ctx, types.NamespacedName{Namespace: cluster.Namespace, Name: component.FullName(cluster.Name, compSpec.Name)}, comp) - if err != nil && !apierrors.IsNotFound(err) { - return nil, "", err - } - - if apierrors.IsNotFound(err) || t.checkCompUpgrade(compSpec, comp) { - return resolveCompDefinitionNServiceVersion(ctx, cli, compSpec.ComponentDef, compSpec.ServiceVersion) - } - return resolveCompDefinitionNServiceVersion(ctx, cli, comp.Spec.CompDef, comp.Spec.ServiceVersion) -} - -func (t *ClusterAPINormalizationTransformer) checkCompUpgrade(compSpec *appsv1.ClusterComponentSpec, comp *appsv1.Component) bool { - return compSpec.ServiceVersion != comp.Spec.ServiceVersion || compSpec.ComponentDef != comp.Spec.CompDef -} - -func (t *ClusterAPINormalizationTransformer) updateCompSpecs(transCtx *clusterTransformContext) { - if withClusterTopology(transCtx.Cluster) { - t.updateCompSpecs4Topology(transCtx) - } - if 
withClusterUserDefined(transCtx.Cluster) { - t.updateCompSpecs4Specified(transCtx) - } -} - -func (t *ClusterAPINormalizationTransformer) updateCompSpecs4Topology(transCtx *clusterTransformContext) { - var ( - cluster = transCtx.Cluster - ) - compSpecs := make([]appsv1.ClusterComponentSpec, 0) - for i := range transCtx.ComponentSpecs { - compSpecs = append(compSpecs, appsv1.ClusterComponentSpec{ - Name: transCtx.ComponentSpecs[i].Name, - ComponentDef: transCtx.ComponentSpecs[i].ComponentDef, - ServiceVersion: transCtx.ComponentSpecs[i].ServiceVersion, - }) - } - for i, compSpec := range cluster.Spec.ComponentSpecs { - for j := range compSpecs { - if compSpec.Name == compSpecs[j].Name { - compSpecs[j] = cluster.Spec.ComponentSpecs[i] - compSpecs[j].ComponentDef = transCtx.ComponentSpecs[j].ComponentDef - compSpecs[j].ServiceVersion = transCtx.ComponentSpecs[j].ServiceVersion - break - } - } - } - cluster.Spec.ComponentSpecs = compSpecs -} - -func (t *ClusterAPINormalizationTransformer) updateCompSpecs4Specified(transCtx *clusterTransformContext) { - var ( - resolvedCompSpecs = transCtx.ComponentSpecs - idx = 0 - cluster = transCtx.Cluster - ) - for i := range cluster.Spec.ComponentSpecs { - cluster.Spec.ComponentSpecs[i].ComponentDef = resolvedCompSpecs[i].ComponentDef - cluster.Spec.ComponentSpecs[i].ServiceVersion = resolvedCompSpecs[i].ServiceVersion - } - idx += len(cluster.Spec.ComponentSpecs) - - for i, sharding := range cluster.Spec.ShardingSpecs { - cluster.Spec.ShardingSpecs[i].Template.ComponentDef = resolvedCompSpecs[idx].ComponentDef - cluster.Spec.ShardingSpecs[i].Template.ServiceVersion = resolvedCompSpecs[idx].ServiceVersion - idx += int(sharding.Shards) - } -} diff --git a/controllers/apps/transformer_cluster_backup_policy.go b/controllers/apps/transformer_cluster_backup_policy.go index 9451d67f500..25e60d6b1ac 100644 --- a/controllers/apps/transformer_cluster_backup_policy.go +++ b/controllers/apps/transformer_cluster_backup_policy.go @@ -129,7 +129,7 @@ func (r *clusterBackupPolicyTransformer) Transform(ctx graph.TransformContext, d } transformComponentBackupPolicy := func(compSpec *appsv1.ClusterComponentSpec, componentName string, isSharding bool) error { - compDef := r.ComponentDefs[compSpec.ComponentDef] + compDef := r.componentDefs[compSpec.ComponentDef] if compDef == nil { return nil } @@ -159,9 +159,9 @@ func (r *clusterBackupPolicyTransformer) Transform(ctx graph.TransformContext, d return err } } - for i := range r.Cluster.Spec.ShardingSpecs { - shardingSpec := r.Cluster.Spec.ShardingSpecs[i] - if err := transformComponentBackupPolicy(&shardingSpec.Template, shardingSpec.Name, true); err != nil { + for i := range r.Cluster.Spec.Shardings { + spec := r.Cluster.Spec.Shardings[i] + if err := transformComponentBackupPolicy(&spec.Template, spec.Name, true); err != nil { return err } } diff --git a/controllers/apps/transformer_cluster_component.go b/controllers/apps/transformer_cluster_component.go index 7f5ef4842e9..2a22bf4f51c 100644 --- a/controllers/apps/transformer_cluster_component.go +++ b/controllers/apps/transformer_cluster_component.go @@ -20,6 +20,7 @@ along with this program. If not, see . 
package apps import ( + "context" "fmt" "reflect" "slices" @@ -30,6 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" @@ -40,7 +42,7 @@ import ( ictrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" ) -// clusterComponentTransformer transforms all cluster.Spec.ComponentSpecs to mapping Component objects +// clusterComponentTransformer transforms components and shardings to mapping Component objects type clusterComponentTransformer struct{} var _ graph.Transformer = &clusterComponentTransformer{} @@ -51,99 +53,95 @@ func (t *clusterComponentTransformer) Transform(ctx graph.TransformContext, dag return nil } - if len(transCtx.ComponentSpecs) == 0 { - return nil - } - - allCompsUpToDate, err := checkAllCompsUpToDate(transCtx, transCtx.Cluster) + updateToDate, err := checkAllCompsUpToDate(transCtx, transCtx.Cluster) if err != nil { return err } // if the cluster is not updating and all components are up-to-date, skip the reconciliation - if !transCtx.OrigCluster.IsUpdating() && allCompsUpToDate { + if !transCtx.OrigCluster.IsUpdating() && updateToDate { return nil } - return t.reconcileComponents(transCtx, dag) + return t.transform(transCtx, dag) } -func (t *clusterComponentTransformer) reconcileComponents(transCtx *clusterTransformContext, dag *graph.DAG) error { - cluster := transCtx.Cluster - - protoCompSpecMap := make(map[string]*appsv1.ClusterComponentSpec) - for _, compSpec := range transCtx.ComponentSpecs { - protoCompSpecMap[compSpec.Name] = compSpec - } - - protoCompSet := sets.KeySet(protoCompSpecMap) - runningCompSet, err := component.GetClusterComponentShortNameSet(transCtx.Context, transCtx.Client, cluster) +func (t *clusterComponentTransformer) transform(transCtx *clusterTransformContext, dag *graph.DAG) error { + runningSet, err := t.runningSet(transCtx) if err != nil { return err } + protoSet := t.protoSet(transCtx) - createCompSet, deleteCompSet, updateCompSet := setDiff(runningCompSet, protoCompSet) + createSet, deleteSet, updateSet := setDiff(runningSet, protoSet) - // component objects to be deleted (scale-in) - if err := deleteCompsInOrder(transCtx, dag, deleteCompSet, false); err != nil { + if err := deleteCompNShardingInOrder(transCtx, dag, deleteSet, pointer.Bool(true)); err != nil { return err } - // component objects to be updated var delayedErr error - if err := t.handleCompsUpdate(transCtx, dag, protoCompSpecMap, updateCompSet, transCtx.Annotations); err != nil { + if err := t.handleUpdate(transCtx, dag, updateSet); err != nil { if !ictrlutil.IsDelayedRequeueError(err) { return err } delayedErr = err } - // component objects to be created - if err := t.handleCompsCreate(transCtx, dag, protoCompSpecMap, createCompSet, transCtx.Annotations); err != nil { + if err := t.handleCreate(transCtx, dag, createSet); err != nil { return err } return delayedErr } -func (t *clusterComponentTransformer) handleCompsCreate(transCtx *clusterTransformContext, dag *graph.DAG, - protoCompSpecMap map[string]*appsv1.ClusterComponentSpec, createCompSet sets.Set[string], - protoCompAnnotationsMap map[string]map[string]string) error { - handler := newCompHandler(transCtx, protoCompSpecMap, protoCompAnnotationsMap, createOp) - return handleCompsInOrder(transCtx, dag, createCompSet, handler) +func (t *clusterComponentTransformer) runningSet(transCtx *clusterTransformContext) 
(sets.Set[string], error) { + return clusterRunningCompNShardingSet(transCtx.Context, transCtx.Client, transCtx.Cluster) +} + +func (t *clusterComponentTransformer) protoSet(transCtx *clusterTransformContext) sets.Set[string] { + names := sets.Set[string]{} + for _, comp := range transCtx.components { + names.Insert(comp.Name) + } + for _, sharding := range transCtx.shardings { + names.Insert(sharding.Name) + } + return names +} + +func (t *clusterComponentTransformer) handleCreate(transCtx *clusterTransformContext, dag *graph.DAG, createSet sets.Set[string]) error { + handler := newCompNShardingHandler(transCtx, createOp) + return handleCompNShardingInOrder(transCtx, dag, createSet, handler) } -func (t *clusterComponentTransformer) handleCompsUpdate(transCtx *clusterTransformContext, dag *graph.DAG, - protoCompSpecMap map[string]*appsv1.ClusterComponentSpec, updateCompSet sets.Set[string], - protoCompAnnotationsMap map[string]map[string]string) error { - handler := newCompHandler(transCtx, protoCompSpecMap, protoCompAnnotationsMap, updateOp) - return handleCompsInOrder(transCtx, dag, updateCompSet, handler) +func (t *clusterComponentTransformer) handleUpdate(transCtx *clusterTransformContext, dag *graph.DAG, updateSet sets.Set[string]) error { + handler := newCompNShardingHandler(transCtx, updateOp) + return handleCompNShardingInOrder(transCtx, dag, updateSet, handler) } -func deleteCompsInOrder(transCtx *clusterTransformContext, dag *graph.DAG, deleteCompSet sets.Set[string], terminate bool) error { - handler := newCompHandler(transCtx, nil, nil, deleteOp) - if h, ok := handler.(*parallelDeleteCompHandler); ok { - h.terminate = terminate +func deleteCompNShardingInOrder(transCtx *clusterTransformContext, dag *graph.DAG, deleteSet sets.Set[string], scaleIn *bool) error { + handler := newCompNShardingHandler(transCtx, deleteOp) + if h, ok := handler.(*clusterParallelHandler); ok { + h.scaleIn = scaleIn } - if h, ok := handler.(*orderedDeleteCompHandler); ok { - h.terminate = terminate + if h, ok := handler.(*orderedDeleteHandler); ok { + h.scaleIn = scaleIn } - return handleCompsInOrder(transCtx, dag, deleteCompSet, handler) + return handleCompNShardingInOrder(transCtx, dag, deleteSet, handler) } -func handleCompsInOrder(transCtx *clusterTransformContext, dag *graph.DAG, - compNameSet sets.Set[string], handler compConditionalHandler) error { +func handleCompNShardingInOrder(transCtx *clusterTransformContext, dag *graph.DAG, nameSet sets.Set[string], handler clusterConditionalHandler) error { unmatched := "" - for _, compName := range handler.ordered(sets.List(compNameSet)) { - ok, err := handler.match(transCtx, dag, compName) + for _, name := range handler.ordered(sets.List(nameSet)) { + ok, err := handler.match(transCtx, dag, name) if err != nil { return err } if !ok { - unmatched = compName + unmatched = name break } - if err = handler.handle(transCtx, dag, compName); err != nil { + if err = handler.handle(transCtx, dag, name); err != nil { return err } } @@ -159,7 +157,7 @@ func checkAllCompsUpToDate(transCtx *clusterTransformContext, cluster *appsv1.Cl if err := transCtx.Client.List(transCtx.Context, compList, client.InNamespace(cluster.Namespace), client.MatchingLabels(labels)); err != nil { return false, err } - if len(compList.Items) != len(transCtx.ComponentSpecs) { + if len(compList.Items) != transCtx.total() { return false, nil } for _, comp := range compList.Items { @@ -174,19 +172,6 @@ func checkAllCompsUpToDate(transCtx *clusterTransformContext, cluster *appsv1.Cl return true, 
nil } -// getRunningCompObject gets the component object from cache snapshot -func getRunningCompObject(transCtx *clusterTransformContext, cluster *appsv1.Cluster, compName string) (*appsv1.Component, error) { - compKey := types.NamespacedName{ - Namespace: cluster.Namespace, - Name: component.FullName(cluster.Name, compName), - } - comp := &appsv1.Component{} - if err := transCtx.Client.Get(transCtx.Context, compKey, comp); err != nil { - return nil, err - } - return comp, nil -} - // copyAndMergeComponent merges two component objects for updating: // 1. new a component object targetCompObj by copying from oldCompObj // 2. merge all fields can be updated from newCompObj into targetCompObj @@ -238,19 +223,18 @@ const ( updateOp int = 2 ) -func newCompHandler(transCtx *clusterTransformContext, compSpecs map[string]*appsv1.ClusterComponentSpec, - annotations map[string]map[string]string, op int) compConditionalHandler { +func newCompNShardingHandler(transCtx *clusterTransformContext, op int) clusterConditionalHandler { orders := definedOrders(transCtx, op) if len(orders) == 0 { - return newParallelHandler(compSpecs, annotations, op) + return newParallelHandler(op) } - return newOrderedHandler(compSpecs, annotations, orders, op) + return newOrderedHandler(orders, op) } func definedOrders(transCtx *clusterTransformContext, op int) []string { var ( cluster = transCtx.Cluster - clusterDef = transCtx.ClusterDef + clusterDef = transCtx.clusterDef ) if len(cluster.Spec.Topology) != 0 && clusterDef != nil { for _, topology := range clusterDef.Spec.Topologies { @@ -264,7 +248,7 @@ func definedOrders(transCtx *clusterTransformContext, op int) []string { case updateOp: return topology.Orders.Update default: - panic("runtime error: unknown component op: " + strconv.Itoa(op)) + panic("runtime error: unknown operation: " + strconv.Itoa(op)) } } } @@ -273,127 +257,104 @@ func definedOrders(transCtx *clusterTransformContext, op int) []string { return nil } -func newParallelHandler(compSpecs map[string]*appsv1.ClusterComponentSpec, - annotations map[string]map[string]string, op int) compConditionalHandler { - switch op { - case createOp: - return ¶llelCreateCompHandler{ - createCompHandler: createCompHandler{ - compSpecs: compSpecs, - annotations: annotations, - }, - } - case deleteOp: - return ¶llelDeleteCompHandler{} - case updateOp: - return ¶llelUpdateCompHandler{ - updateCompHandler: updateCompHandler{ - compSpecs: compSpecs, - annotations: annotations, - }, +func newParallelHandler(op int) clusterConditionalHandler { + if op == createOp || op == deleteOp || op == updateOp { + return &clusterParallelHandler{ + clusterCompNShardingHandler: clusterCompNShardingHandler{op: op}, } - default: - panic("runtime error: unknown component op: " + strconv.Itoa(op)) } + panic("runtime error: unknown operation: " + strconv.Itoa(op)) } -func newOrderedHandler(compSpecs map[string]*appsv1.ClusterComponentSpec, - annotations map[string]map[string]string, orders []string, op int) compConditionalHandler { - upworking := func(comp *appsv1.Component) bool { - target := appsv1.RunningClusterCompPhase - if comp.Spec.Stop != nil && *comp.Spec.Stop { - target = appsv1.StoppedClusterCompPhase - } - return comp.Status.Phase == target - } +func newOrderedHandler(orders []string, op int) clusterConditionalHandler { switch op { - case createOp: - return &orderedCreateCompHandler{ - compOrderedOrder: compOrderedOrder{ - orders: orders, - }, - compPhasePrecondition: compPhasePrecondition{ - orders: orders, - phaseExpectation: 
upworking, - }, - createCompHandler: createCompHandler{ - compSpecs: compSpecs, - annotations: annotations, - }, + case createOp, updateOp: + return &orderedCreateNUpdateHandler{ + clusterOrderedOrder: clusterOrderedOrder{orders: orders}, + phasePrecondition: phasePrecondition{orders: orders}, + clusterCompNShardingHandler: clusterCompNShardingHandler{op: op}, } case deleteOp: - return &orderedDeleteCompHandler{ - compOrderedOrder: compOrderedOrder{ - orders: orders, - }, - compNotExistPrecondition: compNotExistPrecondition{ - orders: orders, - }, - deleteCompHandler: deleteCompHandler{}, - } - case updateOp: - return &orderedUpdateCompHandler{ - compOrderedOrder: compOrderedOrder{ - orders: orders, - }, - compPhasePrecondition: compPhasePrecondition{ - orders: orders, - phaseExpectation: upworking, - }, - updateCompHandler: updateCompHandler{ - compSpecs: compSpecs, - annotations: annotations, - }, + return &orderedDeleteHandler{ + clusterOrderedOrder: clusterOrderedOrder{orders: orders}, + notExistPrecondition: notExistPrecondition{orders: orders}, + clusterCompNShardingHandler: clusterCompNShardingHandler{op: op}, } default: - panic("runtime error: unknown component op: " + strconv.Itoa(op)) + panic("runtime error: unknown operation: " + strconv.Itoa(op)) } } -type compConditionalHandler interface { +type clusterConditionalHandler interface { ordered([]string) []string - match(transCtx *clusterTransformContext, dag *graph.DAG, compName string) (bool, error) - handle(transCtx *clusterTransformContext, dag *graph.DAG, compName string) error + match(transCtx *clusterTransformContext, dag *graph.DAG, name string) (bool, error) + handle(transCtx *clusterTransformContext, dag *graph.DAG, name string) error } -type compParallelOrder struct{} +type clusterParallelOrder struct{} -func (o *compParallelOrder) ordered(compNames []string) []string { - return compNames +func (o *clusterParallelOrder) ordered(names []string) []string { + return names } -type compOrderedOrder struct { +type clusterOrderedOrder struct { orders []string } -func (o *compOrderedOrder) ordered(compNames []string) []string { +func (o *clusterOrderedOrder) ordered(names []string) []string { result := make([]string, 0) for _, order := range o.orders { comps := strings.Split(order, ",") - for _, comp := range compNames { + for _, comp := range names { if slices.Index(comps, comp) >= 0 { result = append(result, comp) } } } - if len(result) != len(compNames) { - panic("runtime error: cannot find order for components " + strings.Join(compNames, ",")) + if len(result) != len(names) { + panic("runtime error: cannot find order for components and shardings " + strings.Join(names, ",")) } return result } -type compDummyPrecondition struct{} +type dummyPrecondition struct{} -func (c *compDummyPrecondition) match(*clusterTransformContext, *graph.DAG, string) (bool, error) { +func (c *dummyPrecondition) match(*clusterTransformContext, *graph.DAG, string) (bool, error) { return true, nil } -type compNotExistPrecondition struct { +type notExistPrecondition struct { orders []string } -func (c *compNotExistPrecondition) match(transCtx *clusterTransformContext, dag *graph.DAG, compName string) (bool, error) { - get := func(compKey types.NamespacedName) (bool, error) { +func (c *notExistPrecondition) match(transCtx *clusterTransformContext, dag *graph.DAG, name string) (bool, error) { + for _, predecessor := range predecessors(c.orders, name) { + exist, err := c.predecessorExist(transCtx, dag, predecessor) + if err != nil { + return false, err 
+ } + if exist { + return false, nil + } + } + return true, nil +} + +func (c *notExistPrecondition) predecessorExist(transCtx *clusterTransformContext, dag *graph.DAG, name string) (bool, error) { + if transCtx.sharding(name) { + return c.shardingExist(transCtx, dag, name) + } + return c.compExist(transCtx, dag, name) +} + +func (c *notExistPrecondition) compExist(transCtx *clusterTransformContext, dag *graph.DAG, name string) (bool, error) { + var ( + compKey = types.NamespacedName{ + Namespace: transCtx.Cluster.Namespace, + Name: component.FullName(transCtx.Cluster.Name, name), + } + ) + get := func() (bool, error) { comp := &appsv1.Component{} err := transCtx.Client.Get(transCtx.Context, compKey, comp) if err != nil && !apierrors.IsNotFound(err) { @@ -401,7 +362,7 @@ func (c *compNotExistPrecondition) match(transCtx *clusterTransformContext, dag } return err == nil, nil } - dagCreate := func(compKey types.NamespacedName) bool { + dagCreate := func() bool { graphCli, _ := transCtx.Client.(model.GraphClient) comp := &appsv1.Component{ ObjectMeta: metav1.ObjectMeta{ @@ -411,199 +372,491 @@ func (c *compNotExistPrecondition) match(transCtx *clusterTransformContext, dag } return graphCli.IsAction(dag, comp, model.ActionCreatePtr()) } - for _, predecessor := range predecessors(c.orders, compName) { - compKey := types.NamespacedName{ - Namespace: transCtx.Cluster.Namespace, - Name: component.FullName(transCtx.Cluster.Name, predecessor), - } - exist, err := get(compKey) + + exist, err := get() + if err != nil { + return false, err + } + if exist { + return true, nil + } + if dagCreate() { + return true, nil + } + return false, nil +} + +func (c *notExistPrecondition) shardingExist(transCtx *clusterTransformContext, dag *graph.DAG, name string) (bool, error) { + list := func() (bool, error) { + comps, err := ictrlutil.ListShardingComponents(transCtx.Context, transCtx.Client, transCtx.Cluster, name) if err != nil { return false, err } - if exist { - return false, nil + return len(comps) > 0, nil + } + dagCreate := func() bool { + graphCli, _ := transCtx.Client.(model.GraphClient) + for _, obj := range graphCli.FindAll(dag, &appsv1.Component{}) { + if shardingCompWithName(obj.(*appsv1.Component), name) && + graphCli.IsAction(dag, obj, model.ActionCreatePtr()) { + return true + } } - if dagCreate(compKey) { + return false + } + + exist, err := list() + if err != nil { + return false, err + } + if exist { + return true, nil + } + if dagCreate() { + return true, nil + } + return false, nil +} + +type phasePrecondition struct { + orders []string +} + +func (c *phasePrecondition) match(transCtx *clusterTransformContext, dag *graph.DAG, name string) (bool, error) { + for _, predecessor := range predecessors(c.orders, name) { + match, err := c.predecessorMatch(transCtx, dag, predecessor) + if err != nil { + return false, err + } + if !match { return false, nil } } return true, nil } -type compPhasePrecondition struct { - orders []string - phaseExpectation func(component2 *appsv1.Component) bool +func (c *phasePrecondition) predecessorMatch(transCtx *clusterTransformContext, dag *graph.DAG, name string) (bool, error) { + if transCtx.sharding(name) { + return c.shardingMatch(transCtx, dag, name) + } + return c.compMatch(transCtx, dag, name) } -func (c *compPhasePrecondition) match(transCtx *clusterTransformContext, dag *graph.DAG, compName string) (bool, error) { - dagGet := func(compKey types.NamespacedName) bool { +func (c *phasePrecondition) compMatch(transCtx *clusterTransformContext, dag *graph.DAG, 
name string) (bool, error) { + var ( + compKey = types.NamespacedName{ + Namespace: transCtx.Cluster.Namespace, + Name: component.FullName(transCtx.Cluster.Name, name), + } + ) + dagGet := func() bool { graphCli, _ := transCtx.Client.(model.GraphClient) for _, obj := range graphCli.FindAll(dag, &appsv1.Component{}) { if client.ObjectKeyFromObject(obj) == compKey { - return true + return true // TODO: should check the action? } } return false } - for _, predecessor := range predecessors(c.orders, compName) { - comp := &appsv1.Component{} - compKey := types.NamespacedName{ - Namespace: transCtx.Cluster.Namespace, - Name: component.FullName(transCtx.Cluster.Name, predecessor), - } - if err := transCtx.Client.Get(transCtx.Context, compKey, comp); err != nil { - return false, client.IgnoreNotFound(err) - } - if comp.Generation != comp.Status.ObservedGeneration || !c.phaseExpectation(comp) { - return false, nil + + comp := &appsv1.Component{} + if err := transCtx.Client.Get(transCtx.Context, compKey, comp); err != nil { + return false, client.IgnoreNotFound(err) + } + if !c.expected(comp) { + return false, nil + } + // create or update in DAG? + if dagGet() { + return false, nil + } + return true, nil +} + +func (c *phasePrecondition) shardingMatch(transCtx *clusterTransformContext, dag *graph.DAG, name string) (bool, error) { + dagList := func() bool { + graphCli, _ := transCtx.Client.(model.GraphClient) + for _, obj := range graphCli.FindAll(dag, &appsv1.Component{}) { + if shardingCompWithName(obj.(*appsv1.Component), name) { + return true // TODO: should check the action? + } } - // create or update if exists in DAG - if dagGet(compKey) { + return false + } + + protoComps, ok := transCtx.shardingComps[name] + if !ok { + return false, fmt.Errorf("cluster sharding %s not found", name) + } + + comps, err := ictrlutil.ListShardingComponents(transCtx.Context, transCtx.Client, transCtx.Cluster, name) + if err != nil { + return false, err + } + if len(comps) != len(protoComps) { + return false, nil + } + for _, comp := range comps { + if !c.expected(&comp) { return false, nil } } + // create or update in DAG? 
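// Any Component of this sharding that is still present in the DAG (presumably pending a create or
// update; see the TODO above about checking the action) means the sharding has not settled yet, so
// successors in the defined order keep waiting.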
+ if dagList() { + return false, nil + } return true, nil } -func predecessors(orders []string, compName string) []string { - var previous []string - for _, comps := range orders { - compNames := strings.Split(comps, ",") - if index := slices.Index(compNames, compName); index >= 0 { - return previous +func (c *phasePrecondition) expected(comp *appsv1.Component) bool { + if comp.Generation == comp.Status.ObservedGeneration { + expect := appsv1.RunningClusterCompPhase + if comp.Spec.Stop != nil && *comp.Spec.Stop { + expect = appsv1.StoppedClusterCompPhase } - previous = compNames + return comp.Status.Phase == expect } - panic("runtime error: cannot find predecessor for component " + compName) + return false } -type createCompHandler struct { - compSpecs map[string]*appsv1.ClusterComponentSpec - annotations map[string]map[string]string +type clusterCompNShardingHandler struct { + op int + scaleIn *bool } -func (h *createCompHandler) handle(transCtx *clusterTransformContext, dag *graph.DAG, compName string) error { - cluster := transCtx.Cluster - graphCli, _ := transCtx.Client.(model.GraphClient) - comp, err := component.BuildComponentExt(cluster, h.compSpecs[compName], shardingNameFromComp(transCtx, compName), h.annotations[compName]) - if err != nil { - return err +func (h *clusterCompNShardingHandler) handle(transCtx *clusterTransformContext, dag *graph.DAG, name string) error { + if transCtx.sharding(name) { + handler := &clusterShardingHandler{scaleIn: h.scaleIn} + switch h.op { + case createOp: + return handler.create(transCtx, dag, name) + case deleteOp: + return handler.delete(transCtx, dag, name) + default: + return handler.update(transCtx, dag, name) + } + } else { + handler := &clusterComponentHandler{} + switch h.op { + case createOp: + return handler.create(transCtx, dag, name) + case deleteOp: + return handler.delete(transCtx, dag, name) + default: + return handler.update(transCtx, dag, name) + } } - graphCli.Create(dag, comp) - h.initClusterCompStatus(cluster, compName) - return nil } -func (h *createCompHandler) initClusterCompStatus(cluster *appsv1.Cluster, compName string) { - if cluster.Status.Components == nil { - cluster.Status.Components = make(map[string]appsv1.ClusterComponentStatus) +func predecessors(orders []string, name string) []string { + var previous []string + for _, order := range orders { + names := strings.Split(order, ",") + if index := slices.Index(names, name); index >= 0 { + return previous + } + previous = names } - cluster.Status.Components[compName] = appsv1.ClusterComponentStatus{} + panic("runtime error: cannot find predecessor for component or sharding " + name) +} + +type clusterParallelHandler struct { + clusterParallelOrder + dummyPrecondition + clusterCompNShardingHandler } -type deleteCompHandler struct { - terminate bool // vs scale-in +type orderedCreateNUpdateHandler struct { + clusterOrderedOrder + phasePrecondition + clusterCompNShardingHandler } -func (h *deleteCompHandler) handle(transCtx *clusterTransformContext, dag *graph.DAG, compName string) error { - cluster := transCtx.Cluster +type orderedDeleteHandler struct { + clusterOrderedOrder + notExistPrecondition + clusterCompNShardingHandler +} + +func setDiff(s1, s2 sets.Set[string]) (sets.Set[string], sets.Set[string], sets.Set[string]) { + return s2.Difference(s1), s1.Difference(s2), s1.Intersection(s2) +} + +func mapDiff[T interface{}](m1, m2 map[string]T) (sets.Set[string], sets.Set[string], sets.Set[string]) { + s1, s2 := sets.KeySet(m1), sets.KeySet(m2) + return setDiff(s1, s2) 
+} + +type clusterComponentHandler struct{} + +func (h *clusterComponentHandler) create(transCtx *clusterTransformContext, dag *graph.DAG, name string) error { + proto, err := h.protoComp(transCtx, name) + if err != nil { + return err + } graphCli, _ := transCtx.Client.(model.GraphClient) - comp, err := getRunningCompObject(transCtx, cluster, compName) + graphCli.Create(dag, proto) + + // initClusterCompNShardingStatus(transCtx, name) + + return nil +} + +func (h *clusterComponentHandler) delete(transCtx *clusterTransformContext, dag *graph.DAG, name string) error { + comp, err := h.runningComp(transCtx, name) if err != nil && !apierrors.IsNotFound(err) { return err } if apierrors.IsNotFound(err) || model.IsObjectDeleting(comp) { return nil } + transCtx.Logger.Info(fmt.Sprintf("deleting component %s", comp.Name)) - deleteCompVertex := graphCli.Do(dag, nil, comp, model.ActionDeletePtr(), nil) - if !h.terminate { // scale-in - compCopy := comp.DeepCopy() - if comp.Annotations == nil { - comp.Annotations = make(map[string]string) - } - // update the scale-in annotation to component before deleting - comp.Annotations[constant.ComponentScaleInAnnotationKey] = trueVal - graphCli.Do(dag, compCopy, comp, model.ActionUpdatePtr(), deleteCompVertex) + graphCli, _ := transCtx.Client.(model.GraphClient) + graphCli.Delete(dag, comp) + + return nil +} + +func (h *clusterComponentHandler) update(transCtx *clusterTransformContext, dag *graph.DAG, name string) error { + running, err1 := h.runningComp(transCtx, name) + if err1 != nil { + return err1 + } + proto, err2 := h.protoComp(transCtx, name) + if err2 != nil { + return err2 + } + + if obj := copyAndMergeComponent(running, proto); obj != nil { + graphCli, _ := transCtx.Client.(model.GraphClient) + graphCli.Update(dag, running, obj) } return nil } -type updateCompHandler struct { - compSpecs map[string]*appsv1.ClusterComponentSpec - annotations map[string]map[string]string +func (h *clusterComponentHandler) runningComp(transCtx *clusterTransformContext, name string) (*appsv1.Component, error) { + compKey := types.NamespacedName{ + Namespace: transCtx.Cluster.Namespace, + Name: component.FullName(transCtx.Cluster.Name, name), + } + comp := &appsv1.Component{} + if err := transCtx.Client.Get(transCtx.Context, compKey, comp); err != nil { + return nil, err + } + return comp, nil } -func (h *updateCompHandler) handle(transCtx *clusterTransformContext, dag *graph.DAG, compName string) error { - cluster := transCtx.Cluster +func (h *clusterComponentHandler) protoComp(transCtx *clusterTransformContext, name string) (*appsv1.Component, error) { + for _, comp := range transCtx.components { + if comp.Name == name { + return component.BuildComponent(transCtx.Cluster, comp, nil, nil) + } + } + return nil, fmt.Errorf("cluster component %s not found", name) +} + +type clusterShardingHandler struct { + scaleIn *bool +} + +func (h *clusterShardingHandler) create(transCtx *clusterTransformContext, dag *graph.DAG, name string) error { + protoComps, err := h.protoComps(transCtx, name) + if err != nil { + return err + } graphCli, _ := transCtx.Client.(model.GraphClient) - runningComp, getErr := getRunningCompObject(transCtx, cluster, compName) - if getErr != nil { - return getErr + for i := range protoComps { + graphCli.Create(dag, protoComps[i]) } - comp, buildErr := component.BuildComponentExt(cluster, h.compSpecs[compName], shardingNameFromComp(transCtx, compName), h.annotations[compName]) - if buildErr != nil { - return buildErr + + // 
initClusterCompNShardingStatus(transCtx, name) + + // TODO: + // 1. sharding post-provision + // 2. provision strategy + + return nil +} + +func (h *clusterShardingHandler) delete(transCtx *clusterTransformContext, dag *graph.DAG, name string) error { + runningComps, err := ictrlutil.ListShardingComponents(transCtx.Context, transCtx.Client, transCtx.Cluster, name) + if err != nil { + return err } - if newCompObj := copyAndMergeComponent(runningComp, comp); newCompObj != nil { - graphCli.Update(dag, runningComp, newCompObj) + + // TODO: sharding pre-terminate + + graphCli, _ := transCtx.Client.(model.GraphClient) + for i := range runningComps { + h.deleteComp(transCtx, graphCli, dag, &runningComps[i], nil) } + return nil } -type parallelCreateCompHandler struct { - compParallelOrder - compDummyPrecondition - createCompHandler +func (h *clusterShardingHandler) deleteComp(transCtx *clusterTransformContext, + graphCli model.GraphClient, dag *graph.DAG, comp *appsv1.Component, scaleIn *bool) { + if !model.IsObjectDeleting(comp) { + transCtx.Logger.Info(fmt.Sprintf("deleting sharding component %s", comp.Name)) + + vertex := graphCli.Do(dag, nil, comp, model.ActionDeletePtr(), nil) + if scaleIn != nil && *scaleIn { + compCopy := comp.DeepCopy() + if comp.Annotations == nil { + compCopy.Annotations = make(map[string]string) + } + compCopy.Annotations[constant.ComponentScaleInAnnotationKey] = trueVal + graphCli.Do(dag, compCopy, comp, model.ActionUpdatePtr(), vertex) + } + } } -type parallelDeleteCompHandler struct { - compParallelOrder - compDummyPrecondition - deleteCompHandler +func (h *clusterShardingHandler) update(transCtx *clusterTransformContext, dag *graph.DAG, name string) error { + runningComps, err1 := ictrlutil.ListShardingComponents(transCtx.Context, transCtx.Client, transCtx.Cluster, name) + if err1 != nil { + return err1 + } + + runningCompsMap := make(map[string]*appsv1.Component) + for i, comp := range runningComps { + runningCompsMap[comp.Name] = &runningComps[i] + } + + protoComps, err2 := h.protoComps(transCtx, name) + if err2 != nil { + return err2 + } + protoCompsMap := make(map[string]*appsv1.Component) + for i, comp := range protoComps { + protoCompsMap[comp.Name] = protoComps[i] + } + + toCreate, toDelete, toUpdate := mapDiff(runningCompsMap, protoCompsMap) + + // TODO: update strategy + + h.deleteComps(transCtx, dag, runningCompsMap, toDelete) + h.updateComps(transCtx, dag, runningCompsMap, protoCompsMap, toUpdate) + h.createComps(transCtx, dag, protoCompsMap, toCreate) + + return nil } -type parallelUpdateCompHandler struct { - compParallelOrder - compDummyPrecondition - updateCompHandler +func (h *clusterShardingHandler) createComps(transCtx *clusterTransformContext, dag *graph.DAG, + protoComps map[string]*appsv1.Component, createSet sets.Set[string]) { + graphCli, _ := transCtx.Client.(model.GraphClient) + for name := range createSet { + graphCli.Create(dag, protoComps[name]) + // TODO: shard post-provision + } } -type orderedCreateCompHandler struct { - compOrderedOrder - compPhasePrecondition - createCompHandler +func (h *clusterShardingHandler) deleteComps(transCtx *clusterTransformContext, dag *graph.DAG, + runningComps map[string]*appsv1.Component, deleteSet sets.Set[string]) { + graphCli, _ := transCtx.Client.(model.GraphClient) + for name := range deleteSet { + // TODO: shard pre-terminate + h.deleteComp(transCtx, graphCli, dag, runningComps[name], h.scaleIn) + } } -type orderedDeleteCompHandler struct { - compOrderedOrder - compNotExistPrecondition - 
deleteCompHandler +func (h *clusterShardingHandler) updateComps(transCtx *clusterTransformContext, dag *graph.DAG, + runningComps map[string]*appsv1.Component, protoComps map[string]*appsv1.Component, updateSet sets.Set[string]) { + graphCli, _ := transCtx.Client.(model.GraphClient) + for name := range updateSet { + running, proto := runningComps[name], protoComps[name] + if obj := copyAndMergeComponent(running, proto); obj != nil { + graphCli.Update(dag, running, obj) + } + } } -type orderedUpdateCompHandler struct { - compOrderedOrder - compPhasePrecondition - updateCompHandler +func (h *clusterShardingHandler) protoComps(transCtx *clusterTransformContext, name string) ([]*appsv1.Component, error) { + build := func(sharding *appsv1.ClusterSharding) ([]*appsv1.Component, error) { + labels := map[string]string{ + constant.KBAppShardingNameLabelKey: sharding.Name, + } + if len(sharding.ShardingDef) > 0 { + labels[constant.ShardingDefLabelKey] = sharding.ShardingDef + } + + objs := make([]*appsv1.Component, 0) + + shardingComps := transCtx.shardingComps[sharding.Name] + for i := range shardingComps { + spec := shardingComps[i] + var annotations map[string]string + if transCtx.annotations != nil { + annotations = transCtx.annotations[spec.Name] + } + obj, err := component.BuildComponent(transCtx.Cluster, spec, labels, annotations) + if err != nil { + return nil, err + } + objs = append(objs, obj) + } + return objs, nil + } + + for _, sharding := range transCtx.shardings { + if sharding.Name == name { + return build(sharding) + } + } + return nil, fmt.Errorf("cluster sharding %s not found", name) } -func shardingNameFromComp(transCtx *clusterTransformContext, compName string) string { - equal := func(spec *appsv1.ClusterComponentSpec) bool { - return spec.Name == compName +// func initClusterCompNShardingStatus(transCtx *clusterTransformContext, name string) { +// var ( +// cluster = transCtx.Cluster +// ) +// m := &cluster.Status.Components +// if transCtx.sharding(name) { +// m = &cluster.Status.Shardings +// } +// if *m == nil { +// *m = make(map[string]appsv1.ClusterComponentStatus) +// } +// (*m)[name] = appsv1.ClusterComponentStatus{} +// } + +func clusterRunningCompNShardingSet(ctx context.Context, cli client.Reader, cluster *appsv1.Cluster) (sets.Set[string], error) { + compList := &appsv1.ComponentList{} + ml := client.MatchingLabels{constant.AppInstanceLabelKey: cluster.Name} + if err := cli.List(ctx, compList, client.InNamespace(cluster.Namespace), ml); err != nil { + return nil, err } - for shardingName, shardingComps := range transCtx.ShardingComponentSpecs { - if slices.IndexFunc(shardingComps, equal) >= 0 { - return shardingName + + names := sets.Set[string]{} + for _, comp := range compList.Items { + if shardingName := shardingCompNName(&comp); len(shardingName) > 0 { + names.Insert(shardingName) + } else { + name, err := component.ShortName(cluster.Name, comp.Name) + if err != nil { + return nil, err + } + names.Insert(name) } } - return "" + return names, nil } -func setDiff(s1, s2 sets.Set[string]) (sets.Set[string], sets.Set[string], sets.Set[string]) { - return s2.Difference(s1), s1.Difference(s2), s1.Intersection(s2) +func shardingCompWithName(comp *appsv1.Component, shardingName string) bool { + if comp == nil || comp.Labels == nil { + return false + } + name, ok := comp.Labels[constant.KBAppShardingNameLabelKey] + return ok && name == shardingName } -func mapDiff[T interface{}](m1, m2 map[string]T) (sets.Set[string], sets.Set[string], sets.Set[string]) { - s1, s2 := 
sets.KeySet(m1), sets.KeySet(m2) - return setDiff(s1, s2) +func shardingCompNName(comp *appsv1.Component) string { + if comp != nil && comp.Labels != nil { + name, ok := comp.Labels[constant.KBAppShardingNameLabelKey] + if ok { + return name + } + } + return "" } diff --git a/controllers/apps/transformer_cluster_component_status.go b/controllers/apps/transformer_cluster_component_status.go index c16db85dea7..af94c39eeec 100644 --- a/controllers/apps/transformer_cluster_component_status.go +++ b/controllers/apps/transformer_cluster_component_status.go @@ -22,17 +22,19 @@ package apps import ( "fmt" + "golang.org/x/exp/maps" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" + "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/component" "github.com/apecloud/kubeblocks/pkg/controller/graph" "github.com/apecloud/kubeblocks/pkg/controller/model" ) -// clusterComponentStatusTransformer transforms all cluster components' status. +// clusterComponentStatusTransformer transforms cluster components' status. type clusterComponentStatusTransformer struct{} var _ graph.Transformer = &clusterComponentStatusTransformer{} @@ -43,95 +45,231 @@ func (t *clusterComponentStatusTransformer) Transform(ctx graph.TransformContext return nil } - // has no components defined - if len(transCtx.ComponentSpecs) == 0 || !transCtx.OrigCluster.IsStatusUpdating() { + if !transCtx.OrigCluster.IsStatusUpdating() { return nil } - return t.reconcileComponentsStatus(transCtx) + return t.transform(transCtx) } -func (t *clusterComponentStatusTransformer) reconcileComponentsStatus(transCtx *clusterTransformContext) error { - cluster := transCtx.Cluster - if cluster.Status.Components == nil { - cluster.Status.Components = make(map[string]appsv1.ClusterComponentStatus) +func (t *clusterComponentStatusTransformer) transform(transCtx *clusterTransformContext) error { + comps, shardingComps, err := t.listClusterComponents(transCtx) + if err != nil { + return err } - for _, compSpec := range transCtx.ComponentSpecs { - compKey := types.NamespacedName{ - Namespace: cluster.Namespace, - Name: component.FullName(cluster.Name, compSpec.Name), + + t.transformCompStatus(transCtx, comps) + t.transformShardingStatus(transCtx, shardingComps) + + return nil +} + +func (t *clusterComponentStatusTransformer) listClusterComponents( + transCtx *clusterTransformContext) (map[string]*appsv1.Component, map[string][]*appsv1.Component, error) { + var ( + cluster = transCtx.Cluster + ) + + compList := &appsv1.ComponentList{} + ml := client.MatchingLabels(constant.GetClusterLabels(cluster.Name)) + if err := transCtx.Client.List(transCtx.Context, compList, client.InNamespace(cluster.Namespace), ml); err != nil { + return nil, nil, err + } + + if len(compList.Items) == 0 { + return nil, nil, nil + } + + comps := make(map[string]*appsv1.Component) + shardingComps := make(map[string][]*appsv1.Component) + + sharding := func(comp *appsv1.Component) bool { + shardingName := shardingCompNName(comp) + if len(shardingName) == 0 { + return false } - comp := &appsv1.Component{} - if err := transCtx.Client.Get(transCtx.Context, compKey, comp); err != nil { - if apierrors.IsNotFound(err) { - continue - } - return err + + if _, ok := shardingComps[shardingName]; !ok { + shardingComps[shardingName] = []*appsv1.Component{comp} + } else 
{ + shardingComps[shardingName] = append(shardingComps[shardingName], comp) } - cluster.Status.Components[compSpec.Name] = t.buildClusterCompStatus(transCtx, comp, compSpec.Name) + return true + } + + for i, comp := range compList.Items { + if sharding(&compList.Items[i]) { + continue + } + compName, err := component.ShortName(cluster.Name, comp.Name) + if err != nil { + return nil, nil, err + } + if _, ok := comps[compName]; ok { + return nil, nil, fmt.Errorf("duplicate component name: %s", compName) + } + comps[compName] = &compList.Items[i] + } + return comps, shardingComps, nil +} + +func (t *clusterComponentStatusTransformer) transformCompStatus(transCtx *clusterTransformContext, comps map[string]*appsv1.Component) { + var ( + cluster = transCtx.Cluster + ) + + if len(transCtx.components) == 0 && len(comps) == 0 { + cluster.Status.Components = nil + return + } + + runningSet := sets.New[string]() + if comps != nil { + runningSet.Insert(maps.Keys(comps)...) + } + protoSet := sets.New[string]() + for _, spec := range transCtx.components { + protoSet.Insert(spec.Name) + } + createSet, deleteSet, updateSet := setDiff(runningSet, protoSet) + + // reset the status + cluster.Status.Components = make(map[string]appsv1.ClusterComponentStatus) + for name := range createSet { + cluster.Status.Components[name] = appsv1.ClusterComponentStatus{ + Phase: "", + Message: map[string]string{ + "reason": "the component to be created", + }, + } + } + for name := range deleteSet { + cluster.Status.Components[name] = appsv1.ClusterComponentStatus{ + Phase: appsv1.DeletingClusterCompPhase, + Message: map[string]string{ + "reason": "the component is under deleting", + }, + } + } + for name := range updateSet { + cluster.Status.Components[name] = t.buildClusterCompStatus(transCtx, name, comps[name]) } - return nil } -// buildClusterCompStatus builds cluster component status from specified component object. func (t *clusterComponentStatusTransformer) buildClusterCompStatus(transCtx *clusterTransformContext, - comp *appsv1.Component, compName string) appsv1.ClusterComponentStatus { + compName string, comp *appsv1.Component) appsv1.ClusterComponentStatus { var ( cluster = transCtx.Cluster status = cluster.Status.Components[compName] ) phase := status.Phase - t.updateClusterComponentStatus(comp, &status) + if string(status.Phase) != string(comp.Status.Phase) { + status.Phase = comp.Status.Phase + status.Message = comp.Status.Message + } if phase != status.Phase { - phaseTransitionMsg := clusterComponentPhaseTransitionMsg(status.Phase) - if transCtx.GetRecorder() != nil && phaseTransitionMsg != "" { - transCtx.GetRecorder().Eventf(transCtx.Cluster, corev1.EventTypeNormal, componentPhaseTransition, phaseTransitionMsg) + msg := clusterCompNShardingPhaseTransitionMsg("component", compName, status.Phase) + if transCtx.GetRecorder() != nil && msg != "" { + transCtx.GetRecorder().Eventf(transCtx.Cluster, corev1.EventTypeNormal, componentPhaseTransition, msg) } - transCtx.GetLogger().Info(fmt.Sprintf("cluster component phase transition: %s -> %s (%s)", - phase, status.Phase, phaseTransitionMsg)) + transCtx.GetLogger().Info(fmt.Sprintf("cluster component phase transition: %s -> %s (%s)", phase, status.Phase, msg)) } return status } -// updateClusterComponentStatus sets the cluster component phase and messages conditionally. 
-func (t *clusterComponentStatusTransformer) updateClusterComponentStatus(comp *appsv1.Component, - status *appsv1.ClusterComponentStatus) { - if string(status.Phase) != string(comp.Status.Phase) { - status.Phase = comp.Status.Phase - if status.Message == nil { - status.Message = comp.Status.Message - } else { - for k, v := range comp.Status.Message { - status.Message[k] = v - } +func (t *clusterComponentStatusTransformer) transformShardingStatus(transCtx *clusterTransformContext, shardingComps map[string][]*appsv1.Component) { + var ( + cluster = transCtx.Cluster + ) + + if len(transCtx.shardings) == 0 && len(shardingComps) == 0 { + cluster.Status.Shardings = nil + return + } + + runningSet := sets.New[string]() + if shardingComps != nil { + runningSet.Insert(maps.Keys(shardingComps)...) + } + protoSet := sets.New[string]() + for _, spec := range transCtx.shardings { + protoSet.Insert(spec.Name) + } + createSet, deleteSet, updateSet := setDiff(runningSet, protoSet) + + // reset the status + cluster.Status.Shardings = make(map[string]appsv1.ClusterComponentStatus) + for name := range createSet { + cluster.Status.Shardings[name] = appsv1.ClusterComponentStatus{ + Phase: "", + Message: map[string]string{ + "reason": "the sharding to be created", + }, } } - // TODO(v1.0): status - //// if ready flag not changed, don't update the ready time - // ready := t.isClusterComponentPodsReady(comp.Status.Phase) - // if status.PodsReady == nil || *status.PodsReady != ready { - // status.PodsReady = &ready - // if ready { - // now := metav1.Now() - // status.PodsReadyTime = &now - // } - // } + for name := range deleteSet { + cluster.Status.Shardings[name] = appsv1.ClusterComponentStatus{ + Phase: appsv1.DeletingClusterCompPhase, + Message: map[string]string{ + "reason": "the sharding is under deleting", + }, + } + } + for name := range updateSet { + cluster.Status.Shardings[name] = t.buildClusterShardingStatus(transCtx, name, shardingComps[name]) + } } -// func (t *clusterComponentStatusTransformer) isClusterComponentPodsReady(phase appsv1.ClusterComponentPhase) bool { -// podsReadyPhases := []appsv1.ClusterComponentPhase{ -// appsv1.RunningClusterCompPhase, -// appsv1.StoppingClusterCompPhase, -// appsv1.StoppedClusterCompPhase, -// } -// return slices.Contains(podsReadyPhases, phase) -// } +func (t *clusterComponentStatusTransformer) buildClusterShardingStatus(transCtx *clusterTransformContext, + shardingName string, comps []*appsv1.Component) appsv1.ClusterComponentStatus { + var ( + cluster = transCtx.Cluster + status = cluster.Status.Shardings[shardingName] + ) + + phase := status.Phase + newPhase, newMessage := t.shardingPhaseNMessage(comps) + if status.Phase != newPhase { + status.Phase = newPhase + status.Message = newMessage + } + + if phase != status.Phase { + msg := clusterCompNShardingPhaseTransitionMsg("sharding", shardingName, status.Phase) + if transCtx.GetRecorder() != nil && msg != "" { + transCtx.GetRecorder().Eventf(transCtx.Cluster, corev1.EventTypeNormal, componentPhaseTransition, msg) + } + transCtx.GetLogger().Info(fmt.Sprintf("cluster sharding phase transition: %s -> %s (%s)", phase, status.Phase, msg)) + } + + return status +} + +func (t *clusterComponentStatusTransformer) shardingPhaseNMessage(comps []*appsv1.Component) (appsv1.ClusterComponentPhase, map[string]string) { + statusList := make([]appsv1.ClusterComponentStatus, 0) + phasedMessage := map[appsv1.ClusterComponentPhase]map[string]string{} + for _, comp := range comps { + phase := comp.Status.Phase + message := 
comp.Status.Message + if _, ok := phasedMessage[phase]; !ok { + phasedMessage[phase] = message + } + statusList = append(statusList, appsv1.ClusterComponentStatus{Phase: phase}) + } + if len(phasedMessage) == 0 { + // ??? + return "", map[string]string{"reason": "the component objects are not found"} + } + + phase := appsv1.ClusterComponentPhase(composeClusterPhase(statusList)) + return phase, phasedMessage[phase] +} -func clusterComponentPhaseTransitionMsg(phase appsv1.ClusterComponentPhase) string { +func clusterCompNShardingPhaseTransitionMsg(kind, name string, phase appsv1.ClusterComponentPhase) string { if len(phase) == 0 { return "" } - return fmt.Sprintf("component is %s", phase) + return fmt.Sprintf("cluster %s %s is %s", kind, name, phase) } diff --git a/controllers/apps/transformer_cluster_component_status_test.go b/controllers/apps/transformer_cluster_component_status_test.go new file mode 100644 index 00000000000..5d7052f6834 --- /dev/null +++ b/controllers/apps/transformer_cluster_component_status_test.go @@ -0,0 +1,694 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package apps + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" + "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/graph" + "github.com/apecloud/kubeblocks/pkg/controller/model" + testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps" +) + +var _ = Describe("cluster component status transformer", func() { + const ( + compDefName = "test-compdef" + ) + + var ( + transCtx *clusterTransformContext + dag *graph.DAG + ) + + newDag := func(graphCli model.GraphClient) *graph.DAG { + dag = graph.NewDAG() + graphCli.Root(dag, transCtx.OrigCluster, transCtx.Cluster, model.ActionStatusPtr()) + return dag + } + + normalizeTransformContext := func(transCtx *clusterTransformContext) { + var ( + cluster = transCtx.Cluster + err error + ) + transformer := clusterNormalizationTransformer{} + transCtx.components, transCtx.shardings, err = transformer.resolveCompsNShardingsFromSpecified(transCtx, cluster) + Expect(err).Should(BeNil()) + + err = transformer.validateNBuildAllCompSpecs(transCtx, cluster) + Expect(err).Should(BeNil()) + } + + BeforeEach(func() { + cluster := testapps.NewClusterFactory(testCtx.DefaultNamespace, "test-cluster", ""). + AddComponent("comp1", compDefName). + AddComponent("comp2", compDefName). + AddSharding("sharding1", "", compDefName). + AddSharding("sharding2", "", compDefName). 
+ GetObject() + + transCtx = &clusterTransformContext{ + Context: testCtx.Ctx, + Client: model.NewGraphClient(&mockReader{objs: []client.Object{}}), + EventRecorder: clusterRecorder, + Logger: logger, + Cluster: cluster.DeepCopy(), + OrigCluster: cluster, + } + normalizeTransformContext(transCtx) + + dag = newDag(transCtx.Client.(model.GraphClient)) + }) + + Context("component", func() { + It("empty", func() { + transCtx.components = nil + + transformer := &clusterComponentStatusTransformer{} + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + Expect(transCtx.Cluster.Status.Components).Should(BeNil()) + }) + + It("comp not created", func() { + transCtx.Cluster.Status.Components = nil + + // only have comp1 object in the cluster + reader := &mockReader{ + objs: []client.Object{ + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-comp1", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.RunningClusterCompPhase, + }, + }, + }, + } + transCtx.Client = model.NewGraphClient(reader) + + transformer := &clusterComponentStatusTransformer{} + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + Expect(transCtx.Cluster.Status.Components).Should(HaveLen(2)) + Expect(transCtx.Cluster.Status.Components).Should(HaveKey("comp1")) + Expect(transCtx.Cluster.Status.Components["comp1"].Phase).Should(Equal(appsv1.RunningClusterCompPhase)) + Expect(transCtx.Cluster.Status.Components).Should(HaveKey("comp2")) + Expect(transCtx.Cluster.Status.Components["comp2"].Phase).Should(Equal(appsv1.ClusterComponentPhase(""))) + }) + + It("comp spec deleted", func() { + // have seen the comp1 and comp2 objects in the cluster + transCtx.Cluster.Status.Components = map[string]appsv1.ClusterComponentStatus{ + "comp1": { + Phase: appsv1.RunningClusterCompPhase, + }, + "comp2": { + Phase: appsv1.RunningClusterCompPhase, + }, + } + + reader := &mockReader{ + objs: []client.Object{ + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-comp1", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.RunningClusterCompPhase, + }, + }, + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-comp2", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.RunningClusterCompPhase, + }, + }, + }, + } + transCtx.Client = model.NewGraphClient(reader) + + // delete comp2 from cluster spec + transCtx.components = transCtx.components[:1] + + transformer := &clusterComponentStatusTransformer{} + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + Expect(transCtx.Cluster.Status.Components).Should(HaveLen(2)) + Expect(transCtx.Cluster.Status.Components).Should(HaveKey("comp1")) + Expect(transCtx.Cluster.Status.Components["comp1"].Phase).Should(Equal(appsv1.RunningClusterCompPhase)) + Expect(transCtx.Cluster.Status.Components).Should(HaveKey("comp2")) + Expect(transCtx.Cluster.Status.Components["comp2"].Phase).Should(Equal(appsv1.DeletingClusterCompPhase)) + }) + + It("comp object 
deleted", func() { + // have seen the comp1 and comp2 objects in the cluster + transCtx.Cluster.Status.Components = map[string]appsv1.ClusterComponentStatus{ + "comp1": { + Phase: appsv1.RunningClusterCompPhase, + }, + "comp2": { + Phase: appsv1.RunningClusterCompPhase, + }, + } + + // comp2 object is deleted??? + reader := &mockReader{ + objs: []client.Object{ + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-comp1", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.RunningClusterCompPhase, + }, + }, + }, + } + transCtx.Client = model.NewGraphClient(reader) + + transformer := &clusterComponentStatusTransformer{} + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + Expect(transCtx.Cluster.Status.Components).Should(HaveLen(2)) + Expect(transCtx.Cluster.Status.Components).Should(HaveKey("comp1")) + Expect(transCtx.Cluster.Status.Components["comp1"].Phase).Should(Equal(appsv1.RunningClusterCompPhase)) + Expect(transCtx.Cluster.Status.Components).Should(HaveKey("comp2")) + Expect(transCtx.Cluster.Status.Components["comp2"].Phase).Should(Equal(appsv1.ClusterComponentPhase(""))) + }) + + It("comp deleted", func() { + // have seen the comp1 and comp2 objects in the cluster + transCtx.Cluster.Status.Components = map[string]appsv1.ClusterComponentStatus{ + "comp1": { + Phase: appsv1.RunningClusterCompPhase, + }, + "comp2": { + Phase: appsv1.RunningClusterCompPhase, + }, + } + + // delete comp2 object + reader := &mockReader{ + objs: []client.Object{ + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-comp1", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.RunningClusterCompPhase, + }, + }, + }, + } + transCtx.Client = model.NewGraphClient(reader) + + // delete comp2 spec + transCtx.components = transCtx.components[:1] + + transformer := &clusterComponentStatusTransformer{} + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + Expect(transCtx.Cluster.Status.Components).Should(HaveLen(1)) + Expect(transCtx.Cluster.Status.Components).Should(HaveKey("comp1")) + Expect(transCtx.Cluster.Status.Components["comp1"].Phase).Should(Equal(appsv1.RunningClusterCompPhase)) + Expect(transCtx.Cluster.Status.Components).ShouldNot(HaveKey("comp2")) + }) + + It("ok", func() { + reader := &mockReader{ + objs: []client.Object{ + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-comp1", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.RunningClusterCompPhase, + }, + }, + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-comp2", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.CreatingClusterCompPhase, + }, + }, + }, + } + transCtx.Client = model.NewGraphClient(reader) + + transformer := &clusterComponentStatusTransformer{} + err := 
transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + Expect(transCtx.Cluster.Status.Components).Should(HaveLen(2)) + Expect(transCtx.Cluster.Status.Components).Should(HaveKey("comp1")) + Expect(transCtx.Cluster.Status.Components["comp1"].Phase).Should(Equal(appsv1.RunningClusterCompPhase)) + Expect(transCtx.Cluster.Status.Components).Should(HaveKey("comp2")) + Expect(transCtx.Cluster.Status.Components["comp2"].Phase).Should(Equal(appsv1.CreatingClusterCompPhase)) + }) + + It("phase changed", func() { + transCtx.Cluster.Status.Components = map[string]appsv1.ClusterComponentStatus{ + "comp1": { + Phase: appsv1.RunningClusterCompPhase, + }, + "comp2": { + Phase: appsv1.RunningClusterCompPhase, + }, + } + + reader := &mockReader{ + objs: []client.Object{ + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-comp1", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.UpdatingClusterCompPhase, + }, + }, + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-comp2", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.DeletingClusterCompPhase, + }, + }, + }, + } + transCtx.Client = model.NewGraphClient(reader) + + transformer := &clusterComponentStatusTransformer{} + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + Expect(transCtx.Cluster.Status.Components).Should(HaveLen(2)) + Expect(transCtx.Cluster.Status.Components).Should(HaveKey("comp1")) + Expect(transCtx.Cluster.Status.Components["comp1"].Phase).Should(Equal(appsv1.UpdatingClusterCompPhase)) + Expect(transCtx.Cluster.Status.Components).Should(HaveKey("comp2")) + Expect(transCtx.Cluster.Status.Components["comp2"].Phase).Should(Equal(appsv1.DeletingClusterCompPhase)) + }) + }) + + Context("sharding", func() { + It("empty", func() { + transCtx.shardings = nil + + transformer := &clusterComponentStatusTransformer{} + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + Expect(transCtx.Cluster.Status.Shardings).Should(BeNil()) + }) + + It("sharding not created", func() { + transCtx.Cluster.Status.Shardings = nil + + // only have sharding1 object in the cluster + reader := &mockReader{ + objs: []client.Object{ + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-sharding1", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + constant.KBAppShardingNameLabelKey: "sharding1", + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.RunningClusterCompPhase, + }, + }, + }, + } + transCtx.Client = model.NewGraphClient(reader) + + transformer := &clusterComponentStatusTransformer{} + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveLen(2)) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveKey("sharding1")) + Expect(transCtx.Cluster.Status.Shardings["sharding1"].Phase).Should(Equal(appsv1.RunningClusterCompPhase)) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveKey("sharding2")) + 
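// Spelled out, the map this case expects transformShardingStatus to rebuild is
// roughly the following (sharding2 gets the create-set placeholder shown
// earlier in this diff; field values here are illustrative):
//
//	map[string]appsv1.ClusterComponentStatus{
//		"sharding1": {Phase: appsv1.RunningClusterCompPhase},
//		"sharding2": {Phase: "", Message: map[string]string{"reason": "the sharding to be created"}},
//	}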
Expect(transCtx.Cluster.Status.Shardings["sharding2"].Phase).Should(Equal(appsv1.ClusterComponentPhase(""))) + }) + + It("sharding spec deleted", func() { + // have seen the sharding1 and sharding2 objects in the cluster + transCtx.Cluster.Status.Shardings = map[string]appsv1.ClusterComponentStatus{ + "sharding1": { + Phase: appsv1.RunningClusterCompPhase, + }, + "sharding2": { + Phase: appsv1.RunningClusterCompPhase, + }, + } + + reader := &mockReader{ + objs: []client.Object{ + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-sharding1", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + constant.KBAppShardingNameLabelKey: "sharding1", + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.RunningClusterCompPhase, + }, + }, + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-sharding2", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + constant.KBAppShardingNameLabelKey: "sharding2", + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.RunningClusterCompPhase, + }, + }, + }, + } + transCtx.Client = model.NewGraphClient(reader) + + // delete sharding2 from cluster spec + transCtx.shardings = transCtx.shardings[:1] + + transformer := &clusterComponentStatusTransformer{} + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveLen(2)) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveKey("sharding1")) + Expect(transCtx.Cluster.Status.Shardings["sharding1"].Phase).Should(Equal(appsv1.RunningClusterCompPhase)) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveKey("sharding2")) + Expect(transCtx.Cluster.Status.Shardings["sharding2"].Phase).Should(Equal(appsv1.DeletingClusterCompPhase)) + }) + + It("sharding object deleted", func() { + // have seen the sharding1 and sharding2 objects in the cluster + transCtx.Cluster.Status.Shardings = map[string]appsv1.ClusterComponentStatus{ + "sharding1": { + Phase: appsv1.RunningClusterCompPhase, + }, + "sharding2": { + Phase: appsv1.RunningClusterCompPhase, + }, + } + + // sharding2 object is deleted??? 
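// (i.e. the Component object for sharding2 is gone while the sharding is still
// declared in the cluster spec, so the transformer treats it as not yet created
// and resets its recorded phase to "" with a "to be created" message)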
+ reader := &mockReader{ + objs: []client.Object{ + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-sharding1", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + constant.KBAppShardingNameLabelKey: "sharding1", + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.RunningClusterCompPhase, + }, + }, + }, + } + transCtx.Client = model.NewGraphClient(reader) + + transformer := &clusterComponentStatusTransformer{} + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveLen(2)) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveKey("sharding1")) + Expect(transCtx.Cluster.Status.Shardings["sharding1"].Phase).Should(Equal(appsv1.RunningClusterCompPhase)) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveKey("sharding2")) + Expect(transCtx.Cluster.Status.Shardings["sharding2"].Phase).Should(Equal(appsv1.ClusterComponentPhase(""))) + }) + + It("sharding deleted", func() { + // have seen the sharding1 and sharding2 objects in the cluster + transCtx.Cluster.Status.Shardings = map[string]appsv1.ClusterComponentStatus{ + "sharding1": { + Phase: appsv1.RunningClusterCompPhase, + }, + "sharding2": { + Phase: appsv1.RunningClusterCompPhase, + }, + } + + // delete sharding2 object + reader := &mockReader{ + objs: []client.Object{ + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-sharding1", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + constant.KBAppShardingNameLabelKey: "sharding1", + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.RunningClusterCompPhase, + }, + }, + }, + } + transCtx.Client = model.NewGraphClient(reader) + + // delete sharding2 spec + transCtx.shardings = transCtx.shardings[:1] + + transformer := &clusterComponentStatusTransformer{} + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveLen(1)) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveKey("sharding1")) + Expect(transCtx.Cluster.Status.Shardings["sharding1"].Phase).Should(Equal(appsv1.RunningClusterCompPhase)) + Expect(transCtx.Cluster.Status.Components).ShouldNot(HaveKey("sharding2")) + }) + + It("ok", func() { + reader := &mockReader{ + objs: []client.Object{ + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-sharding1", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + constant.KBAppShardingNameLabelKey: "sharding1", + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.RunningClusterCompPhase, + }, + }, + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-sharding2", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + constant.KBAppShardingNameLabelKey: "sharding2", + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.CreatingClusterCompPhase, + }, + }, + }, + } + transCtx.Client = model.NewGraphClient(reader) + + transformer := &clusterComponentStatusTransformer{} + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) 
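// (the status map is rebuilt from scratch on every pass, and sharding2 now
// appears neither in the spec nor among the live Components, so it simply
// drops out; hence a single entry here, rather than the Deleting entry seen
// when only the spec was removed)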
+ Expect(transCtx.Cluster.Status.Shardings).Should(HaveLen(2)) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveKey("sharding1")) + Expect(transCtx.Cluster.Status.Shardings["sharding1"].Phase).Should(Equal(appsv1.RunningClusterCompPhase)) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveKey("sharding2")) + Expect(transCtx.Cluster.Status.Shardings["sharding2"].Phase).Should(Equal(appsv1.CreatingClusterCompPhase)) + }) + + It("compose phases", func() { + reader := &mockReader{ + objs: []client.Object{ + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-sharding1-01", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + constant.KBAppShardingNameLabelKey: "sharding1", + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.RunningClusterCompPhase, + }, + }, + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-sharding1-02", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + constant.KBAppShardingNameLabelKey: "sharding1", + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.CreatingClusterCompPhase, + }, + }, + }, + } + transCtx.Client = model.NewGraphClient(reader) + + transformer := &clusterComponentStatusTransformer{} + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveLen(2)) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveKey("sharding1")) + Expect(transCtx.Cluster.Status.Shardings["sharding1"].Phase).Should(Equal(appsv1.UpdatingClusterCompPhase)) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveKey("sharding2")) + Expect(transCtx.Cluster.Status.Shardings["sharding2"].Phase).Should(Equal(appsv1.ClusterComponentPhase(""))) + }) + + It("phase changed", func() { + transCtx.Cluster.Status.Shardings = map[string]appsv1.ClusterComponentStatus{ + "sharding1": { + Phase: appsv1.CreatingClusterCompPhase, + }, + } + + reader := &mockReader{ + objs: []client.Object{ + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-sharding1-01", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + constant.KBAppShardingNameLabelKey: "sharding1", + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.RunningClusterCompPhase, + }, + }, + &appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: testCtx.DefaultNamespace, + Name: "test-cluster-sharding1-02", + Labels: map[string]string{ + constant.AppManagedByLabelKey: constant.AppName, + constant.AppInstanceLabelKey: transCtx.Cluster.Name, + constant.KBAppShardingNameLabelKey: "sharding1", + }, + }, + Status: appsv1.ComponentStatus{ + Phase: appsv1.RunningClusterCompPhase, + }, + }, + }, + } + transCtx.Client = model.NewGraphClient(reader) + + transformer := &clusterComponentStatusTransformer{} + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveLen(2)) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveKey("sharding1")) + Expect(transCtx.Cluster.Status.Shardings["sharding1"].Phase).Should(Equal(appsv1.RunningClusterCompPhase)) + Expect(transCtx.Cluster.Status.Shardings).Should(HaveKey("sharding2")) + 
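// As the "compose phases" case above exercises, a sharding's phase is an
// aggregate over its member Components (shardingPhaseNMessage feeding
// composeClusterPhase): one Running plus one Creating member surfaces as
// Updating for the sharding, and the message recorded is the one captured for
// that aggregate phase.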
Expect(transCtx.Cluster.Status.Shardings["sharding2"].Phase).Should(Equal(appsv1.ClusterComponentPhase(""))) + }) + }) +}) diff --git a/controllers/apps/transformer_cluster_component_test.go b/controllers/apps/transformer_cluster_component_test.go index 4fb298d52c3..aba980f6137 100644 --- a/controllers/apps/transformer_cluster_component_test.go +++ b/controllers/apps/transformer_cluster_component_test.go @@ -26,12 +26,16 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" + "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/component" "github.com/apecloud/kubeblocks/pkg/controller/graph" "github.com/apecloud/kubeblocks/pkg/controller/model" @@ -59,32 +63,55 @@ func (r *mockReader) List(ctx context.Context, list client.ObjectList, opts ...c if !items.IsValid() { return fmt.Errorf("ObjectList has no Items field: %s", list.GetObjectKind().GroupVersionKind().String()) } + + objs := reflect.MakeSlice(items.Type(), 0, 0) if len(r.objs) > 0 { - objs := reflect.MakeSlice(items.Type(), 0, 0) - for i := range r.objs { + listOpts := &client.ListOptions{} + for _, opt := range opts { + opt.ApplyToList(listOpts) + } + + for i, o := range r.objs { if reflect.TypeOf(r.objs[i]).Elem().AssignableTo(items.Type().Elem()) { - objs = reflect.Append(objs, reflect.ValueOf(r.objs[i]).Elem()) + if listOpts.LabelSelector == nil || listOpts.LabelSelector.Matches(labels.Set(o.GetLabels())) { + objs = reflect.Append(objs, reflect.ValueOf(r.objs[i]).Elem()) + } } } - items.Set(objs) } + items.Set(objs) + return nil } var _ = Describe("cluster component transformer test", func() { const ( - clusterDefName = "test-clusterdef" - clusterTopologyDefault = "test-topology-default" - clusterTopologyNoOrders = "test-topology-no-orders" - clusterTopologyProvisionNUpdateOOD = "test-topology-ood" - clusterTopology4Stop = "test-topology-stop" - compDefName = "test-compdef" - clusterName = "test-cluster" - comp1aName = "comp-1a" - comp1bName = "comp-1b" - comp2aName = "comp-2a" - comp2bName = "comp-2b" - comp3aName = "comp-3a" + clusterDefName = "test-clusterdef" + clusterTopologyDefault = "test-topology-default" + clusterTopologyNoOrders = "test-topology-no-orders" + clusterTopologyProvisionNUpdateOOD = "test-topology-ood" + clusterTopologyStop = "test-topology-stop" + clusterTopologyDefault4Sharding = "test-topology-default-sharding" + clusterTopologyNoOrders4Sharding = "test-topology-no-orders-sharding" + clusterTopologyProvisionNUpdateOOD4Sharding = "test-topology-ood-sharding" + clusterTopologyStop4Sharding = "test-topology-stop-sharding" + clusterTopologyCompNSharding = "test-topology-comp-sharding" + clusterTopologyShardingNComp = "test-topology-sharding-comp" + clusterTopologyCompNShardingOOD = "test-topology-ood-comp-sharding" + clusterTopologyShardingNCompOOD = "test-topology-ood-sharding-comp" + compDefName = "test-compdef" + shardingDefName = "test-shardingdef" + clusterName = "test-cluster" + comp1aName = "comp-1a" + comp1bName = "comp-1b" + comp2aName = "comp-2a" + comp2bName = "comp-2b" + comp3aName = "comp-3a" + sharding1aName = "sharding-1a" + sharding1bName = "sharding-1b" + sharding2aName = "sharding-2a" + sharding2bName = "sharding-2b" + sharding3aName = "sharding-3a" ) var ( @@ 
-181,7 +208,7 @@ var _ = Describe("cluster component transformer test", func() { }, }). AddClusterTopology(appsv1.ClusterTopology{ - Name: clusterTopology4Stop, + Name: clusterTopologyStop, Components: []appsv1.ClusterTopologyComponent{ { Name: comp1aName, @@ -200,6 +227,191 @@ var _ = Describe("cluster component transformer test", func() { Update: []string{comp1aName, comp2aName, comp3aName}, }, }). + AddClusterTopology(appsv1.ClusterTopology{ + Name: clusterTopologyDefault4Sharding, + Shardings: []appsv1.ClusterTopologySharding{ + { + Name: sharding1aName, + ShardingDef: shardingDefName, + }, + { + Name: sharding1bName, + ShardingDef: shardingDefName, + }, + { + Name: sharding2aName, + ShardingDef: shardingDefName, + }, + { + Name: sharding2bName, + ShardingDef: shardingDefName, + }, + }, + Orders: &appsv1.ClusterTopologyOrders{ + Provision: []string{ + fmt.Sprintf("%s,%s", sharding1aName, sharding1bName), + fmt.Sprintf("%s,%s", sharding2aName, sharding2bName), + }, + Terminate: []string{ + fmt.Sprintf("%s,%s", sharding2aName, sharding2bName), + fmt.Sprintf("%s,%s", sharding1aName, sharding1bName), + }, + Update: []string{ + fmt.Sprintf("%s,%s", sharding1aName, sharding1bName), + fmt.Sprintf("%s,%s", sharding2aName, sharding2bName), + }, + }, + }). + AddClusterTopology(appsv1.ClusterTopology{ + Name: clusterTopologyNoOrders4Sharding, + Shardings: []appsv1.ClusterTopologySharding{ + { + Name: sharding1aName, + ShardingDef: shardingDefName, + }, + { + Name: sharding1bName, + ShardingDef: shardingDefName, + }, + { + Name: sharding2aName, + ShardingDef: shardingDefName, + }, + { + Name: sharding2bName, + ShardingDef: shardingDefName, + }, + }, + }). + AddClusterTopology(appsv1.ClusterTopology{ + Name: clusterTopologyProvisionNUpdateOOD4Sharding, + Shardings: []appsv1.ClusterTopologySharding{ + { + Name: sharding1aName, + ShardingDef: shardingDefName, + }, + { + Name: sharding1bName, + ShardingDef: shardingDefName, + }, + { + Name: sharding2aName, + ShardingDef: shardingDefName, + }, + { + Name: sharding2bName, + ShardingDef: shardingDefName, + }, + }, + Orders: &appsv1.ClusterTopologyOrders{ + Provision: []string{ + fmt.Sprintf("%s,%s", sharding1aName, sharding1bName), + fmt.Sprintf("%s,%s", sharding2aName, sharding2bName), + }, + Update: []string{ + fmt.Sprintf("%s,%s", sharding2aName, sharding2bName), + fmt.Sprintf("%s,%s", sharding1aName, sharding1bName), + }, + }, + }). + AddClusterTopology(appsv1.ClusterTopology{ + Name: clusterTopologyStop4Sharding, + Shardings: []appsv1.ClusterTopologySharding{ + { + Name: sharding1aName, + ShardingDef: shardingDefName, + }, + { + Name: sharding2aName, + ShardingDef: shardingDefName, + }, + { + Name: sharding3aName, + ShardingDef: shardingDefName, + }, + }, + Orders: &appsv1.ClusterTopologyOrders{ + Update: []string{sharding1aName, sharding2aName, sharding3aName}, + }, + }). + AddClusterTopology(appsv1.ClusterTopology{ + Name: clusterTopologyCompNSharding, + Components: []appsv1.ClusterTopologyComponent{ + { + Name: comp1aName, + CompDef: compDefName, + }, + }, + Shardings: []appsv1.ClusterTopologySharding{ + { + Name: sharding1aName, + ShardingDef: shardingDefName, + }, + }, + Orders: &appsv1.ClusterTopologyOrders{ + Provision: []string{comp1aName, sharding1aName}, + Terminate: []string{sharding1aName, comp1aName}, + Update: []string{comp1aName, sharding1aName}, + }, + }). 
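// In these Orders, names joined by a comma (e.g. "sharding-1a,sharding-1b")
// form a single step whose members may proceed together, while successive
// entries are serialized; the provisioning tests below rely on this when they
// expect sharding-2a to be held back until both sharding-1a and sharding-1b
// are ready. A hypothetical helper, illustrative only (not the transformer's
// actual parsing code; assumes `import "strings"`):
func orderStepMembers(entry string) []string {
	return strings.Split(entry, ",") // one Orders entry -> the names of one step
}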
+ AddClusterTopology(appsv1.ClusterTopology{ + Name: clusterTopologyShardingNComp, + Components: []appsv1.ClusterTopologyComponent{ + { + Name: comp1aName, + CompDef: compDefName, + }, + }, + Shardings: []appsv1.ClusterTopologySharding{ + { + Name: sharding1aName, + ShardingDef: shardingDefName, + }, + }, + Orders: &appsv1.ClusterTopologyOrders{ + Provision: []string{sharding1aName, comp1aName}, + Terminate: []string{comp1aName, sharding1aName}, + Update: []string{sharding1aName, comp1aName}, + }, + }). + AddClusterTopology(appsv1.ClusterTopology{ + Name: clusterTopologyCompNShardingOOD, + Components: []appsv1.ClusterTopologyComponent{ + { + Name: comp1aName, + CompDef: compDefName, + }, + }, + Shardings: []appsv1.ClusterTopologySharding{ + { + Name: sharding1aName, + ShardingDef: shardingDefName, + }, + }, + Orders: &appsv1.ClusterTopologyOrders{ + Provision: []string{comp1aName, sharding1aName}, + Update: []string{sharding1aName, comp1aName}, + }, + }). + AddClusterTopology(appsv1.ClusterTopology{ + Name: clusterTopologyShardingNCompOOD, + Components: []appsv1.ClusterTopologyComponent{ + { + Name: comp1aName, + CompDef: compDefName, + }, + }, + Shardings: []appsv1.ClusterTopologySharding{ + { + Name: sharding1aName, + ShardingDef: shardingDefName, + }, + }, + Orders: &appsv1.ClusterTopologyOrders{ + Provision: []string{sharding1aName, comp1aName}, + Update: []string{comp1aName, sharding1aName}, + }, + }). GetObject() }) @@ -211,53 +423,83 @@ var _ = Describe("cluster component transformer test", func() { return d } - buildCompSpecs := func(clusterDef *appsv1.ClusterDefinition, cluster *appsv1.Cluster) []*appsv1.ClusterComponentSpec { - apiTransformer := ClusterAPINormalizationTransformer{} - compSpecs, err := apiTransformer.buildCompSpecs4Topology(clusterDef, cluster) + normalizeTransformContext := func(transCtx *clusterTransformContext) { + var ( + clusterDef = transCtx.clusterDef + cluster = transCtx.Cluster + err error + ) + transformer := clusterNormalizationTransformer{} + transCtx.components, transCtx.shardings, err = transformer.resolveCompsNShardingsFromTopology(clusterDef, cluster) + Expect(err).Should(BeNil()) + + err = transformer.validateNBuildAllCompSpecs(transCtx, cluster) Expect(err).Should(BeNil()) - return compSpecs } - mockCompObj := func(transCtx *clusterTransformContext, compName string, setters ...func(*appsv1.Component)) *appsv1.Component { - var compSpec *appsv1.ClusterComponentSpec - for i, spec := range transCtx.ComponentSpecs { - if spec.Name == compName { - compSpec = transCtx.ComponentSpecs[i] - break + newTransformerNCtx := func(topology string, processors ...func(*testapps.MockClusterFactory)) (graph.Transformer, *clusterTransformContext, *graph.DAG) { + f := testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, clusterDefName). + WithRandomName(). 
+ SetTopology(topology) + if len(processors) > 0 { + for _, processor := range processors { + processor(f) } + } else { + f.SetReplicas(1) } - Expect(compSpec).ShouldNot(BeNil()) + cluster := f.GetObject() + + graphCli := model.NewGraphClient(k8sClient) + transCtx := &clusterTransformContext{ + Context: ctx, + Client: graphCli, + EventRecorder: nil, + Logger: logger, + Cluster: cluster, + OrigCluster: cluster.DeepCopy(), + clusterDef: clusterDef, + } + normalizeTransformContext(transCtx) + + return &clusterComponentTransformer{}, transCtx, newDAG(graphCli, cluster) + } + newCompObj := func(transCtx *clusterTransformContext, compSpec *appsv1.ClusterComponentSpec, setters ...func(*appsv1.Component)) *appsv1.Component { comp, err := component.BuildComponent(transCtx.Cluster, compSpec, nil, nil) Expect(err).Should(BeNil()) - for _, setter := range setters { if setter != nil { setter(comp) } } - return comp } - newTransformerNCtx := func(topology string) (graph.Transformer, *clusterTransformContext, *graph.DAG) { - cluster := testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, clusterDefName). - WithRandomName(). - SetTopology(topology). - SetReplicas(1). - GetObject() - graphCli := model.NewGraphClient(k8sClient) - transCtx := &clusterTransformContext{ - Context: ctx, - Client: graphCli, - EventRecorder: nil, - Logger: logger, - Cluster: cluster, - OrigCluster: cluster.DeepCopy(), - ClusterDef: clusterDef, - ComponentSpecs: buildCompSpecs(clusterDef, cluster), + mockCompObj := func(transCtx *clusterTransformContext, compName string, setters ...func(*appsv1.Component)) *appsv1.Component { + var compSpec *appsv1.ClusterComponentSpec + for i, spec := range transCtx.components { + if spec.Name == compName { + compSpec = transCtx.components[i] + break + } } - return &clusterComponentTransformer{}, transCtx, newDAG(graphCli, cluster) + Expect(compSpec).ShouldNot(BeNil()) + return newCompObj(transCtx, compSpec, setters...) + } + + mockShardingCompObj := func(transCtx *clusterTransformContext, shardingName string, setters ...func(*appsv1.Component)) *appsv1.Component { + specs := transCtx.shardingComps[shardingName] + Expect(specs).Should(HaveLen(1)) + Expect(specs[0]).ShouldNot(BeNil()) + + if setters == nil { + setters = []func(*appsv1.Component){} + } + setters = append(setters, func(comp *appsv1.Component) { + comp.Labels[constant.KBAppShardingNameLabelKey] = shardingName + }) + return newCompObj(transCtx, specs[0], setters...) 
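// (the KBAppShardingNameLabelKey label attached above is what shardingCompNName
// and listClusterComponents key on when grouping a generated Component under
// its sharding)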
} Context("component orders", func() { @@ -513,7 +755,7 @@ var _ = Describe("cluster component transformer test", func() { }) It("w/ orders update - stop", func() { - transformer, transCtx, dag := newTransformerNCtx(clusterTopology4Stop) + transformer, transCtx, dag := newTransformerNCtx(clusterTopologyStop) // mock to stop all components reader := &mockReader{ @@ -530,8 +772,8 @@ var _ = Describe("cluster component transformer test", func() { }, } transCtx.Client = model.NewGraphClient(reader) - for i := range transCtx.ComponentSpecs { - transCtx.ComponentSpecs[i].Stop = &[]bool{true}[0] + for i := range transCtx.components { + transCtx.components[i].Stop = pointer.Bool(true) } transCtx.OrigCluster.Generation += 1 // mock cluster spec update @@ -551,13 +793,13 @@ var _ = Describe("cluster component transformer test", func() { }) It("w/ orders update - stop the second component", func() { - transformer, transCtx, dag := newTransformerNCtx(clusterTopology4Stop) + transformer, transCtx, dag := newTransformerNCtx(clusterTopologyStop) // mock to stop all components and the first component has been stopped reader := &mockReader{ objs: []client.Object{ mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) { - comp.Spec.Stop = &[]bool{true}[0] + comp.Spec.Stop = pointer.Bool(true) comp.Status.Phase = appsv1.StoppedClusterCompPhase }), mockCompObj(transCtx, comp2aName, func(comp *appsv1.Component) { @@ -569,8 +811,8 @@ var _ = Describe("cluster component transformer test", func() { }, } transCtx.Client = model.NewGraphClient(reader) - for i := range transCtx.ComponentSpecs { - transCtx.ComponentSpecs[i].Stop = &[]bool{true}[0] + for i := range transCtx.components { + transCtx.components[i].Stop = pointer.Bool(true) } transCtx.OrigCluster.Generation += 1 // mock cluster spec update @@ -631,8 +873,697 @@ var _ = Describe("cluster component transformer test", func() { }...) // try again - err = transformer.Transform(transCtx, dag) + err = transformer.Transform(transCtx, newDAG(graphCli, transCtx.Cluster)) + Expect(err).Should(BeNil()) + }) + }) + + Context("sharding orders", func() { + It("w/o orders", func() { + transformer, transCtx, dag := newTransformerNCtx(clusterTopologyNoOrders4Sharding, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", ""). + AddSharding(sharding1bName, "", ""). + AddSharding(sharding2aName, "", ""). + AddSharding(sharding2bName, "", "") + }) + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + + // check the components + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(4)) + for _, obj := range objs { + comp := obj.(*appsv1.Component) + Expect(graphCli.IsAction(dag, comp, model.ActionCreatePtr())).Should(BeTrue()) + } + }) + + It("w/ orders provision - has no predecessors", func() { + transformer, transCtx, dag := newTransformerNCtx(clusterTopologyDefault4Sharding, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", ""). + AddSharding(sharding1bName, "", ""). + AddSharding(sharding2aName, "", ""). 
+ AddSharding(sharding2bName, "", "") + }) + err := transformer.Transform(transCtx, dag) + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(And(ContainSubstring("retry later"), ContainSubstring(sharding2aName))) + + // check the first two components + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(2)) + for _, obj := range objs { + comp := obj.(*appsv1.Component) + Expect(component.ShortName(transCtx.Cluster.Name, comp.Name)).Should(Or(HavePrefix(sharding1aName), HavePrefix(sharding1bName))) + Expect(graphCli.IsAction(dag, comp, model.ActionCreatePtr())).Should(BeTrue()) + } + }) + + It("w/ orders provision - has a predecessor not ready", func() { + transformer, transCtx, dag := newTransformerNCtx(clusterTopologyDefault4Sharding, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", ""). + AddSharding(sharding1bName, "", ""). + AddSharding(sharding2aName, "", ""). + AddSharding(sharding2bName, "", "") + }) + + // mock first two components status as running and creating + reader := &mockReader{ + objs: []client.Object{ + mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding1bName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.CreatingClusterCompPhase + }), + }, + } + transCtx.Client = model.NewGraphClient(reader) + + err := transformer.Transform(transCtx, dag) + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(And(ContainSubstring("retry later"), ContainSubstring(sharding2aName))) + + // should have no components to update + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(0)) + }) + + It("w/ orders provision - has a predecessor in DAG", func() { + transformer, transCtx, dag := newTransformerNCtx(clusterTopologyDefault4Sharding, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", ""). + AddSharding(sharding1bName, "", ""). + AddSharding(sharding2aName, "", ""). + AddSharding(sharding2bName, "", "") + }) + + // mock one of first two components status as running + reader := &mockReader{ + objs: []client.Object{ + mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + }, + } + transCtx.Client = model.NewGraphClient(reader) + + err := transformer.Transform(transCtx, dag) + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(And(ContainSubstring("retry later"), ContainSubstring(sharding2aName))) + + // should have one component to create + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(1)) + comp := objs[0].(*appsv1.Component) + Expect(component.ShortName(transCtx.Cluster.Name, comp.Name)).Should(HavePrefix(sharding1bName)) + Expect(graphCli.IsAction(dag, comp, model.ActionCreatePtr())).Should(BeTrue()) + }) + + It("w/ orders provision - all predecessors ready", func() { + transformer, transCtx, dag := newTransformerNCtx(clusterTopologyDefault4Sharding, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", ""). + AddSharding(sharding1bName, "", ""). + AddSharding(sharding2aName, "", ""). 
+ AddSharding(sharding2bName, "", "") + }) + + // mock first two components status as running + reader := &mockReader{ + objs: []client.Object{ + mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding1bName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + }, + } + transCtx.Client = model.NewGraphClient(reader) + + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + + // check the last two components + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(2)) + for _, obj := range objs { + comp := obj.(*appsv1.Component) + Expect(component.ShortName(transCtx.Cluster.Name, comp.Name)).Should(Or(HavePrefix(sharding2aName), HavePrefix(sharding2bName))) + Expect(graphCli.IsAction(dag, comp, model.ActionCreatePtr())).Should(BeTrue()) + } + }) + + It("w/ orders update - has no predecessors", func() { + transformer, transCtx, dag := newTransformerNCtx(clusterTopologyDefault4Sharding, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", ""). + AddSharding(sharding1bName, "", ""). + AddSharding(sharding2aName, "", ""). + AddSharding(sharding2bName, "", "") + }) + + // mock first two components + reader := &mockReader{ + objs: []client.Object{ + mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) { + comp.Spec.Replicas = 2 // to update + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding1bName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + }, + } + transCtx.Client = model.NewGraphClient(reader) + + err := transformer.Transform(transCtx, dag) + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(And(ContainSubstring("retry later"), ContainSubstring(sharding2aName))) + + // check the first component + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(1)) + comp := objs[0].(*appsv1.Component) + Expect(component.ShortName(transCtx.Cluster.Name, comp.Name)).Should(HavePrefix(sharding1aName)) + Expect(graphCli.IsAction(dag, comp, model.ActionUpdatePtr())).Should(BeTrue()) + }) + + It("w/ orders update - has a predecessor not ready", func() { + transformer, transCtx, dag := newTransformerNCtx(clusterTopologyDefault4Sharding, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", ""). + AddSharding(sharding1bName, "", ""). + AddSharding(sharding2aName, "", ""). 
+ AddSharding(sharding2bName, "", "") + }) + + // mock components + reader := &mockReader{ + objs: []client.Object{ + mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.CreatingClusterCompPhase // not ready + }), + mockShardingCompObj(transCtx, sharding1bName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding2aName, func(comp *appsv1.Component) { + comp.Spec.Replicas = 2 // to update + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding2bName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + }, + } + transCtx.Client = model.NewGraphClient(reader) + transCtx.OrigCluster.Generation += 1 // mock cluster spec update + + err := transformer.Transform(transCtx, dag) + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(And(ContainSubstring("retry later"), ContainSubstring(sharding2aName))) + + // should have no components to update + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(0)) + }) + + It("w/ orders update - has a predecessor in DAG", func() { + transformer, transCtx, dag := newTransformerNCtx(clusterTopologyDefault4Sharding, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", ""). + AddSharding(sharding1bName, "", ""). + AddSharding(sharding2aName, "", ""). + AddSharding(sharding2bName, "", "") + }) + + // mock components + reader := &mockReader{ + objs: []client.Object{ + mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) { + comp.Spec.Replicas = 2 // to update + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding1bName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding2aName, func(comp *appsv1.Component) { + comp.Spec.Replicas = 2 // to update + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding2bName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + }, + } + transCtx.Client = model.NewGraphClient(reader) + transCtx.OrigCluster.Generation += 1 // mock cluster spec update + + err := transformer.Transform(transCtx, dag) + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(And(ContainSubstring("retry later"), ContainSubstring(sharding2aName))) + + // should have one component to update + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(1)) + comp := objs[0].(*appsv1.Component) + Expect(component.ShortName(transCtx.Cluster.Name, comp.Name)).Should(HavePrefix(sharding1aName)) + Expect(graphCli.IsAction(dag, comp, model.ActionUpdatePtr())).Should(BeTrue()) + }) + + It("w/ orders update - all predecessors ready", func() { + transformer, transCtx, dag := newTransformerNCtx(clusterTopologyDefault4Sharding, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", ""). + AddSharding(sharding1bName, "", ""). + AddSharding(sharding2aName, "", ""). 
+ AddSharding(sharding2bName, "", "") + }) + + // mock components + reader := &mockReader{ + objs: []client.Object{ + mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding1bName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding2aName, func(comp *appsv1.Component) { + comp.Spec.Replicas = 2 // to update + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding2bName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + }, + } + transCtx.Client = model.NewGraphClient(reader) + transCtx.OrigCluster.Generation += 1 // mock cluster spec update + + err := transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(1)) + comp := objs[0].(*appsv1.Component) + Expect(component.ShortName(transCtx.Cluster.Name, comp.Name)).Should(HavePrefix(sharding2aName)) + Expect(graphCli.IsAction(dag, comp, model.ActionUpdatePtr())).Should(BeTrue()) + }) + + It("w/ orders update - stop", func() { + transformer, transCtx, dag := newTransformerNCtx(clusterTopologyStop4Sharding, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", ""). + AddSharding(sharding2aName, "", ""). + AddSharding(sharding3aName, "", "") + }) + + // mock to stop all components + reader := &mockReader{ + objs: []client.Object{ + mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding2aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding3aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + }, + } + transCtx.Client = model.NewGraphClient(reader) + for i, sharding := range transCtx.shardings { + transCtx.shardings[i].Template.Stop = pointer.Bool(true) + for j := range transCtx.shardingComps[sharding.Name] { + transCtx.shardingComps[sharding.Name][j].Stop = pointer.Bool(true) + } + } + transCtx.OrigCluster.Generation += 1 // mock cluster spec update + + err := transformer.Transform(transCtx, dag) + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(And(ContainSubstring("retry later"), ContainSubstring(sharding2aName))) + + // should have the first component to update only + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(1)) + comp := objs[0].(*appsv1.Component) + Expect(component.ShortName(transCtx.Cluster.Name, comp.Name)).Should(HavePrefix(sharding1aName)) + Expect(graphCli.IsAction(dag, comp, model.ActionUpdatePtr())).Should(BeTrue()) + Expect(comp.Spec.Stop).ShouldNot(BeNil()) + Expect(*comp.Spec.Stop).Should(BeTrue()) + }) + + It("w/ orders update - stop the second component", func() { + transformer, transCtx, dag := newTransformerNCtx(clusterTopologyStop4Sharding, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", ""). + AddSharding(sharding2aName, "", ""). 
+ AddSharding(sharding3aName, "", "") + }) + + // mock to stop all components and the first component has been stopped + reader := &mockReader{ + objs: []client.Object{ + mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) { + comp.Spec.Stop = pointer.Bool(true) + comp.Status.Phase = appsv1.StoppedClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding2aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding3aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + }, + } + transCtx.Client = model.NewGraphClient(reader) + for i, sharding := range transCtx.shardings { + transCtx.shardings[i].Template.Stop = pointer.Bool(true) + for j := range transCtx.shardingComps[sharding.Name] { + transCtx.shardingComps[sharding.Name][j].Stop = pointer.Bool(true) + } + } + transCtx.OrigCluster.Generation += 1 // mock cluster spec update + + err := transformer.Transform(transCtx, dag) + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(And(ContainSubstring("retry later"), ContainSubstring(sharding3aName))) + + // should have the second component to update only + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(1)) + comp := objs[0].(*appsv1.Component) + Expect(component.ShortName(transCtx.Cluster.Name, comp.Name)).Should(HavePrefix(sharding2aName)) + Expect(graphCli.IsAction(dag, comp, model.ActionUpdatePtr())).Should(BeTrue()) + Expect(comp.Spec.Stop).ShouldNot(BeNil()) + Expect(*comp.Spec.Stop).Should(BeTrue()) + }) + + It("w/ orders provision & update - OOD", func() { + transformer, transCtx, dag := newTransformerNCtx(clusterTopologyProvisionNUpdateOOD4Sharding, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", ""). + AddSharding(sharding1bName, "", ""). + AddSharding(sharding2aName, "", ""). 
+ AddSharding(sharding2bName, "", "") + }) + + // mock first two components status as running + reader := &mockReader{ + objs: []client.Object{ + mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding1bName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + }, + } + transCtx.Client = model.NewGraphClient(reader) + + // sharding2aName and sharding2bName are not ready (exist) when updating sharding1aName and sharding1bName + err := transformer.Transform(transCtx, dag) + Expect(err).ShouldNot(BeNil()) + Expect(ictrlutil.IsDelayedRequeueError(err)).Should(BeTrue()) + + // check the last two components under provisioning + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(2)) + for _, obj := range objs { + comp := obj.(*appsv1.Component) + Expect(component.ShortName(transCtx.Cluster.Name, comp.Name)).Should(Or(HavePrefix(sharding2aName), HavePrefix(sharding2bName))) + Expect(graphCli.IsAction(dag, comp, model.ActionCreatePtr())).Should(BeTrue()) + } + + // mock last two components status as running + reader.objs = append(reader.objs, []client.Object{ + mockShardingCompObj(transCtx, sharding2aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding2bName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + }...) + + // try again + err = transformer.Transform(transCtx, newDAG(graphCli, transCtx.Cluster)) Expect(err).Should(BeNil()) }) }) + + Context("component and sharding orders", func() { + It("provision", func() { + for _, suit := range []struct { + topology string + errMatcher types.GomegaMatcher + firstCreatedNameMatcher types.GomegaMatcher + secondCreatedNameMatcher types.GomegaMatcher + mockObjects func(*clusterTransformContext) []client.Object + }{ + { + topology: clusterTopologyCompNSharding, + errMatcher: ContainSubstring(sharding1aName), + firstCreatedNameMatcher: Equal(comp1aName), + secondCreatedNameMatcher: HavePrefix(sharding1aName), + mockObjects: func(transCtx *clusterTransformContext) []client.Object { + return []client.Object{ + mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + } + }, + }, + { + topology: clusterTopologyShardingNComp, + errMatcher: ContainSubstring(comp1aName), + firstCreatedNameMatcher: HavePrefix(sharding1aName), + secondCreatedNameMatcher: Equal(comp1aName), + mockObjects: func(transCtx *clusterTransformContext) []client.Object { + return []client.Object{ + mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + } + }, + }, + } { + By(suit.topology) + transformer, transCtx, dag := newTransformerNCtx(suit.topology, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", "") + }) + err := transformer.Transform(transCtx, dag) + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(And(ContainSubstring("retry later"), suit.errMatcher)) + + // check the first component + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(1)) + for _, obj := range objs { + comp := obj.(*appsv1.Component) + Expect(component.ShortName(transCtx.Cluster.Name, 
comp.Name)).Should(suit.firstCreatedNameMatcher) + Expect(graphCli.IsAction(dag, comp, model.ActionCreatePtr())).Should(BeTrue()) + } + + // mock first component status as running + reader := &mockReader{objs: suit.mockObjects(transCtx)} + transCtx.Client = model.NewGraphClient(reader) + + // try again and check the last component + dag = newDAG(graphCli, transCtx.Cluster) + err = transformer.Transform(transCtx, dag) + Expect(err).Should(BeNil()) + + graphCli = transCtx.Client.(model.GraphClient) + objs = graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(1)) + for _, obj := range objs { + comp := obj.(*appsv1.Component) + Expect(component.ShortName(transCtx.Cluster.Name, comp.Name)).Should(suit.secondCreatedNameMatcher) + Expect(graphCli.IsAction(dag, comp, model.ActionCreatePtr())).Should(BeTrue()) + } + } + }) + + It("update", func() { + for _, suit := range []struct { + topology string + errMatcher types.GomegaMatcher + updatedNameMatcher types.GomegaMatcher + }{ + {clusterTopologyCompNSharding, ContainSubstring(sharding1aName), Equal(comp1aName)}, + {clusterTopologyShardingNComp, ContainSubstring(comp1aName), HavePrefix(sharding1aName)}, + } { + By(suit.topology) + transformer, transCtx, dag := newTransformerNCtx(suit.topology, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", "") + }) + + reader := &mockReader{ + objs: []client.Object{ + mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) { + comp.Spec.Replicas = 2 // to update + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) { + comp.Spec.Replicas = 2 // to update + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + }, + } + transCtx.Client = model.NewGraphClient(reader) + transCtx.OrigCluster.Generation += 1 // mock cluster spec update + + err := transformer.Transform(transCtx, dag) + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(And(ContainSubstring("retry later"), suit.errMatcher)) + + // check the updated component + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(1)) + comp := objs[0].(*appsv1.Component) + Expect(component.ShortName(transCtx.Cluster.Name, comp.Name)).Should(suit.updatedNameMatcher) + Expect(graphCli.IsAction(dag, comp, model.ActionUpdatePtr())).Should(BeTrue()) + } + }) + + It("update - stop", func() { + for _, suit := range []struct { + topology string + errMatcher types.GomegaMatcher + updatedNameMatcher types.GomegaMatcher + }{ + {clusterTopologyCompNSharding, ContainSubstring(sharding1aName), Equal(comp1aName)}, + {clusterTopologyShardingNComp, ContainSubstring(comp1aName), HavePrefix(sharding1aName)}, + } { + By(suit.topology) + transformer, transCtx, dag := newTransformerNCtx(suit.topology, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", ""). 
+ AddSharding(sharding2aName, "", "") + }) + + // mock to stop all components and shardings + reader := &mockReader{ + objs: []client.Object{ + mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + }, + } + transCtx.Client = model.NewGraphClient(reader) + for i := range transCtx.components { + transCtx.components[i].Stop = pointer.Bool(true) + } + for i, sharding := range transCtx.shardings { + transCtx.shardings[i].Template.Stop = pointer.Bool(true) + for j := range transCtx.shardingComps[sharding.Name] { + transCtx.shardingComps[sharding.Name][j].Stop = pointer.Bool(true) + } + } + transCtx.OrigCluster.Generation += 1 // mock cluster spec update + + err := transformer.Transform(transCtx, dag) + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(And(ContainSubstring("retry later"), suit.errMatcher)) + + // should have the first component to update only + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(1)) + comp := objs[0].(*appsv1.Component) + Expect(component.ShortName(transCtx.Cluster.Name, comp.Name)).Should(suit.updatedNameMatcher) + Expect(graphCli.IsAction(dag, comp, model.ActionUpdatePtr())).Should(BeTrue()) + Expect(comp.Spec.Stop).ShouldNot(BeNil()) + Expect(*comp.Spec.Stop).Should(BeTrue()) + } + }) + + It("provision & update OOD", func() { + for _, suit := range []struct { + topology string + createdNameMatcher types.GomegaMatcher + firstMockObjects func(*clusterTransformContext) []client.Object + secondMockObjects func(*clusterTransformContext) []client.Object + }{ + { + topology: clusterTopologyCompNShardingOOD, + createdNameMatcher: HavePrefix(sharding1aName), + firstMockObjects: func(transCtx *clusterTransformContext) []client.Object { + return []client.Object{ + mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + } + }, + secondMockObjects: func(transCtx *clusterTransformContext) []client.Object { + return []client.Object{ + mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + } + }, + }, + { + topology: clusterTopologyShardingNCompOOD, + createdNameMatcher: Equal(comp1aName), + firstMockObjects: func(transCtx *clusterTransformContext) []client.Object { + return []client.Object{ + mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + } + }, + secondMockObjects: func(transCtx *clusterTransformContext) []client.Object { + return []client.Object{ + mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) { + comp.Status.Phase = appsv1.RunningClusterCompPhase + }), + } + }, + }, + } { + By(suit.topology) + transformer, transCtx, dag := newTransformerNCtx(suit.topology, func(f *testapps.MockClusterFactory) { + f.AddSharding(sharding1aName, "", "") + }) + + // mock first component status as running + reader := &mockReader{objs: suit.firstMockObjects(transCtx)} + transCtx.Client = model.NewGraphClient(reader) + + // sharding1aName(comp1aName) is not ready (exist) when updating comp1aName(sharding1aName) + err := transformer.Transform(transCtx, dag) + Expect(err).ShouldNot(BeNil()) + 
Expect(ictrlutil.IsDelayedRequeueError(err)).Should(BeTrue()) + + // check another component under provisioning + graphCli := transCtx.Client.(model.GraphClient) + objs := graphCli.FindAll(dag, &appsv1.Component{}) + Expect(len(objs)).Should(Equal(1)) + for _, obj := range objs { + comp := obj.(*appsv1.Component) + Expect(component.ShortName(transCtx.Cluster.Name, comp.Name)).Should(suit.createdNameMatcher) + Expect(graphCli.IsAction(dag, comp, model.ActionCreatePtr())).Should(BeTrue()) + } + + // mock another component status as running + reader.objs = append(reader.objs, suit.secondMockObjects(transCtx)...) + + // try again + err = transformer.Transform(transCtx, newDAG(graphCli, transCtx.Cluster)) + Expect(err).Should(BeNil()) + } + }) + }) }) diff --git a/controllers/apps/transformer_cluster_deletion.go b/controllers/apps/transformer_cluster_deletion.go index c8edd658296..f0790b312bb 100644 --- a/controllers/apps/transformer_cluster_deletion.go +++ b/controllers/apps/transformer_cluster_deletion.go @@ -34,7 +34,6 @@ import ( kbappsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" "github.com/apecloud/kubeblocks/pkg/constant" - "github.com/apecloud/kubeblocks/pkg/controller/component" "github.com/apecloud/kubeblocks/pkg/controller/graph" "github.com/apecloud/kubeblocks/pkg/controller/model" ) @@ -71,14 +70,14 @@ func (t *clusterDeletionTransformer) Transform(ctx graph.TransformContext, dag * transCtx.EventRecorder.Eventf(cluster, corev1.EventTypeNormal, constant.ReasonDeletingCR, "Deleting %s: %s", strings.ToLower(cluster.GetObjectKind().GroupVersionKind().Kind), cluster.GetName()) - // firstly, delete components in the order that topology defined. - deleteCompSet, err := deleteCompsInOrder4Terminate(transCtx, dag) + // firstly, delete components and shardings in the order that topology defined. 
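+ // the returned set contains the components and shardings that still exist; while it is non-empty the transformer returns early so that subsequent reconciliations can finish the ordered deletion.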
+ deleteSet, err := deleteCompNShardingInOrder4Terminate(transCtx, dag) if err != nil { return err } - if len(deleteCompSet) > 0 { + if len(deleteSet) > 0 { // wait for the components to be deleted to trigger the next reconcile - transCtx.Logger.Info(fmt.Sprintf("wait for the components to be deleted: %v", deleteCompSet)) + transCtx.Logger.Info(fmt.Sprintf("wait for the components and shardings to be deleted: %v", deleteSet)) return nil } @@ -196,20 +195,20 @@ func shouldSkipObjOwnedByComp(obj client.Object, cluster kbappsv1.Cluster) bool return true } -func deleteCompsInOrder4Terminate(transCtx *clusterTransformContext, dag *graph.DAG) (sets.Set[string], error) { - compNameSet, err := component.GetClusterComponentShortNameSet(transCtx.Context, transCtx.Client, transCtx.Cluster) +func deleteCompNShardingInOrder4Terminate(transCtx *clusterTransformContext, dag *graph.DAG) (sets.Set[string], error) { + nameSet, err := clusterRunningCompNShardingSet(transCtx.Context, transCtx.Client, transCtx.Cluster) if err != nil { return nil, err } - if len(compNameSet) == 0 { + if len(nameSet) == 0 { return nil, nil } if err = loadNCheckClusterDefinition(transCtx, transCtx.Cluster); err != nil { return nil, err } - err = deleteCompsInOrder(transCtx, dag, compNameSet, true) + err = deleteCompNShardingInOrder(transCtx, dag, nameSet, nil) if err != nil { return nil, err } - return compNameSet, nil + return nameSet, nil } diff --git a/controllers/apps/transformer_cluster_deletion_test.go b/controllers/apps/transformer_cluster_deletion_test.go index 8d16cefdeb1..7b90fac7650 100644 --- a/controllers/apps/transformer_cluster_deletion_test.go +++ b/controllers/apps/transformer_cluster_deletion_test.go @@ -30,6 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" + "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/graph" "github.com/apecloud/kubeblocks/pkg/controller/model" testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps" @@ -81,18 +82,21 @@ var _ = Describe("clusterDeletionTransformer", func() { ObjectMeta: metav1.ObjectMeta{ Namespace: testCtx.DefaultNamespace, Name: "test-cluster-comp1", + Labels: map[string]string{constant.AppInstanceLabelKey: cluster.Name}, }, }, &appsv1.Component{ ObjectMeta: metav1.ObjectMeta{ Namespace: testCtx.DefaultNamespace, Name: "test-cluster-comp2", + Labels: map[string]string{constant.AppInstanceLabelKey: cluster.Name}, }, }, &appsv1.Component{ ObjectMeta: metav1.ObjectMeta{ Namespace: testCtx.DefaultNamespace, Name: "test-cluster-comp3", + Labels: map[string]string{constant.AppInstanceLabelKey: cluster.Name}, }, }, }, @@ -105,7 +109,7 @@ var _ = Describe("clusterDeletionTransformer", func() { Logger: logger, Cluster: cluster.DeepCopy(), OrigCluster: cluster, - ClusterDef: clusterDef, + clusterDef: clusterDef, } dag = newDag(transCtx.Client.(model.GraphClient)) }) diff --git a/controllers/apps/transformer_cluster_meta.go b/controllers/apps/transformer_cluster_meta.go index 0e882d066d6..13285913648 100644 --- a/controllers/apps/transformer_cluster_meta.go +++ b/controllers/apps/transformer_cluster_meta.go @@ -26,11 +26,11 @@ import ( "github.com/apecloud/kubeblocks/pkg/controller/graph" ) -type clusterAssureMetaTransformer struct{} +type clusterMetaTransformer struct{} -var _ graph.Transformer = &clusterAssureMetaTransformer{} +var _ graph.Transformer = &clusterMetaTransformer{} -func (t *clusterAssureMetaTransformer) Transform(ctx graph.TransformContext, dag 
*graph.DAG) error { +func (t *clusterMetaTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { transCtx, _ := ctx.(*clusterTransformContext) cluster := transCtx.Cluster diff --git a/controllers/apps/transformer_cluster_normalization.go b/controllers/apps/transformer_cluster_normalization.go new file mode 100644 index 00000000000..e0d8f6a22a9 --- /dev/null +++ b/controllers/apps/transformer_cluster_normalization.go @@ -0,0 +1,398 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package apps + +import ( + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" + + appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" + "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/component" + "github.com/apecloud/kubeblocks/pkg/controller/graph" + "github.com/apecloud/kubeblocks/pkg/controller/model" + "github.com/apecloud/kubeblocks/pkg/controllerutil" +) + +// clusterNormalizationTransformer handles the cluster API conversion. +type clusterNormalizationTransformer struct{} + +var _ graph.Transformer = &clusterNormalizationTransformer{} + +func (t *clusterNormalizationTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { + transCtx, _ := ctx.(*clusterTransformContext) + cluster := transCtx.Cluster + if model.IsObjectDeleting(transCtx.OrigCluster) { + return nil + } + + var err error + defer func() { + setProvisioningStartedCondition(&cluster.Status.Conditions, cluster.Name, cluster.Generation, err) + }() + + // resolve all components and shardings from topology or specified + transCtx.components, transCtx.shardings, err = t.resolveCompsNShardings(transCtx, cluster) + if err != nil { + return err + } + + // resolve sharding and component definitions referenced for shardings + if err = t.resolveDefinitions4Shardings(transCtx); err != nil { + return err + } + + // resolve component definitions referenced for components + if err = t.resolveDefinitions4Components(transCtx); err != nil { + return err + } + + // write-back the resolved definitions and service versions to cluster spec. 
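+ // the write-back makes the normalized specs (topology defaults merged; componentDef, serviceVersion and shardingDef resolved) visible to later transformers that read cluster.Spec directly, e.g. the service and restore transformers.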
+ t.writeBackCompNShardingSpecs(transCtx) + + // build and validate all component specs for components and shardings + if err = t.validateNBuildAllCompSpecs(transCtx, cluster); err != nil { + return err + } + + return nil +} + +func (t *clusterNormalizationTransformer) resolveCompsNShardings(transCtx *clusterTransformContext, + cluster *appsv1.Cluster) ([]*appsv1.ClusterComponentSpec, []*appsv1.ClusterSharding, error) { + if withClusterTopology(cluster) { + return t.resolveCompsNShardingsFromTopology(transCtx.clusterDef, cluster) + } + if withClusterUserDefined(cluster) { + return t.resolveCompsNShardingsFromSpecified(transCtx, cluster) + } + return nil, nil, nil +} + +func (t *clusterNormalizationTransformer) resolveCompsNShardingsFromTopology(clusterDef *appsv1.ClusterDefinition, + cluster *appsv1.Cluster) ([]*appsv1.ClusterComponentSpec, []*appsv1.ClusterSharding, error) { + topology := referredClusterTopology(clusterDef, cluster.Spec.Topology) + if topology == nil { + return nil, nil, fmt.Errorf("referred cluster topology not found : %s", cluster.Spec.Topology) + } + + comps, err := t.resolveCompsFromTopology(*topology, cluster) + if err != nil { + return nil, nil, err + } + + shardings, err := t.resolveShardingsFromTopology(*topology, cluster) + if err != nil { + return nil, nil, err + } + return comps, shardings, nil +} + +func (t *clusterNormalizationTransformer) resolveCompsFromTopology(topology appsv1.ClusterTopology, + cluster *appsv1.Cluster) ([]*appsv1.ClusterComponentSpec, error) { + newCompSpec := func(comp appsv1.ClusterTopologyComponent) *appsv1.ClusterComponentSpec { + return &appsv1.ClusterComponentSpec{ + Name: comp.Name, + ComponentDef: comp.CompDef, + } + } + + mergeCompSpec := func(comp appsv1.ClusterTopologyComponent, compSpec *appsv1.ClusterComponentSpec) *appsv1.ClusterComponentSpec { + if len(compSpec.ComponentDef) == 0 { + compSpec.ComponentDef = comp.CompDef + } + return compSpec + } + + specifiedCompSpecs := make(map[string]*appsv1.ClusterComponentSpec) + for i, compSpec := range cluster.Spec.ComponentSpecs { + specifiedCompSpecs[compSpec.Name] = cluster.Spec.ComponentSpecs[i].DeepCopy() + } + + compSpecs := make([]*appsv1.ClusterComponentSpec, 0) + for i := range topology.Components { + comp := topology.Components[i] + if _, ok := specifiedCompSpecs[comp.Name]; ok { + compSpecs = append(compSpecs, mergeCompSpec(comp, specifiedCompSpecs[comp.Name])) + } else { + compSpecs = append(compSpecs, newCompSpec(comp)) + } + } + return compSpecs, nil +} + +func (t *clusterNormalizationTransformer) resolveShardingsFromTopology(topology appsv1.ClusterTopology, + cluster *appsv1.Cluster) ([]*appsv1.ClusterSharding, error) { + newSharding := func(sharding appsv1.ClusterTopologySharding) *appsv1.ClusterSharding { + return &appsv1.ClusterSharding{ + Name: sharding.Name, + ShardingDef: sharding.ShardingDef, + } + } + + mergeSharding := func(sharding appsv1.ClusterTopologySharding, spec *appsv1.ClusterSharding) *appsv1.ClusterSharding { + if len(spec.ShardingDef) == 0 { + spec.ShardingDef = sharding.ShardingDef + } + return spec + } + + specified := make(map[string]*appsv1.ClusterSharding) + for i, sharding := range cluster.Spec.Shardings { + specified[sharding.Name] = cluster.Spec.Shardings[i].DeepCopy() + } + + shardings := make([]*appsv1.ClusterSharding, 0) + for i := range topology.Shardings { + sharding := topology.Shardings[i] + if _, ok := specified[sharding.Name]; ok { + shardings = append(shardings, mergeSharding(sharding, specified[sharding.Name])) + } else { + 
shardings = append(shardings, newSharding(sharding)) + } + } + return shardings, nil +} + +func (t *clusterNormalizationTransformer) resolveCompsNShardingsFromSpecified(transCtx *clusterTransformContext, + cluster *appsv1.Cluster) ([]*appsv1.ClusterComponentSpec, []*appsv1.ClusterSharding, error) { + comps := make([]*appsv1.ClusterComponentSpec, 0) + for i := range cluster.Spec.ComponentSpecs { + comps = append(comps, cluster.Spec.ComponentSpecs[i].DeepCopy()) + } + shardings := make([]*appsv1.ClusterSharding, 0) + for i := range cluster.Spec.Shardings { + shardings = append(shardings, cluster.Spec.Shardings[i].DeepCopy()) + } + return comps, shardings, nil +} + +func (t *clusterNormalizationTransformer) resolveDefinitions4Shardings(transCtx *clusterTransformContext) error { + if len(transCtx.shardings) != 0 { + transCtx.shardingDefs = make(map[string]*appsv1.ShardingDefinition) + if transCtx.componentDefs == nil { + transCtx.componentDefs = make(map[string]*appsv1.ComponentDefinition) + } + for i, sharding := range transCtx.shardings { + shardingDef, compDef, serviceVersion, err := t.resolveShardingNCompDefinition(transCtx, sharding) + if err != nil { + return err + } + if shardingDef != nil { + transCtx.shardingDefs[shardingDef.Name] = shardingDef + // set the shardingDef as resolved + transCtx.shardings[i].ShardingDef = shardingDef.Name + } + transCtx.componentDefs[compDef.Name] = compDef + // set the componentDef and serviceVersion of template as resolved + transCtx.shardings[i].Template.ComponentDef = compDef.Name + transCtx.shardings[i].Template.ServiceVersion = serviceVersion + } + } + return nil +} + +func (t *clusterNormalizationTransformer) resolveShardingNCompDefinition(transCtx *clusterTransformContext, + sharding *appsv1.ClusterSharding) (*appsv1.ShardingDefinition, *appsv1.ComponentDefinition, string, error) { + comp, err := t.firstShardingComponent(transCtx, sharding) + if err != nil { + return nil, nil, "", err + } + + var shardingDef *appsv1.ShardingDefinition + shardingDefName := t.shardingDefinitionName(sharding, comp) + if len(shardingDefName) > 0 { + shardingDef, err = resolveShardingDefinition(transCtx.Context, transCtx.Client, shardingDefName) + if err != nil { + return nil, nil, "", err + } + } + + spec := sharding.Template + compDef, serviceVersion, err := t.resolveCompDefinitionNServiceVersionWithComp(transCtx, &spec, comp) + if err != nil { + return nil, nil, "", err + } + + return shardingDef, compDef, serviceVersion, err +} + +func (t *clusterNormalizationTransformer) firstShardingComponent(transCtx *clusterTransformContext, + sharding *appsv1.ClusterSharding) (*appsv1.Component, error) { + var ( + ctx = transCtx.Context + cli = transCtx.Client + cluster = transCtx.Cluster + ) + + compList := &appsv1.ComponentList{} + ml := client.MatchingLabels{ + constant.AppInstanceLabelKey: cluster.Name, + constant.KBAppShardingNameLabelKey: sharding.Name, + } + if err := cli.List(ctx, compList, client.InNamespace(cluster.Namespace), ml, client.Limit(1)); err != nil { + return nil, err + } + if len(compList.Items) == 0 { + return nil, nil + } + return &compList.Items[0], nil +} + +func (t *clusterNormalizationTransformer) shardingDefinitionName(sharding *appsv1.ClusterSharding, comp *appsv1.Component) string { + if comp != nil { + shardingDefName, ok := comp.Labels[constant.ShardingDefLabelKey] + if ok { + return shardingDefName + } + } + return sharding.ShardingDef +} + +func (t *clusterNormalizationTransformer) resolveDefinitions4Components(transCtx 
*clusterTransformContext) error { + if transCtx.componentDefs == nil { + transCtx.componentDefs = make(map[string]*appsv1.ComponentDefinition) + } + for i, compSpec := range transCtx.components { + compDef, serviceVersion, err := t.resolveCompDefinitionNServiceVersion(transCtx, compSpec) + if err != nil { + return err + } + transCtx.componentDefs[compDef.Name] = compDef + // set the componentDef and serviceVersion as resolved + transCtx.components[i].ComponentDef = compDef.Name + transCtx.components[i].ServiceVersion = serviceVersion + } + return nil +} + +func (t *clusterNormalizationTransformer) resolveCompDefinitionNServiceVersion(transCtx *clusterTransformContext, + compSpec *appsv1.ClusterComponentSpec) (*appsv1.ComponentDefinition, string, error) { + var ( + ctx = transCtx.Context + cli = transCtx.Client + cluster = transCtx.Cluster + ) + comp := &appsv1.Component{} + err := cli.Get(ctx, types.NamespacedName{Namespace: cluster.Namespace, Name: component.FullName(cluster.Name, compSpec.Name)}, comp) + if err != nil && !apierrors.IsNotFound(err) { + return nil, "", err + } + + if apierrors.IsNotFound(err) { + return t.resolveCompDefinitionNServiceVersionWithComp(transCtx, compSpec, nil) + } + return t.resolveCompDefinitionNServiceVersionWithComp(transCtx, compSpec, comp) +} + +func (t *clusterNormalizationTransformer) resolveCompDefinitionNServiceVersionWithComp(transCtx *clusterTransformContext, + compSpec *appsv1.ClusterComponentSpec, comp *appsv1.Component) (*appsv1.ComponentDefinition, string, error) { + var ( + ctx = transCtx.Context + cli = transCtx.Client + ) + if comp == nil || t.checkCompUpgrade(compSpec, comp) { + return resolveCompDefinitionNServiceVersion(ctx, cli, compSpec.ComponentDef, compSpec.ServiceVersion) + } + return resolveCompDefinitionNServiceVersion(ctx, cli, comp.Spec.CompDef, comp.Spec.ServiceVersion) +} + +func (t *clusterNormalizationTransformer) checkCompUpgrade(compSpec *appsv1.ClusterComponentSpec, comp *appsv1.Component) bool { + return compSpec.ServiceVersion != comp.Spec.ServiceVersion || compSpec.ComponentDef != comp.Spec.CompDef +} + +func (t *clusterNormalizationTransformer) writeBackCompNShardingSpecs(transCtx *clusterTransformContext) { + if len(transCtx.components) > 0 { + comps := make([]appsv1.ClusterComponentSpec, 0) + for i := range transCtx.components { + comps = append(comps, *transCtx.components[i]) + } + transCtx.Cluster.Spec.ComponentSpecs = comps + } + if len(transCtx.shardings) > 0 { + shardings := make([]appsv1.ClusterSharding, 0) + for i := range transCtx.shardings { + shardings = append(shardings, *transCtx.shardings[i]) + } + transCtx.Cluster.Spec.Shardings = shardings + } +} + +func (t *clusterNormalizationTransformer) validateNBuildAllCompSpecs(transCtx *clusterTransformContext, cluster *appsv1.Cluster) error { + var err error + if err = t.validateCompNShardingUnique(transCtx); err != nil { + return err + } + + if err = t.validateShardingShards(transCtx); err != nil { + return err + } + + transCtx.shardingComps, err = t.buildShardingComps(transCtx, cluster) + if err != nil { + return err + } + return nil +} + +func (t *clusterNormalizationTransformer) validateCompNShardingUnique(transCtx *clusterTransformContext) error { + if len(transCtx.shardings) == 0 || len(transCtx.components) == 0 { + return nil + } + + names := sets.New[string]() + for _, comp := range transCtx.components { + names.Insert(comp.Name) + } + for _, sharding := range transCtx.shardings { + if names.Has(sharding.Name) { + return fmt.Errorf(`duplicate name "%s" 
between spec.compSpecs and spec.shardings`, sharding.Name) + } + } + return nil +} + +func (t *clusterNormalizationTransformer) validateShardingShards(transCtx *clusterTransformContext) error { + for _, sharding := range transCtx.shardings { + shardingDef, ok := transCtx.shardingDefs[sharding.ShardingDef] + if ok && shardingDef != nil { + if err := validateShardingShards(shardingDef, sharding); err != nil { + return err + } + } + } + return nil +} + +func (t *clusterNormalizationTransformer) buildShardingComps(transCtx *clusterTransformContext, cluster *appsv1.Cluster) (map[string][]*appsv1.ClusterComponentSpec, error) { + shardingComps := make(map[string][]*appsv1.ClusterComponentSpec) + for _, sharding := range transCtx.shardings { + comps, err := controllerutil.GenShardingCompSpecList(transCtx.Context, transCtx.Client, cluster, sharding) + if err != nil { + return nil, err + } + shardingComps[sharding.Name] = comps + } + return shardingComps, nil +} diff --git a/controllers/apps/transformer_cluster_placement.go b/controllers/apps/transformer_cluster_placement.go index 920eeb095c4..4921b46507f 100644 --- a/controllers/apps/transformer_cluster_placement.go +++ b/controllers/apps/transformer_cluster_placement.go @@ -24,6 +24,7 @@ import ( "slices" "strings" + appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/graph" "github.com/apecloud/kubeblocks/pkg/controller/model" @@ -92,8 +93,8 @@ func (t *clusterPlacementTransformer) assign(transCtx *clusterTransformContext) func (t *clusterPlacementTransformer) maxReplicas(transCtx *clusterTransformContext) int { replicas := 0 - for _, comp := range transCtx.ComponentSpecs { - replicas = max(replicas, int(comp.Replicas)) - } + transCtx.traverse(func(spec *appsv1.ClusterComponentSpec) { + replicas = max(replicas, int(spec.Replicas)) + }) return replicas } diff --git a/controllers/apps/transformer_cluster_restore.go b/controllers/apps/transformer_cluster_restore.go index 0cb6543a926..969dfcec564 100644 --- a/controllers/apps/transformer_cluster_restore.go +++ b/controllers/apps/transformer_cluster_restore.go @@ -52,22 +52,22 @@ func (c *clusterRestoreTransformer) Transform(ctx graph.TransformContext, dag *g // when restoring a sharded cluster, it is essential to specify the 'sourceTarget' from which data should be restored for each sharded component. // to achieve this, we allocate the source target for each component using annotations. 
- for i := range c.Cluster.Spec.ShardingSpecs { - shardingSpec := c.Cluster.Spec.ShardingSpecs[i] - backupSource, ok := backupMap[shardingSpec.Name] + for i := range c.Cluster.Spec.Shardings { + spec := c.Cluster.Spec.Shardings[i] + backupSource, ok := backupMap[spec.Name] if !ok { continue } - backup, err := plan.GetBackupFromClusterAnnotation(c.Context, c.Client, backupSource, shardingSpec.Name, c.Cluster.Namespace) + backup, err := plan.GetBackupFromClusterAnnotation(c.Context, c.Client, backupSource, spec.Name, c.Cluster.Namespace) if err != nil { return err } - if len(backup.Status.Targets) > int(shardingSpec.Shards) { + if len(backup.Status.Targets) > int(spec.Shards) { return intctrlutil.NewErrorf(intctrlutil.ErrorTypeRestoreFailed, `the source targets count of the backup "%s" must be equal to or greater than the count of the shard components "%s"`, - backup.Name, shardingSpec.Name) + backup.Name, spec.Name) } - shardComponents, err := intctrlutil.ListShardingComponents(c.Context, c.Client, c.Cluster, shardingSpec.Name) + shardComponents, err := intctrlutil.ListShardingComponents(c.Context, c.Client, c.Cluster, spec.Name) if err != nil { return err } @@ -84,12 +84,12 @@ func (c *clusterRestoreTransformer) Transform(ctx graph.TransformContext, dag *g if targetName, ok := v.Annotations[constant.BackupSourceTargetAnnotationKey]; ok { compName := v.Labels[constant.KBAppComponentLabelKey] allocateTargetMap[targetName] = compName - c.Annotations[compName][constant.BackupSourceTargetAnnotationKey] = targetName + c.annotations[compName][constant.BackupSourceTargetAnnotationKey] = targetName } } if len(allocateTargetMap) == len(backup.Status.Targets) { // check if the restore is completed when all source target have allocated. - if err = c.cleanupRestoreAnnotationForSharding(dag, shardingSpec.Name, restoreDoneForShardComponents); err != nil { + if err = c.cleanupRestoreAnnotationForSharding(dag, spec.Name, restoreDoneForShardComponents); err != nil { return err } continue @@ -98,11 +98,11 @@ func (c *clusterRestoreTransformer) Transform(ctx graph.TransformContext, dag *g if _, ok = allocateTargetMap[target.Name]; ok { continue } - for _, compSpec := range c.ShardingComponentSpecs[shardingSpec.Name] { - if _, ok = c.Annotations[compSpec.Name][constant.BackupSourceTargetAnnotationKey]; ok { + for _, compSpec := range c.shardingComps[spec.Name] { + if _, ok = c.annotations[compSpec.Name][constant.BackupSourceTargetAnnotationKey]; ok { continue } - c.Annotations[compSpec.Name][constant.BackupSourceTargetAnnotationKey] = target.Name + c.annotations[compSpec.Name][constant.BackupSourceTargetAnnotationKey] = target.Name break } } diff --git a/controllers/apps/transformer_cluster_service.go b/controllers/apps/transformer_cluster_service.go index 537d282b911..16944b42000 100644 --- a/controllers/apps/transformer_cluster_service.go +++ b/controllers/apps/transformer_cluster_service.go @@ -118,7 +118,7 @@ func (t *clusterServiceTransformer) shardingService(cluster *appsv1.Cluster, ser if len(service.ComponentSelector) == 0 { return false, nil } - for _, spec := range cluster.Spec.ShardingSpecs { + for _, spec := range cluster.Spec.Shardings { if spec.Name == service.ComponentSelector { return true, nil } @@ -170,14 +170,14 @@ func (t *clusterServiceTransformer) checkComponentDef(transCtx *clusterTransform selector := service.ComponentSelector checkedCompDef := func(compDefName string) (*appsv1.ComponentDefinition, error) { - compDef, ok := transCtx.ComponentDefs[compDefName] + compDef, ok := 
transCtx.componentDefs[compDefName] if !ok { return nil, fmt.Errorf("the component definition of service selector is not defined, service: %s, component: %s", service.Name, selector) } return compDef, nil } - for _, spec := range cluster.Spec.ShardingSpecs { + for _, spec := range cluster.Spec.Shardings { if spec.Name == selector { return checkedCompDef(spec.Template.ComponentDef) } diff --git a/controllers/apps/transformer_cluster_service_test.go b/controllers/apps/transformer_cluster_service_test.go index 309296a7dc0..4ccc4f749dc 100644 --- a/controllers/apps/transformer_cluster_service_test.go +++ b/controllers/apps/transformer_cluster_service_test.go @@ -32,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" + "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/graph" "github.com/apecloud/kubeblocks/pkg/controller/model" "github.com/apecloud/kubeblocks/pkg/controllerutil" @@ -91,6 +92,7 @@ var _ = Describe("cluster service transformer test", func() { ObjectMeta: metav1.ObjectMeta{ Namespace: testCtx.DefaultNamespace, Name: clusterServiceName(clusterName, testapps.ServiceNodePortName), + Labels: constant.GetClusterLabels(clusterName), }, Spec: corev1.ServiceSpec{ Type: corev1.ServiceTypeNodePort, @@ -102,7 +104,6 @@ var _ = Describe("cluster service transformer test", func() { } Context("cluster service", func() { - It("deletion", func() { reader.objs = append(reader.objs, clusterNodePortService()) // remove cluster services @@ -157,5 +158,4 @@ var _ = Describe("cluster service transformer test", func() { Expect(graphCli.IsAction(dag, svc, model.ActionUpdatePtr())).Should(BeTrue()) } }) - }) diff --git a/controllers/apps/transformer_cluster_sharding_account.go b/controllers/apps/transformer_cluster_sharding_account.go new file mode 100644 index 00000000000..5af33550264 --- /dev/null +++ b/controllers/apps/transformer_cluster_sharding_account.go @@ -0,0 +1,239 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package apps + +import ( + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" + "github.com/apecloud/kubeblocks/pkg/common" + "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/builder" + "github.com/apecloud/kubeblocks/pkg/controller/graph" + "github.com/apecloud/kubeblocks/pkg/controller/model" +) + +// clusterShardingAccountTransformer handles shared system accounts for sharding. 
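+// For every account marked as shared in the referenced ShardingDefinition, it ensures a single Secret named "<cluster>-<sharding>-<account>" exists and rewrites the sharding template's SystemAccounts entry to reference that Secret, so all shards of the sharding use the same credential.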
+type clusterShardingAccountTransformer struct{} + +var _ graph.Transformer = &clusterShardingAccountTransformer{} + +func (t *clusterShardingAccountTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { + transCtx, _ := ctx.(*clusterTransformContext) + if model.IsObjectDeleting(transCtx.Cluster) { + return nil + } + + if common.IsCompactMode(transCtx.Cluster.Annotations) { + transCtx.V(1).Info("Cluster is in compact mode, no need to create account related objects", "cluster", client.ObjectKeyFromObject(transCtx.Cluster)) + return nil + } + + graphCli, _ := transCtx.Client.(model.GraphClient) + return t.reconcileShardingAccounts(transCtx, graphCli, dag) +} + +func (t *clusterShardingAccountTransformer) reconcileShardingAccounts(transCtx *clusterTransformContext, + graphCli model.GraphClient, dag *graph.DAG) error { + for _, sharding := range transCtx.shardings { + shardDef, ok := transCtx.shardingDefs[sharding.ShardingDef] + if ok { + for _, account := range shardDef.Spec.SystemAccounts { + if account.Shared != nil && *account.Shared { + if err := t.reconcileShardingAccount(transCtx, graphCli, dag, sharding, account.Name); err != nil { + return err + } + } + } + } + } + return nil +} + +func (t *clusterShardingAccountTransformer) reconcileShardingAccount(transCtx *clusterTransformContext, + graphCli model.GraphClient, dag *graph.DAG, sharding *appsv1.ClusterSharding, accountName string) error { + exist, err := t.checkSystemAccountSecret(transCtx, sharding, accountName) + if err != nil { + return err + } + if !exist { + obj, err := t.newSystemAccountSecret(transCtx, sharding, accountName) + if err != nil { + return err + } + graphCli.Create(dag, obj) + } + + // TODO: update + + t.rewriteSystemAccount(transCtx, sharding, accountName) + + return nil +} + +func (t *clusterShardingAccountTransformer) checkSystemAccountSecret(transCtx *clusterTransformContext, + sharding *appsv1.ClusterSharding, accountName string) (bool, error) { + var ( + cluster = transCtx.Cluster + ) + secretKey := types.NamespacedName{ + Namespace: cluster.Namespace, + Name: shardingAccountSecretName(cluster.Name, sharding.Name, accountName), + } + secret := &corev1.Secret{} + err := transCtx.GetClient().Get(transCtx.GetContext(), secretKey, secret) + if err != nil && !apierrors.IsNotFound(err) { + return false, err + } + return !apierrors.IsNotFound(err), nil +} + +func (t *clusterShardingAccountTransformer) newSystemAccountSecret(transCtx *clusterTransformContext, + sharding *appsv1.ClusterSharding, accountName string) (*corev1.Secret, error) { + account, err := t.definedSystemAccount(transCtx, sharding, accountName) + if err != nil { + return nil, err + } + + var password []byte + switch { + case account.SecretRef != nil: + var err error + if password, err = t.getPasswordFromSecret(transCtx, account); err != nil { + return nil, err + } + default: + password = t.buildPassword(account) + } + return t.newAccountSecretWithPassword(transCtx, sharding, accountName, password) +} + +func (t *clusterShardingAccountTransformer) definedSystemAccount(transCtx *clusterTransformContext, + sharding *appsv1.ClusterSharding, accountName string) (appsv1.SystemAccount, error) { + var compAccount *appsv1.ComponentSystemAccount + for i := range sharding.Template.SystemAccounts { + if sharding.Template.SystemAccounts[i].Name == accountName { + compAccount = &sharding.Template.SystemAccounts[i] + break + } + } + + compDef, ok := transCtx.componentDefs[sharding.Template.ComponentDef] + if !ok || compDef == nil { + return 
appsv1.SystemAccount{}, fmt.Errorf("component definition %s not found for sharding %s", sharding.Template.ComponentDef, sharding.Name) + } + + override := func(account *appsv1.SystemAccount) appsv1.SystemAccount { + if compAccount != nil { + if compAccount.PasswordConfig != nil { + account.PasswordGenerationPolicy = *compAccount.PasswordConfig + } + account.SecretRef = compAccount.SecretRef + } + return *account + } + + for i, account := range compDef.Spec.SystemAccounts { + if account.Name == accountName { + return override(compDef.Spec.SystemAccounts[i].DeepCopy()), nil + } + } + return appsv1.SystemAccount{}, fmt.Errorf("system account %s not found in component definition %s", accountName, compDef.Name) +} + +func (t *clusterShardingAccountTransformer) getPasswordFromSecret(ctx graph.TransformContext, account appsv1.SystemAccount) ([]byte, error) { + secretKey := types.NamespacedName{ + Namespace: account.SecretRef.Namespace, + Name: account.SecretRef.Name, + } + secret := &corev1.Secret{} + if err := ctx.GetClient().Get(ctx.GetContext(), secretKey, secret); err != nil { + return nil, err + } + if len(secret.Data) == 0 || len(secret.Data[constant.AccountPasswdForSecret]) == 0 { + return nil, fmt.Errorf("referenced account secret has no required credential field") + } + return secret.Data[constant.AccountPasswdForSecret], nil +} + +func (t *clusterShardingAccountTransformer) buildPassword(account appsv1.SystemAccount) []byte { + // TODO: restore + return t.generatePassword(account) +} + +func (t *clusterShardingAccountTransformer) generatePassword(account appsv1.SystemAccount) []byte { + config := account.PasswordGenerationPolicy + passwd, _ := common.GeneratePassword((int)(config.Length), (int)(config.NumDigits), (int)(config.NumSymbols), false, config.Seed) + switch config.LetterCase { + case appsv1.UpperCases: + passwd = strings.ToUpper(passwd) + case appsv1.LowerCases: + passwd = strings.ToLower(passwd) + } + return []byte(passwd) +} + +func (t *clusterShardingAccountTransformer) newAccountSecretWithPassword(transCtx *clusterTransformContext, + sharding *appsv1.ClusterSharding, accountName string, password []byte) (*corev1.Secret, error) { + var ( + cluster = transCtx.Cluster + ) + shardingLabels := map[string]string{ + constant.KBAppShardingNameLabelKey: sharding.Name, + } + secret := builder.NewSecretBuilder(cluster.Namespace, shardingAccountSecretName(cluster.Name, sharding.Name, accountName)). + AddLabelsInMap(constant.GetClusterLabels(cluster.Name, shardingLabels)). + PutData(constant.AccountNameForSecret, []byte(accountName)). + PutData(constant.AccountPasswdForSecret, password). + SetImmutable(true). 
+ GetObject() + return secret, nil +} + +func (t *clusterShardingAccountTransformer) rewriteSystemAccount(transCtx *clusterTransformContext, + sharding *appsv1.ClusterSharding, accountName string) { + var ( + cluster = transCtx.Cluster + ) + account := appsv1.ComponentSystemAccount{ + Name: accountName, + SecretRef: &appsv1.ProvisionSecretRef{ + Name: shardingAccountSecretName(cluster.Name, sharding.Name, accountName), + Namespace: cluster.Namespace, + }, + } + for i := range sharding.Template.SystemAccounts { + if sharding.Template.SystemAccounts[i].Name == accountName { + sharding.Template.SystemAccounts[i] = account + return + } + } + sharding.Template.SystemAccounts = []appsv1.ComponentSystemAccount{account} +} + +func shardingAccountSecretName(cluster, sharding, account string) string { + return fmt.Sprintf("%s-%s-%s", cluster, sharding, account) +} diff --git a/controllers/apps/transformer_cluster_shared_account.go b/controllers/apps/transformer_cluster_shared_account.go deleted file mode 100644 index a52932660c3..00000000000 --- a/controllers/apps/transformer_cluster_shared_account.go +++ /dev/null @@ -1,176 +0,0 @@ -/* -Copyright (C) 2022-2024 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package apps - -import ( - "strings" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" - "github.com/apecloud/kubeblocks/pkg/common" - "github.com/apecloud/kubeblocks/pkg/constant" - "github.com/apecloud/kubeblocks/pkg/controller/builder" - "github.com/apecloud/kubeblocks/pkg/controller/graph" - "github.com/apecloud/kubeblocks/pkg/controller/model" -) - -// clusterSharedAccountTransformer handles the shared system accounts between components in a cluster. 
-type clusterSharedAccountTransformer struct{} - -var _ graph.Transformer = &clusterSharedAccountTransformer{} - -func (t *clusterSharedAccountTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { - transCtx, _ := ctx.(*clusterTransformContext) - if model.IsObjectDeleting(transCtx.Cluster) { - return nil - } - - if common.IsCompactMode(transCtx.Cluster.Annotations) { - transCtx.V(1).Info("Cluster is in compact mode, no need to create account related objects", "cluster", client.ObjectKeyFromObject(transCtx.Cluster)) - return nil - } - - // currently, we only support shared system account for sharding components - graphCli, _ := transCtx.Client.(model.GraphClient) - return t.reconcileShardingsSharedAccounts(transCtx, graphCli, dag) -} - -func (t *clusterSharedAccountTransformer) reconcileShardingsSharedAccounts(transCtx *clusterTransformContext, - graphCli model.GraphClient, dag *graph.DAG) error { - if len(transCtx.Cluster.Spec.ShardingSpecs) == 0 { - return nil - } - - for _, shardingSpec := range transCtx.Cluster.Spec.ShardingSpecs { - if len(shardingSpec.Template.SystemAccounts) == 0 { - return nil - } - for i, account := range shardingSpec.Template.SystemAccounts { - needCreate, err := t.needCreateSharedAccount(transCtx, &account, shardingSpec) - if err != nil { - return err - } - if !needCreate { - continue - } - if err := t.createNConvertToSharedAccountSecret(transCtx, &account, shardingSpec, graphCli, dag); err != nil { - return err - } - shardingSpec.Template.SystemAccounts[i] = account - } - } - return nil -} - -func (t *clusterSharedAccountTransformer) needCreateSharedAccount(transCtx *clusterTransformContext, - account *appsv1.ComponentSystemAccount, shardingSpec appsv1.ShardingSpec) (bool, error) { - // respect the secretRef if it is set - if account.SecretRef != nil { - return false, nil - } - - // if seed is not set, we consider it does not need to share the same account secret - // TODO: wo may support another way to judge if the need to create shared account secret in the future - if account.PasswordConfig != nil && len(account.PasswordConfig.Seed) == 0 { - return false, nil - } - - secretName := constant.GenerateShardingSharedAccountSecretName(transCtx.Cluster.Name, shardingSpec.Name, account.Name) - if secret, err := t.checkShardingSharedAccountSecretExist(transCtx, transCtx.Cluster, secretName); err != nil { - return false, err - } else if secret != nil { - return false, nil - } - - return true, nil -} - -func (t *clusterSharedAccountTransformer) createNConvertToSharedAccountSecret(transCtx *clusterTransformContext, - account *appsv1.ComponentSystemAccount, shardingSpec appsv1.ShardingSpec, graphCli model.GraphClient, dag *graph.DAG) error { - // Create the shared account secret if it does not exist - secretName := constant.GenerateShardingSharedAccountSecretName(transCtx.Cluster.Name, shardingSpec.Name, account.Name) - secret, err := t.buildAccountSecret(transCtx.Cluster, *account, shardingSpec.Name, secretName) - if err != nil { - return err - } - graphCli.Create(dag, secret) - - // Update account SecretRef to the shared secret - account.SecretRef = &appsv1.ProvisionSecretRef{ - Name: secret.Name, - Namespace: transCtx.Cluster.Namespace, - } - - return nil -} - -func (t *clusterSharedAccountTransformer) checkShardingSharedAccountSecretExist(transCtx *clusterTransformContext, - cluster *appsv1.Cluster, secretName string) (*corev1.Secret, error) { - secretKey := types.NamespacedName{ - Namespace: cluster.Namespace, - Name: secretName, - } - secret := 
&corev1.Secret{} - err := transCtx.GetClient().Get(transCtx.GetContext(), secretKey, secret) - switch { - case err == nil: - return secret, nil - case apierrors.IsNotFound(err): - return nil, nil - default: - return nil, err - } -} - -func (t *clusterSharedAccountTransformer) buildAccountSecret(cluster *appsv1.Cluster, - account appsv1.ComponentSystemAccount, shardingName, secretName string) (*corev1.Secret, error) { - password := t.generatePassword(account) - return t.buildAccountSecretWithPassword(cluster, account, shardingName, secretName, password) -} - -func (t *clusterSharedAccountTransformer) generatePassword(account appsv1.ComponentSystemAccount) []byte { - config := account.PasswordConfig - passwd, _ := common.GeneratePassword((int)(config.Length), (int)(config.NumDigits), (int)(config.NumSymbols), false, "") - switch config.LetterCase { - case appsv1.UpperCases: - passwd = strings.ToUpper(passwd) - case appsv1.LowerCases: - passwd = strings.ToLower(passwd) - } - return []byte(passwd) -} - -func (t *clusterSharedAccountTransformer) buildAccountSecretWithPassword(cluster *appsv1.Cluster, - account appsv1.ComponentSystemAccount, shardingName, secretName string, password []byte) (*corev1.Secret, error) { - shardingLabels := map[string]string{ - constant.KBAppShardingNameLabelKey: shardingName, - } - secret := builder.NewSecretBuilder(cluster.Namespace, secretName). - AddLabelsInMap(constant.GetClusterLabels(cluster.Name, shardingLabels)). - PutData(constant.AccountNameForSecret, []byte(account.Name)). - PutData(constant.AccountPasswdForSecret, password). - SetImmutable(true). - GetObject() - return secret, nil -} diff --git a/controllers/apps/transformer_cluster_status.go b/controllers/apps/transformer_cluster_status.go index 8f210bfa6e6..d0f8716069e 100644 --- a/controllers/apps/transformer_cluster_status.go +++ b/controllers/apps/transformer_cluster_status.go @@ -22,6 +22,7 @@ package apps import ( "fmt" + "golang.org/x/exp/maps" "k8s.io/apimachinery/pkg/api/meta" appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" @@ -29,12 +30,7 @@ import ( "github.com/apecloud/kubeblocks/pkg/controller/model" ) -type clusterStatusTransformer struct { - // replicasNotReadyCompNames records the component names that are not ready. - notReadyCompNames map[string]struct{} - // replicasNotReadyCompNames records the component names which replicas are not ready. 
- replicasNotReadyCompNames map[string]struct{} -} +type clusterStatusTransformer struct{} var _ graph.Transformer = &clusterStatusTransformer{} @@ -44,19 +40,14 @@ func (t *clusterStatusTransformer) Transform(ctx graph.TransformContext, dag *gr cluster := transCtx.Cluster graphCli, _ := transCtx.Client.(model.GraphClient) - updateObservedGeneration := func() { - cluster.Status.ObservedGeneration = cluster.Generation - cluster.Status.ClusterDefGeneration = transCtx.ClusterDef.Generation - } - switch { case origCluster.IsUpdating(): transCtx.Logger.Info(fmt.Sprintf("update cluster status after applying resources, generation: %d", cluster.Generation)) - updateObservedGeneration() + cluster.Status.ObservedGeneration = cluster.Generation t.markClusterDagStatusAction(graphCli, dag, origCluster, cluster) case origCluster.IsStatusUpdating(): defer func() { t.markClusterDagStatusAction(graphCli, dag, origCluster, cluster) }() - // reconcile the phase and conditions of the Cluster.status + // reconcile the phase and conditions of the cluster.status if err := t.reconcileClusterStatus(transCtx, cluster); err != nil { return err } @@ -71,7 +62,7 @@ func (t *clusterStatusTransformer) Transform(ctx graph.TransformContext, dag *gr func (t *clusterStatusTransformer) markClusterDagStatusAction(graphCli model.GraphClient, dag *graph.DAG, origCluster, cluster *appsv1.Cluster) { if vertex := graphCli.FindMatchedVertex(dag, cluster); vertex != nil { - // check if the component needs to do other action. + // check if the cluster needs to do other action. ov, _ := vertex.(*model.ObjectVertex) if ov.Action != model.ActionNoopPtr() { return @@ -80,7 +71,86 @@ func (t *clusterStatusTransformer) markClusterDagStatusAction(graphCli model.Gra graphCli.Status(dag, origCluster, cluster) } -func (t *clusterStatusTransformer) reconcileClusterPhase(cluster *appsv1.Cluster) { +func (t *clusterStatusTransformer) reconcileClusterStatus(transCtx *clusterTransformContext, cluster *appsv1.Cluster) error { + if len(cluster.Status.Components) == 0 && len(cluster.Status.Shardings) == 0 { + return nil + } + + // t.removeDeletedCompNSharding(transCtx, cluster) + + oldPhase := t.reconcileClusterPhase(cluster) + + t.syncClusterConditions(cluster, oldPhase) + + return nil +} + +// func (t *clusterStatusTransformer) removeDeletedCompNSharding(transCtx *clusterTransformContext, cluster *appsv1.Cluster) { +// func() { +// tmp := map[string]appsv1.ClusterComponentStatus{} +// compsStatus := cluster.Status.Components +// for _, v := range transCtx.components { +// if status, ok := compsStatus[v.Name]; ok { +// tmp[v.Name] = status +// } +// } +// cluster.Status.Components = tmp +// }() +// func() { +// tmp := map[string]appsv1.ClusterComponentStatus{} +// shardingsStatus := cluster.Status.Shardings +// for _, v := range transCtx.shardings { +// if status, ok := shardingsStatus[v.Name]; ok { +// tmp[v.Name] = status +// } +// } +// cluster.Status.Shardings = tmp +// }() +// } + +func (t *clusterStatusTransformer) reconcileClusterPhase(cluster *appsv1.Cluster) appsv1.ClusterPhase { + statusList := make([]appsv1.ClusterComponentStatus, 0) + if cluster.Status.Components != nil { + statusList = append(statusList, maps.Values(cluster.Status.Components)...) + } + if cluster.Status.Shardings != nil { + statusList = append(statusList, maps.Values(cluster.Status.Shardings)...) 
+ } + newPhase := composeClusterPhase(statusList) + + phase := cluster.Status.Phase + if newPhase != "" { + cluster.Status.Phase = newPhase + } + return phase +} + +func (t *clusterStatusTransformer) syncClusterConditions(cluster *appsv1.Cluster, oldPhase appsv1.ClusterPhase) { + if cluster.Status.Phase == appsv1.RunningClusterPhase && oldPhase != cluster.Status.Phase { + meta.SetStatusCondition(&cluster.Status.Conditions, newClusterReadyCondition(cluster.Name)) + return + } + + kindNames := map[string][]string{} + for kind, statusMap := range map[string]map[string]appsv1.ClusterComponentStatus{ + "component": cluster.Status.Components, + "sharding": cluster.Status.Shardings, + } { + for name, status := range statusMap { + if status.Phase == appsv1.AbnormalClusterCompPhase || status.Phase == appsv1.FailedClusterCompPhase { + if _, ok := kindNames[kind]; !ok { + kindNames[kind] = []string{} + } + kindNames[kind] = append(kindNames[kind], name) + } + } + } + if len(kindNames) > 0 { + meta.SetStatusCondition(&cluster.Status.Conditions, newClusterNotReadyCondition(cluster.Name, kindNames)) + } +} + +func composeClusterPhase(statusList []appsv1.ClusterComponentStatus) appsv1.ClusterPhase { var ( isAllComponentCreating = true isAllComponentRunning = true @@ -98,7 +168,7 @@ func (t *clusterStatusTransformer) reconcileClusterPhase(cluster *appsv1.Cluster } return false } - for _, status := range cluster.Status.Components { + for _, status := range statusList { phase := status.Phase if !isPhaseIn(phase, appsv1.CreatingClusterCompPhase) { isAllComponentCreating = false @@ -106,9 +176,7 @@ func (t *clusterStatusTransformer) reconcileClusterPhase(cluster *appsv1.Cluster if !isPhaseIn(phase, appsv1.RunningClusterCompPhase) { isAllComponentRunning = false } - if !isPhaseIn(phase, appsv1.CreatingClusterCompPhase, - appsv1.RunningClusterCompPhase, - appsv1.UpdatingClusterCompPhase) { + if !isPhaseIn(phase, appsv1.CreatingClusterCompPhase, appsv1.RunningClusterCompPhase, appsv1.UpdatingClusterCompPhase) { isAllComponentWorking = false } if isPhaseIn(phase, appsv1.StoppingClusterCompPhase) { @@ -127,125 +195,20 @@ func (t *clusterStatusTransformer) reconcileClusterPhase(cluster *appsv1.Cluster switch { case isAllComponentRunning: - if cluster.Status.Phase != appsv1.RunningClusterPhase { - t.syncClusterPhaseToRunning(cluster) - } + return appsv1.RunningClusterPhase case isAllComponentCreating: - cluster.Status.Phase = appsv1.CreatingClusterPhase + return appsv1.CreatingClusterPhase case isAllComponentWorking: - cluster.Status.Phase = appsv1.UpdatingClusterPhase + return appsv1.UpdatingClusterPhase case isAllComponentStopped: - if cluster.Status.Phase != appsv1.StoppedClusterPhase { - t.syncClusterPhaseToStopped(cluster) - } + return appsv1.StoppedClusterPhase case hasComponentStopping: - cluster.Status.Phase = appsv1.StoppingClusterPhase + return appsv1.StoppingClusterPhase case isAllComponentFailed: - cluster.Status.Phase = appsv1.FailedClusterPhase + return appsv1.FailedClusterPhase case hasComponentAbnormalOrFailed: - cluster.Status.Phase = appsv1.AbnormalClusterPhase + return appsv1.AbnormalClusterPhase default: - // nothing + return "" } } - -// reconcileClusterStatus reconciles phase and conditions of the Cluster.status. 
-func (t *clusterStatusTransformer) reconcileClusterStatus(transCtx *clusterTransformContext, cluster *appsv1.Cluster) error { - if len(cluster.Status.Components) == 0 { - return nil - } - initClusterStatusParams := func() { - t.notReadyCompNames = map[string]struct{}{} - t.replicasNotReadyCompNames = map[string]struct{}{} - } - initClusterStatusParams() - - // removes the invalid component of status.components which is deleted from spec.components. - t.removeInvalidCompStatus(transCtx, cluster) - - // do analysis of Cluster.Status.component and update the results to status synchronizer. - t.doAnalysisAndUpdateSynchronizer(cluster) - - // handle the ready condition. - t.syncReadyConditionForCluster(cluster) - - // sync the cluster phase. - t.reconcileClusterPhase(cluster) - - // removes the component of status.components which is created by simplified API. - t.removeInnerCompStatus(transCtx, cluster) - return nil -} - -// removeInvalidCompStatus removes the invalid component of status.components which is deleted from spec.components. -func (t *clusterStatusTransformer) removeInvalidCompStatus(transCtx *clusterTransformContext, cluster *appsv1.Cluster) { - // removes deleted components and keeps created components by simplified API - t.removeCompStatus(cluster, transCtx.ComponentSpecs) -} - -// removeInnerCompStatus removes the component of status.components which is created by simplified API. -func (t *clusterStatusTransformer) removeInnerCompStatus(transCtx *clusterTransformContext, cluster *appsv1.Cluster) { - compSpecs := make([]*appsv1.ClusterComponentSpec, 0) - for i := range cluster.Spec.ComponentSpecs { - compSpecs = append(compSpecs, &cluster.Spec.ComponentSpecs[i]) - } - // TODO: how to display the status of sharding components - /* for _, v := range transCtx.ShardingComponentSpecs { - compSpecs = append(compSpecs, v...) - }*/ - t.removeCompStatus(cluster, compSpecs) -} - -// removeCompStatus removes the component of status.components which is not in comp specs. -func (t *clusterStatusTransformer) removeCompStatus(cluster *appsv1.Cluster, compSpecs []*appsv1.ClusterComponentSpec) { - tmpCompsStatus := map[string]appsv1.ClusterComponentStatus{} - compsStatus := cluster.Status.Components - for _, v := range compSpecs { - if compStatus, ok := compsStatus[v.Name]; ok { - tmpCompsStatus[v.Name] = compStatus - } - } - // keep valid components' status - cluster.Status.Components = tmpCompsStatus -} - -// doAnalysisAndUpdateSynchronizer analyzes the Cluster.Status.Components and updates the results to the synchronizer. -func (t *clusterStatusTransformer) doAnalysisAndUpdateSynchronizer(cluster *appsv1.Cluster) { - // analysis the status of components and calculate the cluster phase. - for k, v := range cluster.Status.Components { - // if v.PodsReady == nil || !*v.PodsReady { - // t.replicasNotReadyCompNames[k] = struct{}{} - // t.notReadyCompNames[k] = struct{}{} - // } - switch v.Phase { - case appsv1.AbnormalClusterCompPhase, appsv1.FailedClusterCompPhase: - t.notReadyCompNames[k] = struct{}{} - } - } -} - -// syncReadyConditionForCluster syncs the cluster conditions with ClusterReady and ReplicasReady type. 
-func (t *clusterStatusTransformer) syncReadyConditionForCluster(cluster *appsv1.Cluster) { - if len(t.replicasNotReadyCompNames) == 0 { - // if all replicas of cluster are ready, set ReasonAllReplicasReady to status.conditions - readyCondition := newAllReplicasPodsReadyConditions() - meta.SetStatusCondition(&cluster.Status.Conditions, readyCondition) - } else { - meta.SetStatusCondition(&cluster.Status.Conditions, newReplicasNotReadyCondition(t.replicasNotReadyCompNames)) - } - - if len(t.notReadyCompNames) > 0 { - meta.SetStatusCondition(&cluster.Status.Conditions, newComponentsNotReadyCondition(t.notReadyCompNames)) - } -} - -// syncClusterPhaseToRunning syncs the cluster phase to Running. -func (t *clusterStatusTransformer) syncClusterPhaseToRunning(cluster *appsv1.Cluster) { - cluster.Status.Phase = appsv1.RunningClusterPhase - meta.SetStatusCondition(&cluster.Status.Conditions, newClusterReadyCondition(cluster.Name)) -} - -// syncClusterPhaseToStopped syncs the cluster phase to Stopped. -func (t *clusterStatusTransformer) syncClusterPhaseToStopped(cluster *appsv1.Cluster) { - cluster.Status.Phase = appsv1.StoppedClusterPhase -} diff --git a/controllers/apps/transformer_cluster_load_resources.go b/controllers/apps/transformer_cluster_validation.go similarity index 75% rename from controllers/apps/transformer_cluster_load_resources.go rename to controllers/apps/transformer_cluster_validation.go index 07ac4103f3b..e92e186962d 100644 --- a/controllers/apps/transformer_cluster_load_resources.go +++ b/controllers/apps/transformer_cluster_validation.go @@ -31,12 +31,12 @@ import ( "github.com/apecloud/kubeblocks/pkg/generics" ) -// clusterLoadRefResourcesTransformer loads and validates referenced resources (cd & cv). -type clusterLoadRefResourcesTransformer struct{} +// clusterValidationTransformer validates the cluster spec. 
+type clusterValidationTransformer struct{} -var _ graph.Transformer = &clusterLoadRefResourcesTransformer{} +var _ graph.Transformer = &clusterValidationTransformer{} -func (t *clusterLoadRefResourcesTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { +func (t *clusterValidationTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { transCtx, _ := ctx.(*clusterTransformContext) cluster := transCtx.Cluster @@ -55,7 +55,7 @@ func (t *clusterLoadRefResourcesTransformer) Transform(ctx graph.TransformContex return newRequeueError(requeueDuration, err.Error()) } - if err = t.checkAllCompDefinition(cluster); err != nil { + if err = t.checkDefinitionNamePattern(cluster); err != nil { return newRequeueError(requeueDuration, err.Error()) } @@ -69,39 +69,41 @@ func (t *clusterLoadRefResourcesTransformer) Transform(ctx graph.TransformContex return nil } -func (t *clusterLoadRefResourcesTransformer) apiValidation(cluster *appsv1.Cluster) error { - if withClusterTopology(cluster) || - withClusterUserDefined(cluster) { +func (t *clusterValidationTransformer) apiValidation(cluster *appsv1.Cluster) error { + if withClusterTopology(cluster) || withClusterUserDefined(cluster) { return nil } return fmt.Errorf("cluster API validate error, clusterDef: %s, topology: %s, comps: %d", cluster.Spec.ClusterDef, cluster.Spec.Topology, clusterCompCnt(cluster)) } -func (t *clusterLoadRefResourcesTransformer) checkAllCompDefinition(cluster *appsv1.Cluster) error { - validate := func(spec appsv1.ClusterComponentSpec) error { - if len(spec.ComponentDef) > 0 { - if err := component.ValidateCompDefRegexp(spec.ComponentDef); err != nil { - return errors.Wrapf(err, "invalid reference component definition name pattern: %s", spec.ComponentDef) +func (t *clusterValidationTransformer) checkDefinitionNamePattern(cluster *appsv1.Cluster) error { + validate := func(name string) error { + if len(name) > 0 { + if err := component.ValidateDefNameRegexp(name); err != nil { + return errors.Wrapf(err, "invalid reference component/sharding definition name: %s", name) } } return nil } for _, compSpec := range cluster.Spec.ComponentSpecs { - if err := validate(compSpec); err != nil { + if err := validate(compSpec.ComponentDef); err != nil { return err } } - for _, shardingSpec := range cluster.Spec.ShardingSpecs { - if err := validate(shardingSpec.Template); err != nil { + for _, spec := range cluster.Spec.Shardings { + if err := validate(spec.ShardingDef); err != nil { + return err + } + if err := validate(spec.Template.ComponentDef); err != nil { return err } } return nil } -func (t *clusterLoadRefResourcesTransformer) checkNUpdateClusterTopology(transCtx *clusterTransformContext, cluster *appsv1.Cluster) error { - clusterTopology := referredClusterTopology(transCtx.ClusterDef, cluster.Spec.Topology) +func (t *clusterValidationTransformer) checkNUpdateClusterTopology(transCtx *clusterTransformContext, cluster *appsv1.Cluster) error { + clusterTopology := referredClusterTopology(transCtx.clusterDef, cluster.Spec.Topology) if clusterTopology == nil { return fmt.Errorf("specified cluster topology not found: %s", cluster.Spec.Topology) } @@ -143,7 +145,7 @@ func loadNCheckClusterDefinition(transCtx *clusterTransformContext, cluster *app if cd == nil { cd = &appsv1.ClusterDefinition{} } - transCtx.ClusterDef = cd + transCtx.clusterDef = cd return nil } @@ -165,7 +167,7 @@ func clusterCompCnt(cluster *appsv1.Cluster) int { func clusterCompCntWithFunc(cluster *appsv1.Cluster, match func(spec 
appsv1.ClusterComponentSpec) bool) int { cnt := generics.CountFunc(cluster.Spec.ComponentSpecs, match) - for _, sharding := range cluster.Spec.ShardingSpecs { + for _, sharding := range cluster.Spec.Shardings { if match(sharding.Template) { cnt += int(sharding.Shards) } diff --git a/controllers/apps/transformer_cluster_load_resources_test.go b/controllers/apps/transformer_cluster_validation_test.go similarity index 96% rename from controllers/apps/transformer_cluster_load_resources_test.go rename to controllers/apps/transformer_cluster_validation_test.go index bf9431a2425..ea7951c6232 100644 --- a/controllers/apps/transformer_cluster_load_resources_test.go +++ b/controllers/apps/transformer_cluster_validation_test.go @@ -26,7 +26,7 @@ import ( appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" ) -var _ = Describe("cluster load resources transformer test", func() { +var _ = Describe("cluster validation transformer test", func() { Context("cluster api validation", func() { It("with cluster topology", func() { By("explicitly specify topology") diff --git a/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml b/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml index c0d7783c79b..749cf133be5 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml @@ -39,13 +39,14 @@ spec: offering a variety of topological configurations to meet diverse deployment needs and scenarios. - It includes a list of Components, each linked to a ComponentDefinition, which enhances reusability and reduce redundancy. + It includes a list of Components and/or Shardings, each linked to a ComponentDefinition or a ShardingDefinition, + which enhances reusability and reduce redundancy. For example, widely used components such as etcd and Zookeeper can be defined once and reused across multiple ClusterDefinitions, simplifying the setup of new systems. - Additionally, ClusterDefinition also specifies the sequence of startup, upgrade, and shutdown for Components, - ensuring a controlled and predictable management of component lifecycles. + Additionally, ClusterDefinition also specifies the sequence of startup, upgrade, and shutdown between Components and/or Shardings, + ensuring a controlled and predictable management of cluster lifecycles. properties: apiVersion: description: |- @@ -111,7 +112,6 @@ spec: - name type: object maxItems: 128 - minItems: 1 type: array default: description: |- @@ -132,40 +132,80 @@ spec: properties: provision: description: |- - Specifies the order for creating and initializing components. - This is designed for components that depend on one another. Components without dependencies can be grouped together. + Specifies the order for creating and initializing entities. + This is designed for entities that depend on one another. Entities without dependencies can be grouped together. - Components that can be provisioned independently or have no dependencies can be listed together in the same stage, + Entities that can be provisioned independently or have no dependencies can be listed together in the same stage, separated by commas. items: type: string type: array terminate: description: |- - Outlines the order for stopping and deleting components. - This sequence is designed for components that require a graceful shutdown or have interdependencies. + Outlines the order for stopping and deleting entities. + This sequence is designed for entities that require a graceful shutdown or have interdependencies. 
- Components that can be terminated independently or have no dependencies can be listed together in the same stage, + Entities that can be terminated independently or have no dependencies can be listed together in the same stage, separated by commas. items: type: string type: array update: description: |- - Update determines the order for updating components' specifications, such as image upgrades or resource scaling. - This sequence is designed for components that have dependencies or require specific update procedures. + Update determines the order for updating entities' specifications, such as image upgrades or resource scaling. + This sequence is designed for entities that have dependencies or require specific update procedures. - Components that can be updated independently or have no dependencies can be listed together in the same stage, + Entities that can be updated independently or have no dependencies can be listed together in the same stage, separated by commas. items: type: string type: array type: object + shardings: + description: Shardings specifies the shardings in the topology. + items: + description: ClusterTopologySharding defines a sharding within + a ClusterTopology. + properties: + name: + description: |- + Defines the unique identifier of the sharding within the cluster topology. + It follows IANA Service naming rules and is used as part of the Service's DNS name. + The name must start with a lowercase letter, can contain lowercase letters, numbers, + and hyphens, and must end with a lowercase letter or number. + + + Cannot be updated once set. + maxLength: 16 + pattern: ^[a-z]([a-z0-9\-]*[a-z0-9])?$ + type: string + shardingDef: + description: |- + Specifies the sharding definition that defines the characteristics and behavior of the sharding. + + + The system selects the ShardingDefinition CR with the latest version that matches the pattern. + This approach allows: + + + 1. Precise selection by providing the exact name of a ShardingDefinition CR. + 2. Flexible and automatic selection of the most up-to-date ShardingDefinition CR + by specifying a regular expression pattern. + + + Once set, this field cannot be updated. + maxLength: 64 + type: string + required: + - name + - shardingDef + type: object + maxItems: 128 + type: array required: - - components - name type: object maxItems: 128 diff --git a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml index f81831b088c..f6ff2897e8e 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml @@ -174,7 +174,7 @@ spec: This field allows for detailed configuration of each Component within the Cluster. - Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. + Note: `shardings` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. items: description: ClusterComponentSpec defines the specification of a Component within a Cluster. @@ -3752,7 +3752,7 @@ spec: description: |- Specifies the Component's name. It's part of the Service DNS name and must comply with the IANA service naming rule. - The name is optional when ClusterComponentSpec is used as a template (e.g., in `shardingSpec`), + The name is optional when ClusterComponentSpec is used as a template (e.g., in `clusterSharding`), but required otherwise. 
maxLength: 22 pattern: ^[a-z]([a-z0-9\-]*[a-z0-9])?$ @@ -8324,7 +8324,7 @@ spec: services: description: |- Defines a list of additional Services that are exposed by a Cluster. - This field allows Services of selected Components, either from `componentSpecs` or `shardingSpecs` to be exposed, + This field allows Services of selected Components, either from `componentSpecs` or `shardings` to be exposed, alongside Services defined with ComponentService. @@ -8736,21 +8736,21 @@ spec: type: object type: array x-kubernetes-preserve-unknown-fields: true - shardingSpecs: + shardings: description: |- - Specifies a list of ShardingSpec objects that manage the sharding topology for Cluster Components. - Each ShardingSpec organizes components into shards, with each shard corresponding to a Component. + Specifies a list of ClusterSharding objects that manage the sharding topology for Cluster Components. + Each ClusterSharding organizes components into shards, with each shard corresponding to a Component. Components within a shard are all based on a common ClusterComponentSpec template, ensuring uniform configurations. This field supports dynamic resharding by facilitating the addition or removal of shards - through the `shards` field in ShardingSpec. + through the `shards` field in ClusterSharding. - Note: `shardingSpecs` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. + Note: `shardings` and `componentSpecs` cannot both be empty; at least one must be defined to configure a Cluster. items: description: |- - ShardingSpec defines how KubeBlocks manage dynamic provisioned shards. + ClusterSharding defines how KubeBlocks manage dynamic provisioned shards. A typical design pattern for distributed databases is to distribute data across multiple shards, with each shard consisting of multiple replicas. Therefore, KubeBlocks supports representing a shard with a Component and dynamically instantiating Components @@ -8760,33 +8760,45 @@ spec: name: description: |- Represents the common parent part of all shard names. + + This identifier is included as part of the Service DNS name and must comply with IANA service naming rules. - It is used to generate the names of underlying Components following the pattern `$(shardingSpec.name)-$(ShardID)`. + It is used to generate the names of underlying Components following the pattern `$(clusterSharding.name)-$(ShardID)`. ShardID is a random string that is appended to the Name to generate unique identifiers for each shard. For example, if the sharding specification name is "my-shard" and the ShardID is "abc", the resulting Component name would be "my-shard-abc". - Note that the name defined in Component template(`shardingSpec.template.name`) will be disregarded - when generating the Component names of the shards. The `shardingSpec.name` field takes precedence. + Note that the name defined in Component template(`clusterSharding.template.name`) will be disregarded + when generating the Component names of the shards. The `clusterSharding.name` field takes precedence. maxLength: 15 pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ type: string x-kubernetes-validations: - message: name is immutable rule: self == oldSelf + shardingDef: + description: |- + Specifies the ShardingDefinition custom resource (CR) that defines the sharding's characteristics and behavior. + + + The full name or regular expression is supported to match the ShardingDefinition. 
+ maxLength: 64 + type: string shards: description: |- Specifies the desired number of shards. + + Users can declare the desired number of shards through this field. KubeBlocks dynamically creates and deletes Components based on the difference between the desired and actual number of shards. KubeBlocks provides lifecycle management for sharding, including: - - Executing the postProvision Action defined in the ComponentDefinition when the number of shards increases. + - Executing the shardProvision Action defined in the ShardingDefinition when the number of shards increases. This allows for custom actions to be performed after a new shard is provisioned. - - Executing the preTerminate Action defined in the ComponentDefinition when the number of shards decreases. + - Executing the shardTerminate Action defined in the ShardingDefinition when the number of shards decreases. This enables custom cleanup or data migration tasks to be executed before a shard is terminated. Resources and data associated with the corresponding Component will also be deleted. format: int32 @@ -8796,9 +8808,11 @@ spec: template: description: |- The template for generating Components for shards, where each shard consists of one Component. + + This field is of type ClusterComponentSpec, which encapsulates all the required details and definitions for creating and managing the Components. - KubeBlocks uses this template to generate a set of identical Components or shards. + KubeBlocks uses this template to generate a set of identical Components of shards. All the generated Components will have the same specifications and definitions as specified in the `template` field. @@ -12429,7 +12443,7 @@ spec: description: |- Specifies the Component's name. It's part of the Service DNS name and must comply with the IANA service naming rule. - The name is optional when ClusterComponentSpec is used as a template (e.g., in `shardingSpec`), + The name is optional when ClusterComponentSpec is used as a template (e.g., in `clusterSharding`), but required otherwise. maxLength: 22 pattern: ^[a-z]([a-z0-9\-]*[a-z0-9])?$ @@ -15918,10 +15932,6 @@ spec: status: description: ClusterStatus defines the observed state of the Cluster. properties: - clusterDefGeneration: - description: Represents the generation number of the referenced ClusterDefinition. - format: int64 - type: integer components: additionalProperties: description: ClusterComponentStatus records Component status. @@ -16048,6 +16058,33 @@ spec: - Failed - Abnormal type: string + shardings: + additionalProperties: + description: ClusterComponentStatus records Component status. + properties: + message: + additionalProperties: + type: string + description: |- + Records detailed information about the Component in its current phase. + The keys are either podName, deployName, or statefulSetName, formatted as 'ObjectKind/Name'. + type: object + phase: + description: Specifies the current state of the Component. + enum: + - Creating + - Running + - Updating + - Stopping + - Stopped + - Deleting + - Failed + - Abnormal + type: string + type: object + description: Records the current status information of all shardings + within the Cluster. 
+ type: object type: object type: object served: true diff --git a/docs/developer_docs/api-reference/cluster.md b/docs/developer_docs/api-reference/cluster.md index 20f8c1864e1..bafdf2a1901 100644 --- a/docs/developer_docs/api-reference/cluster.md +++ b/docs/developer_docs/api-reference/cluster.md @@ -200,26 +200,26 @@ The WipeOut policy is particularly risky in production environments (Optional)

Specifies a list of ClusterComponentSpec objects used to define the individual Components that make up a Cluster. This field allows for detailed configuration of each Component within the Cluster.

-

Note: shardingSpecs and componentSpecs cannot both be empty; at least one must be defined to configure a Cluster.

+

Note: shardings and componentSpecs cannot both be empty; at least one must be defined to configure a Cluster.

-shardingSpecs
+shardings
- -[]ShardingSpec + +[]ClusterSharding (Optional) -

Specifies a list of ShardingSpec objects that manage the sharding topology for Cluster Components. -Each ShardingSpec organizes components into shards, with each shard corresponding to a Component. +

Specifies a list of ClusterSharding objects that manage the sharding topology for Cluster Components. +Each ClusterSharding organizes components into shards, with each shard corresponding to a Component. Components within a shard are all based on a common ClusterComponentSpec template, ensuring uniform configurations.

This field supports dynamic resharding by facilitating the addition or removal of shards -through the shards field in ShardingSpec.

-

Note: shardingSpecs and componentSpecs cannot both be empty; at least one must be defined to configure a Cluster.

+through the shards field in ClusterSharding.

+

Note: shardings and componentSpecs cannot both be empty; at least one must be defined to configure a Cluster.

@@ -260,7 +260,7 @@ SchedulingPolicy (Optional)

Defines a list of additional Services that are exposed by a Cluster. -This field allows Services of selected Components, either from componentSpecs or shardingSpecs to be exposed, +This field allows Services of selected Components, either from componentSpecs or shardings to be exposed, alongside Services defined with ComponentService.

Services defined here can be referenced by other clusters using the ServiceRefClusterSelector.

@@ -301,11 +301,12 @@ ClusterStatus

ClusterDefinition defines the topology for databases or storage systems, offering a variety of topological configurations to meet diverse deployment needs and scenarios.

-

It includes a list of Components, each linked to a ComponentDefinition, which enhances reusability and reduce redundancy. +

It includes a list of Components and/or Shardings, each linked to a ComponentDefinition or a ShardingDefinition, +which enhances reusability and reduces redundancy. For example, widely used components such as etcd and Zookeeper can be defined once and reused across multiple ClusterDefinitions, simplifying the setup of new systems.

-

Additionally, ClusterDefinition also specifies the sequence of startup, upgrade, and shutdown for Components, -ensuring a controlled and predictable management of component lifecycles.

+

Additionally, ClusterDefinition specifies the sequence of startup, upgrade, and shutdown between Components and/or Shardings, +ensuring controlled and predictable management of cluster lifecycles.

@@ -2406,7 +2407,7 @@ If set to true, a separate Service will be created for each Pod in the Cluster.<

ClusterComponentSpec

-(Appears on:ClusterSpec, ShardingSpec) +(Appears on:ClusterSharding, ClusterSpec)

ClusterComponentSpec defines the specification of a Component within a Cluster.

@@ -2430,7 +2431,7 @@ string (Optional)

Specifies the Component’s name. It’s part of the Service DNS name and must comply with the IANA service naming rule. -The name is optional when ClusterComponentSpec is used as a template (e.g., in shardingSpec), +The name is optional when ClusterComponentSpec is used as a template (e.g., in clusterSharding), but required otherwise.

@@ -3258,6 +3259,101 @@ string
+

ClusterSharding +

+

+(Appears on:ClusterSpec) +

+
+

ClusterSharding defines how KubeBlocks manage dynamic provisioned shards. +A typical design pattern for distributed databases is to distribute data across multiple shards, +with each shard consisting of multiple replicas. +Therefore, KubeBlocks supports representing a shard with a Component and dynamically instantiating Components +using a template when shards are added. +When shards are removed, the corresponding Components are also deleted.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+name
+ +string + +
+

Represents the common parent part of all shard names.

+

This identifier is included as part of the Service DNS name and must comply with IANA service naming rules. +It is used to generate the names of underlying Components following the pattern $(clusterSharding.name)-$(ShardID). +ShardID is a random string that is appended to the Name to generate unique identifiers for each shard. +For example, if the sharding specification name is “my-shard” and the ShardID is “abc”, the resulting Component name +would be “my-shard-abc”.

+

Note that the name defined in Component template(clusterSharding.template.name) will be disregarded +when generating the Component names of the shards. The clusterSharding.name field takes precedence.

+
+shardingDef
+ +string + +
+(Optional) +

Specifies the ShardingDefinition custom resource (CR) that defines the sharding’s characteristics and behavior.

+

The full name or regular expression is supported to match the ShardingDefinition.

+
+template
+ + +ClusterComponentSpec + + +
+

The template for generating Components for shards, where each shard consists of one Component.

+

This field is of type ClusterComponentSpec, which encapsulates all the required details and +definitions for creating and managing the Components. +KubeBlocks uses this template to generate a set of identical Components of shards. +All the generated Components will have the same specifications and definitions as specified in the template field.

+

This allows for the creation of multiple Components with consistent configurations, +enabling sharding and distribution of workloads across Components.

+
+shards
+ +int32 + +
+

Specifies the desired number of shards.

+

Users can declare the desired number of shards through this field. +KubeBlocks dynamically creates and deletes Components based on the difference +between the desired and actual number of shards. +KubeBlocks provides lifecycle management for sharding, including:

+
+  • Executing the shardProvision Action defined in the ShardingDefinition when the number of shards increases. This allows for custom actions to be performed after a new shard is provisioned.
+  • Executing the shardTerminate Action defined in the ShardingDefinition when the number of shards decreases. This enables custom cleanup or data migration tasks to be executed before a shard is terminated. Resources and data associated with the corresponding Component will also be deleted.
+
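To make the renamed API concrete, here is an illustrative Go snippet (not from this patch; the definition names are placeholders) declaring a Cluster through the new shardings field. The generated shard Components would be named shard-<ShardID>, and the shardProvision/shardTerminate Actions of the matched ShardingDefinition run as shards are added or removed.

package example

import appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"

// exampleShardedCluster sketches a Cluster that provisions three shards from one template.
func exampleShardedCluster() appsv1.Cluster {
	return appsv1.Cluster{
		Spec: appsv1.ClusterSpec{
			Shardings: []appsv1.ClusterSharding{{
				Name:        "shard",                // common parent part of all shard names
				ShardingDef: "example-sharding-1.0", // placeholder: full name or regexp of a ShardingDefinition
				Shards:      3,                      // desired number of shards
				Template: appsv1.ClusterComponentSpec{
					// Name is omitted on purpose: for sharding templates it is
					// disregarded and shardings[].name takes precedence.
					ComponentDef: "example-component-1.0", // placeholder ComponentDefinition
					Replicas:     2,
				},
			}},
		},
	}
}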

ClusterSpec

@@ -3355,26 +3451,26 @@ The WipeOut policy is particularly risky in production environments (Optional)

Specifies a list of ClusterComponentSpec objects used to define the individual Components that make up a Cluster. This field allows for detailed configuration of each Component within the Cluster.

-

Note: shardingSpecs and componentSpecs cannot both be empty; at least one must be defined to configure a Cluster.

+

Note: shardings and componentSpecs cannot both be empty; at least one must be defined to configure a Cluster.

-shardingSpecs
+shardings
- -[]ShardingSpec + +[]ClusterSharding (Optional) -

Specifies a list of ShardingSpec objects that manage the sharding topology for Cluster Components. -Each ShardingSpec organizes components into shards, with each shard corresponding to a Component. +

Specifies a list of ClusterSharding objects that manage the sharding topology for Cluster Components. +Each ClusterSharding organizes components into shards, with each shard corresponding to a Component. Components within a shard are all based on a common ClusterComponentSpec template, ensuring uniform configurations.

This field supports dynamic resharding by facilitating the addition or removal of shards -through the shards field in ShardingSpec.

-

Note: shardingSpecs and componentSpecs cannot both be empty; at least one must be defined to configure a Cluster.

+through the shards field in ClusterSharding.

+

Note: shardings and componentSpecs cannot both be empty; at least one must be defined to configure a Cluster.

@@ -3415,7 +3511,7 @@ SchedulingPolicy (Optional)

Defines a list of additional Services that are exposed by a Cluster. -This field allows Services of selected Components, either from componentSpecs or shardingSpecs to be exposed, +This field allows Services of selected Components, either from componentSpecs or shardings to be exposed, alongside Services defined with ComponentService.

Services defined here can be referenced by other clusters using the ServiceRefClusterSelector.

@@ -3507,14 +3603,16 @@ map[string]github.com/apecloud/kubeblocks/apis/apps/v1.ClusterComponentStatus -clusterDefGeneration
+shardings
-int64 + +map[string]github.com/apecloud/kubeblocks/apis/apps/v1.ClusterComponentStatus + (Optional) -

Represents the generation number of the referenced ClusterDefinition.

+

Records the current status information of all shardings within the Cluster.
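For orientation (not part of the patch), a small hypothetical helper showing how a consumer might read the new status.shardings map alongside status.components; the function name is invented, the map types follow the schema added here, and the phase checks mirror how the not-ready condition is assembled in the status transformer.

package example

import appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"

// failedEntities collects the names of components and shardings that are
// currently Failed or Abnormal.
func failedEntities(cluster *appsv1.Cluster) map[string][]string {
	result := map[string][]string{}
	collect := func(kind string, statuses map[string]appsv1.ClusterComponentStatus) {
		for name, status := range statuses {
			if status.Phase == appsv1.FailedClusterCompPhase || status.Phase == appsv1.AbnormalClusterCompPhase {
				result[kind] = append(result[kind], name)
			}
		}
	}
	collect("component", cluster.Status.Components)
	collect("sharding", cluster.Status.Shardings)
	return result
}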

@@ -3575,11 +3673,26 @@ Cannot be updated.

+(Optional)

Components specifies the components in the topology.

+shardings
+ + +[]ClusterTopologySharding + + + + +(Optional) +

Shardings specifies the shardings in the topology.

+ + + + orders
@@ -3691,9 +3804,9 @@ These groups are processed sequentially, allowing precise control based on compo (Optional) -

Specifies the order for creating and initializing components. -This is designed for components that depend on one another. Components without dependencies can be grouped together.

-

Components that can be provisioned independently or have no dependencies can be listed together in the same stage, +

Specifies the order for creating and initializing entities. +This is designed for entities that depend on one another. Entities without dependencies can be grouped together.

+

Entities that can be provisioned independently or have no dependencies can be listed together in the same stage, separated by commas.

@@ -3706,9 +3819,9 @@ separated by commas.

(Optional) -

Outlines the order for stopping and deleting components. -This sequence is designed for components that require a graceful shutdown or have interdependencies.

-

Components that can be terminated independently or have no dependencies can be listed together in the same stage, +

Outlines the order for stopping and deleting entities. +This sequence is designed for entities that require a graceful shutdown or have interdependencies.

+

Entities that can be terminated independently or have no dependencies can be listed together in the same stage, separated by commas.

@@ -3721,14 +3834,66 @@ separated by commas.

(Optional) -

Update determines the order for updating components’ specifications, such as image upgrades or resource scaling. -This sequence is designed for components that have dependencies or require specific update procedures.

-

Components that can be updated independently or have no dependencies can be listed together in the same stage, +

Update determines the order for updating entities’ specifications, such as image upgrades or resource scaling. +This sequence is designed for entities that have dependencies or require specific update procedures.

+

Entities that can be updated independently or have no dependencies can be listed together in the same stage, separated by commas.
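For illustration only (not from this patch), and assuming the Go field names mirror this schema (a ClusterTopologyOrders struct with Provision/Terminate/Update string slices): entities listed in one comma-separated entry form a single stage that is handled together, while separate entries are handled sequentially.

package example

import appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"

// Hypothetical ordering: "etcd" is provisioned first, then the "proxy" component
// and the "shard" sharding together in a second stage; termination reverses it.
var exampleOrders = &appsv1.ClusterTopologyOrders{
	Provision: []string{"etcd", "proxy,shard"},
	Terminate: []string{"proxy,shard", "etcd"},
}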

+

ClusterTopologySharding +

+

+(Appears on:ClusterTopology) +

+
+

ClusterTopologySharding defines a sharding within a ClusterTopology.

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+name
+ +string + +
+

Defines the unique identifier of the sharding within the cluster topology. +It follows IANA Service naming rules and is used as part of the Service’s DNS name. +The name must start with a lowercase letter, can contain lowercase letters, numbers, +and hyphens, and must end with a lowercase letter or number.

+

Cannot be updated once set.

+
+shardingDef
+ +string + +
+

Specifies the sharding definition that defines the characteristics and behavior of the sharding.

+

The system selects the ShardingDefinition CR with the latest version that matches the pattern. +This approach allows:

+
+  1. Precise selection by providing the exact name of a ShardingDefinition CR.
+  2. Flexible and automatic selection of the most up-to-date ShardingDefinition CR by specifying a regular expression pattern.
+

Once set, this field cannot be updated.
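An illustrative topology entry (not part of this patch), assuming the Go types mirror the documented schema (a Shardings field of []ClusterTopologySharding on ClusterTopology); the definition names are placeholders. The first entry pins an exact ShardingDefinition CR, the second uses a regular expression so the latest matching version is selected. Since the CRD hunk above appears to drop components from the topology's required fields, a shardings-only topology like this one looks permissible.

package example

import appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"

// Two hypothetical ways to reference a ShardingDefinition from a topology:
// an exact CR name, or a regular expression that tracks the newest match.
var exampleTopology = appsv1.ClusterTopology{
	Name: "sharded",
	Shardings: []appsv1.ClusterTopologySharding{
		{Name: "meta", ShardingDef: "example-meta-sharding-1.0.0"},
		{Name: "data", ShardingDef: "^example-data-sharding-"},
	},
}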

+

ClusterVarSelector

@@ -9773,88 +9938,6 @@ Action -

ShardingSpec -

-

-(Appears on:ClusterSpec) -

-
-

ShardingSpec defines how KubeBlocks manage dynamic provisioned shards. -A typical design pattern for distributed databases is to distribute data across multiple shards, -with each shard consisting of multiple replicas. -Therefore, KubeBlocks supports representing a shard with a Component and dynamically instantiating Components -using a template when shards are added. -When shards are removed, the corresponding Components are also deleted.

-
- - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-name
- -string - -
-

Represents the common parent part of all shard names. -This identifier is included as part of the Service DNS name and must comply with IANA service naming rules. -It is used to generate the names of underlying Components following the pattern $(shardingSpec.name)-$(ShardID). -ShardID is a random string that is appended to the Name to generate unique identifiers for each shard. -For example, if the sharding specification name is “my-shard” and the ShardID is “abc”, the resulting Component name -would be “my-shard-abc”.

-

Note that the name defined in Component template(shardingSpec.template.name) will be disregarded -when generating the Component names of the shards. The shardingSpec.name field takes precedence.

-
-template
- - -ClusterComponentSpec - - -
-

The template for generating Components for shards, where each shard consists of one Component. -This field is of type ClusterComponentSpec, which encapsulates all the required details and -definitions for creating and managing the Components. -KubeBlocks uses this template to generate a set of identical Components or shards. -All the generated Components will have the same specifications and definitions as specified in the template field.

-

This allows for the creation of multiple Components with consistent configurations, -enabling sharding and distribution of workloads across Components.

-
-shards
- -int32 - -
-

Specifies the desired number of shards. -Users can declare the desired number of shards through this field. -KubeBlocks dynamically creates and deletes Components based on the difference -between the desired and actual number of shards. -KubeBlocks provides lifecycle management for sharding, including:

-
-  • Executing the postProvision Action defined in the ComponentDefinition when the number of shards increases. This allows for custom actions to be performed after a new shard is provisioned.
-  • Executing the preTerminate Action defined in the ComponentDefinition when the number of shards decreases. This enables custom cleanup or data migration tasks to be executed before a shard is terminated. Resources and data associated with the corresponding Component will also be deleted.
-

ShardingSystemAccount

diff --git a/pkg/constant/pattern.go b/pkg/constant/pattern.go index e47d8266943..3e2255b2dde 100644 --- a/pkg/constant/pattern.go +++ b/pkg/constant/pattern.go @@ -35,11 +35,6 @@ func GenerateAccountSecretName(clusterName, compName, name string) string { return fmt.Sprintf("%s-%s-account-%s", clusterName, compName, replacedName) } -// GenerateShardingSharedAccountSecretName generates the sharding shared account secret name -func GenerateShardingSharedAccountSecretName(clusterName, shardingName, accountName string) string { - return fmt.Sprintf("%s-%s-%s", clusterName, shardingName, accountName) -} - // GenerateClusterServiceName generates the service name for cluster. func GenerateClusterServiceName(clusterName, svcName string) string { if len(svcName) > 0 { diff --git a/pkg/controller/component/component.go b/pkg/controller/component/component.go index 6081ae2af02..ce4ca03fb52 100644 --- a/pkg/controller/component/component.go +++ b/pkg/controller/component/component.go @@ -25,7 +25,6 @@ import ( "strconv" "strings" - "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" @@ -162,32 +161,6 @@ func GetCompNCompDefByName(ctx context.Context, cli client.Reader, namespace, fu return comp, compDef, nil } -// ListClusterComponents lists the components of the cluster. -func ListClusterComponents(ctx context.Context, cli client.Reader, cluster *appsv1.Cluster) ([]appsv1.Component, error) { - compList := &appsv1.ComponentList{} - if err := cli.List(ctx, compList, client.InNamespace(cluster.Namespace), client.MatchingLabels{constant.AppInstanceLabelKey: cluster.Name}); err != nil { - return nil, err - } - return compList.Items, nil -} - -// GetClusterComponentShortNameSet gets the component short name set of the cluster. -func GetClusterComponentShortNameSet(ctx context.Context, cli client.Reader, cluster *appsv1.Cluster) (sets.Set[string], error) { - compList, err := ListClusterComponents(ctx, cli, cluster) - if err != nil { - return nil, err - } - compSet := sets.Set[string]{} - for _, comp := range compList { - compShortName, err := ShortName(cluster.Name, comp.Name) - if err != nil { - return nil, err - } - compSet.Insert(compShortName) - } - return compSet, nil -} - func GetExporter(componentDef appsv1.ComponentDefinitionSpec) *common.Exporter { if componentDef.Exporter != nil { return &common.Exporter{Exporter: *componentDef.Exporter} diff --git a/pkg/controller/component/synthesize_component.go b/pkg/controller/component/synthesize_component.go index a76248834d5..34e7a04c9c7 100644 --- a/pkg/controller/component/synthesize_component.go +++ b/pkg/controller/component/synthesize_component.go @@ -165,9 +165,9 @@ func buildComp2CompDefs(ctx context.Context, cli client.Reader, cluster *appsv1. 
} } - // build from shardingSpecs - for _, shardingSpec := range cluster.Spec.ShardingSpecs { - shardingComps, err := intctrlutil.ListShardingComponents(ctx, cli, cluster, shardingSpec.Name) + // build from shardings + for _, spec := range cluster.Spec.Shardings { + shardingComps, err := intctrlutil.ListShardingComponents(ctx, cli, cluster, spec.Name) if err != nil { return nil, err } diff --git a/pkg/controller/component/utils.go b/pkg/controller/component/utils.go index 3633a7c5950..b89c29a5a68 100644 --- a/pkg/controller/component/utils.go +++ b/pkg/controller/component/utils.go @@ -38,13 +38,13 @@ func inDataContext() *multicluster.ClientOption { return multicluster.InDataContext() } -func ValidateCompDefRegexp(compDefPattern string) error { - _, err := regexp.Compile(compDefPattern) +func ValidateDefNameRegexp(defNamePattern string) error { + _, err := regexp.Compile(defNamePattern) return err } -func PrefixOrRegexMatched(compDef, compDefPattern string) bool { - if strings.HasPrefix(compDef, compDefPattern) { +func PrefixOrRegexMatched(defName, defNamePattern string) bool { + if strings.HasPrefix(defName, defNamePattern) { return true } @@ -54,17 +54,17 @@ func PrefixOrRegexMatched(compDef, compDefPattern string) bool { } isRegex := false - regex, err := regexp.Compile(compDefPattern) + regex, err := regexp.Compile(defNamePattern) if err == nil { // distinguishing between regular expressions and ordinary strings. - if isRegexpPattern(compDefPattern) { + if isRegexpPattern(defNamePattern) { isRegex = true } } if !isRegex { return false } - return regex.MatchString(compDef) + return regex.MatchString(defName) } func IsHostNetworkEnabled(synthesizedComp *SynthesizedComponent) bool { diff --git a/pkg/controller/component/utils_test.go b/pkg/controller/component/utils_test.go index e4b6c60f095..2a464d150ff 100644 --- a/pkg/controller/component/utils_test.go +++ b/pkg/controller/component/utils_test.go @@ -38,7 +38,7 @@ var _ = Describe("component utils", func() { } for _, expr := range validExpressions { - err := ValidateCompDefRegexp(expr) + err := ValidateDefNameRegexp(expr) Expect(err).Should(BeNil()) } }) @@ -53,7 +53,7 @@ var _ = Describe("component utils", func() { } for _, expr := range invalidExpressions { - err := ValidateCompDefRegexp(expr) + err := ValidateDefNameRegexp(expr) Expect(err).ShouldNot(BeNil()) } }) diff --git a/pkg/controllerutil/cluster_utils.go b/pkg/controllerutil/cluster_utils.go index ae59ad20814..ff779d1daf2 100644 --- a/pkg/controllerutil/cluster_utils.go +++ b/pkg/controllerutil/cluster_utils.go @@ -33,8 +33,8 @@ func GetComponentSpecByName(ctx context.Context, cli client.Reader, if compSpec != nil { return compSpec, nil } - for _, shardingSpec := range cluster.Spec.ShardingSpecs { - shardingCompList, err := listAllShardingCompSpecs(ctx, cli, cluster, &shardingSpec) + for _, sharding := range cluster.Spec.Shardings { + shardingCompList, err := listAllShardingCompSpecs(ctx, cli, cluster, &sharding) if err != nil { return nil, err } diff --git a/pkg/controllerutil/cluster_utils_test.go b/pkg/controllerutil/cluster_utils_test.go index 4b3a17bab0b..92af36d55de 100644 --- a/pkg/controllerutil/cluster_utils_test.go +++ b/pkg/controllerutil/cluster_utils_test.go @@ -73,7 +73,7 @@ var _ = Describe("cluster utils test", func() { cluster = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, ""). SetUID(clusterName). AddComponent(mysqlCompName, compDefName). - AddShardingSpec(mysqlShardingName, compDefName). 
+ AddSharding(mysqlShardingName, "", compDefName). SetShards(0). Create(&testCtx).GetObject() }) diff --git a/pkg/controllerutil/sharding_utils.go b/pkg/controllerutil/sharding_utils.go index b33f2f1af21..08c6712d022 100644 --- a/pkg/controllerutil/sharding_utils.go +++ b/pkg/controllerutil/sharding_utils.go @@ -24,7 +24,6 @@ import ( "fmt" "strings" - "github.com/pkg/errors" "sigs.k8s.io/controller-runtime/pkg/client" appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" @@ -37,10 +36,10 @@ const ( ) func GenShardingCompSpecList(ctx context.Context, cli client.Reader, - cluster *appsv1.Cluster, shardingSpec *appsv1.ShardingSpec) ([]*appsv1.ClusterComponentSpec, error) { + cluster *appsv1.Cluster, sharding *appsv1.ClusterSharding) ([]*appsv1.ClusterComponentSpec, error) { compSpecList := make([]*appsv1.ClusterComponentSpec, 0) // list undeleted sharding component specs, the deleting ones are not included - undeletedShardingCompSpecs, err := listUndeletedShardingCompSpecs(ctx, cli, cluster, shardingSpec) + undeletedShardingCompSpecs, err := listUndeletedShardingCompSpecs(ctx, cli, cluster, sharding) if err != nil { return nil, err } @@ -49,14 +48,14 @@ func GenShardingCompSpecList(ctx context.Context, cli client.Reader, for _, existShardingCompSpec := range undeletedShardingCompSpecs { compNameMap[existShardingCompSpec.Name] = existShardingCompSpec.Name } - shardTpl := shardingSpec.Template + shardTpl := sharding.Template switch { - case len(undeletedShardingCompSpecs) == int(shardingSpec.Shards): + case len(undeletedShardingCompSpecs) == int(sharding.Shards): return undeletedShardingCompSpecs, err - case len(undeletedShardingCompSpecs) < int(shardingSpec.Shards): - for i := len(undeletedShardingCompSpecs); i < int(shardingSpec.Shards); i++ { + case len(undeletedShardingCompSpecs) < int(sharding.Shards): + for i := len(undeletedShardingCompSpecs); i < int(sharding.Shards); i++ { shardClusterCompSpec := shardTpl.DeepCopy() - genCompName, err := genRandomShardName(shardingSpec.Name, compNameMap) + genCompName, err := genRandomShardName(sharding.Name, compNameMap) if err != nil { return nil, err } @@ -64,17 +63,17 @@ func GenShardingCompSpecList(ctx context.Context, cli client.Reader, compSpecList = append(compSpecList, shardClusterCompSpec) compNameMap[genCompName] = genCompName } - case len(undeletedShardingCompSpecs) > int(shardingSpec.Shards): + case len(undeletedShardingCompSpecs) > int(sharding.Shards): // TODO: order by? - compSpecList = compSpecList[:int(shardingSpec.Shards)] + compSpecList = compSpecList[:int(sharding.Shards)] } return compSpecList, nil } // listNCheckShardingComponents lists sharding components and checks if the sharding components are correct. It returns undeleted and deleting sharding components. 
func listNCheckShardingComponents(ctx context.Context, cli client.Reader, - cluster *appsv1.Cluster, shardingSpec *appsv1.ShardingSpec) ([]appsv1.Component, []appsv1.Component, error) { - shardingComps, err := ListShardingComponents(ctx, cli, cluster, shardingSpec.Name) + cluster *appsv1.Cluster, sharding *appsv1.ClusterSharding) ([]appsv1.Component, []appsv1.Component, error) { + shardingComps, err := ListShardingComponents(ctx, cli, cluster, sharding.Name) if err != nil { return nil, nil, err } @@ -89,9 +88,10 @@ func listNCheckShardingComponents(ctx context.Context, cli client.Reader, } } - if cluster.Generation == cluster.Status.ObservedGeneration && len(undeletedShardingComps) != int(shardingSpec.Shards) { - return nil, nil, errors.New("sharding components are not correct when cluster is not updating") - } + // TODO: ??? + // if cluster.Generation == cluster.Status.ObservedGeneration && len(undeletedShardingComps) != int(sharding.Shards) { + // return nil, nil, errors.New("sharding components are not correct when cluster is not updating") + // } return undeletedShardingComps, deletingShardingComps, nil } @@ -110,28 +110,31 @@ func ListShardingComponents(ctx context.Context, cli client.Reader, } // listUndeletedShardingCompSpecs lists undeleted sharding component specs. -func listUndeletedShardingCompSpecs(ctx context.Context, cli client.Reader, cluster *appsv1.Cluster, shardingSpec *appsv1.ShardingSpec) ([]*appsv1.ClusterComponentSpec, error) { - return listShardingCompSpecs(ctx, cli, cluster, shardingSpec, false) +func listUndeletedShardingCompSpecs(ctx context.Context, cli client.Reader, + cluster *appsv1.Cluster, sharding *appsv1.ClusterSharding) ([]*appsv1.ClusterComponentSpec, error) { + return listShardingCompSpecs(ctx, cli, cluster, sharding, false) } // listAllShardingCompSpecs lists all sharding component specs, including undeleted and deleting ones. -func listAllShardingCompSpecs(ctx context.Context, cli client.Reader, cluster *appsv1.Cluster, shardingSpec *appsv1.ShardingSpec) ([]*appsv1.ClusterComponentSpec, error) { - return listShardingCompSpecs(ctx, cli, cluster, shardingSpec, true) +func listAllShardingCompSpecs(ctx context.Context, cli client.Reader, + cluster *appsv1.Cluster, sharding *appsv1.ClusterSharding) ([]*appsv1.ClusterComponentSpec, error) { + return listShardingCompSpecs(ctx, cli, cluster, sharding, true) } // listShardingCompSpecs lists sharding component specs, with an option to include those marked for deletion. 
-func listShardingCompSpecs(ctx context.Context, cli client.Reader, cluster *appsv1.Cluster, shardingSpec *appsv1.ShardingSpec, includeDeleting bool) ([]*appsv1.ClusterComponentSpec, error) { - if shardingSpec == nil { +func listShardingCompSpecs(ctx context.Context, cli client.Reader, + cluster *appsv1.Cluster, sharding *appsv1.ClusterSharding, includeDeleting bool) ([]*appsv1.ClusterComponentSpec, error) { + if sharding == nil { return nil, nil } - undeletedShardingComps, deletingShardingComps, err := listNCheckShardingComponents(ctx, cli, cluster, shardingSpec) + undeletedShardingComps, deletingShardingComps, err := listNCheckShardingComponents(ctx, cli, cluster, sharding) if err != nil { return nil, err } compSpecList := make([]*appsv1.ClusterComponentSpec, 0, len(undeletedShardingComps)+len(deletingShardingComps)) - shardTpl := shardingSpec.Template + shardTpl := sharding.Template processComps := func(comps []appsv1.Component) error { for _, comp := range comps { diff --git a/pkg/controllerutil/sharding_utils_test.go b/pkg/controllerutil/sharding_utils_test.go index 0c48ea95a90..e94a9ec9544 100644 --- a/pkg/controllerutil/sharding_utils_test.go +++ b/pkg/controllerutil/sharding_utils_test.go @@ -73,7 +73,7 @@ var _ = Describe("cluster shard component", func() { cluster = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, ""). SetUID(clusterName). AddComponent(mysqlCompName, compDefName). - AddShardingSpec(mysqlShardingName, compDefName). + AddSharding(mysqlShardingName, "", compDefName). SetShards(1). Create(&testCtx).GetObject() }) @@ -90,14 +90,14 @@ var _ = Describe("cluster shard component", func() { compKey := client.ObjectKeyFromObject(mockCompObj) Eventually(testapps.CheckObjExists(&testCtx, compKey, &appsv1.Component{}, true)).Should(Succeed()) - shardingSpec := &appsv1.ShardingSpec{ + sharding := &appsv1.ClusterSharding{ Template: appsv1.ClusterComponentSpec{ Replicas: 2, }, Name: mysqlShardingName, Shards: 2, } - shardingCompSpecList, err := GenShardingCompSpecList(testCtx.Ctx, k8sClient, cluster, shardingSpec) + shardingCompSpecList, err := GenShardingCompSpecList(testCtx.Ctx, k8sClient, cluster, sharding) Expect(err).ShouldNot(HaveOccurred()) Expect(shardingCompSpecList).ShouldNot(BeNil()) Expect(len(shardingCompSpecList)).Should(BeEquivalentTo(2)) diff --git a/pkg/operations/ops_comp_helper.go b/pkg/operations/ops_comp_helper.go index 4481827e24d..64dd1ec9532 100644 --- a/pkg/operations/ops_comp_helper.go +++ b/pkg/operations/ops_comp_helper.go @@ -72,9 +72,9 @@ func (c componentOpsHelper) updateClusterComponentsAndShardings(cluster *appsv1. } } // 1. update the sharding components - for index := range cluster.Spec.ShardingSpecs { - shardingSpec := &cluster.Spec.ShardingSpecs[index] - if err := updateComponentSpecs(&shardingSpec.Template, shardingSpec.Name); err != nil { + for index := range cluster.Spec.Shardings { + sharding := &cluster.Spec.Shardings[index] + if err := updateComponentSpecs(&sharding.Template, sharding.Name); err != nil { return err } } @@ -100,7 +100,7 @@ func (c componentOpsHelper) saveLastConfigurations(opsRes *OpsResource, setLastCompConfiguration(v, lastConfiguration, v.Name) } // 2. 
record the volumeTemplate of sharding components - for _, v := range opsRes.Cluster.Spec.ShardingSpecs { + for _, v := range opsRes.Cluster.Spec.Shardings { setLastCompConfiguration(v.Template, lastConfiguration, v.Name) } } @@ -127,10 +127,10 @@ func (c componentOpsHelper) cancelComponentOps(ctx context.Context, compSpec := &opsRes.Cluster.Spec.ComponentSpecs[index] rollBackCompSpec(compSpec, lastCompInfos, compSpec.Name) } - // 2. rollback the shardingSpecs - for index := range opsRes.Cluster.Spec.ShardingSpecs { - shardingSpec := &opsRes.Cluster.Spec.ShardingSpecs[index] - rollBackCompSpec(&shardingSpec.Template, lastCompInfos, shardingSpec.Name) + // 2. rollback the shardings + for index := range opsRes.Cluster.Spec.Shardings { + sharding := &opsRes.Cluster.Spec.Shardings[index] + rollBackCompSpec(&sharding.Template, lastCompInfos, sharding.Name) } return cli.Update(ctx, opsRes.Cluster) } @@ -216,19 +216,19 @@ func (c componentOpsHelper) reconcileActionWithComponentOps(reqCtx intctrlutil.R } // 2. handle the sharding status. - for i := range opsRes.Cluster.Spec.ShardingSpecs { - shardingSpec := opsRes.Cluster.Spec.ShardingSpecs[i] - compOps, ok := getCompOps(shardingSpec.Name) + for i := range opsRes.Cluster.Spec.Shardings { + sharding := opsRes.Cluster.Spec.Shardings[i] + compOps, ok := getCompOps(sharding.Name) if !ok { continue } // handle the progress of the components of the sharding. - shardingComps, err := intctrlutil.ListShardingComponents(reqCtx.Ctx, cli, opsRes.Cluster, shardingSpec.Name) + shardingComps, err := intctrlutil.ListShardingComponents(reqCtx.Ctx, cli, opsRes.Cluster, sharding.Name) if err != nil { return opsRequestPhase, 0, err } for j := range shardingComps { - if err = setProgressResource(&shardingSpec.Template, compOps, + if err = setProgressResource(&sharding.Template, compOps, shardingComps[j].Labels[constant.KBAppComponentLabelKey], true); err != nil { return opsRequestPhase, 0, err } diff --git a/pkg/operations/ops_util.go b/pkg/operations/ops_util.go index 1f893fb72f8..ee4133a3c8b 100644 --- a/pkg/operations/ops_util.go +++ b/pkg/operations/ops_util.go @@ -342,7 +342,7 @@ func getComponentSpecOrShardingTemplate(cluster *appsv1.Cluster, componentName s return &v } } - for _, v := range cluster.Spec.ShardingSpecs { + for _, v := range cluster.Spec.Shardings { if v.Name == componentName { return &v.Template } diff --git a/pkg/operations/restart.go b/pkg/operations/restart.go index 5f52ea3a40e..12df2908c58 100644 --- a/pkg/operations/restart.go +++ b/pkg/operations/restart.go @@ -212,9 +212,9 @@ func (r restartOpsHandler) getCompReplicas(cluster *appsv1.Cluster, compName str if compSpec != nil { return compSpec.Replicas } - shardingSpec := cluster.Spec.GetShardingByName(compName) - if shardingSpec != nil { - return shardingSpec.Template.Replicas + sharding := cluster.Spec.GetShardingByName(compName) + if sharding != nil { + return sharding.Template.Replicas } return 0 } diff --git a/pkg/operations/start.go b/pkg/operations/start.go index 7700a4dd8c4..f83ac4d0b10 100644 --- a/pkg/operations/start.go +++ b/pkg/operations/start.go @@ -63,8 +63,8 @@ func (start StartOpsHandler) Action(reqCtx intctrlutil.RequestCtx, cli client.Cl for i := range cluster.Spec.ComponentSpecs { startComp(&cluster.Spec.ComponentSpecs[i]) } - for i := range cluster.Spec.ShardingSpecs { - startComp(&cluster.Spec.ShardingSpecs[i].Template) + for i := range cluster.Spec.Shardings { + startComp(&cluster.Spec.Shardings[i].Template) } return cli.Update(reqCtx.Ctx, cluster) } diff --git 
a/pkg/operations/stop.go b/pkg/operations/stop.go index 44b945c4b73..35f58ef88e1 100644 --- a/pkg/operations/stop.go +++ b/pkg/operations/stop.go @@ -81,8 +81,8 @@ func (stop StopOpsHandler) Action(reqCtx intctrlutil.RequestCtx, cli client.Clie for i := range cluster.Spec.ComponentSpecs { stopComp(&cluster.Spec.ComponentSpecs[i]) } - for i := range cluster.Spec.ShardingSpecs { - stopComp(&cluster.Spec.ShardingSpecs[i].Template) + for i := range cluster.Spec.Shardings { + stopComp(&cluster.Spec.Shardings[i].Template) } return cli.Update(reqCtx.Ctx, cluster) } diff --git a/pkg/operations/upgrade.go b/pkg/operations/upgrade.go index 79f8844abb4..e157dba9dac 100644 --- a/pkg/operations/upgrade.go +++ b/pkg/operations/upgrade.go @@ -198,7 +198,7 @@ func (u upgradeOpsHandler) needUpdateCompDef(upgradeComp opsv1alpha1.UpgradeComp if upgradeComp.ComponentDefinitionName == nil { return false } - // we will ignore the empty ComponentDefinitionName if cluster.Spec.ClusterDef is empty. + // we will ignore the empty ComponentDefinitionName if cluster.Spec.clusterDef is empty. return *upgradeComp.ComponentDefinitionName != "" || (*upgradeComp.ComponentDefinitionName == "" && cluster.Spec.ClusterDef != "") } diff --git a/pkg/operations/volume_expansion.go b/pkg/operations/volume_expansion.go index 9ad5e23299e..a6a99ab4357 100644 --- a/pkg/operations/volume_expansion.go +++ b/pkg/operations/volume_expansion.go @@ -170,17 +170,17 @@ func (ve volumeExpansionOpsHandler) ReconcileAction(reqCtx intctrlutil.RequestCt } setVeHelpers(compSpec, compOps, compSpec.Name) } - for _, shardingSpec := range opsRes.Cluster.Spec.ShardingSpecs { - compOps, ok := compOpsHelper.componentOpsSet[shardingSpec.Name] + for _, sharding := range opsRes.Cluster.Spec.Shardings { + compOps, ok := compOpsHelper.componentOpsSet[sharding.Name] if !ok { continue } - shardingComps, err := intctrlutil.ListShardingComponents(reqCtx.Ctx, cli, opsRes.Cluster, shardingSpec.Name) + shardingComps, err := intctrlutil.ListShardingComponents(reqCtx.Ctx, cli, opsRes.Cluster, sharding.Name) if err != nil { return opsRequestPhase, 0, err } for _, v := range shardingComps { - setVeHelpers(shardingSpec.Template, compOps, v.Labels[constant.KBAppComponentLabelKey]) + setVeHelpers(sharding.Template, compOps, v.Labels[constant.KBAppComponentLabelKey]) } } // reconcile the status.components. when the volume expansion is successful, diff --git a/pkg/testutil/apps/cluster_factory.go b/pkg/testutil/apps/cluster_factory.go index 721b9dc1ca4..d9c31b53543 100644 --- a/pkg/testutil/apps/cluster_factory.go +++ b/pkg/testutil/apps/cluster_factory.go @@ -57,17 +57,18 @@ func (factory *MockClusterFactory) SetSchedulingPolicy(schedulingPolicy *appsv1. 
return factory } -func (factory *MockClusterFactory) AddShardingSpec(shardingName string, compDefName string) *MockClusterFactory { - shardingSpec := appsv1.ShardingSpec{ +func (factory *MockClusterFactory) AddSharding(shardingName string, shardingDefName, compDefName string) *MockClusterFactory { + sharding := appsv1.ClusterSharding{ + Name: shardingName, + ShardingDef: shardingDefName, Template: appsv1.ClusterComponentSpec{ Name: "fake", ComponentDef: compDefName, Replicas: 1, }, - Name: shardingName, Shards: 1, } - factory.Get().Spec.ShardingSpecs = append(factory.Get().Spec.ShardingSpecs, shardingSpec) + factory.Get().Spec.Shardings = append(factory.Get().Spec.Shardings, sharding) return factory } @@ -116,7 +117,7 @@ func (factory *MockClusterFactory) AddService(service appsv1.ClusterService) *Mo type updateFn func(comp *appsv1.ClusterComponentSpec) -type shardingUpdateFn func(shardingSpec *appsv1.ShardingSpec) +type shardingUpdateFn func(*appsv1.ClusterSharding) func (factory *MockClusterFactory) lastComponentRef(update updateFn) *MockClusterFactory { comps := factory.Get().Spec.ComponentSpecs @@ -127,18 +128,18 @@ func (factory *MockClusterFactory) lastComponentRef(update updateFn) *MockCluste return factory } -func (factory *MockClusterFactory) lastShardingSpec(update shardingUpdateFn) *MockClusterFactory { - shardingSpecs := factory.Get().Spec.ShardingSpecs - if len(shardingSpecs) > 0 { - update(&shardingSpecs[len(shardingSpecs)-1]) +func (factory *MockClusterFactory) lastSharding(update shardingUpdateFn) *MockClusterFactory { + shardings := factory.Get().Spec.Shardings + if len(shardings) > 0 { + update(&shardings[len(shardings)-1]) } - factory.Get().Spec.ShardingSpecs = shardingSpecs + factory.Get().Spec.Shardings = shardings return factory } func (factory *MockClusterFactory) SetShards(shards int32) *MockClusterFactory { - return factory.lastShardingSpec(func(shardingSpec *appsv1.ShardingSpec) { - shardingSpec.Shards = shards + return factory.lastSharding(func(sharding *appsv1.ClusterSharding) { + sharding.Shards = shards }) }
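Closing the loop on the test-factory rename, a hedged sketch (not part of this patch; names are placeholders) of the updated call chain. AddSharding now takes (shardingName, shardingDefName, compDefName), and an empty shardingDefName leaves spec.shardings[].shardingDef unset; in a real envtest the chain would typically end with Create(&testCtx).GetObject() as in the test hunks above.

package example

import (
	appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
	testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps"
)

// buildTestCluster builds (without creating) a Cluster object with one sharding
// of two shards, using the renamed MockClusterFactory helpers.
func buildTestCluster() *appsv1.Cluster {
	return testapps.NewClusterFactory("default", "test-cluster", "").
		AddSharding("shard", "", "example-compdef").
		SetShards(2).
		GetObject()
}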