diff --git a/apis/apps/v1/cluster_types.go b/apis/apps/v1/cluster_types.go
index 49c15b4e0ba..57561c029ce 100644
--- a/apis/apps/v1/cluster_types.go
+++ b/apis/apps/v1/cluster_types.go
@@ -426,17 +426,10 @@ type ClusterComponentSpec struct {
// +optional
ParallelPodManagementConcurrency *intstr.IntOrString `json:"parallelPodManagementConcurrency,omitempty"`
- // PodUpdatePolicy indicates how pods should be updated
+ // Provides fine-grained control over the spec update process of all instances.
//
- // - `StrictInPlace` indicates that only allows in-place upgrades.
- // Any attempt to modify other fields will be rejected.
- // - `PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod.
- // If that fails, it will fall back to the ReCreate, where pod will be recreated.
- // Default value is "PreferInPlace"
- //
- // +kubebuilder:validation:Enum={StrictInPlace,PreferInPlace}
// +optional
- PodUpdatePolicy *PodUpdatePolicyType `json:"podUpdatePolicy,omitempty"`
+ UpdateStrategy *UpdateStrategy `json:"updateStrategy,omitempty"`
// Allows for the customization of configuration values for each instance within a Component.
// An instance represent a single replica (Pod and associated K8s resources like PVCs, Services, and ConfigMaps).
diff --git a/apis/apps/v1/component_types.go b/apis/apps/v1/component_types.go
index cd6aec7d54d..1a1ba8d7f00 100644
--- a/apis/apps/v1/component_types.go
+++ b/apis/apps/v1/component_types.go
@@ -189,6 +189,11 @@ type ComponentSpec struct {
// +optional
ServiceAccountName string `json:"serviceAccountName,omitempty"`
+ // Provides fine-grained control over the spec update process of all instances.
+ //
+ // +optional
+ UpdateStrategy *UpdateStrategy `json:"updateStrategy,omitempty"`
+
// Controls the concurrency of pods during initial scale up, when replacing pods on nodes,
// or when scaling down. It only used when `PodManagementPolicy` is set to `Parallel`.
// The default Concurrency is 100%.
@@ -196,17 +201,6 @@ type ComponentSpec struct {
// +optional
ParallelPodManagementConcurrency *intstr.IntOrString `json:"parallelPodManagementConcurrency,omitempty"`
- // PodUpdatePolicy indicates how pods should be updated
- //
- // - `StrictInPlace` indicates that only allows in-place upgrades.
- // Any attempt to modify other fields will be rejected.
- // - `PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod.
- // If that fails, it will fall back to the ReCreate, where pod will be recreated.
- // Default value is "PreferInPlace"
- //
- // +optional
- PodUpdatePolicy *PodUpdatePolicyType `json:"podUpdatePolicy,omitempty"`
-
// Specifies the scheduling policy for the Component.
//
// +optional
diff --git a/apis/apps/v1/componentdefinition_types.go b/apis/apps/v1/componentdefinition_types.go
index d602b67f9c0..d53c15612a8 100644
--- a/apis/apps/v1/componentdefinition_types.go
+++ b/apis/apps/v1/componentdefinition_types.go
@@ -444,23 +444,24 @@ type ComponentDefinitionSpec struct {
// +optional
MinReadySeconds int32 `json:"minReadySeconds,omitempty"`
- // Specifies the concurrency strategy for updating multiple instances of the Component.
- // Available strategies:
+ // Specifies the concurrency level for updating instances during a rolling update.
+ // Available levels:
//
- // - `Serial`: Updates replicas one at a time, ensuring minimal downtime by waiting for each replica to become ready
+ // - `Serial`: Updates instances one at a time, ensuring minimal downtime by waiting for each instance to become ready
// before updating the next.
- // - `Parallel`: Updates all replicas simultaneously, optimizing for speed but potentially reducing availability
+ // - `Parallel`: Updates all instances simultaneously, optimizing for speed but potentially reducing availability
// during the update.
- // - `BestEffortParallel`: Updates replicas concurrently with a limit on simultaneous updates to ensure a minimum
+ // - `BestEffortParallel`: Updates instances concurrently with a limit on simultaneous updates to ensure a minimum
// number of operational replicas for maintaining quorum.
- // For example, in a 5-replica component, updating a maximum of 2 replicas simultaneously keeps
+ // For example, in a 5-instance setup, updating a maximum of 2 instances simultaneously keeps
// at least 3 operational for quorum.
//
- // This field is immutable and defaults to 'Serial'.
+ // Defaults to 'Serial'.
//
+ // +kubebuilder:validation:Enum={Serial,Parallel,BestEffortParallel}
// +kubebuilder:default=Serial
// +optional
- UpdateStrategy *UpdateStrategy `json:"updateStrategy,omitempty"`
+ UpdateConcurrency *UpdateConcurrency `json:"updateConcurrency,omitempty"`
// InstanceSet controls the creation of pods during initial scale up, replacement of pods on nodes, and scaling down.
//
@@ -1459,40 +1460,6 @@ type ReplicaRole struct {
Votable bool `json:"votable,omitempty"`
}
-// UpdateStrategy defines the update strategy for cluster components. This strategy determines how updates are applied
-// across the cluster.
-// The available strategies are `Serial`, `BestEffortParallel`, and `Parallel`.
-//
-// +enum
-// +kubebuilder:validation:Enum={Serial,BestEffortParallel,Parallel}
-type UpdateStrategy string
-
-const (
- // SerialStrategy indicates that updates are applied one at a time in a sequential manner.
- // The operator waits for each replica to be updated and ready before proceeding to the next one.
- // This ensures that only one replica is unavailable at a time during the update process.
- SerialStrategy UpdateStrategy = "Serial"
-
- // ParallelStrategy indicates that updates are applied simultaneously to all Pods of a Component.
- // The replicas are updated in parallel, with the operator updating all replicas concurrently.
- // This strategy provides the fastest update time but may lead to a period of reduced availability or
- // capacity during the update process.
- ParallelStrategy UpdateStrategy = "Parallel"
-
- // BestEffortParallelStrategy indicates that the replicas are updated in parallel, with the operator making
- // a best-effort attempt to update as many replicas as possible concurrently
- // while maintaining the component's availability.
- // Unlike the `Parallel` strategy, the `BestEffortParallel` strategy aims to ensure that a minimum number
- // of replicas remain available during the update process to maintain the component's quorum and functionality.
- //
- // For example, consider a component with 5 replicas. To maintain the component's availability and quorum,
- // the operator may allow a maximum of 2 replicas to be simultaneously updated. This ensures that at least
- // 3 replicas (a quorum) remain available and functional during the update process.
- //
- // The `BestEffortParallel` strategy strikes a balance between update speed and component availability.
- BestEffortParallelStrategy UpdateStrategy = "BestEffortParallel"
-)
-
// ComponentLifecycleActions defines a collection of Actions for customizing the behavior of a Component.
type ComponentLifecycleActions struct {
// Specifies the hook to be executed after a component's creation.
diff --git a/apis/apps/v1/shardingdefinition_types.go b/apis/apps/v1/shardingdefinition_types.go
index 58a27a432b9..968f8188b2e 100644
--- a/apis/apps/v1/shardingdefinition_types.go
+++ b/apis/apps/v1/shardingdefinition_types.go
@@ -75,7 +75,7 @@ type ShardingDefinitionSpec struct {
//
// +kubebuilder:default=Serial
// +optional
- ProvisionStrategy *UpdateStrategy `json:"provisionStrategy,omitempty"`
+ ProvisionStrategy *UpdateConcurrency `json:"provisionStrategy,omitempty"`
// Specifies the strategy for updating shards of the sharding. Only `Serial` and `Parallel` are supported.
//
@@ -83,7 +83,7 @@ type ShardingDefinitionSpec struct {
//
// +kubebuilder:default=Serial
// +optional
- UpdateStrategy *UpdateStrategy `json:"updateStrategy,omitempty"`
+ UpdateStrategy *UpdateConcurrency `json:"updateStrategy,omitempty"`
// Defines a set of hooks and procedures that customize the behavior of a sharding throughout its lifecycle.
//
diff --git a/apis/apps/v1/types.go b/apis/apps/v1/types.go
index 0a51b10e4a9..72a5471b93e 100644
--- a/apis/apps/v1/types.go
+++ b/apis/apps/v1/types.go
@@ -18,6 +18,7 @@ package v1
import (
corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
)
const (
@@ -463,16 +464,16 @@ type ClusterComponentConfigSource struct {
// - Local file
}
-type PodUpdatePolicyType string
+type InstanceUpdatePolicyType string
const (
- // StrictInPlacePodUpdatePolicyType indicates that only allows in-place upgrades.
- // Any attempt to modify other fields will be rejected.
- StrictInPlacePodUpdatePolicyType PodUpdatePolicyType = "StrictInPlace"
+ // StrictInPlaceInstanceUpdatePolicyType indicates that only allows in-place update.
+ // Any attempt to modify other fields that do not support in-place update will be rejected.
+ StrictInPlaceInstanceUpdatePolicyType InstanceUpdatePolicyType = "StrictInPlace"
- // PreferInPlacePodUpdatePolicyType indicates that we will first attempt an in-place upgrade of the Pod.
- // If that fails, it will fall back to the ReCreate, where pod will be recreated.
- PreferInPlacePodUpdatePolicyType PodUpdatePolicyType = "PreferInPlace"
+ // PreferInPlaceInstanceUpdatePolicyType indicates that we will first attempt an in-place update of the instance.
+ // If that fails, it will fall back to the ReCreate, where the instance will be recreated.
+ PreferInPlaceInstanceUpdatePolicyType InstanceUpdatePolicyType = "PreferInPlace"
)
type SchedulingPolicy struct {
@@ -690,3 +691,121 @@ type Ordinals struct {
Ranges []Range `json:"ranges,omitempty"`
Discrete []int32 `json:"discrete,omitempty"`
}
+
+// UpdateStrategy defines fine-grained control over the spec update process of all instances.
+type UpdateStrategy struct {
+ // Indicates the type of the UpdateStrategy.
+ // Default is RollingUpdate.
+ //
+ // +optional
+ Type UpdateStrategyType `json:"type,omitempty"`
+
+ // Indicates how instances should be updated.
+ //
+ // - `StrictInPlace` indicates that only allows in-place update.
+ // Any attempt to modify other fields that do not support in-place update will be rejected.
+ // - `PreferInPlace` indicates that we will first attempt an in-place update of the instance.
+ // If that fails, it will fall back to the ReCreate, where the instance will be recreated.
+ // Default value is "PreferInPlace".
+ //
+ // +kubebuilder:validation:Enum={StrictInPlace,PreferInPlace}
+ // +optional
+ InstanceUpdatePolicy *InstanceUpdatePolicyType `json:"instanceUpdatePolicy,omitempty"`
+
+ // Specifies how the rolling update should be applied.
+ //
+ // +optional
+ RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"`
+}
+
+// UpdateStrategyType is a string enumeration type that enumerates
+// all possible update strategies for the KubeBlocks controllers.
+//
+// +enum
+// +kubebuilder:validation:Enum={RollingUpdate,OnDelete}
+type UpdateStrategyType string
+
+const (
+ // RollingUpdateStrategyType indicates that update will be
+ // applied to all Instances with respect to the InstanceSet
+ // ordering constraints.
+ RollingUpdateStrategyType UpdateStrategyType = "RollingUpdate"
+ // OnDeleteStrategyType indicates that ordered rolling restarts are disabled. Instances are recreated
+ // when they are manually deleted.
+ OnDeleteStrategyType UpdateStrategyType = "OnDelete"
+)
+
+// RollingUpdate specifies how the rolling update should be applied.
+type RollingUpdate struct {
+ // Indicates the number of instances that should be updated during a rolling update.
+ // The remaining instances will remain untouched. This is helpful in defining how many instances
+ // should participate in the update process.
+ // Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ // Absolute number is calculated from percentage by rounding up.
+ // The default value is ComponentSpec.Replicas (i.e., update all instances).
+ //
+ // +optional
+ Replicas *intstr.IntOrString `json:"replicas,omitempty"`
+
+ // Specifies the concurrency level for updating instances during a rolling update.
+ // Available levels:
+ //
+ // - `Serial`: Updates instances one at a time, ensuring minimal downtime by waiting for each instance to become ready
+ // before updating the next.
+ // - `Parallel`: Updates all instances simultaneously, optimizing for speed but potentially reducing availability
+ // during the update.
+ // - `BestEffortParallel`: Updates instances concurrently with a limit on simultaneous updates to ensure a minimum
+ // number of operational replicas for maintaining quorum.
+ // For example, in a 5-instance setup, updating a maximum of 2 instances simultaneously keeps
+ // at least 3 operational for quorum.
+ //
+ // Defaults to 'Serial'.
+ //
+ // +kubebuilder:validation:Enum={Serial,Parallel,BestEffortParallel}
+ // +kubebuilder:default=Serial
+ // +optional
+ UpdateConcurrency *UpdateConcurrency `json:"updateConcurrency,omitempty"`
+
+ // The maximum number of instances that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ // Absolute number is calculated from percentage by rounding up. This can not be 0.
+ // Defaults to 1. The field applies to all instances. That means if there is any unavailable pod,
+ // it will be counted towards MaxUnavailable.
+ //
+ // +optional
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
+}
+
+// UpdateConcurrency defines the update concurrency level for cluster components. This concurrency level determines how updates are applied
+// across the cluster.
+// The available concurrency levels are `Serial`, `BestEffortParallel`, and `Parallel`.
+//
+// +enum
+// +kubebuilder:validation:Enum={Serial,BestEffortParallel,Parallel}
+type UpdateConcurrency string
+
+const (
+ // SerialConcurrency indicates that updates are applied one at a time in a sequential manner.
+ // The operator waits for each replica to be updated and ready before proceeding to the next one.
+ // This ensures that only one replica is unavailable at a time during the update process.
+ SerialConcurrency UpdateConcurrency = "Serial"
+
+ // ParallelConcurrency indicates that updates are applied simultaneously to all Pods of a Component.
+ // The replicas are updated in parallel, with the operator updating all replicas concurrently.
+ // This strategy provides the fastest update time but may lead to a period of reduced availability or
+ // capacity during the update process.
+ ParallelConcurrency UpdateConcurrency = "Parallel"
+
+ // BestEffortParallelConcurrency indicates that the replicas are updated in parallel, with the operator making
+ // a best-effort attempt to update as many replicas as possible concurrently
+ // while maintaining the component's availability.
+ // Unlike the `Parallel` strategy, the `BestEffortParallel` strategy aims to ensure that a minimum number
+ // of replicas remain available during the update process to maintain the component's quorum and functionality.
+ //
+ // For example, consider a component with 5 replicas. To maintain the component's availability and quorum,
+ // the operator may allow a maximum of 2 replicas to be simultaneously updated. This ensures that at least
+ // 3 replicas (a quorum) remain available and functional during the update process.
+ //
+ // The `BestEffortParallel` strategy strikes a balance between update speed and component availability.
+ BestEffortParallelConcurrency UpdateConcurrency = "BestEffortParallel"
+)
diff --git a/apis/apps/v1/zz_generated.deepcopy.go b/apis/apps/v1/zz_generated.deepcopy.go
index 300a52d5052..e1c298fd4d1 100644
--- a/apis/apps/v1/zz_generated.deepcopy.go
+++ b/apis/apps/v1/zz_generated.deepcopy.go
@@ -329,10 +329,10 @@ func (in *ClusterComponentSpec) DeepCopyInto(out *ClusterComponentSpec) {
*out = new(intstr.IntOrString)
**out = **in
}
- if in.PodUpdatePolicy != nil {
- in, out := &in.PodUpdatePolicy, &out.PodUpdatePolicy
- *out = new(PodUpdatePolicyType)
- **out = **in
+ if in.UpdateStrategy != nil {
+ in, out := &in.UpdateStrategy, &out.UpdateStrategy
+ *out = new(UpdateStrategy)
+ (*in).DeepCopyInto(*out)
}
if in.Instances != nil {
in, out := &in.Instances, &out.Instances
@@ -1196,9 +1196,9 @@ func (in *ComponentDefinitionSpec) DeepCopyInto(out *ComponentDefinitionSpec) {
*out = make([]ReplicaRole, len(*in))
copy(*out, *in)
}
- if in.UpdateStrategy != nil {
- in, out := &in.UpdateStrategy, &out.UpdateStrategy
- *out = new(UpdateStrategy)
+ if in.UpdateConcurrency != nil {
+ in, out := &in.UpdateConcurrency, &out.UpdateConcurrency
+ *out = new(UpdateConcurrency)
**out = **in
}
if in.PodManagementPolicy != nil {
@@ -1462,16 +1462,16 @@ func (in *ComponentSpec) DeepCopyInto(out *ComponentSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.UpdateStrategy != nil {
+ in, out := &in.UpdateStrategy, &out.UpdateStrategy
+ *out = new(UpdateStrategy)
+ (*in).DeepCopyInto(*out)
+ }
if in.ParallelPodManagementConcurrency != nil {
in, out := &in.ParallelPodManagementConcurrency, &out.ParallelPodManagementConcurrency
*out = new(intstr.IntOrString)
**out = **in
}
- if in.PodUpdatePolicy != nil {
- in, out := &in.PodUpdatePolicy, &out.PodUpdatePolicy
- *out = new(PodUpdatePolicyType)
- **out = **in
- }
if in.SchedulingPolicy != nil {
in, out := &in.SchedulingPolicy, &out.SchedulingPolicy
*out = new(SchedulingPolicy)
@@ -2485,6 +2485,36 @@ func (in *RoledVar) DeepCopy() *RoledVar {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) {
+ *out = *in
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.UpdateConcurrency != nil {
+ in, out := &in.UpdateConcurrency, &out.UpdateConcurrency
+ *out = new(UpdateConcurrency)
+ **out = **in
+ }
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdate.
+func (in *RollingUpdate) DeepCopy() *RollingUpdate {
+ if in == nil {
+ return nil
+ }
+ out := new(RollingUpdate)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SchedulingPolicy) DeepCopyInto(out *SchedulingPolicy) {
*out = *in
@@ -2978,12 +3008,12 @@ func (in *ShardingDefinitionSpec) DeepCopyInto(out *ShardingDefinitionSpec) {
}
if in.ProvisionStrategy != nil {
in, out := &in.ProvisionStrategy, &out.ProvisionStrategy
- *out = new(UpdateStrategy)
+ *out = new(UpdateConcurrency)
**out = **in
}
if in.UpdateStrategy != nil {
in, out := &in.UpdateStrategy, &out.UpdateStrategy
- *out = new(UpdateStrategy)
+ *out = new(UpdateConcurrency)
**out = **in
}
if in.LifecycleActions != nil {
@@ -3400,6 +3430,31 @@ func (in *TLSVars) DeepCopy() *TLSVars {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpdateStrategy) DeepCopyInto(out *UpdateStrategy) {
+ *out = *in
+ if in.InstanceUpdatePolicy != nil {
+ in, out := &in.InstanceUpdatePolicy, &out.InstanceUpdatePolicy
+ *out = new(InstanceUpdatePolicyType)
+ **out = **in
+ }
+ if in.RollingUpdate != nil {
+ in, out := &in.RollingUpdate, &out.RollingUpdate
+ *out = new(RollingUpdate)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateStrategy.
+func (in *UpdateStrategy) DeepCopy() *UpdateStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(UpdateStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VarSource) DeepCopyInto(out *VarSource) {
*out = *in
diff --git a/apis/apps/v1alpha1/cluster_conversion.go b/apis/apps/v1alpha1/cluster_conversion.go
index 27c069db048..6185da1bcde 100644
--- a/apis/apps/v1alpha1/cluster_conversion.go
+++ b/apis/apps/v1alpha1/cluster_conversion.go
@@ -20,6 +20,7 @@ along with this program. If not, see .
package v1alpha1
import (
+ "slices"
"sort"
"strings"
@@ -105,13 +106,13 @@ func (r *Cluster) changesToCluster(cluster *appsv1.Cluster) {
// - volumeClaimTemplates
// spec:
// resources: corev1.ResourceRequirements -> corev1.VolumeResourceRequirements
- // podUpdatePolicy: *workloads.PodUpdatePolicyType -> *PodUpdatePolicyType
+ // podUpdatePolicy: *workloads.PodUpdatePolicyType -> updateStrategy.instanceUpdatePolicy (*InstanceUpdatePolicyType)
// sharings
// - template
// volumeClaimTemplates
// spec:
// resources: corev1.ResourceRequirements -> corev1.VolumeResourceRequirements
- // podUpdatePolicy: *workloads.PodUpdatePolicyType -> *PodUpdatePolicyType
+ // podUpdatePolicy: *workloads.PodUpdatePolicyType -> updateStrategy.instanceUpdatePolicy (*InstanceUpdatePolicyType)
// status
// components
// - message: ComponentMessageMap -> map[string]string
@@ -152,6 +153,39 @@ func (r *Cluster) changesToCluster(cluster *appsv1.Cluster) {
cluster.Annotations["apps.kubeblocks.io/shard-pod-anti-affinity"] = strings.Join(shardingRequiredPodAntiAffinity, ",")
}
}
+
+ for i := range r.Spec.ComponentSpecs {
+ spec := &r.Spec.ComponentSpecs[i]
+ if spec.PodUpdatePolicy == nil {
+ continue
+ }
+ index := slices.IndexFunc(cluster.Spec.ComponentSpecs, func(componentSpec appsv1.ClusterComponentSpec) bool {
+ return spec.Name == componentSpec.Name
+ })
+ if index < 0 {
+ continue
+ }
+ if cluster.Spec.ComponentSpecs[index].UpdateStrategy == nil {
+ cluster.Spec.ComponentSpecs[index].UpdateStrategy = &appsv1.UpdateStrategy{}
+ }
+ cluster.Spec.ComponentSpecs[index].UpdateStrategy.InstanceUpdatePolicy = (*appsv1.InstanceUpdatePolicyType)(spec.PodUpdatePolicy)
+ }
+ for i := range r.Spec.ShardingSpecs {
+ spec := &r.Spec.ShardingSpecs[i]
+ if spec.Template.PodUpdatePolicy == nil {
+ continue
+ }
+ index := slices.IndexFunc(cluster.Spec.Shardings, func(sharding appsv1.ClusterSharding) bool {
+ return spec.Name == sharding.Name
+ })
+ if index < 0 {
+ continue
+ }
+ if cluster.Spec.Shardings[index].Template.UpdateStrategy == nil {
+ cluster.Spec.Shardings[index].Template.UpdateStrategy = &appsv1.UpdateStrategy{}
+ }
+ cluster.Spec.Shardings[index].Template.UpdateStrategy.InstanceUpdatePolicy = (*appsv1.InstanceUpdatePolicyType)(spec.Template.PodUpdatePolicy)
+ }
}
func (r *Cluster) toClusterServices(cluster *appsv1.Cluster) {
@@ -198,13 +232,13 @@ func (r *Cluster) changesFromCluster(cluster *appsv1.Cluster) {
// - volumeClaimTemplates
// spec:
// resources: corev1.ResourceRequirements -> corev1.VolumeResourceRequirements
- // podUpdatePolicy: *workloads.PodUpdatePolicyType -> *PodUpdatePolicyType
+ // podUpdatePolicy: *workloads.PodUpdatePolicyType -> updateStrategy.instanceUpdatePolicy (*InstanceUpdatePolicyType)
// shardingSpecs -> shardings
// - template
// volumeClaimTemplates
// spec:
// resources: corev1.ResourceRequirements -> corev1.VolumeResourceRequirements
- // podUpdatePolicy: *workloads.PodUpdatePolicyType -> *PodUpdatePolicyType
+ // podUpdatePolicy: *workloads.PodUpdatePolicyType -> updateStrategy.instanceUpdatePolicy (*InstanceUpdatePolicyType)
// status
// components
// - message: ComponentMessageMap -> map[string]string
@@ -218,6 +252,31 @@ func (r *Cluster) changesFromCluster(cluster *appsv1.Cluster) {
// copy from sharding spec
_ = copier.Copy(&r.Spec.ShardingSpecs[i], &shardingSpec)
}
+
+ for _, spec := range cluster.Spec.ComponentSpecs {
+ if spec.UpdateStrategy == nil || spec.UpdateStrategy.InstanceUpdatePolicy == nil {
+ continue
+ }
+ index := slices.IndexFunc(r.Spec.ComponentSpecs, func(componentSpec ClusterComponentSpec) bool {
+ return spec.Name == componentSpec.Name
+ })
+ if index < 0 {
+ continue
+ }
+ r.Spec.ComponentSpecs[index].PodUpdatePolicy = (*workloads.PodUpdatePolicyType)(spec.UpdateStrategy.InstanceUpdatePolicy)
+ }
+ for _, sharding := range cluster.Spec.Shardings {
+ if sharding.Template.UpdateStrategy == nil || sharding.Template.UpdateStrategy.InstanceUpdatePolicy == nil {
+ continue
+ }
+ index := slices.IndexFunc(r.Spec.ShardingSpecs, func(spec ShardingSpec) bool {
+ return spec.Name == sharding.Name
+ })
+ if index < 0 {
+ continue
+ }
+ r.Spec.ShardingSpecs[index].Template.PodUpdatePolicy = (*workloads.PodUpdatePolicyType)(sharding.Template.UpdateStrategy.InstanceUpdatePolicy)
+ }
}
type clusterConverter struct {
diff --git a/apis/apps/v1alpha1/component_conversion.go b/apis/apps/v1alpha1/component_conversion.go
index 11d5da599c5..6f1104ff51e 100644
--- a/apis/apps/v1alpha1/component_conversion.go
+++ b/apis/apps/v1alpha1/component_conversion.go
@@ -26,6 +26,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/conversion"
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
+ "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1"
)
// ConvertTo converts this Component to the Hub version (v1).
@@ -76,8 +77,7 @@ func (r *Component) ConvertFrom(srcRaw conversion.Hub) error {
func (r *Component) incrementConvertTo(dstRaw metav1.Object) (incrementChange, error) {
// changed
- comp := dstRaw.(*appsv1.Component)
- comp.Status.Message = r.Status.Message
+ r.changesToComponent(dstRaw.(*appsv1.Component))
// deleted
return &componentConverter{
@@ -97,12 +97,34 @@ func (r *Component) incrementConvertFrom(srcRaw metav1.Object, ic incrementChang
r.Spec.InstanceUpdateStrategy = c.InstanceUpdateStrategy
// changed
- comp := srcRaw.(*appsv1.Component)
- r.Status.Message = comp.Status.Message
+ r.changesFromComponent(srcRaw.(*appsv1.Component))
return nil
}
+func (r *Component) changesToComponent(comp *appsv1.Component) {
+ // changed:
+ // spec
+ // podUpdatePolicy: *workloads.PodUpdatePolicyType -> updateStrategy.instanceUpdatePolicy (*InstanceUpdatePolicyType)
+ comp.Status.Message = r.Status.Message
+ if r.Spec.PodUpdatePolicy != nil {
+ if comp.Spec.UpdateStrategy == nil {
+ comp.Spec.UpdateStrategy = &appsv1.UpdateStrategy{}
+ }
+ comp.Spec.UpdateStrategy.InstanceUpdatePolicy = (*appsv1.InstanceUpdatePolicyType)(r.Spec.PodUpdatePolicy)
+ }
+}
+
+func (r *Component) changesFromComponent(comp *appsv1.Component) {
+ // changed:
+ // spec
+ // podUpdatePolicy: *workloads.PodUpdatePolicyType -> updateStrategy.instanceUpdatePolicy (*InstanceUpdatePolicyType)
+ if comp.Spec.UpdateStrategy != nil && comp.Spec.UpdateStrategy.InstanceUpdatePolicy != nil {
+ r.Spec.PodUpdatePolicy = (*v1alpha1.PodUpdatePolicyType)(comp.Spec.UpdateStrategy.InstanceUpdatePolicy)
+ }
+ r.Status.Message = comp.Status.Message
+}
+
type componentConverter struct {
EnabledLogs []string `json:"enabledLogs,omitempty"`
Affinity *Affinity `json:"affinity,omitempty"`
diff --git a/apis/apps/v1alpha1/componentdefinition_conversion.go b/apis/apps/v1alpha1/componentdefinition_conversion.go
index a1574961a0b..a819daf3e18 100644
--- a/apis/apps/v1alpha1/componentdefinition_conversion.go
+++ b/apis/apps/v1alpha1/componentdefinition_conversion.go
@@ -117,6 +117,7 @@ func (r *ComponentDefinition) changesToComponentDefinition(cmpd *appsv1.Componen
// - ValueFrom
// componentVarRef:
// instanceNames -> podNames
+ // updateStrategy -> updateConcurrency
// lifecycleActions
for _, v := range r.Spec.Vars {
@@ -127,6 +128,9 @@ func (r *ComponentDefinition) changesToComponentDefinition(cmpd *appsv1.Componen
return err
}
}
+ if r.Spec.UpdateStrategy != nil {
+ cmpd.Spec.UpdateConcurrency = (*appsv1.UpdateConcurrency)(r.Spec.UpdateStrategy)
+ }
r.toV1LifecycleActions(cmpd)
return nil
}
@@ -138,6 +142,7 @@ func (r *ComponentDefinition) changesFromComponentDefinition(cmpd *appsv1.Compon
// - ValueFrom
// componentVarRef:
// instanceNames -> podNames
+ // updateStrategy -> updateConcurrency
// lifecycleActions
for _, v := range cmpd.Spec.Vars {
@@ -148,6 +153,9 @@ func (r *ComponentDefinition) changesFromComponentDefinition(cmpd *appsv1.Compon
return err
}
}
+ if cmpd.Spec.UpdateConcurrency != nil {
+ r.Spec.UpdateStrategy = (*UpdateStrategy)(cmpd.Spec.UpdateConcurrency)
+ }
r.fromV1LifecycleActions(cmpd)
return nil
}
diff --git a/apis/workloads/v1/instanceset_types.go b/apis/workloads/v1/instanceset_types.go
index 97ec45ed254..2a4544da1ca 100644
--- a/apis/workloads/v1/instanceset_types.go
+++ b/apis/workloads/v1/instanceset_types.go
@@ -173,24 +173,10 @@ type InstanceSetSpec struct {
// +optional
ParallelPodManagementConcurrency *intstr.IntOrString `json:"parallelPodManagementConcurrency,omitempty"`
- // PodUpdatePolicy indicates how pods should be updated
- //
- // - `StrictInPlace` indicates that only allows in-place upgrades.
- // Any attempt to modify other fields will be rejected.
- // - `PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod.
- // If that fails, it will fall back to the ReCreate, where pod will be recreated.
- // Default value is "PreferInPlace"
+ // Provides fine-grained control over the spec update process of all instances.
//
// +optional
- PodUpdatePolicy PodUpdatePolicyType `json:"podUpdatePolicy,omitempty"`
-
- // Indicates the StatefulSetUpdateStrategy that will be
- // employed to update Pods in the InstanceSet when a revision is made to
- // Template.
- // UpdateStrategy.Type will be set to appsv1.OnDeleteStatefulSetStrategyType if MemberUpdateStrategy is not nil
- //
- // Note: This field will be removed in future version.
- UpdateStrategy appsv1.StatefulSetUpdateStrategy `json:"updateStrategy,omitempty"`
+ UpdateStrategy *UpdateStrategy `json:"updateStrategy,omitempty"`
// A list of roles defined in the system. Instanceset obtains role through pods' role label `kubeblocks.io/role`.
//
@@ -207,16 +193,6 @@ type InstanceSetSpec struct {
// +optional
TemplateVars map[string]string `json:"templateVars,omitempty"`
- // Members(Pods) update strategy.
- //
- // - serial: update Members one by one that guarantee minimum component unavailable time.
- // - bestEffortParallel: update Members in parallel that guarantee minimum component un-writable time.
- // - parallel: force parallel
- //
- // +kubebuilder:validation:Enum={Serial,BestEffortParallel,Parallel}
- // +optional
- MemberUpdateStrategy *MemberUpdateStrategy `json:"memberUpdateStrategy,omitempty"`
-
// Indicates that the InstanceSet is paused, meaning the reconciliation of this InstanceSet object will be paused.
// +optional
Paused bool `json:"paused,omitempty"`
@@ -318,16 +294,134 @@ type InstanceSetStatus struct {
// +kubebuilder:object:generate=false
type InstanceTemplate = kbappsv1.InstanceTemplate
-type PodUpdatePolicyType string
+// UpdateStrategy defines fine-grained control over the spec update process of all instances.
+type UpdateStrategy struct {
+ // Indicates the type of the UpdateStrategy.
+ // Default is RollingUpdate.
+ //
+ // +optional
+ Type UpdateStrategyType `json:"type,omitempty"`
+
+ // Indicates how instances should be updated.
+ //
+	// - `StrictInPlace` indicates that only in-place updates are allowed.
+	// Any attempt to modify fields that do not support in-place update will be rejected.
+	// - `PreferInPlace` indicates that we will first attempt an in-place update of the instance.
+	// If that fails, it will fall back to ReCreate, where the instance will be recreated.
+	// Default value is "PreferInPlace".
+ //
+ // +kubebuilder:validation:Enum={StrictInPlace,PreferInPlace}
+ // +optional
+ InstanceUpdatePolicy *InstanceUpdatePolicyType `json:"instanceUpdatePolicy,omitempty"`
+
+ // Specifies how the rolling update should be applied.
+ //
+ // +optional
+ RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"`
+}
+
+// UpdateStrategyType is a string enumeration type that enumerates
+// all possible update strategies for the KubeBlocks controllers.
+//
+// +enum
+// +kubebuilder:validation:Enum={RollingUpdate,OnDelete}
+type UpdateStrategyType string
+
+const (
+ // RollingUpdateStrategyType indicates that update will be
+ // applied to all Instances with respect to the InstanceSet
+ // ordering constraints.
+ RollingUpdateStrategyType UpdateStrategyType = "RollingUpdate"
+ // OnDeleteStrategyType indicates that ordered rolling restarts are disabled. Instances are recreated
+ // when they are manually deleted.
+ OnDeleteStrategyType UpdateStrategyType = "OnDelete"
+)
+
+// RollingUpdate specifies how the rolling update should be applied.
+type RollingUpdate struct {
+ // Indicates the number of instances that should be updated during a rolling update.
+ // The remaining instances will remain untouched. This is helpful in defining how many instances
+ // should participate in the update process.
+ // Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ // Absolute number is calculated from percentage by rounding up.
+ // The default value is ComponentSpec.Replicas (i.e., update all instances).
+ //
+ // +optional
+ Replicas *intstr.IntOrString `json:"replicas,omitempty"`
+
+ // Specifies the concurrency level for updating instances during a rolling update.
+ // Available levels:
+ //
+ // - `Serial`: Updates instances one at a time, ensuring minimal downtime by waiting for each instance to become ready
+ // before updating the next.
+ // - `Parallel`: Updates all instances simultaneously, optimizing for speed but potentially reducing availability
+ // during the update.
+ // - `BestEffortParallel`: Updates instances concurrently with a limit on simultaneous updates to ensure a minimum
+ // number of operational replicas for maintaining quorum.
+ // For example, in a 5-instances setup, updating a maximum of 2 instances simultaneously keeps
+ // at least 3 operational for quorum.
+ //
+ // Defaults to 'Serial'.
+ //
+ // +kubebuilder:validation:Enum={Serial,Parallel,BestEffortParallel}
+ // +kubebuilder:default=Serial
+ // +optional
+ UpdateConcurrency *UpdateConcurrency `json:"updateConcurrency,omitempty"`
+
+ // The maximum number of instances that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ // Absolute number is calculated from percentage by rounding up. This can not be 0.
+	// Defaults to 1. The field applies to all instances. That means if there is any unavailable instance,
+ // it will be counted towards MaxUnavailable.
+ //
+ // +optional
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
+}
+
+type InstanceUpdatePolicyType string
const (
- // StrictInPlacePodUpdatePolicyType indicates that only allows in-place upgrades.
- // Any attempt to modify other fields will be rejected.
- StrictInPlacePodUpdatePolicyType PodUpdatePolicyType = "StrictInPlace"
+	// StrictInPlaceInstanceUpdatePolicyType indicates that only in-place updates are allowed.
+	// Any attempt to modify fields that do not support in-place update will be rejected.
+	StrictInPlaceInstanceUpdatePolicyType InstanceUpdatePolicyType = "StrictInPlace"
+
+	// PreferInPlaceInstanceUpdatePolicyType indicates that we will first attempt an in-place update of the instance.
+	// If that fails, it will fall back to ReCreate, where the instance will be recreated.
+	PreferInPlaceInstanceUpdatePolicyType InstanceUpdatePolicyType = "PreferInPlace"
+)
+
+// UpdateConcurrency defines the update concurrency level for cluster components. This concurrency level determines how updates are applied
+// across the cluster.
+// The available concurrency levels are `Serial`, `BestEffortParallel`, and `Parallel`.
+//
+// +enum
+// +kubebuilder:validation:Enum={Serial,BestEffortParallel,Parallel}
+type UpdateConcurrency string
- // PreferInPlacePodUpdatePolicyType indicates that we will first attempt an in-place upgrade of the Pod.
- // If that fails, it will fall back to the ReCreate, where pod will be recreated.
- PreferInPlacePodUpdatePolicyType PodUpdatePolicyType = "PreferInPlace"
+const (
+ // SerialConcurrency indicates that updates are applied one at a time in a sequential manner.
+ // The operator waits for each replica to be updated and ready before proceeding to the next one.
+ // This ensures that only one replica is unavailable at a time during the update process.
+ SerialConcurrency UpdateConcurrency = "Serial"
+
+ // ParallelConcurrency indicates that updates are applied simultaneously to all Pods of a Component.
+ // The replicas are updated in parallel, with the operator updating all replicas concurrently.
+ // This strategy provides the fastest update time but may lead to a period of reduced availability or
+ // capacity during the update process.
+ ParallelConcurrency UpdateConcurrency = "Parallel"
+
+ // BestEffortParallelConcurrency indicates that the replicas are updated in parallel, with the operator making
+ // a best-effort attempt to update as many replicas as possible concurrently
+ // while maintaining the component's availability.
+ // Unlike the `Parallel` strategy, the `BestEffortParallel` strategy aims to ensure that a minimum number
+ // of replicas remain available during the update process to maintain the component's quorum and functionality.
+ //
+ // For example, consider a component with 5 replicas. To maintain the component's availability and quorum,
+ // the operator may allow a maximum of 2 replicas to be simultaneously updated. This ensures that at least
+ // 3 replicas (a quorum) remain available and functional during the update process.
+ //
+ // The `BestEffortParallel` strategy strikes a balance between update speed and component availability.
+ BestEffortParallelConcurrency UpdateConcurrency = "BestEffortParallel"
)
type ReplicaRole struct {
@@ -438,16 +532,6 @@ type MembershipReconfiguration struct {
Switchover *kbappsv1.Action `json:"switchover,omitempty"`
}
-// MemberUpdateStrategy defines Cluster Component update strategy.
-// +enum
-type MemberUpdateStrategy string
-
-const (
- SerialUpdateStrategy MemberUpdateStrategy = "Serial"
- BestEffortParallelUpdateStrategy MemberUpdateStrategy = "BestEffortParallel"
- ParallelUpdateStrategy MemberUpdateStrategy = "Parallel"
-)
-
type Credential struct {
// Defines the user's name for the credential.
// The corresponding environment variable will be KB_ITS_USERNAME.
diff --git a/apis/workloads/v1/zz_generated.deepcopy.go b/apis/workloads/v1/zz_generated.deepcopy.go
index 1be40a7f8c0..642bbf98845 100644
--- a/apis/workloads/v1/zz_generated.deepcopy.go
+++ b/apis/workloads/v1/zz_generated.deepcopy.go
@@ -191,7 +191,11 @@ func (in *InstanceSetSpec) DeepCopyInto(out *InstanceSetSpec) {
*out = new(intstr.IntOrString)
**out = **in
}
- in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
+ if in.UpdateStrategy != nil {
+ in, out := &in.UpdateStrategy, &out.UpdateStrategy
+ *out = new(UpdateStrategy)
+ (*in).DeepCopyInto(*out)
+ }
if in.Roles != nil {
in, out := &in.Roles, &out.Roles
*out = make([]ReplicaRole, len(*in))
@@ -209,11 +213,6 @@ func (in *InstanceSetSpec) DeepCopyInto(out *InstanceSetSpec) {
(*out)[key] = val
}
}
- if in.MemberUpdateStrategy != nil {
- in, out := &in.MemberUpdateStrategy, &out.MemberUpdateStrategy
- *out = new(MemberUpdateStrategy)
- **out = **in
- }
if in.Credential != nil {
in, out := &in.Credential, &out.Credential
*out = new(Credential)
@@ -373,3 +372,58 @@ func (in *ReplicaRole) DeepCopy() *ReplicaRole {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) {
+ *out = *in
+ if in.Replicas != nil {
+ in, out := &in.Replicas, &out.Replicas
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.UpdateConcurrency != nil {
+ in, out := &in.UpdateConcurrency, &out.UpdateConcurrency
+ *out = new(UpdateConcurrency)
+ **out = **in
+ }
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdate.
+func (in *RollingUpdate) DeepCopy() *RollingUpdate {
+ if in == nil {
+ return nil
+ }
+ out := new(RollingUpdate)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpdateStrategy) DeepCopyInto(out *UpdateStrategy) {
+ *out = *in
+ if in.InstanceUpdatePolicy != nil {
+ in, out := &in.InstanceUpdatePolicy, &out.InstanceUpdatePolicy
+ *out = new(InstanceUpdatePolicyType)
+ **out = **in
+ }
+ if in.RollingUpdate != nil {
+ in, out := &in.RollingUpdate, &out.RollingUpdate
+ *out = new(RollingUpdate)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateStrategy.
+func (in *UpdateStrategy) DeepCopy() *UpdateStrategy {
+ if in == nil {
+ return nil
+ }
+ out := new(UpdateStrategy)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/apis/workloads/v1alpha1/instanceset_conversion.go b/apis/workloads/v1alpha1/instanceset_conversion.go
index 88a732b6439..84f1270d535 100644
--- a/apis/workloads/v1alpha1/instanceset_conversion.go
+++ b/apis/workloads/v1alpha1/instanceset_conversion.go
@@ -22,7 +22,9 @@ package v1alpha1
import (
"github.com/jinzhu/copier"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/conversion"
workloadsv1 "github.com/apecloud/kubeblocks/apis/workloads/v1"
@@ -43,6 +45,7 @@ func (r *InstanceSet) ConvertTo(dstRaw conversion.Hub) error {
if err := copier.Copy(&dst.Spec, &r.Spec); err != nil {
return err
}
+ r.changesToInstanceSet(dst)
// status
if err := copier.Copy(&dst.Status, &r.Status); err != nil {
@@ -67,6 +70,7 @@ func (r *InstanceSet) ConvertFrom(srcRaw conversion.Hub) error {
if err := copier.Copy(&r.Spec, &src.Spec); err != nil {
return err
}
+ r.changesFromInstanceSet(src)
// status
if err := copier.Copy(&r.Status, &src.Status); err != nil {
@@ -80,21 +84,12 @@ func (r *InstanceSet) ConvertFrom(srcRaw conversion.Hub) error {
}
func (r *InstanceSet) incrementConvertTo(dstRaw metav1.Object) error {
- if r.Spec.RoleProbe == nil && r.Spec.UpdateStrategy == nil {
+ if r.Spec.RoleProbe == nil {
return nil
}
// changed
instanceConvert := instanceSetConverter{
- RoleProbe: r.Spec.RoleProbe,
- UpdateStrategy: r.Spec.UpdateStrategy,
- }
-
- if r.Spec.UpdateStrategy == nil || r.Spec.UpdateStrategy.MemberUpdateStrategy == nil {
- // 1. set default update strategy
- updateStrategy := SerialUpdateStrategy
- instanceConvert.UpdateStrategy = &InstanceUpdateStrategy{
- MemberUpdateStrategy: &updateStrategy,
- }
+ RoleProbe: r.Spec.RoleProbe,
}
bytes, err := json.Marshal(instanceConvert)
if err != nil {
@@ -120,11 +115,81 @@ func (r *InstanceSet) incrementConvertFrom(srcRaw metav1.Object) error {
}
delete(srcRaw.GetAnnotations(), kbIncrementConverterAK)
r.Spec.RoleProbe = instanceConvert.RoleProbe
- r.Spec.UpdateStrategy = instanceConvert.UpdateStrategy
return nil
}
type instanceSetConverter struct {
- RoleProbe *RoleProbe `json:"roleProbe,omitempty"`
- UpdateStrategy *InstanceUpdateStrategy `json:"updateStrategy,omitempty"`
+ RoleProbe *RoleProbe `json:"roleProbe,omitempty"`
+}
+
+func (r *InstanceSet) changesToInstanceSet(its *workloadsv1.InstanceSet) {
+ // changed:
+ // spec
+ // podUpdatePolicy -> updateStrategy.instanceUpdatePolicy
+ // memberUpdateStrategy -> updateStrategy.rollingUpdate.updateConcurrency
+ // updateStrategy.partition -> updateStrategy.rollingUpdate.replicas
+ // updateStrategy.maxUnavailable -> updateStrategy.rollingUpdate.maxUnavailable
+ // updateStrategy.memberUpdateStrategy -> updateStrategy.rollingUpdate.updateConcurrency
+ if its.Spec.UpdateStrategy == nil {
+ its.Spec.UpdateStrategy = &workloadsv1.UpdateStrategy{}
+ }
+ its.Spec.UpdateStrategy.InstanceUpdatePolicy = (*workloadsv1.InstanceUpdatePolicyType)(&r.Spec.PodUpdatePolicy)
+ initRollingUpdate := func() {
+ if its.Spec.UpdateStrategy.RollingUpdate == nil {
+ its.Spec.UpdateStrategy.RollingUpdate = &workloadsv1.RollingUpdate{}
+ }
+ }
+ setUpdateConcurrency := func(strategy *MemberUpdateStrategy) {
+ if strategy == nil {
+ return
+ }
+ initRollingUpdate()
+ its.Spec.UpdateStrategy.RollingUpdate.UpdateConcurrency = (*workloadsv1.UpdateConcurrency)(strategy)
+ }
+ setUpdateConcurrency(r.Spec.MemberUpdateStrategy)
+ if r.Spec.UpdateStrategy != nil {
+ setUpdateConcurrency(r.Spec.UpdateStrategy.MemberUpdateStrategy)
+ if r.Spec.UpdateStrategy.Partition != nil {
+ initRollingUpdate()
+ replicas := intstr.FromInt32(*r.Spec.UpdateStrategy.Partition)
+ its.Spec.UpdateStrategy.RollingUpdate.Replicas = &replicas
+ }
+ if r.Spec.UpdateStrategy.MaxUnavailable != nil {
+ initRollingUpdate()
+ its.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable = r.Spec.UpdateStrategy.MaxUnavailable
+ }
+ }
+}
+
+func (r *InstanceSet) changesFromInstanceSet(its *workloadsv1.InstanceSet) {
+ // changed:
+ // spec
+ // podUpdatePolicy -> updateStrategy.instanceUpdatePolicy
+ // memberUpdateStrategy -> updateStrategy.rollingUpdate.updateConcurrency
+ // updateStrategy.partition -> updateStrategy.rollingUpdate.replicas
+ // updateStrategy.maxUnavailable -> updateStrategy.rollingUpdate.maxUnavailable
+ // updateStrategy.memberUpdateStrategy -> updateStrategy.rollingUpdate.updateConcurrency
+ if its.Spec.UpdateStrategy == nil {
+ return
+ }
+ if its.Spec.UpdateStrategy.InstanceUpdatePolicy != nil {
+ r.Spec.PodUpdatePolicy = PodUpdatePolicyType(*its.Spec.UpdateStrategy.InstanceUpdatePolicy)
+ }
+ if its.Spec.UpdateStrategy.RollingUpdate == nil {
+ return
+ }
+ if r.Spec.UpdateStrategy == nil {
+ r.Spec.UpdateStrategy = &InstanceUpdateStrategy{}
+ }
+ if its.Spec.UpdateStrategy.RollingUpdate.UpdateConcurrency != nil {
+ r.Spec.MemberUpdateStrategy = (*MemberUpdateStrategy)(its.Spec.UpdateStrategy.RollingUpdate.UpdateConcurrency)
+ r.Spec.UpdateStrategy.MemberUpdateStrategy = r.Spec.MemberUpdateStrategy
+ }
+ if its.Spec.UpdateStrategy.RollingUpdate.Replicas != nil {
+ partition, _ := intstr.GetScaledValueFromIntOrPercent(its.Spec.UpdateStrategy.RollingUpdate.Replicas, int(*its.Spec.Replicas), false)
+ r.Spec.UpdateStrategy.Partition = pointer.Int32(int32(partition))
+ }
+ if its.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable != nil {
+ r.Spec.UpdateStrategy.MaxUnavailable = its.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable
+ }
}
diff --git a/config/crd/bases/apps.kubeblocks.io_clusters.yaml b/config/crd/bases/apps.kubeblocks.io_clusters.yaml
index 7ac44eebc41..11d5890a4d5 100644
--- a/config/crd/bases/apps.kubeblocks.io_clusters.yaml
+++ b/config/crd/bases/apps.kubeblocks.io_clusters.yaml
@@ -3832,20 +3832,6 @@ spec:
or when scaling down. It only used when `PodManagementPolicy` is set to `Parallel`.
The default Concurrency is 100%.
x-kubernetes-int-or-string: true
- podUpdatePolicy:
- description: |-
- PodUpdatePolicy indicates how pods should be updated
-
-
- - `StrictInPlace` indicates that only allows in-place upgrades.
- Any attempt to modify other fields will be rejected.
- - `PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod.
- If that fails, it will fall back to the ReCreate, where pod will be recreated.
- Default value is "PreferInPlace"
- enum:
- - StrictInPlace
- - PreferInPlace
- type: string
replicas:
default: 1
description: Specifies the desired number of replicas in the
@@ -5402,6 +5388,87 @@ spec:
If TLS is enabled, the Component may require additional configuration, such as specifying TLS certificates and keys,
to properly set up the secure communication channel.
type: boolean
+ updateStrategy:
+ description: Provides fine-grained control over the spec update
+ process of all instances.
+ properties:
+ instanceUpdatePolicy:
+ description: |-
+ Indicates how instances should be updated.
+
+
+                        - `StrictInPlace` indicates that only in-place updates are allowed.
+                        Any attempt to modify fields that do not support in-place update will be rejected.
+                        - `PreferInPlace` indicates that we will first attempt an in-place update of the instance.
+                        If that fails, it will fall back to ReCreate, where the instance will be recreated.
+                        Default value is "PreferInPlace".
+ enum:
+ - StrictInPlace
+ - PreferInPlace
+ type: string
+ rollingUpdate:
+ description: Specifies how the rolling update should be
+ applied.
+ properties:
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of instances that can be unavailable during the update.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ Absolute number is calculated from percentage by rounding up. This can not be 0.
+                            Defaults to 1. The field applies to all instances. That means if there is any unavailable instance,
+ it will be counted towards MaxUnavailable.
+ x-kubernetes-int-or-string: true
+ replicas:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Indicates the number of instances that should be updated during a rolling update.
+ The remaining instances will remain untouched. This is helpful in defining how many instances
+ should participate in the update process.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ Absolute number is calculated from percentage by rounding up.
+ The default value is ComponentSpec.Replicas (i.e., update all instances).
+ x-kubernetes-int-or-string: true
+ updateConcurrency:
+ allOf:
+ - enum:
+ - Serial
+ - BestEffortParallel
+ - Parallel
+ - enum:
+ - Serial
+ - Parallel
+ - BestEffortParallel
+ default: Serial
+ description: "Specifies the concurrency level for updating
+ instances during a rolling update.\nAvailable levels:\n\n\n-
+ `Serial`: Updates instances one at a time, ensuring
+ minimal downtime by waiting for each instance to become
+ ready\n before updating the next.\n- `Parallel`:
+ Updates all instances simultaneously, optimizing for
+ speed but potentially reducing availability\n during
+ the update.\n- `BestEffortParallel`: Updates instances
+ concurrently with a limit on simultaneous updates
+ to ensure a minimum\n number of operational replicas
+ for maintaining quorum.\n\t For example, in a 5-instances
+ setup, updating a maximum of 2 instances simultaneously
+ keeps\n\t at least 3 operational for quorum.\n\n\nDefaults
+ to 'Serial'."
+ type: string
+ type: object
+ type:
+ description: |-
+ Indicates the type of the UpdateStrategy.
+ Default is RollingUpdate.
+ enum:
+ - RollingUpdate
+ - OnDelete
+ type: string
+ type: object
volumeClaimTemplates:
description: |-
Specifies a list of PersistentVolumeClaim templates that represent the storage requirements for the Component.
@@ -12571,20 +12638,6 @@ spec:
or when scaling down. It only used when `PodManagementPolicy` is set to `Parallel`.
The default Concurrency is 100%.
x-kubernetes-int-or-string: true
- podUpdatePolicy:
- description: |-
- PodUpdatePolicy indicates how pods should be updated
-
-
- - `StrictInPlace` indicates that only allows in-place upgrades.
- Any attempt to modify other fields will be rejected.
- - `PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod.
- If that fails, it will fall back to the ReCreate, where pod will be recreated.
- Default value is "PreferInPlace"
- enum:
- - StrictInPlace
- - PreferInPlace
- type: string
replicas:
default: 1
description: Specifies the desired number of replicas in
@@ -14153,6 +14206,88 @@ spec:
If TLS is enabled, the Component may require additional configuration, such as specifying TLS certificates and keys,
to properly set up the secure communication channel.
type: boolean
+ updateStrategy:
+ description: Provides fine-grained control over the spec
+ update process of all instances.
+ properties:
+ instanceUpdatePolicy:
+ description: |-
+ Indicates how instances should be updated.
+
+
+                            - `StrictInPlace` indicates that only in-place updates are allowed.
+                            Any attempt to modify fields that do not support in-place update will be rejected.
+                            - `PreferInPlace` indicates that we will first attempt an in-place update of the instance.
+                            If that fails, it will fall back to ReCreate, where the instance will be recreated.
+                            Default value is "PreferInPlace".
+ enum:
+ - StrictInPlace
+ - PreferInPlace
+ type: string
+ rollingUpdate:
+ description: Specifies how the rolling update should
+ be applied.
+ properties:
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of instances that can be unavailable during the update.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ Absolute number is calculated from percentage by rounding up. This can not be 0.
+                                Defaults to 1. The field applies to all instances. That means if there is any unavailable instance,
+ it will be counted towards MaxUnavailable.
+ x-kubernetes-int-or-string: true
+ replicas:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Indicates the number of instances that should be updated during a rolling update.
+ The remaining instances will remain untouched. This is helpful in defining how many instances
+ should participate in the update process.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ Absolute number is calculated from percentage by rounding up.
+ The default value is ComponentSpec.Replicas (i.e., update all instances).
+ x-kubernetes-int-or-string: true
+ updateConcurrency:
+ allOf:
+ - enum:
+ - Serial
+ - BestEffortParallel
+ - Parallel
+ - enum:
+ - Serial
+ - Parallel
+ - BestEffortParallel
+ default: Serial
+ description: "Specifies the concurrency level for
+ updating instances during a rolling update.\nAvailable
+ levels:\n\n\n- `Serial`: Updates instances one
+ at a time, ensuring minimal downtime by waiting
+ for each instance to become ready\n before updating
+ the next.\n- `Parallel`: Updates all instances
+ simultaneously, optimizing for speed but potentially
+ reducing availability\n during the update.\n-
+ `BestEffortParallel`: Updates instances concurrently
+ with a limit on simultaneous updates to ensure
+ a minimum\n number of operational replicas for
+ maintaining quorum.\n\t For example, in a 5-instances
+ setup, updating a maximum of 2 instances simultaneously
+ keeps\n\t at least 3 operational for quorum.\n\n\nDefaults
+ to 'Serial'."
+ type: string
+ type: object
+ type:
+ description: |-
+ Indicates the type of the UpdateStrategy.
+ Default is RollingUpdate.
+ enum:
+ - RollingUpdate
+ - OnDelete
+ type: string
+ type: object
volumeClaimTemplates:
description: |-
Specifies a list of PersistentVolumeClaim templates that represent the storage requirements for the Component.
diff --git a/config/crd/bases/apps.kubeblocks.io_componentdefinitions.yaml b/config/crd/bases/apps.kubeblocks.io_componentdefinitions.yaml
index fe6bec4e932..c3f1e82690f 100644
--- a/config/crd/bases/apps.kubeblocks.io_componentdefinitions.yaml
+++ b/config/crd/bases/apps.kubeblocks.io_componentdefinitions.yaml
@@ -16811,24 +16811,28 @@ spec:
- mountPath
- volumeName
type: object
- updateStrategy:
+ updateConcurrency:
+ allOf:
+ - enum:
+ - Serial
+ - BestEffortParallel
+ - Parallel
+ - enum:
+ - Serial
+ - Parallel
+ - BestEffortParallel
default: Serial
- description: "Specifies the concurrency strategy for updating multiple
- instances of the Component.\nAvailable strategies:\n\n\n- `Serial`:
- Updates replicas one at a time, ensuring minimal downtime by waiting
- for each replica to become ready\n before updating the next.\n-
- `Parallel`: Updates all replicas simultaneously, optimizing for
- speed but potentially reducing availability\n during the update.\n-
- `BestEffortParallel`: Updates replicas concurrently with a limit
- on simultaneous updates to ensure a minimum\n number of operational
- replicas for maintaining quorum.\n\t For example, in a 5-replica
- component, updating a maximum of 2 replicas simultaneously keeps\n\t
- at least 3 operational for quorum.\n\n\nThis field is immutable
- and defaults to 'Serial'."
- enum:
- - Serial
- - BestEffortParallel
- - Parallel
+ description: "Specifies the concurrency level for updating instances
+ during a rolling update.\nAvailable levels:\n\n\n- `Serial`: Updates
+ instances one at a time, ensuring minimal downtime by waiting for
+ each instance to become ready\n before updating the next.\n- `Parallel`:
+ Updates all instances simultaneously, optimizing for speed but potentially
+ reducing availability\n during the update.\n- `BestEffortParallel`:
+ Updates instances concurrently with a limit on simultaneous updates
+ to ensure a minimum\n number of operational replicas for maintaining
+ quorum.\n\t For example, in a 5-instances setup, updating a maximum
+ of 2 instances simultaneously keeps\n\t at least 3 operational for
+ quorum.\n\n\nDefaults to 'Serial'."
type: string
vars:
description: |-
diff --git a/config/crd/bases/apps.kubeblocks.io_components.yaml b/config/crd/bases/apps.kubeblocks.io_components.yaml
index b2a53efd985..d6596a7185a 100644
--- a/config/crd/bases/apps.kubeblocks.io_components.yaml
+++ b/config/crd/bases/apps.kubeblocks.io_components.yaml
@@ -3603,17 +3603,6 @@ spec:
or when scaling down. It only used when `PodManagementPolicy` is set to `Parallel`.
The default Concurrency is 100%.
x-kubernetes-int-or-string: true
- podUpdatePolicy:
- description: |-
- PodUpdatePolicy indicates how pods should be updated
-
-
- - `StrictInPlace` indicates that only allows in-place upgrades.
- Any attempt to modify other fields will be rejected.
- - `PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod.
- If that fails, it will fall back to the ReCreate, where pod will be recreated.
- Default value is "PreferInPlace"
- type: string
replicas:
default: 1
description: Specifies the desired number of replicas in the Component
@@ -5653,6 +5642,85 @@ spec:
- name
type: object
type: object
+ updateStrategy:
+ description: Provides fine-grained control over the spec update process
+ of all instances.
+ properties:
+ instanceUpdatePolicy:
+ description: |-
+ Indicates how instances should be updated.
+
+
+                  - `StrictInPlace` indicates that only in-place updates are allowed.
+                  Any attempt to modify fields that do not support in-place update will be rejected.
+                  - `PreferInPlace` indicates that we will first attempt an in-place update of the instance.
+                  If that fails, it will fall back to ReCreate, where the instance will be recreated.
+                  Default value is "PreferInPlace".
+ enum:
+ - StrictInPlace
+ - PreferInPlace
+ type: string
+ rollingUpdate:
+ description: Specifies how the rolling update should be applied.
+ properties:
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of instances that can be unavailable during the update.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ Absolute number is calculated from percentage by rounding up. This can not be 0.
+                      Defaults to 1. The field applies to all instances. That means if there is any unavailable instance,
+ it will be counted towards MaxUnavailable.
+ x-kubernetes-int-or-string: true
+ replicas:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Indicates the number of instances that should be updated during a rolling update.
+ The remaining instances will remain untouched. This is helpful in defining how many instances
+ should participate in the update process.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ Absolute number is calculated from percentage by rounding up.
+ The default value is ComponentSpec.Replicas (i.e., update all instances).
+ x-kubernetes-int-or-string: true
+ updateConcurrency:
+ allOf:
+ - enum:
+ - Serial
+ - BestEffortParallel
+ - Parallel
+ - enum:
+ - Serial
+ - Parallel
+ - BestEffortParallel
+ default: Serial
+ description: "Specifies the concurrency level for updating
+ instances during a rolling update.\nAvailable levels:\n\n\n-
+ `Serial`: Updates instances one at a time, ensuring minimal
+ downtime by waiting for each instance to become ready\n
+ \ before updating the next.\n- `Parallel`: Updates all instances
+ simultaneously, optimizing for speed but potentially reducing
+ availability\n during the update.\n- `BestEffortParallel`:
+ Updates instances concurrently with a limit on simultaneous
+ updates to ensure a minimum\n number of operational replicas
+ for maintaining quorum.\n\t For example, in a 5-instances
+ setup, updating a maximum of 2 instances simultaneously
+ keeps\n\t at least 3 operational for quorum.\n\n\nDefaults
+ to 'Serial'."
+ type: string
+ type: object
+ type:
+ description: |-
+ Indicates the type of the UpdateStrategy.
+ Default is RollingUpdate.
+ enum:
+ - RollingUpdate
+ - OnDelete
+ type: string
+ type: object
volumeClaimTemplates:
description: |-
Specifies a list of PersistentVolumeClaim templates that define the storage requirements for the Component.
diff --git a/config/crd/bases/workloads.kubeblocks.io_instancesets.yaml b/config/crd/bases/workloads.kubeblocks.io_instancesets.yaml
index d20170e3eca..fb0564b5807 100644
--- a/config/crd/bases/workloads.kubeblocks.io_instancesets.yaml
+++ b/config/crd/bases/workloads.kubeblocks.io_instancesets.yaml
@@ -3592,19 +3592,6 @@ spec:
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
- memberUpdateStrategy:
- description: |-
- Members(Pods) update strategy.
-
-
- - serial: update Members one by one that guarantee minimum component unavailable time.
- - bestEffortParallel: update Members in parallel that guarantee minimum component un-writable time.
- - parallel: force parallel
- enum:
- - Serial
- - BestEffortParallel
- - Parallel
- type: string
membershipReconfiguration:
description: Provides actions to do membership dynamic reconfiguration.
properties:
@@ -4074,17 +4061,6 @@ spec:
Note: This field will be removed in future version.
type: string
- podUpdatePolicy:
- description: |-
- PodUpdatePolicy indicates how pods should be updated
-
-
- - `StrictInPlace` indicates that only allows in-place upgrades.
- Any attempt to modify other fields will be rejected.
- - `PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod.
- If that fails, it will fall back to the ReCreate, where pod will be recreated.
- Default value is "PreferInPlace"
- type: string
replicas:
default: 1
description: |-
@@ -11690,45 +11666,82 @@ spec:
description: Provides variables which are used to call Actions.
type: object
updateStrategy:
- description: |-
- Indicates the StatefulSetUpdateStrategy that will be
- employed to update Pods in the InstanceSet when a revision is made to
- Template.
- UpdateStrategy.Type will be set to appsv1.OnDeleteStatefulSetStrategyType if MemberUpdateStrategy is not nil
+ description: Provides fine-grained control over the spec update process
+ of all instances.
+ properties:
+ instanceUpdatePolicy:
+ description: |-
+ Indicates how instances should be updated.
- Note: This field will be removed in future version.
- properties:
+ - `StrictInPlace` indicates that only allows in-place update.
+ Any attempt to modify other fields that do not support in-place update will be rejected.
+ - `PreferInPlace` indicates that we will first attempt an in-place update of the instance.
+ If that fails, it will fall back to the ReCreate, where instance will be recreated.
+ Default value is "PreferInPlace".
+ enum:
+ - StrictInPlace
+ - PreferInPlace
+ type: string
rollingUpdate:
- description: RollingUpdate is used to communicate parameters when
- Type is RollingUpdateStatefulSetStrategyType.
+ description: Specifies how the rolling update should be applied.
properties:
maxUnavailable:
anyOf:
- type: integer
- type: string
description: |-
- The maximum number of pods that can be unavailable during the update.
- Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ The maximum number of instances that can be unavailable during the update.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
Absolute number is calculated from percentage by rounding up. This can not be 0.
- Defaults to 1. This field is alpha-level and is only honored by servers that enable the
- MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to
- Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
- will be counted towards MaxUnavailable.
+ Defaults to 1. The field applies to all instances. That means if there is any unavailable instance,
+ it will be counted towards MaxUnavailable.
x-kubernetes-int-or-string: true
- partition:
+ replicas:
+ anyOf:
+ - type: integer
+ - type: string
description: |-
- Partition indicates the ordinal at which the StatefulSet should be partitioned
- for updates. During a rolling update, all pods from ordinal Replicas-1 to
- Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched.
- This is helpful in being able to do a canary based deployment. The default value is 0.
- format: int32
- type: integer
+ Indicates the number of instances that should be updated during a rolling update.
+ The remaining instances will remain untouched. This is helpful in defining how many instances
+ should participate in the update process.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ Absolute number is calculated from percentage by rounding up.
+ The default value is ComponentSpec.Replicas (i.e., update all instances).
+ x-kubernetes-int-or-string: true
+ updateConcurrency:
+ allOf:
+ - enum:
+ - Serial
+ - BestEffortParallel
+ - Parallel
+ - enum:
+ - Serial
+ - Parallel
+ - BestEffortParallel
+ default: Serial
+ description: "Specifies the concurrency level for updating
+ instances during a rolling update.\nAvailable levels:\n\n\n-
+ `Serial`: Updates instances one at a time, ensuring minimal
+ downtime by waiting for each instance to become ready\n
+ \ before updating the next.\n- `Parallel`: Updates all instances
+ simultaneously, optimizing for speed but potentially reducing
+ availability\n during the update.\n- `BestEffortParallel`:
+ Updates instances concurrently with a limit on simultaneous
+ updates to ensure a minimum\n number of operational replicas
+ for maintaining quorum.\n\t For example, in a 5-instances
+ setup, updating a maximum of 2 instances simultaneously
+ keeps\n\t at least 3 operational for quorum.\n\n\nDefaults
+ to 'Serial'."
+ type: string
type: object
type:
description: |-
- Type indicates the type of the StatefulSetUpdateStrategy.
+ Indicates the type of the UpdateStrategy.
Default is RollingUpdate.
+ enum:
+ - RollingUpdate
+ - OnDelete
type: string
type: object
volumeClaimTemplates:
diff --git a/controllers/apps/cluster/transformer_cluster_component.go b/controllers/apps/cluster/transformer_cluster_component.go
index 9912135f803..34dedfaae5a 100644
--- a/controllers/apps/cluster/transformer_cluster_component.go
+++ b/controllers/apps/cluster/transformer_cluster_component.go
@@ -200,7 +200,7 @@ func copyAndMergeComponent(oldCompObj, newCompObj *appsv1.Component) *appsv1.Com
compObjCopy.Spec.Configs = compProto.Spec.Configs
compObjCopy.Spec.ServiceAccountName = compProto.Spec.ServiceAccountName
compObjCopy.Spec.ParallelPodManagementConcurrency = compProto.Spec.ParallelPodManagementConcurrency
- compObjCopy.Spec.PodUpdatePolicy = compProto.Spec.PodUpdatePolicy
+ compObjCopy.Spec.UpdateStrategy = compProto.Spec.UpdateStrategy
compObjCopy.Spec.SchedulingPolicy = compProto.Spec.SchedulingPolicy
compObjCopy.Spec.TLSConfig = compProto.Spec.TLSConfig
compObjCopy.Spec.Instances = compProto.Spec.Instances
diff --git a/controllers/apps/component/transformer_component_workload.go b/controllers/apps/component/transformer_component_workload.go
index 5b46e26cdf5..a4ccfeff738 100644
--- a/controllers/apps/component/transformer_component_workload.go
+++ b/controllers/apps/component/transformer_component_workload.go
@@ -452,16 +452,15 @@ func copyAndMergeITS(oldITS, newITS *workloads.InstanceSet) *workloads.InstanceS
itsObjCopy.Spec.Roles = itsProto.Spec.Roles
itsObjCopy.Spec.MembershipReconfiguration = itsProto.Spec.MembershipReconfiguration
itsObjCopy.Spec.TemplateVars = itsProto.Spec.TemplateVars
- itsObjCopy.Spec.MemberUpdateStrategy = itsProto.Spec.MemberUpdateStrategy
itsObjCopy.Spec.Credential = itsProto.Spec.Credential
itsObjCopy.Spec.Instances = itsProto.Spec.Instances
itsObjCopy.Spec.OfflineInstances = itsProto.Spec.OfflineInstances
itsObjCopy.Spec.MinReadySeconds = itsProto.Spec.MinReadySeconds
itsObjCopy.Spec.VolumeClaimTemplates = itsProto.Spec.VolumeClaimTemplates
itsObjCopy.Spec.ParallelPodManagementConcurrency = itsProto.Spec.ParallelPodManagementConcurrency
- itsObjCopy.Spec.PodUpdatePolicy = itsProto.Spec.PodUpdatePolicy
+ itsObjCopy.Spec.UpdateStrategy = itsProto.Spec.UpdateStrategy
- if itsProto.Spec.UpdateStrategy.Type != "" || itsProto.Spec.UpdateStrategy.RollingUpdate != nil {
+ if itsProto.Spec.UpdateStrategy != nil {
updateUpdateStrategy(itsObjCopy, itsProto)
}
diff --git a/controllers/apps/componentdefinition_controller_test.go b/controllers/apps/componentdefinition_controller_test.go
index dd7ef1f38df..e583a679fa1 100644
--- a/controllers/apps/componentdefinition_controller_test.go
+++ b/controllers/apps/componentdefinition_controller_test.go
@@ -493,7 +493,7 @@ var _ = Describe("ComponentDefinition Controller", func() {
Image: "image:v0.0.1",
Command: []string{"command"},
}).
- SetUpdateStrategy(nil).
+ SetUpdateConcurrency(nil).
SetPodManagementPolicy(nil)
if processor != nil {
processor(builder)
@@ -542,8 +542,8 @@ var _ = Describe("ComponentDefinition Controller", func() {
Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(componentDefObj), func(cmpd *kbappsv1.ComponentDefinition) {
cmpd.Spec.Description = "v0.0.2"
cmpd.Spec.Runtime.Containers[0].Image = "image:v0.0.2"
- parallel := kbappsv1.ParallelStrategy
- cmpd.Spec.UpdateStrategy = ¶llel
+ parallel := kbappsv1.ParallelConcurrency
+ cmpd.Spec.UpdateConcurrency = ¶llel
})()).Should(Succeed())
By(fmt.Sprintf("checking the updated object as %s", strings.ToLower(string(kbappsv1.AvailablePhase))))
@@ -558,8 +558,8 @@ var _ = Describe("ComponentDefinition Controller", func() {
Command: []string{"command"},
}
g.Expect(cmpd.Spec.Runtime.Containers[0]).Should(BeEquivalentTo(c))
- g.Expect(cmpd.Spec.UpdateStrategy).ShouldNot(BeNil())
- g.Expect(*cmpd.Spec.UpdateStrategy).Should(Equal(kbappsv1.ParallelStrategy))
+ g.Expect(cmpd.Spec.UpdateConcurrency).ShouldNot(BeNil())
+ g.Expect(*cmpd.Spec.UpdateConcurrency).Should(Equal(kbappsv1.ParallelConcurrency))
})).Should(Succeed())
})
@@ -570,8 +570,8 @@ var _ = Describe("ComponentDefinition Controller", func() {
Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(componentDefObj), func(cmpd *kbappsv1.ComponentDefinition) {
cmpd.Spec.Description = "v0.0.2"
cmpd.Spec.Runtime.Containers[0].Image = "image:v0.0.2"
- parallel := kbappsv1.ParallelStrategy
- cmpd.Spec.UpdateStrategy = ¶llel
+ parallel := kbappsv1.ParallelConcurrency
+ cmpd.Spec.UpdateConcurrency = ¶llel
})()).Should(Succeed())
By(fmt.Sprintf("checking the updated object as %s", strings.ToLower(string(kbappsv1.UnavailablePhase))))
@@ -586,14 +586,14 @@ var _ = Describe("ComponentDefinition Controller", func() {
Command: []string{"command"},
}
g.Expect(cmpd.Spec.Runtime.Containers[0]).Should(BeEquivalentTo(c))
- g.Expect(cmpd.Spec.UpdateStrategy).ShouldNot(BeNil())
- g.Expect(*cmpd.Spec.UpdateStrategy).Should(Equal(kbappsv1.ParallelStrategy))
+ g.Expect(cmpd.Spec.UpdateConcurrency).ShouldNot(BeNil())
+ g.Expect(*cmpd.Spec.UpdateConcurrency).Should(Equal(kbappsv1.ParallelConcurrency))
})).Should(Succeed())
By("revert the change to immutable fields back")
Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(componentDefObj), func(cmpd *kbappsv1.ComponentDefinition) {
cmpd.Spec.Runtime.Containers[0].Image = "image:v0.0.1"
- cmpd.Spec.UpdateStrategy = nil
+ cmpd.Spec.UpdateConcurrency = nil
})()).Should(Succeed())
By(fmt.Sprintf("checking the updated object as %s", strings.ToLower(string(kbappsv1.AvailablePhase))))
@@ -608,8 +608,8 @@ var _ = Describe("ComponentDefinition Controller", func() {
Command: []string{"command"},
}
g.Expect(cmpd.Spec.Runtime.Containers[0]).Should(BeEquivalentTo(c))
- g.Expect(cmpd.Spec.UpdateStrategy).ShouldNot(BeNil())
- g.Expect(*cmpd.Spec.UpdateStrategy).Should(Equal(kbappsv1.SerialStrategy))
+ g.Expect(cmpd.Spec.UpdateConcurrency).ShouldNot(BeNil())
+ g.Expect(*cmpd.Spec.UpdateConcurrency).Should(Equal(kbappsv1.SerialConcurrency))
})).Should(Succeed())
})
})
diff --git a/controllers/apps/shardingdefinition_controller.go b/controllers/apps/shardingdefinition_controller.go
index f504d04c5fc..f592e2a4089 100644
--- a/controllers/apps/shardingdefinition_controller.go
+++ b/controllers/apps/shardingdefinition_controller.go
@@ -190,11 +190,11 @@ func (r *ShardingDefinitionReconciler) validateShardsLimit(ctx context.Context,
func (r *ShardingDefinitionReconciler) validateProvisionNUpdateStrategy(ctx context.Context, cli client.Client,
shardingDef *appsv1.ShardingDefinition) error {
- supported := func(strategy *appsv1.UpdateStrategy) bool {
+ supported := func(strategy *appsv1.UpdateConcurrency) bool {
if strategy == nil {
return true
}
- return *strategy == appsv1.SerialStrategy || *strategy == appsv1.ParallelStrategy
+ return *strategy == appsv1.SerialConcurrency || *strategy == appsv1.ParallelConcurrency
}
if !supported(shardingDef.Spec.ProvisionStrategy) {
return fmt.Errorf("unsupported provision strategy: %s", *shardingDef.Spec.ProvisionStrategy)
diff --git a/controllers/apps/shardingdefinition_controller_test.go b/controllers/apps/shardingdefinition_controller_test.go
index ee8aff37eba..e2fbd84e831 100644
--- a/controllers/apps/shardingdefinition_controller_test.go
+++ b/controllers/apps/shardingdefinition_controller_test.go
@@ -115,8 +115,8 @@ var _ = Describe("ShardingDefinition Controller", func() {
It("ok", func() {
By("create a ShardingDefinition obj")
shardingDefObj := testapps.NewShardingDefinitionFactory(shardingDefName, compDefObj.GetName()).
- SetProvisionStrategy(appsv1.SerialStrategy).
- SetUpdateStrategy(appsv1.ParallelStrategy).
+ SetProvisionStrategy(appsv1.SerialConcurrency).
+ SetUpdateStrategy(appsv1.ParallelConcurrency).
Create(&testCtx).GetObject()
checkObjectStatus(shardingDefObj, appsv1.AvailablePhase)
@@ -125,8 +125,8 @@ var _ = Describe("ShardingDefinition Controller", func() {
It("unsupported strategy", func() {
By("create a ShardingDefinition obj")
shardingDefObj := testapps.NewShardingDefinitionFactory(shardingDefName, compDefObj.GetName()).
- SetProvisionStrategy(appsv1.BestEffortParallelStrategy).
- SetUpdateStrategy(appsv1.BestEffortParallelStrategy).
+ SetProvisionStrategy(appsv1.BestEffortParallelConcurrency).
+ SetUpdateStrategy(appsv1.BestEffortParallelConcurrency).
Create(&testCtx).GetObject()
checkObjectStatus(shardingDefObj, appsv1.UnavailablePhase)
diff --git a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml
index 7ac44eebc41..11d5890a4d5 100644
--- a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml
+++ b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml
@@ -3832,20 +3832,6 @@ spec:
or when scaling down. It only used when `PodManagementPolicy` is set to `Parallel`.
The default Concurrency is 100%.
x-kubernetes-int-or-string: true
- podUpdatePolicy:
- description: |-
- PodUpdatePolicy indicates how pods should be updated
-
-
- - `StrictInPlace` indicates that only allows in-place upgrades.
- Any attempt to modify other fields will be rejected.
- - `PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod.
- If that fails, it will fall back to the ReCreate, where pod will be recreated.
- Default value is "PreferInPlace"
- enum:
- - StrictInPlace
- - PreferInPlace
- type: string
replicas:
default: 1
description: Specifies the desired number of replicas in the
@@ -5402,6 +5388,87 @@ spec:
If TLS is enabled, the Component may require additional configuration, such as specifying TLS certificates and keys,
to properly set up the secure communication channel.
type: boolean
+ updateStrategy:
+ description: Provides fine-grained control over the spec update
+ process of all instances.
+ properties:
+ instanceUpdatePolicy:
+ description: |-
+ Indicates how instances should be updated.
+
+
+ - `StrictInPlace` indicates that only allows in-place update.
+ Any attempt to modify other fields that do not support in-place update will be rejected.
+ - `PreferInPlace` indicates that we will first attempt an in-place update of the instance.
+ If that fails, it will fall back to the ReCreate, where instance will be recreated.
+ Default value is "PreferInPlace".
+ enum:
+ - StrictInPlace
+ - PreferInPlace
+ type: string
+ rollingUpdate:
+ description: Specifies how the rolling update should be
+ applied.
+ properties:
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of instances that can be unavailable during the update.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ Absolute number is calculated from percentage by rounding up. This can not be 0.
+ Defaults to 1. The field applies to all instances. That means if there is any unavailable instance,
+ it will be counted towards MaxUnavailable.
+ x-kubernetes-int-or-string: true
+ replicas:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Indicates the number of instances that should be updated during a rolling update.
+ The remaining instances will remain untouched. This is helpful in defining how many instances
+ should participate in the update process.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ Absolute number is calculated from percentage by rounding up.
+ The default value is ComponentSpec.Replicas (i.e., update all instances).
+ x-kubernetes-int-or-string: true
+ updateConcurrency:
+ allOf:
+ - enum:
+ - Serial
+ - BestEffortParallel
+ - Parallel
+ - enum:
+ - Serial
+ - Parallel
+ - BestEffortParallel
+ default: Serial
+ description: "Specifies the concurrency level for updating
+ instances during a rolling update.\nAvailable levels:\n\n\n-
+ `Serial`: Updates instances one at a time, ensuring
+ minimal downtime by waiting for each instance to become
+ ready\n before updating the next.\n- `Parallel`:
+ Updates all instances simultaneously, optimizing for
+ speed but potentially reducing availability\n during
+ the update.\n- `BestEffortParallel`: Updates instances
+ concurrently with a limit on simultaneous updates
+ to ensure a minimum\n number of operational replicas
+ for maintaining quorum.\n\t For example, in a 5-instances
+ setup, updating a maximum of 2 instances simultaneously
+ keeps\n\t at least 3 operational for quorum.\n\n\nDefaults
+ to 'Serial'."
+ type: string
+ type: object
+ type:
+ description: |-
+ Indicates the type of the UpdateStrategy.
+ Default is RollingUpdate.
+ enum:
+ - RollingUpdate
+ - OnDelete
+ type: string
+ type: object
volumeClaimTemplates:
description: |-
Specifies a list of PersistentVolumeClaim templates that represent the storage requirements for the Component.
@@ -12571,20 +12638,6 @@ spec:
or when scaling down. It only used when `PodManagementPolicy` is set to `Parallel`.
The default Concurrency is 100%.
x-kubernetes-int-or-string: true
- podUpdatePolicy:
- description: |-
- PodUpdatePolicy indicates how pods should be updated
-
-
- - `StrictInPlace` indicates that only allows in-place upgrades.
- Any attempt to modify other fields will be rejected.
- - `PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod.
- If that fails, it will fall back to the ReCreate, where pod will be recreated.
- Default value is "PreferInPlace"
- enum:
- - StrictInPlace
- - PreferInPlace
- type: string
replicas:
default: 1
description: Specifies the desired number of replicas in
@@ -14153,6 +14206,88 @@ spec:
If TLS is enabled, the Component may require additional configuration, such as specifying TLS certificates and keys,
to properly set up the secure communication channel.
type: boolean
+ updateStrategy:
+ description: Provides fine-grained control over the spec
+ update process of all instances.
+ properties:
+ instanceUpdatePolicy:
+ description: |-
+ Indicates how instances should be updated.
+
+
+ - `StrictInPlace` indicates that only allows in-place update.
+ Any attempt to modify other fields that do not support in-place update will be rejected.
+ - `PreferInPlace` indicates that we will first attempt an in-place update of the instance.
+ If that fails, it will fall back to the ReCreate, where instance will be recreated.
+ Default value is "PreferInPlace".
+ enum:
+ - StrictInPlace
+ - PreferInPlace
+ type: string
+ rollingUpdate:
+ description: Specifies how the rolling update should
+ be applied.
+ properties:
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of instances that can be unavailable during the update.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ Absolute number is calculated from percentage by rounding up. This can not be 0.
+ Defaults to 1. The field applies to all instances. That means if there is any unavailable instance,
+ it will be counted towards MaxUnavailable.
+ x-kubernetes-int-or-string: true
+ replicas:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Indicates the number of instances that should be updated during a rolling update.
+ The remaining instances will remain untouched. This is helpful in defining how many instances
+ should participate in the update process.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ Absolute number is calculated from percentage by rounding up.
+ The default value is ComponentSpec.Replicas (i.e., update all instances).
+ x-kubernetes-int-or-string: true
+ updateConcurrency:
+ allOf:
+ - enum:
+ - Serial
+ - BestEffortParallel
+ - Parallel
+ - enum:
+ - Serial
+ - Parallel
+ - BestEffortParallel
+ default: Serial
+ description: "Specifies the concurrency level for
+ updating instances during a rolling update.\nAvailable
+ levels:\n\n\n- `Serial`: Updates instances one
+ at a time, ensuring minimal downtime by waiting
+ for each instance to become ready\n before updating
+ the next.\n- `Parallel`: Updates all instances
+ simultaneously, optimizing for speed but potentially
+ reducing availability\n during the update.\n-
+ `BestEffortParallel`: Updates instances concurrently
+ with a limit on simultaneous updates to ensure
+ a minimum\n number of operational replicas for
+ maintaining quorum.\n\t For example, in a 5-instances
+ setup, updating a maximum of 2 instances simultaneously
+ keeps\n\t at least 3 operational for quorum.\n\n\nDefaults
+ to 'Serial'."
+ type: string
+ type: object
+ type:
+ description: |-
+ Indicates the type of the UpdateStrategy.
+ Default is RollingUpdate.
+ enum:
+ - RollingUpdate
+ - OnDelete
+ type: string
+ type: object
volumeClaimTemplates:
description: |-
Specifies a list of PersistentVolumeClaim templates that represent the storage requirements for the Component.
diff --git a/deploy/helm/crds/apps.kubeblocks.io_componentdefinitions.yaml b/deploy/helm/crds/apps.kubeblocks.io_componentdefinitions.yaml
index fe6bec4e932..c3f1e82690f 100644
--- a/deploy/helm/crds/apps.kubeblocks.io_componentdefinitions.yaml
+++ b/deploy/helm/crds/apps.kubeblocks.io_componentdefinitions.yaml
@@ -16811,24 +16811,28 @@ spec:
- mountPath
- volumeName
type: object
- updateStrategy:
+ updateConcurrency:
+ allOf:
+ - enum:
+ - Serial
+ - BestEffortParallel
+ - Parallel
+ - enum:
+ - Serial
+ - Parallel
+ - BestEffortParallel
default: Serial
- description: "Specifies the concurrency strategy for updating multiple
- instances of the Component.\nAvailable strategies:\n\n\n- `Serial`:
- Updates replicas one at a time, ensuring minimal downtime by waiting
- for each replica to become ready\n before updating the next.\n-
- `Parallel`: Updates all replicas simultaneously, optimizing for
- speed but potentially reducing availability\n during the update.\n-
- `BestEffortParallel`: Updates replicas concurrently with a limit
- on simultaneous updates to ensure a minimum\n number of operational
- replicas for maintaining quorum.\n\t For example, in a 5-replica
- component, updating a maximum of 2 replicas simultaneously keeps\n\t
- at least 3 operational for quorum.\n\n\nThis field is immutable
- and defaults to 'Serial'."
- enum:
- - Serial
- - BestEffortParallel
- - Parallel
+ description: "Specifies the concurrency level for updating instances
+ during a rolling update.\nAvailable levels:\n\n\n- `Serial`: Updates
+ instances one at a time, ensuring minimal downtime by waiting for
+ each instance to become ready\n before updating the next.\n- `Parallel`:
+ Updates all instances simultaneously, optimizing for speed but potentially
+ reducing availability\n during the update.\n- `BestEffortParallel`:
+ Updates instances concurrently with a limit on simultaneous updates
+ to ensure a minimum\n number of operational replicas for maintaining
+ quorum.\n\t For example, in a 5-instances setup, updating a maximum
+ of 2 instances simultaneously keeps\n\t at least 3 operational for
+ quorum.\n\n\nDefaults to 'Serial'."
type: string
vars:
description: |-
diff --git a/deploy/helm/crds/apps.kubeblocks.io_components.yaml b/deploy/helm/crds/apps.kubeblocks.io_components.yaml
index b2a53efd985..d6596a7185a 100644
--- a/deploy/helm/crds/apps.kubeblocks.io_components.yaml
+++ b/deploy/helm/crds/apps.kubeblocks.io_components.yaml
@@ -3603,17 +3603,6 @@ spec:
or when scaling down. It only used when `PodManagementPolicy` is set to `Parallel`.
The default Concurrency is 100%.
x-kubernetes-int-or-string: true
- podUpdatePolicy:
- description: |-
- PodUpdatePolicy indicates how pods should be updated
-
-
- - `StrictInPlace` indicates that only allows in-place upgrades.
- Any attempt to modify other fields will be rejected.
- - `PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod.
- If that fails, it will fall back to the ReCreate, where pod will be recreated.
- Default value is "PreferInPlace"
- type: string
replicas:
default: 1
description: Specifies the desired number of replicas in the Component
@@ -5653,6 +5642,85 @@ spec:
- name
type: object
type: object
+ updateStrategy:
+ description: Provides fine-grained control over the spec update process
+ of all instances.
+ properties:
+ instanceUpdatePolicy:
+ description: |-
+ Indicates how instances should be updated.
+
+
+ - `StrictInPlace` indicates that only allows in-place update.
+ Any attempt to modify other fields that do not support in-place update will be rejected.
+ - `PreferInPlace` indicates that we will first attempt an in-place update of the instance.
+ If that fails, it will fall back to the ReCreate, where instance will be recreated.
+ Default value is "PreferInPlace".
+ enum:
+ - StrictInPlace
+ - PreferInPlace
+ type: string
+ rollingUpdate:
+ description: Specifies how the rolling update should be applied.
+ properties:
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of instances that can be unavailable during the update.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ Absolute number is calculated from percentage by rounding up. This can not be 0.
+ Defaults to 1. The field applies to all instances. That means if there is any unavailable instance,
+ it will be counted towards MaxUnavailable.
+ x-kubernetes-int-or-string: true
+ replicas:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Indicates the number of instances that should be updated during a rolling update.
+ The remaining instances will remain untouched. This is helpful in defining how many instances
+ should participate in the update process.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ Absolute number is calculated from percentage by rounding up.
+ The default value is ComponentSpec.Replicas (i.e., update all instances).
+ x-kubernetes-int-or-string: true
+ updateConcurrency:
+ allOf:
+ - enum:
+ - Serial
+ - BestEffortParallel
+ - Parallel
+ - enum:
+ - Serial
+ - Parallel
+ - BestEffortParallel
+ default: Serial
+ description: "Specifies the concurrency level for updating
+ instances during a rolling update.\nAvailable levels:\n\n\n-
+ `Serial`: Updates instances one at a time, ensuring minimal
+ downtime by waiting for each instance to become ready\n
+ \ before updating the next.\n- `Parallel`: Updates all instances
+ simultaneously, optimizing for speed but potentially reducing
+ availability\n during the update.\n- `BestEffortParallel`:
+ Updates instances concurrently with a limit on simultaneous
+ updates to ensure a minimum\n number of operational replicas
+ for maintaining quorum.\n\t For example, in a 5-instances
+ setup, updating a maximum of 2 instances simultaneously
+ keeps\n\t at least 3 operational for quorum.\n\n\nDefaults
+ to 'Serial'."
+ type: string
+ type: object
+ type:
+ description: |-
+ Indicates the type of the UpdateStrategy.
+ Default is RollingUpdate.
+ enum:
+ - RollingUpdate
+ - OnDelete
+ type: string
+ type: object
volumeClaimTemplates:
description: |-
Specifies a list of PersistentVolumeClaim templates that define the storage requirements for the Component.
diff --git a/deploy/helm/crds/workloads.kubeblocks.io_instancesets.yaml b/deploy/helm/crds/workloads.kubeblocks.io_instancesets.yaml
index d20170e3eca..fb0564b5807 100644
--- a/deploy/helm/crds/workloads.kubeblocks.io_instancesets.yaml
+++ b/deploy/helm/crds/workloads.kubeblocks.io_instancesets.yaml
@@ -3592,19 +3592,6 @@ spec:
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
- memberUpdateStrategy:
- description: |-
- Members(Pods) update strategy.
-
-
- - serial: update Members one by one that guarantee minimum component unavailable time.
- - bestEffortParallel: update Members in parallel that guarantee minimum component un-writable time.
- - parallel: force parallel
- enum:
- - Serial
- - BestEffortParallel
- - Parallel
- type: string
membershipReconfiguration:
description: Provides actions to do membership dynamic reconfiguration.
properties:
@@ -4074,17 +4061,6 @@ spec:
Note: This field will be removed in future version.
type: string
- podUpdatePolicy:
- description: |-
- PodUpdatePolicy indicates how pods should be updated
-
-
- - `StrictInPlace` indicates that only allows in-place upgrades.
- Any attempt to modify other fields will be rejected.
- - `PreferInPlace` indicates that we will first attempt an in-place upgrade of the Pod.
- If that fails, it will fall back to the ReCreate, where pod will be recreated.
- Default value is "PreferInPlace"
- type: string
replicas:
default: 1
description: |-
@@ -11690,45 +11666,82 @@ spec:
description: Provides variables which are used to call Actions.
type: object
updateStrategy:
- description: |-
- Indicates the StatefulSetUpdateStrategy that will be
- employed to update Pods in the InstanceSet when a revision is made to
- Template.
- UpdateStrategy.Type will be set to appsv1.OnDeleteStatefulSetStrategyType if MemberUpdateStrategy is not nil
+ description: Provides fine-grained control over the spec update process
+ of all instances.
+ properties:
+ instanceUpdatePolicy:
+ description: |-
+ Indicates how instances should be updated.
- Note: This field will be removed in future version.
- properties:
+ - `StrictInPlace` indicates that only allows in-place update.
+ Any attempt to modify other fields that do not support in-place update will be rejected.
+ - `PreferInPlace` indicates that we will first attempt an in-place update of the instance.
+ If that fails, it will fall back to the ReCreate, where instance will be recreated.
+ Default value is "PreferInPlace".
+ enum:
+ - StrictInPlace
+ - PreferInPlace
+ type: string
rollingUpdate:
- description: RollingUpdate is used to communicate parameters when
- Type is RollingUpdateStatefulSetStrategyType.
+ description: Specifies how the rolling update should be applied.
properties:
maxUnavailable:
anyOf:
- type: integer
- type: string
description: |-
- The maximum number of pods that can be unavailable during the update.
- Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ The maximum number of instances that can be unavailable during the update.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
Absolute number is calculated from percentage by rounding up. This can not be 0.
- Defaults to 1. This field is alpha-level and is only honored by servers that enable the
- MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to
- Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
- will be counted towards MaxUnavailable.
+ Defaults to 1. The field applies to all instances. That means if there is any unavailable instance,
+ it will be counted towards MaxUnavailable.
x-kubernetes-int-or-string: true
- partition:
+ replicas:
+ anyOf:
+ - type: integer
+ - type: string
description: |-
- Partition indicates the ordinal at which the StatefulSet should be partitioned
- for updates. During a rolling update, all pods from ordinal Replicas-1 to
- Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched.
- This is helpful in being able to do a canary based deployment. The default value is 0.
- format: int32
- type: integer
+ Indicates the number of instances that should be updated during a rolling update.
+ The remaining instances will remain untouched. This is helpful in defining how many instances
+ should participate in the update process.
+ Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+ Absolute number is calculated from percentage by rounding up.
+ The default value is ComponentSpec.Replicas (i.e., update all instances).
+ x-kubernetes-int-or-string: true
+ updateConcurrency:
+ allOf:
+ - enum:
+ - Serial
+ - BestEffortParallel
+ - Parallel
+ - enum:
+ - Serial
+ - Parallel
+ - BestEffortParallel
+ default: Serial
+ description: "Specifies the concurrency level for updating
+ instances during a rolling update.\nAvailable levels:\n\n\n-
+ `Serial`: Updates instances one at a time, ensuring minimal
+ downtime by waiting for each instance to become ready\n
+ \ before updating the next.\n- `Parallel`: Updates all instances
+ simultaneously, optimizing for speed but potentially reducing
+ availability\n during the update.\n- `BestEffortParallel`:
+ Updates instances concurrently with a limit on simultaneous
+ updates to ensure a minimum\n number of operational replicas
+ for maintaining quorum.\n\t For example, in a 5-instances
+ setup, updating a maximum of 2 instances simultaneously
+ keeps\n\t at least 3 operational for quorum.\n\n\nDefaults
+ to 'Serial'."
+ type: string
type: object
type:
description: |-
- Type indicates the type of the StatefulSetUpdateStrategy.
+ Indicates the type of the UpdateStrategy.
Default is RollingUpdate.
+ enum:
+ - RollingUpdate
+ - OnDelete
type: string
type: object
volumeClaimTemplates:
diff --git a/docs/developer_docs/api-reference/cluster.md b/docs/developer_docs/api-reference/cluster.md
index 1fb5b6f1892..67536872a9b 100644
--- a/docs/developer_docs/api-reference/cluster.md
+++ b/docs/developer_docs/api-reference/cluster.md
@@ -675,39 +675,32 @@ an existed ServiceAccount in this field.
-parallelPodManagementConcurrency
+updateStrategy
-
-Kubernetes api utils intstr.IntOrString
+
+UpdateStrategy
|
(Optional)
- Controls the concurrency of pods during initial scale up, when replacing pods on nodes,
-or when scaling down. It only used when PodManagementPolicy is set to Parallel .
-The default Concurrency is 100%.
+Provides fine-grained control over the spec update process of all instances.
|
-podUpdatePolicy
+parallelPodManagementConcurrency
-
-PodUpdatePolicyType
+
+Kubernetes api utils intstr.IntOrString
|
(Optional)
- PodUpdatePolicy indicates how pods should be updated
-
-StrictInPlace indicates that only allows in-place upgrades.
-Any attempt to modify other fields will be rejected.
-PreferInPlace indicates that we will first attempt an in-place upgrade of the Pod.
-If that fails, it will fall back to the ReCreate, where pod will be recreated.
-Default value is “PreferInPlace”
-
+Controls the concurrency of pods during initial scale up, when replacing pods on nodes,
+or when scaling down. It is only used when PodManagementPolicy is set to Parallel .
+The default Concurrency is 100%.
|
@@ -1415,28 +1408,28 @@ This ensures the Pod’s stability and readiness to serve requests.
-updateStrategy
+updateConcurrency
-
-UpdateStrategy
+
+UpdateConcurrency
|
(Optional)
- Specifies the concurrency strategy for updating multiple instances of the Component.
-Available strategies:
+Specifies the concurrency level for updating instances during a rolling update.
+Available levels:
-Serial : Updates replicas one at a time, ensuring minimal downtime by waiting for each replica to become ready
+Serial : Updates instances one at a time, ensuring minimal downtime by waiting for each instance to become ready
before updating the next.
-Parallel : Updates all replicas simultaneously, optimizing for speed but potentially reducing availability
+Parallel : Updates all instances simultaneously, optimizing for speed but potentially reducing availability
during the update.
-BestEffortParallel : Updates replicas concurrently with a limit on simultaneous updates to ensure a minimum
+BestEffortParallel : Updates instances concurrently with a limit on simultaneous updates to ensure a minimum
number of operational replicas for maintaining quorum.
- For example, in a 5-replica component, updating a maximum of 2 replicas simultaneously keeps
+ For example, in a 5-instance setup, updating a maximum of 2 instances simultaneously keeps
at least 3 operational for quorum.
-This field is immutable and defaults to ‘Serial’.
+Defaults to ‘Serial’.
|
@@ -1927,8 +1920,8 @@ ShardsLimit
provisionStrategy
-
-UpdateStrategy
+
+UpdateConcurrency
|
@@ -1942,8 +1935,8 @@ UpdateStrategy
updateStrategy
-
-UpdateStrategy
+
+UpdateConcurrency
|
@@ -3042,23 +3035,16 @@ The default Concurrency is 100%.
-podUpdatePolicy
+updateStrategy
-
-PodUpdatePolicyType
+
+UpdateStrategy
|
(Optional)
- PodUpdatePolicy indicates how pods should be updated
-
-StrictInPlace indicates that only allows in-place upgrades.
-Any attempt to modify other fields will be rejected.
-PreferInPlace indicates that we will first attempt an in-place upgrade of the Pod.
-If that fails, it will fall back to the ReCreate, where pod will be recreated.
-Default value is “PreferInPlace”
-
+Provides fine-grained control over the spec update process of all instances.
|
@@ -5287,28 +5273,28 @@ This ensures the Pod’s stability and readiness to serve requests.
-updateStrategy
+updateConcurrency
-
-UpdateStrategy
+
+UpdateConcurrency
|
(Optional)
- Specifies the concurrency strategy for updating multiple instances of the Component.
-Available strategies:
+Specifies the concurrency level for updating instances during a rolling update.
+Available levels:
-Serial : Updates replicas one at a time, ensuring minimal downtime by waiting for each replica to become ready
+Serial : Updates instances one at a time, ensuring minimal downtime by waiting for each instance to become ready
before updating the next.
-Parallel : Updates all replicas simultaneously, optimizing for speed but potentially reducing availability
+Parallel : Updates all instances simultaneously, optimizing for speed but potentially reducing availability
during the update.
-BestEffortParallel : Updates replicas concurrently with a limit on simultaneous updates to ensure a minimum
+BestEffortParallel : Updates instances concurrently with a limit on simultaneous updates to ensure a minimum
number of operational replicas for maintaining quorum.
- For example, in a 5-replica component, updating a maximum of 2 replicas simultaneously keeps
+ For example, in a 5-instance setup, updating a maximum of 2 instances simultaneously keeps
at least 3 operational for quorum.
-This field is immutable and defaults to ‘Serial’.
+Defaults to ‘Serial’.
|
@@ -6178,39 +6164,32 @@ an existed ServiceAccount in this field.
-parallelPodManagementConcurrency
+updateStrategy
-
-Kubernetes api utils intstr.IntOrString
+
+UpdateStrategy
|
(Optional)
- Controls the concurrency of pods during initial scale up, when replacing pods on nodes,
-or when scaling down. It only used when PodManagementPolicy is set to Parallel .
-The default Concurrency is 100%.
+Provides fine-grained control over the spec update process of all instances.
|
-podUpdatePolicy
+parallelPodManagementConcurrency
-
-PodUpdatePolicyType
+
+Kubernetes api utils intstr.IntOrString
|
(Optional)
- PodUpdatePolicy indicates how pods should be updated
-
-StrictInPlace indicates that only allows in-place upgrades.
-Any attempt to modify other fields will be rejected.
-PreferInPlace indicates that we will first attempt an in-place upgrade of the Pod.
-If that fails, it will fall back to the ReCreate, where pod will be recreated.
-Default value is “PreferInPlace”
-
+Controls the concurrency of pods during initial scale up, when replacing pods on nodes,
+or when scaling down. It is only used when PodManagementPolicy is set to Parallel .
+The default Concurrency is 100%.
|
@@ -7973,6 +7952,30 @@ Add new or override existing volume claim templates.
+InstanceUpdatePolicyType
+(string
alias)
+
+(Appears on:UpdateStrategy)
+
+
+
+
+
+
+Value |
+Description |
+
+
+"PreferInPlace" |
+PreferInPlaceInstanceUpdatePolicyType indicates that we will first attempt an in-place update of the instance.
+If that fails, it will fall back to ReCreate, where the instance will be recreated.
+ |
+
"StrictInPlace" |
+StrictInPlaceInstanceUpdatePolicyType indicates that only in-place updates are allowed.
+Any attempt to modify fields that do not support in-place update will be rejected.
+ |
+
+
Issuer
@@ -8612,30 +8615,6 @@ string
-
PodUpdatePolicyType
-(string
alias)
-
-(Appears on:ClusterComponentSpec, ComponentSpec)
-
-
-
-
-
-
-Value |
-Description |
-
-
-"PreferInPlace" |
-PreferInPlacePodUpdatePolicyType indicates that we will first attempt an in-place upgrade of the Pod.
-If that fails, it will fall back to the ReCreate, where pod will be recreated.
- |
-
"StrictInPlace" |
-StrictInPlacePodUpdatePolicyType indicates that only allows in-place upgrades.
-Any attempt to modify other fields will be rejected.
- |
-
-
PreConditionType
(string
alias)
@@ -9063,6 +9042,87 @@ VarOption
+
RollingUpdate
+
+
+(Appears on:UpdateStrategy)
+
+
+
RollingUpdate specifies how the rolling update should be applied.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+replicas
+
+
+Kubernetes api utils intstr.IntOrString
+
+
+ |
+
+(Optional)
+ Indicates the number of instances that should be updated during a rolling update.
+The remaining instances will remain untouched. This is helpful in defining how many instances
+should participate in the update process.
+Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+Absolute number is calculated from percentage by rounding up.
+The default value is ComponentSpec.Replicas (i.e., update all instances).
+ |
+
+
+
+updateConcurrency
+
+
+UpdateConcurrency
+
+
+ |
+
+(Optional)
+ Specifies the concurrency level for updating instances during a rolling update.
+Available levels:
+
+Serial : Updates instances one at a time, ensuring minimal downtime by waiting for each instance to become ready
+before updating the next.
+Parallel : Updates all instances simultaneously, optimizing for speed but potentially reducing availability
+during the update.
+BestEffortParallel : Updates instances concurrently with a limit on simultaneous updates to ensure a minimum
+number of operational replicas for maintaining quorum.
+ For example, in a 5-instance setup, updating a maximum of 2 instances simultaneously keeps
+at least 3 operational for quorum.
+
+Defaults to ‘Serial’.
+ |
+
+
+
+maxUnavailable
+
+
+Kubernetes api utils intstr.IntOrString
+
+
+ |
+
+(Optional)
+ The maximum number of instances that can be unavailable during the update.
+Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+Absolute number is calculated from percentage by rounding up. This can not be 0.
+Defaults to 1. The field applies to all instances. That means if there is any unavailable pod,
+it will be counted towards MaxUnavailable.
+ |
+
+
+
SchedulingPolicy
@@ -10580,8 +10640,8 @@ ShardsLimit
provisionStrategy
-
-UpdateStrategy
+
+UpdateConcurrency
|
@@ -10595,8 +10655,8 @@ UpdateStrategy
updateStrategy
-
-UpdateStrategy
+
+UpdateConcurrency
|
@@ -11614,15 +11674,15 @@ VarOption
-UpdateStrategy
+UpdateConcurrency
(string
alias)
-(Appears on:ComponentDefinitionSpec, ShardingDefinitionSpec)
+(Appears on:ComponentDefinitionSpec, RollingUpdate, ShardingDefinitionSpec)
-
UpdateStrategy defines the update strategy for cluster components. This strategy determines how updates are applied
+
UpdateConcurrency defines the update concurrency level for cluster components. This concurrency level determines how updates are applied
across the cluster.
-The available strategies are Serial
, BestEffortParallel
, and Parallel
.
+The available concurrency levels are
Serial
,
BestEffortParallel
, and
Parallel
.
@@ -11632,7 +11692,7 @@ The available strategies are Serial
, BestEffortParallel
"BestEffortParallel" |
-BestEffortParallelStrategy indicates that the replicas are updated in parallel, with the operator making
+ | BestEffortParallelConcurrency indicates that the replicas are updated in parallel, with the operator making
a best-effort attempt to update as many replicas as possible concurrently
while maintaining the component’s availability.
Unlike the Parallel strategy, the BestEffortParallel strategy aims to ensure that a minimum number
@@ -11643,33 +11703,25 @@ the operator may allow a maximum of 2 replicas to be simultaneously updated. Thi
The BestEffortParallel strategy strikes a balance between update speed and component availability.
|
"Parallel" |
-ParallelStrategy indicates that updates are applied simultaneously to all Pods of a Component.
+ | ParallelConcurrency indicates that updates are applied simultaneously to all Pods of a Component.
The replicas are updated in parallel, with the operator updating all replicas concurrently.
This strategy provides the fastest update time but may lead to a period of reduced availability or
capacity during the update process.
|
"Serial" |
-SerialStrategy indicates that updates are applied one at a time in a sequential manner.
+ | SerialConcurrency indicates that updates are applied one at a time in a sequential manner.
The operator waits for each replica to be updated and ready before proceeding to the next one.
This ensures that only one replica is unavailable at a time during the update process.
|
-VarOption
-(string
alias)
-
-(Appears on:ClusterVars, ComponentVars, CredentialVars, NamedVar, RoledVar, ServiceRefVars, ServiceVars, TLSVars)
-
-
-
VarOption defines whether a variable is required or optional.
-
-VarSource
+UpdateStrategy
-(Appears on:EnvVar)
+(Appears on:ClusterComponentSpec, ComponentSpec)
-
VarSource represents a source for the value of an EnvVar.
+
UpdateStrategy defines fine-grained control over the spec update process of all instances.
@@ -11681,67 +11733,170 @@ This ensures that only one replica is unavailable at a time during the update pr
-configMapKeyRef
+type
-
-Kubernetes core/v1.ConfigMapKeySelector
+
+UpdateStrategyType
|
(Optional)
- Selects a key of a ConfigMap.
+Indicates the type of the UpdateStrategy.
+Default is RollingUpdate.
|
-secretKeyRef
+instanceUpdatePolicy
-
-Kubernetes core/v1.SecretKeySelector
+
+InstanceUpdatePolicyType
|
(Optional)
- Selects a key of a Secret.
+Indicates how instances should be updated.
+
+StrictInPlace indicates that only in-place updates are allowed.
+Any attempt to modify fields that do not support in-place update will be rejected.
+PreferInPlace indicates that we will first attempt an in-place update of the instance.
+If that fails, it will fall back to ReCreate, where the instance will be recreated.
+Default value is “PreferInPlace”.
+
|
-hostNetworkVarRef
+rollingUpdate
-
-HostNetworkVarSelector
+
+RollingUpdate
|
(Optional)
- Selects a defined var of host-network resources.
+Specifies how the rolling update should be applied.
|
+
+
+UpdateStrategyType
+(string
alias)
+
+(Appears on:UpdateStrategy)
+
+
+
UpdateStrategyType is a string enumeration type that enumerates
+all possible update strategies for the KubeBlocks controllers.
+
+
+
-
-serviceVarRef
-
-
-ServiceVarSelector
-
-
+ | Value |
+Description |
+
+
+"OnDelete" |
+OnDeleteStrategyType indicates that ordered rolling restarts are disabled. Instances are recreated
+when they are manually deleted.
|
-
-(Optional)
- Selects a defined var of a Service.
+ |
"RollingUpdate" |
+RollingUpdateStrategyType indicates that update will be
+applied to all Instances with respect to the InstanceSet
+ordering constraints.
|
-
-
-
-credentialVarRef
-
-
-CredentialVarSelector
-
+ |
+
+VarOption
+(string
alias)
+
+(Appears on:ClusterVars, ComponentVars, CredentialVars, NamedVar, RoledVar, ServiceRefVars, ServiceVars, TLSVars)
+
+
+
VarOption defines whether a variable is required or optional.
+
+VarSource
+
+
+(Appears on:EnvVar)
+
+
+
VarSource represents a source for the value of an EnvVar.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+configMapKeyRef
+
+
+Kubernetes core/v1.ConfigMapKeySelector
+
+
+ |
+
+(Optional)
+ Selects a key of a ConfigMap.
+ |
+
+
+
+secretKeyRef
+
+
+Kubernetes core/v1.SecretKeySelector
+
+
+ |
+
+(Optional)
+ Selects a key of a Secret.
+ |
+
+
+
+hostNetworkVarRef
+
+
+HostNetworkVarSelector
+
+
+ |
+
+(Optional)
+ Selects a defined var of host-network resources.
+ |
+
+
+
+serviceVarRef
+
+
+ServiceVarSelector
+
+
+ |
+
+(Optional)
+ Selects a defined var of a Service.
+ |
+
+
+
+credentialVarRef
+
+
+CredentialVarSelector
+
|
@@ -29069,40 +29224,16 @@ The default Concurrency is 100%.
|
-podUpdatePolicy
-
-
-PodUpdatePolicyType
-
-
- |
-
-(Optional)
- PodUpdatePolicy indicates how pods should be updated
-
-StrictInPlace indicates that only allows in-place upgrades.
-Any attempt to modify other fields will be rejected.
-PreferInPlace indicates that we will first attempt an in-place upgrade of the Pod.
-If that fails, it will fall back to the ReCreate, where pod will be recreated.
-Default value is “PreferInPlace”
-
- |
-
-
-
updateStrategy
-
-Kubernetes apps/v1.StatefulSetUpdateStrategy
+
+UpdateStrategy
|
- Indicates the StatefulSetUpdateStrategy that will be
-employed to update Pods in the InstanceSet when a revision is made to
-Template.
-UpdateStrategy.Type will be set to appsv1.OnDeleteStatefulSetStrategyType if MemberUpdateStrategy is not nil
-Note: This field will be removed in future version.
+(Optional)
+Provides fine-grained control over the spec update process of all instances.
|
@@ -29147,25 +29278,6 @@ map[string]string
-memberUpdateStrategy
-
-
-MemberUpdateStrategy
-
-
- |
-
-(Optional)
- Members(Pods) update strategy.
-
-- serial: update Members one by one that guarantee minimum component unavailable time.
-- bestEffortParallel: update Members in parallel that guarantee minimum component un-writable time.
-- parallel: force parallel
-
- |
-
-
-
paused
bool
@@ -29598,40 +29710,16 @@ The default Concurrency is 100%.
|
-podUpdatePolicy
-
-
-PodUpdatePolicyType
-
-
- |
-
-(Optional)
- PodUpdatePolicy indicates how pods should be updated
-
-StrictInPlace indicates that only allows in-place upgrades.
-Any attempt to modify other fields will be rejected.
-PreferInPlace indicates that we will first attempt an in-place upgrade of the Pod.
-If that fails, it will fall back to the ReCreate, where pod will be recreated.
-Default value is “PreferInPlace”
-
- |
-
-
-
updateStrategy
-
-Kubernetes apps/v1.StatefulSetUpdateStrategy
+
+UpdateStrategy
|
- Indicates the StatefulSetUpdateStrategy that will be
-employed to update Pods in the InstanceSet when a revision is made to
-Template.
-UpdateStrategy.Type will be set to appsv1.OnDeleteStatefulSetStrategyType if MemberUpdateStrategy is not nil
-Note: This field will be removed in future version.
+(Optional)
+Provides fine-grained control over the spec update process of all instances.
|
@@ -29676,25 +29764,6 @@ map[string]string
-memberUpdateStrategy
-
-
-MemberUpdateStrategy
-
-
- |
-
-(Optional)
- Members(Pods) update strategy.
-
-- serial: update Members one by one that guarantee minimum component unavailable time.
-- bestEffortParallel: update Members in parallel that guarantee minimum component un-writable time.
-- parallel: force parallel
-
- |
-
-
-
paused
bool
@@ -30033,6 +30102,30 @@ indicated by UpdateRevisions.
|
+InstanceUpdatePolicyType
+(string
alias)
+
+(Appears on:UpdateStrategy)
+
+
+
+
+
+
+Value |
+Description |
+
+
+"PreferInPlace" |
+PreferInPlaceInstanceUpdatePolicyType indicates that we will first attempt an in-place update of the instance.
+If that fails, it will fall back to ReCreate, where the instance will be recreated.
+ |
+
"StrictInPlace" |
+StrictInPlaceInstanceUpdatePolicyType indicates that only in-place updates are allowed.
+Any attempt to modify fields that do not support in-place update will be rejected.
+ |
+
+
MemberStatus
@@ -30075,29 +30168,6 @@ ReplicaRole
-
MemberUpdateStrategy
-(string
alias)
-
-(Appears on:InstanceSetSpec)
-
-
-
MemberUpdateStrategy defines Cluster Component update strategy.
-
-
-
-
-Value |
-Description |
-
-
-"BestEffortParallel" |
- |
-
"Parallel" |
- |
-
"Serial" |
- |
-
-
MembershipReconfiguration
@@ -30210,30 +30280,6 @@ Action
-
PodUpdatePolicyType
-(string
alias)
-
-(Appears on:InstanceSetSpec)
-
-
-
-
-
-
-Value |
-Description |
-
-
-"PreferInPlace" |
-PreferInPlacePodUpdatePolicyType indicates that we will first attempt an in-place upgrade of the Pod.
-If that fails, it will fall back to the ReCreate, where pod will be recreated.
- |
-
"StrictInPlace" |
-StrictInPlacePodUpdatePolicyType indicates that only allows in-place upgrades.
-Any attempt to modify other fields will be rejected.
- |
-
-
ReplicaRole
@@ -30317,6 +30363,223 @@ bool
|
+RollingUpdate
+
+
+(Appears on:UpdateStrategy)
+
+
+
RollingUpdate specifies how the rolling update should be applied.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+replicas
+
+
+Kubernetes api utils intstr.IntOrString
+
+
+ |
+
+(Optional)
+ Indicates the number of instances that should be updated during a rolling update.
+The remaining instances will remain untouched. This is helpful in defining how many instances
+should participate in the update process.
+Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+Absolute number is calculated from percentage by rounding up.
+The default value is ComponentSpec.Replicas (i.e., update all instances).
+ |
+
+
+
+updateConcurrency
+
+
+UpdateConcurrency
+
+
+ |
+
+(Optional)
+ Specifies the concurrency level for updating instances during a rolling update.
+Available levels:
+
+Serial : Updates instances one at a time, ensuring minimal downtime by waiting for each instance to become ready
+before updating the next.
+Parallel : Updates all instances simultaneously, optimizing for speed but potentially reducing availability
+during the update.
+BestEffortParallel : Updates instances concurrently with a limit on simultaneous updates to ensure a minimum
+number of operational replicas for maintaining quorum.
+ For example, in a 5-instance setup, updating a maximum of 2 instances simultaneously keeps
+at least 3 operational for quorum.
+
+Defaults to ‘Serial’.
+ |
+
+
+
+maxUnavailable
+
+
+Kubernetes api utils intstr.IntOrString
+
+
+ |
+
+(Optional)
+ The maximum number of instances that can be unavailable during the update.
+Value can be an absolute number (ex: 5) or a percentage of desired instances (ex: 10%).
+Absolute number is calculated from percentage by rounding up. This can not be 0.
+Defaults to 1. The field applies to all instances. That means if there is any unavailable pod,
+it will be counted towards MaxUnavailable.
+ |
+
+
+
+UpdateConcurrency
+(string
alias)
+
+(Appears on:RollingUpdate)
+
+
+
UpdateConcurrency defines the update concurrency level for cluster components. This concurrency level determines how updates are applied
+across the cluster.
+The available concurrency levels are Serial
, BestEffortParallel
, and Parallel
.
+
+
+
+
+Value |
+Description |
+
+
+"BestEffortParallel" |
+BestEffortParallelConcurrency indicates that the replicas are updated in parallel, with the operator making
+a best-effort attempt to update as many replicas as possible concurrently
+while maintaining the component’s availability.
+Unlike the Parallel strategy, the BestEffortParallel strategy aims to ensure that a minimum number
+of replicas remain available during the update process to maintain the component’s quorum and functionality.
+For example, consider a component with 5 replicas. To maintain the component’s availability and quorum,
+the operator may allow a maximum of 2 replicas to be simultaneously updated. This ensures that at least
+3 replicas (a quorum) remain available and functional during the update process.
+The BestEffortParallel strategy strikes a balance between update speed and component availability.
+ |
+
"Parallel" |
+ParallelConcurrency indicates that updates are applied simultaneously to all Pods of a Component.
+The replicas are updated in parallel, with the operator updating all replicas concurrently.
+This strategy provides the fastest update time but may lead to a period of reduced availability or
+capacity during the update process.
+ |
+
"Serial" |
+SerialConcurrency indicates that updates are applied one at a time in a sequential manner.
+The operator waits for each replica to be updated and ready before proceeding to the next one.
+This ensures that only one replica is unavailable at a time during the update process.
+ |
+
+
+UpdateStrategy
+
+
+(Appears on:InstanceSetSpec)
+
+
+
UpdateStrategy defines fine-grained control over the spec update process of all instances.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+type
+
+
+UpdateStrategyType
+
+
+ |
+
+(Optional)
+ Indicates the type of the UpdateStrategy.
+Default is RollingUpdate.
+ |
+
+
+
+instanceUpdatePolicy
+
+
+InstanceUpdatePolicyType
+
+
+ |
+
+(Optional)
+ Indicates how instances should be updated.
+
+StrictInPlace indicates that only in-place updates are allowed.
+Any attempt to modify fields that do not support in-place update will be rejected.
+PreferInPlace indicates that we will first attempt an in-place update of the instance.
+If that fails, it will fall back to ReCreate, where the instance will be recreated.
+Default value is “PreferInPlace”.
+
+ |
+
+
+
+rollingUpdate
+
+
+RollingUpdate
+
+
+ |
+
+(Optional)
+ Specifies how the rolling update should be applied.
+ |
+
+
+
+UpdateStrategyType
+(string
alias)
+
+(Appears on:UpdateStrategy)
+
+
+
UpdateStrategyType is a string enumeration type that enumerates
+all possible update strategies for the KubeBlocks controllers.
+
+
+
+
+Value |
+Description |
+
+
+"OnDelete" |
+OnDeleteStrategyType indicates that ordered rolling restarts are disabled. Instances are recreated
+when they are manually deleted.
+ |
+
"RollingUpdate" |
+RollingUpdateStrategyType indicates that update will be
+applied to all Instances with respect to the InstanceSet
+ordering constraints.
+ |
+
+
workloads.kubeblocks.io/v1alpha1
diff --git a/pkg/controller/builder/builder_component.go b/pkg/controller/builder/builder_component.go
index ce41b00d655..e58b3e09760 100644
--- a/pkg/controller/builder/builder_component.go
+++ b/pkg/controller/builder/builder_component.go
@@ -86,8 +86,8 @@ func (builder *ComponentBuilder) SetParallelPodManagementConcurrency(parallelPod
return builder
}
-func (builder *ComponentBuilder) SetPodUpdatePolicy(policy *appsv1.PodUpdatePolicyType) *ComponentBuilder {
- builder.get().Spec.PodUpdatePolicy = policy
+func (builder *ComponentBuilder) SetUpdateStrategy(strategy *appsv1.UpdateStrategy) *ComponentBuilder {
+ builder.get().Spec.UpdateStrategy = strategy
return builder
}
diff --git a/pkg/controller/builder/builder_component_definition.go b/pkg/controller/builder/builder_component_definition.go
index 92497251145..e873b6fe58b 100644
--- a/pkg/controller/builder/builder_component_definition.go
+++ b/pkg/controller/builder/builder_component_definition.go
@@ -205,8 +205,8 @@ func (builder *ComponentDefinitionBuilder) AddSystemAccount(accountName string,
return builder
}
-func (builder *ComponentDefinitionBuilder) SetUpdateStrategy(strategy *appsv1.UpdateStrategy) *ComponentDefinitionBuilder {
- builder.get().Spec.UpdateStrategy = strategy
+func (builder *ComponentDefinitionBuilder) SetUpdateConcurrency(concurrency *appsv1.UpdateConcurrency) *ComponentDefinitionBuilder {
+ builder.get().Spec.UpdateConcurrency = concurrency
return builder
}
diff --git a/pkg/controller/builder/builder_instance_set.go b/pkg/controller/builder/builder_instance_set.go
index 6afc1a06e2d..b1255f13a48 100644
--- a/pkg/controller/builder/builder_instance_set.go
+++ b/pkg/controller/builder/builder_instance_set.go
@@ -114,21 +114,11 @@ func (builder *InstanceSetBuilder) SetParallelPodManagementConcurrency(parallelP
return builder
}
-func (builder *InstanceSetBuilder) SetPodUpdatePolicy(policy workloads.PodUpdatePolicyType) *InstanceSetBuilder {
- builder.get().Spec.PodUpdatePolicy = policy
- return builder
-}
-
-func (builder *InstanceSetBuilder) SetUpdateStrategy(strategy apps.StatefulSetUpdateStrategy) *InstanceSetBuilder {
+func (builder *InstanceSetBuilder) SetUpdateStrategy(strategy *workloads.UpdateStrategy) *InstanceSetBuilder {
builder.get().Spec.UpdateStrategy = strategy
return builder
}
-func (builder *InstanceSetBuilder) SetUpdateStrategyType(strategyType apps.StatefulSetUpdateStrategyType) *InstanceSetBuilder {
- builder.get().Spec.UpdateStrategy.Type = strategyType
- return builder
-}
-
func (builder *InstanceSetBuilder) SetMembershipReconfiguration(reconfiguration *workloads.MembershipReconfiguration) *InstanceSetBuilder {
builder.get().Spec.MembershipReconfiguration = reconfiguration
return builder
@@ -154,14 +144,6 @@ func (builder *InstanceSetBuilder) SetTemplateVars(templateVars map[string]any)
return builder
}
-func (builder *InstanceSetBuilder) SetMemberUpdateStrategy(strategy *workloads.MemberUpdateStrategy) *InstanceSetBuilder {
- builder.get().Spec.MemberUpdateStrategy = strategy
- if strategy != nil {
- builder.SetUpdateStrategyType(apps.OnDeleteStatefulSetStrategyType)
- }
- return builder
-}
-
func (builder *InstanceSetBuilder) SetPaused(paused bool) *InstanceSetBuilder {
builder.get().Spec.Paused = paused
return builder
diff --git a/pkg/controller/builder/builder_instance_set_test.go b/pkg/controller/builder/builder_instance_set_test.go
index 44f7116cbcc..57543158a3c 100644
--- a/pkg/controller/builder/builder_instance_set_test.go
+++ b/pkg/controller/builder/builder_instance_set_test.go
@@ -45,7 +45,7 @@ var _ = Describe("instance_set builder", func() {
minReadySeconds = int32(11)
port = int32(12345)
policy = apps.OrderedReadyPodManagement
- podUpdatePolicy = workloads.PreferInPlacePodUpdatePolicyType
+ instanceUpdatePolicy = workloads.PreferInPlaceInstanceUpdatePolicyType
)
parallelPodManagementConcurrency := &intstr.IntOrString{Type: intstr.String, StrVal: "100%"}
selectors := map[string]string{selectorKey4: selectorValue4}
@@ -107,16 +107,17 @@ var _ = Describe("instance_set builder", func() {
},
},
}
- partition, maxUnavailable := int32(3), intstr.FromInt(2)
- strategy := apps.StatefulSetUpdateStrategy{
- Type: apps.RollingUpdateStatefulSetStrategyType,
- RollingUpdate: &apps.RollingUpdateStatefulSetStrategy{
- Partition: &partition,
- MaxUnavailable: &maxUnavailable,
+ itUpdatePolicy := workloads.PreferInPlaceInstanceUpdatePolicyType
+ updateReplicas, maxUnavailable := intstr.FromInt32(3), intstr.FromInt32(2)
+ updateConcurrency := workloads.BestEffortParallelConcurrency
+ strategy := workloads.UpdateStrategy{
+ InstanceUpdatePolicy: &itUpdatePolicy,
+ RollingUpdate: &workloads.RollingUpdate{
+ Replicas: &updateReplicas,
+ MaxUnavailable: &maxUnavailable,
+ UpdateConcurrency: &updateConcurrency,
},
}
- strategyType := apps.OnDeleteStatefulSetStrategyType
- memberUpdateStrategy := workloads.BestEffortParallelUpdateStrategy
paused := true
credential := workloads.Credential{
Username: workloads.CredentialVar{Value: "foo"},
@@ -145,10 +146,7 @@ var _ = Describe("instance_set builder", func() {
AddVolumeClaimTemplates(vc).
SetPodManagementPolicy(policy).
SetParallelPodManagementConcurrency(parallelPodManagementConcurrency).
- SetPodUpdatePolicy(podUpdatePolicy).
- SetUpdateStrategy(strategy).
- SetUpdateStrategyType(strategyType).
- SetMemberUpdateStrategy(&memberUpdateStrategy).
+ SetUpdateStrategy(&strategy).
SetPaused(paused).
SetCredential(credential).
SetInstances(instances).
@@ -174,15 +172,16 @@ var _ = Describe("instance_set builder", func() {
Expect(its.Spec.VolumeClaimTemplates[1]).Should(Equal(vc))
Expect(its.Spec.PodManagementPolicy).Should(Equal(policy))
Expect(its.Spec.ParallelPodManagementConcurrency).Should(Equal(parallelPodManagementConcurrency))
- Expect(its.Spec.PodUpdatePolicy).Should(Equal(podUpdatePolicy))
- Expect(its.Spec.UpdateStrategy.Type).Should(Equal(strategyType))
+ Expect(its.Spec.UpdateStrategy).ShouldNot(BeNil())
+ Expect(its.Spec.UpdateStrategy.InstanceUpdatePolicy).ShouldNot(BeNil())
+ Expect(*its.Spec.UpdateStrategy.InstanceUpdatePolicy).Should(Equal(instanceUpdatePolicy))
Expect(its.Spec.UpdateStrategy.RollingUpdate).ShouldNot(BeNil())
- Expect(its.Spec.UpdateStrategy.RollingUpdate.Partition).ShouldNot(BeNil())
- Expect(*its.Spec.UpdateStrategy.RollingUpdate.Partition).Should(Equal(partition))
+ Expect(its.Spec.UpdateStrategy.RollingUpdate.Replicas).ShouldNot(BeNil())
+ Expect(*its.Spec.UpdateStrategy.RollingUpdate.Replicas).Should(Equal(updateReplicas))
Expect(its.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable).ShouldNot(BeNil())
- Expect(its.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable).ShouldNot(Equal(maxUnavailable))
- Expect(its.Spec.MemberUpdateStrategy).ShouldNot(BeNil())
- Expect(*its.Spec.MemberUpdateStrategy).Should(Equal(memberUpdateStrategy))
+ Expect(*its.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable).Should(Equal(maxUnavailable))
+ Expect(its.Spec.UpdateStrategy.RollingUpdate.UpdateConcurrency).ShouldNot(BeNil())
+ Expect(*its.Spec.UpdateStrategy.RollingUpdate.UpdateConcurrency).Should(Equal(updateConcurrency))
Expect(its.Spec.Paused).Should(Equal(paused))
Expect(its.Spec.Credential).ShouldNot(BeNil())
Expect(*its.Spec.Credential).Should(Equal(credential))
diff --git a/pkg/controller/component/component.go b/pkg/controller/component/component.go
index 9678d516481..ea2a59fe451 100644
--- a/pkg/controller/component/component.go
+++ b/pkg/controller/component/component.go
@@ -77,7 +77,7 @@ func BuildComponent(cluster *appsv1.Cluster, compSpec *appsv1.ClusterComponentSp
SetResources(compSpec.Resources).
SetServiceAccountName(compSpec.ServiceAccountName).
SetParallelPodManagementConcurrency(compSpec.ParallelPodManagementConcurrency).
- SetPodUpdatePolicy(compSpec.PodUpdatePolicy).
+ SetUpdateStrategy(compSpec.UpdateStrategy).
SetVolumeClaimTemplates(compSpec.VolumeClaimTemplates).
SetVolumes(compSpec.Volumes).
SetServices(compSpec.Services).
diff --git a/pkg/controller/component/its_convertor.go b/pkg/controller/component/its_convertor.go
index e1e9653be8f..3e68dba77f8 100644
--- a/pkg/controller/component/its_convertor.go
+++ b/pkg/controller/component/its_convertor.go
@@ -39,11 +39,10 @@ func BuildWorkloadFrom(synthesizeComp *SynthesizedComponent, protoITS *workloads
protoITS = &workloads.InstanceSet{}
}
convertors := map[string]convertor{
- "roles": &itsRolesConvertor{},
- "credential": &itsCredentialConvertor{},
- "memberupdatestrategy": &itsMemberUpdateStrategyConvertor{},
- "podmanagementpolicy": &itsPodManagementPolicyConvertor{},
- "updatestrategy": &itsUpdateStrategyConvertor{},
+ "roles": &itsRolesConvertor{},
+ "credential": &itsCredentialConvertor{},
+ "podmanagementpolicy": &itsPodManagementPolicyConvertor{},
+ "updatestrategy": &itsUpdateStrategyConvertor{},
}
if err := covertObject(convertors, &protoITS.Spec, synthesizeComp); err != nil {
return nil, err
@@ -107,17 +106,6 @@ func (c *itsCredentialConvertor) convert(args ...any) (any, error) {
return nil, nil
}
-// itsMemberUpdateStrategyConvertor is an implementation of the convertor interface, used to convert the given object into InstanceSet.Spec.MemberUpdateStrategy.
-type itsMemberUpdateStrategyConvertor struct{}
-
-func (c *itsMemberUpdateStrategyConvertor) convert(args ...any) (any, error) {
- synthesizeComp, err := parseITSConvertorArgs(args...)
- if err != nil {
- return nil, err
- }
- return getMemberUpdateStrategy(synthesizeComp), nil
-}
-
// itsPodManagementPolicyConvertor is an implementation of the convertor interface, used to convert the given object into InstanceSet.Spec.PodManagementPolicy.
type itsPodManagementPolicyConvertor struct{}
@@ -129,11 +117,7 @@ func (c *itsPodManagementPolicyConvertor) convert(args ...any) (any, error) {
if synthesizedComp.PodManagementPolicy != nil {
return *synthesizedComp.PodManagementPolicy, nil
}
- memberUpdateStrategy := getMemberUpdateStrategy(synthesizedComp)
- if memberUpdateStrategy == nil || *memberUpdateStrategy == workloads.SerialUpdateStrategy {
- return appsv1.OrderedReadyPodManagement, nil
- }
- return appsv1.ParallelPodManagement, nil
+ return appsv1.OrderedReadyPodManagement, nil
}
// itsUpdateStrategyConvertor is an implementation of the convertor interface, used to convert the given object into InstanceSet.Spec.Instances.
@@ -144,11 +128,21 @@ func (c *itsUpdateStrategyConvertor) convert(args ...any) (any, error) {
if err != nil {
return nil, err
}
- if getMemberUpdateStrategy(synthesizedComp) != nil {
- // appsv1.OnDeleteStatefulSetStrategyType is the default value if member update strategy is set.
- return appsv1.StatefulSetUpdateStrategy{}, nil
+ var updateStrategy *workloads.UpdateStrategy
+ if synthesizedComp.UpdateStrategy != nil {
+ updateStrategy = &workloads.UpdateStrategy{
+ Type: workloads.UpdateStrategyType(synthesizedComp.UpdateStrategy.Type),
+ InstanceUpdatePolicy: (*workloads.InstanceUpdatePolicyType)(synthesizedComp.UpdateStrategy.InstanceUpdatePolicy),
+ }
+ if synthesizedComp.UpdateStrategy.RollingUpdate != nil {
+ updateStrategy.RollingUpdate = &workloads.RollingUpdate{
+ Replicas: synthesizedComp.UpdateStrategy.RollingUpdate.Replicas,
+ MaxUnavailable: synthesizedComp.UpdateStrategy.RollingUpdate.MaxUnavailable,
+ UpdateConcurrency: (*workloads.UpdateConcurrency)(synthesizedComp.UpdateStrategy.RollingUpdate.UpdateConcurrency),
+ }
+ }
}
- return nil, nil
+ return updateStrategy, nil
}
// parseITSConvertorArgs parses the args of ITS convertor.
@@ -160,27 +154,6 @@ func parseITSConvertorArgs(args ...any) (*SynthesizedComponent, error) {
return synthesizeComp, nil
}
-func getMemberUpdateStrategy(synthesizedComp *SynthesizedComponent) *workloads.MemberUpdateStrategy {
- if synthesizedComp.UpdateStrategy == nil {
- return nil
- }
- var (
- serial = workloads.SerialUpdateStrategy
- parallelUpdate = workloads.ParallelUpdateStrategy
- bestEffortParallelUpdate = workloads.BestEffortParallelUpdateStrategy
- )
- switch *synthesizedComp.UpdateStrategy {
- case kbappsv1.SerialStrategy:
- return &serial
- case kbappsv1.ParallelStrategy:
- return &parallelUpdate
- case kbappsv1.BestEffortParallelStrategy:
- return &bestEffortParallelUpdate
- default:
- return nil
- }
-}
-
// ConvertSynthesizeCompRoleToInstanceSetRole converts the component.SynthesizedComponent.Roles to workloads.ReplicaRole.
func ConvertSynthesizeCompRoleToInstanceSetRole(synthesizedComp *SynthesizedComponent) []workloads.ReplicaRole {
if synthesizedComp.Roles == nil {
diff --git a/pkg/controller/component/synthesize_component.go b/pkg/controller/component/synthesize_component.go
index 9e81dc006a5..41173a7eb4a 100644
--- a/pkg/controller/component/synthesize_component.go
+++ b/pkg/controller/component/synthesize_component.go
@@ -89,7 +89,6 @@ func BuildSynthesizedComponent(ctx context.Context, cli client.Reader,
ConfigTemplates: compDefObj.Spec.Configs,
ScriptTemplates: compDefObj.Spec.Scripts,
Roles: compDefObj.Spec.Roles,
- UpdateStrategy: compDefObj.Spec.UpdateStrategy,
MinReadySeconds: compDefObj.Spec.MinReadySeconds,
PolicyRules: compDefObj.Spec.PolicyRules,
LifecycleActions: compDefObj.Spec.LifecycleActions,
@@ -104,13 +103,15 @@ func BuildSynthesizedComponent(ctx context.Context, cli client.Reader,
Stop: comp.Spec.Stop,
PodManagementPolicy: compDef.Spec.PodManagementPolicy,
ParallelPodManagementConcurrency: comp.Spec.ParallelPodManagementConcurrency,
- PodUpdatePolicy: comp.Spec.PodUpdatePolicy,
}
if err = mergeUserDefinedEnv(synthesizeComp, comp); err != nil {
return nil, err
}
+ // build update strategy for workload
+ buildUpdateStrategy(synthesizeComp, comp, compDefObj)
+
// build scheduling policy for workload
buildSchedulingPolicy(synthesizeComp, comp)
@@ -153,6 +154,35 @@ func BuildSynthesizedComponent(ctx context.Context, cli client.Reader,
return synthesizeComp, nil
}
+func buildUpdateStrategy(synthesizeComp *SynthesizedComponent, comp *appsv1.Component, compDef *appsv1.ComponentDefinition) {
+ var updateStrategy *appsv1.UpdateStrategy
+ if comp.Spec.UpdateStrategy != nil {
+ updateStrategy = &appsv1.UpdateStrategy{
+ Type: comp.Spec.UpdateStrategy.Type,
+ InstanceUpdatePolicy: comp.Spec.UpdateStrategy.InstanceUpdatePolicy,
+ }
+ if comp.Spec.UpdateStrategy.RollingUpdate != nil {
+ updateStrategy.RollingUpdate = &appsv1.RollingUpdate{
+ Replicas: comp.Spec.UpdateStrategy.RollingUpdate.Replicas,
+ MaxUnavailable: comp.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable,
+ UpdateConcurrency: comp.Spec.UpdateStrategy.RollingUpdate.UpdateConcurrency,
+ }
+ }
+ }
+ if compDef.Spec.UpdateConcurrency != nil {
+ if updateStrategy == nil {
+ updateStrategy = &appsv1.UpdateStrategy{}
+ }
+ if updateStrategy.RollingUpdate == nil {
+ updateStrategy.RollingUpdate = &appsv1.RollingUpdate{}
+ }
+ if updateStrategy.RollingUpdate.UpdateConcurrency == nil {
+ updateStrategy.RollingUpdate.UpdateConcurrency = compDef.Spec.UpdateConcurrency
+ }
+ }
+ synthesizeComp.UpdateStrategy = updateStrategy
+}
+
func buildComp2CompDefs(ctx context.Context, cli client.Reader, cluster *appsv1.Cluster) (map[string]string, error) {
if cluster == nil {
return nil, nil
diff --git a/pkg/controller/component/type.go b/pkg/controller/component/type.go
index 7b6d06d9f20..cb8f8435d46 100644
--- a/pkg/controller/component/type.go
+++ b/pkg/controller/component/type.go
@@ -65,7 +65,6 @@ type SynthesizedComponent struct {
UpdateStrategy *kbappsv1.UpdateStrategy `json:"updateStrategy,omitempty"`
PodManagementPolicy *appsv1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"`
ParallelPodManagementConcurrency *intstr.IntOrString `json:"parallelPodManagementConcurrency,omitempty"`
- PodUpdatePolicy *kbappsv1.PodUpdatePolicyType `json:"podUpdatePolicy,omitempty"`
PolicyRules []rbacv1.PolicyRule `json:"policyRules,omitempty"`
LifecycleActions *kbappsv1.ComponentLifecycleActions `json:"lifecycleActions,omitempty"`
SystemAccounts []kbappsv1.SystemAccount `json:"systemAccounts,omitempty"`
diff --git a/pkg/controller/factory/builder.go b/pkg/controller/factory/builder.go
index 3f7c2c92b1a..e87ae66ab95 100644
--- a/pkg/controller/factory/builder.go
+++ b/pkg/controller/factory/builder.go
@@ -82,7 +82,6 @@ func BuildInstanceSet(synthesizedComp *component.SynthesizedComponent, component
SetInstances(synthesizedComp.Instances).
SetOfflineInstances(synthesizedComp.OfflineInstances).
SetParallelPodManagementConcurrency(getParallelPodManagementConcurrency(synthesizedComp)).
- SetPodUpdatePolicy(getPodUpdatePolicy(synthesizedComp)).
SetLifecycleActions(synthesizedComp.LifecycleActions).
SetTemplateVars(synthesizedComp.TemplateVars)
@@ -129,13 +128,6 @@ func getParallelPodManagementConcurrency(synthesizedComp *component.SynthesizedC
return &intstr.IntOrString{Type: intstr.String, StrVal: "100%"} // default value
}
-func getPodUpdatePolicy(synthesizedComp *component.SynthesizedComponent) workloads.PodUpdatePolicyType {
- if synthesizedComp.PodUpdatePolicy != nil {
- return workloads.PodUpdatePolicyType(*synthesizedComp.PodUpdatePolicy)
- }
- return workloads.PreferInPlacePodUpdatePolicyType // default value
-}
-
func vctToPVC(vct corev1.PersistentVolumeClaimTemplate) corev1.PersistentVolumeClaim {
return corev1.PersistentVolumeClaim{
ObjectMeta: vct.ObjectMeta,
diff --git a/pkg/controller/factory/builder_test.go b/pkg/controller/factory/builder_test.go
index 7af11a7e784..e122431e82d 100644
--- a/pkg/controller/factory/builder_test.go
+++ b/pkg/controller/factory/builder_test.go
@@ -123,9 +123,13 @@ var _ = Describe("builder", func() {
// test roles
Expect(its.Spec.Roles).Should(HaveLen(len(compDef.Spec.Roles)))
- // test member update strategy
- Expect(its.Spec.MemberUpdateStrategy).ShouldNot(BeNil())
- Expect(*its.Spec.MemberUpdateStrategy).Should(BeEquivalentTo(workloads.BestEffortParallelUpdateStrategy))
+ // test update strategy
+ Expect(its.Spec.UpdateStrategy).ShouldNot(BeNil())
+ Expect(its.Spec.UpdateStrategy.Type).Should(BeEmpty())
+ Expect(its.Spec.UpdateStrategy.InstanceUpdatePolicy).Should(BeNil())
+ Expect(its.Spec.UpdateStrategy.RollingUpdate).ShouldNot(BeNil())
+ Expect(its.Spec.UpdateStrategy.RollingUpdate.UpdateConcurrency).ShouldNot(BeNil())
+ Expect(*its.Spec.UpdateStrategy.RollingUpdate.UpdateConcurrency).Should(BeEquivalentTo(workloads.BestEffortParallelConcurrency))
})
It("builds ConfigMap with template correctly", func() {
diff --git a/pkg/controller/instanceset/reconciler_update.go b/pkg/controller/instanceset/reconciler_update.go
index baee5352317..33fd6c6fbf9 100644
--- a/pkg/controller/instanceset/reconciler_update.go
+++ b/pkg/controller/instanceset/reconciler_update.go
@@ -24,7 +24,6 @@ import (
"fmt"
"time"
- apps "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -98,13 +97,12 @@ func (r *updateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder
// 3. do update
// do nothing if UpdateStrategyType is 'OnDelete'
- if its.Spec.UpdateStrategy.Type == apps.OnDeleteStatefulSetStrategyType {
- // TODO: how to handle the OnDelete type?
+ if its.Spec.UpdateStrategy != nil && its.Spec.UpdateStrategy.Type == workloads.OnDeleteStrategyType {
return kubebuilderx.Continue, nil
}
// handle 'RollingUpdate'
- partition, maxUnavailable, err := parsePartitionNMaxUnavailable(its.Spec.UpdateStrategy.RollingUpdate, len(oldPodList))
+ partition, maxUnavailable, err := parseReplicasNMaxUnavailable(its.Spec.UpdateStrategy, len(oldPodList))
if err != nil {
return kubebuilderx.Continue, err
}
@@ -119,8 +117,7 @@ func (r *updateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder
-	// if it's a roleful InstanceSet, we use updateCount to represent Pods can be updated according to the spec.memberUpdateStrategy.
+	// if it's a roleful InstanceSet, we use updateCount to represent the number of Pods that can be updated according to the update concurrency.
updateCount := len(oldPodList)
if len(its.Spec.Roles) > 0 {
- itsForPlan := getInstanceSetForUpdatePlan(its)
- plan := NewUpdatePlan(*itsForPlan, oldPodList, IsPodUpdated)
+ plan := NewUpdatePlan(*its, oldPodList, IsPodUpdated)
podsToBeUpdated, err := plan.Execute()
if err != nil {
return kubebuilderx.Continue, err
@@ -165,9 +162,13 @@ func (r *updateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder
if err != nil {
return kubebuilderx.Continue, err
}
- if its.Spec.PodUpdatePolicy == workloads.StrictInPlacePodUpdatePolicyType && updatePolicy == RecreatePolicy {
- message := fmt.Sprintf("InstanceSet %s/%s blocks on update as the PodUpdatePolicy is %s and the pod %s can not inplace update",
- its.Namespace, its.Name, workloads.StrictInPlacePodUpdatePolicyType, pod.Name)
+ instanceUpdatePolicy := workloads.PreferInPlaceInstanceUpdatePolicyType
+ if its.Spec.UpdateStrategy != nil && its.Spec.UpdateStrategy.InstanceUpdatePolicy != nil {
+ instanceUpdatePolicy = *its.Spec.UpdateStrategy.InstanceUpdatePolicy
+ }
+ if instanceUpdatePolicy == workloads.StrictInPlaceInstanceUpdatePolicyType && updatePolicy == RecreatePolicy {
+ message := fmt.Sprintf("InstanceSet %s/%s blocks on update as the InstanceUpdatePolicy is %s and the pod %s can not inplace update",
+ its.Namespace, its.Name, workloads.StrictInPlaceInstanceUpdatePolicyType, pod.Name)
if tree != nil && tree.EventRecorder != nil {
tree.EventRecorder.Eventf(its, corev1.EventTypeWarning, EventReasonStrictInPlace, message)
}
@@ -268,39 +269,33 @@ func buildBlockedCondition(its *workloads.InstanceSet, message string) *metav1.C
}
}
-func getInstanceSetForUpdatePlan(its *workloads.InstanceSet) *workloads.InstanceSet {
- if its.Spec.MemberUpdateStrategy != nil {
- return its
- }
- itsForPlan := its.DeepCopy()
- updateStrategy := workloads.SerialUpdateStrategy
- if its.Spec.PodManagementPolicy == apps.ParallelPodManagement {
- updateStrategy = workloads.ParallelUpdateStrategy
- }
- itsForPlan.Spec.MemberUpdateStrategy = &updateStrategy
- return itsForPlan
-}
-
-func parsePartitionNMaxUnavailable(rollingUpdate *apps.RollingUpdateStatefulSetStrategy, replicas int) (int, int, error) {
- partition := replicas
+func parseReplicasNMaxUnavailable(updateStrategy *workloads.UpdateStrategy, totalReplicas int) (int, int, error) {
+ replicas := totalReplicas
maxUnavailable := 1
+ if updateStrategy == nil {
+ return replicas, maxUnavailable, nil
+ }
+ rollingUpdate := updateStrategy.RollingUpdate
if rollingUpdate == nil {
- return partition, maxUnavailable, nil
+ return replicas, maxUnavailable, nil
}
- if rollingUpdate.Partition != nil {
- partition = int(*rollingUpdate.Partition)
+ var err error
+ if rollingUpdate.Replicas != nil {
+ replicas, err = intstr.GetScaledValueFromIntOrPercent(rollingUpdate.Replicas, totalReplicas, false)
+ if err != nil {
+ return replicas, maxUnavailable, err
+ }
}
if rollingUpdate.MaxUnavailable != nil {
- maxUnavailableNum, err := intstr.GetScaledValueFromIntOrPercent(intstr.ValueOrDefault(rollingUpdate.MaxUnavailable, intstr.FromInt32(1)), replicas, false)
+ maxUnavailable, err = intstr.GetScaledValueFromIntOrPercent(intstr.ValueOrDefault(rollingUpdate.MaxUnavailable, intstr.FromInt32(1)), totalReplicas, false)
if err != nil {
return 0, 0, err
}
// maxUnavailable might be zero for small percentage with round down.
// So we have to enforce it not to be less than 1.
- if maxUnavailableNum < 1 {
- maxUnavailableNum = 1
+ if maxUnavailable < 1 {
+ maxUnavailable = 1
}
- maxUnavailable = maxUnavailableNum
}
- return partition, maxUnavailable, nil
+ return replicas, maxUnavailable, nil
}
diff --git a/pkg/controller/instanceset/reconciler_update_test.go b/pkg/controller/instanceset/reconciler_update_test.go
index feb067449e7..42d7fff5098 100644
--- a/pkg/controller/instanceset/reconciler_update_test.go
+++ b/pkg/controller/instanceset/reconciler_update_test.go
@@ -159,11 +159,11 @@ var _ = Describe("update reconciler test", func() {
Expect(err).Should(BeNil())
root, ok := partitionTree.GetRoot().(*workloads.InstanceSet)
Expect(ok).Should(BeTrue())
- partition := int32(3)
+ updateReplicas := intstr.FromInt32(3)
maxUnavailable := intstr.FromInt32(2)
- root.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
- RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
- Partition: &partition,
+ root.Spec.UpdateStrategy = &workloads.UpdateStrategy{
+ RollingUpdate: &workloads.RollingUpdate{
+ Replicas: &updateReplicas,
MaxUnavailable: &maxUnavailable,
},
}
@@ -179,9 +179,9 @@ var _ = Describe("update reconciler test", func() {
Expect(err).Should(BeNil())
root, ok = partitionTree.GetRoot().(*workloads.InstanceSet)
Expect(ok).Should(BeTrue())
- root.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
- RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
- Partition: &partition,
+ root.Spec.UpdateStrategy = &workloads.UpdateStrategy{
+ RollingUpdate: &workloads.RollingUpdate{
+ Replicas: &updateReplicas,
MaxUnavailable: &maxUnavailable,
},
}
@@ -203,7 +203,9 @@ var _ = Describe("update reconciler test", func() {
Expect(err).Should(BeNil())
root, ok = onDeleteTree.GetRoot().(*workloads.InstanceSet)
Expect(ok).Should(BeTrue())
- root.Spec.UpdateStrategy.Type = appsv1.OnDeleteStatefulSetStrategyType
+ root.Spec.UpdateStrategy = &workloads.UpdateStrategy{
+ Type: workloads.OnDeleteStrategyType,
+ }
res, err = reconciler.Reconcile(onDeleteTree)
Expect(err).Should(BeNil())
Expect(res).Should(Equal(kubebuilderx.Continue))
@@ -216,7 +218,10 @@ var _ = Describe("update reconciler test", func() {
Expect(err).Should(BeNil())
root, ok = preferInPlaceTree.GetRoot().(*workloads.InstanceSet)
Expect(ok).Should(BeTrue())
- root.Spec.PodUpdatePolicy = workloads.PreferInPlacePodUpdatePolicyType
+ instanceUpdatePolicy := workloads.PreferInPlaceInstanceUpdatePolicyType
+ root.Spec.UpdateStrategy = &workloads.UpdateStrategy{
+ InstanceUpdatePolicy: &instanceUpdatePolicy,
+ }
// try to add env to instanceHello to trigger the recreation
root.Spec.Instances[0].Env = []corev1.EnvVar{
{
@@ -234,7 +239,10 @@ var _ = Describe("update reconciler test", func() {
Expect(err).Should(BeNil())
root, ok = strictInPlaceTree.GetRoot().(*workloads.InstanceSet)
Expect(ok).Should(BeTrue())
- root.Spec.PodUpdatePolicy = workloads.StrictInPlacePodUpdatePolicyType
+ instanceUpdatePolicy = workloads.StrictInPlaceInstanceUpdatePolicyType
+ root.Spec.UpdateStrategy = &workloads.UpdateStrategy{
+ InstanceUpdatePolicy: &instanceUpdatePolicy,
+ }
// try to add env to instanceHello to trigger the recreation
root.Spec.Instances[0].Env = []corev1.EnvVar{
{
diff --git a/pkg/controller/instanceset/update_plan.go b/pkg/controller/instanceset/update_plan.go
index 7c05e6aa60a..343356f6644 100644
--- a/pkg/controller/instanceset/update_plan.go
+++ b/pkg/controller/instanceset/update_plan.go
@@ -97,7 +97,8 @@ func (p *realUpdatePlan) planWalkFunc(vertex graph.Vertex) error {
// This change may lead to false alarms, as when all replicas are temporarily unavailable for some reason,
// the system will update them without waiting for their roles to be elected and probed. This cloud
// potentially hide some uncertain risks.
- serialUpdate := p.its.Spec.MemberUpdateStrategy != nil && *p.its.Spec.MemberUpdateStrategy == workloads.SerialUpdateStrategy
+ updateConcurrency := getUpdateConcurrency(&p.its)
+ serialUpdate := updateConcurrency == workloads.SerialConcurrency
hasRoleProbed := len(p.its.Status.MembersStatus) > 0
if !serialUpdate || hasRoleProbed {
return ErrWait
@@ -121,20 +122,18 @@ func (p *realUpdatePlan) build() {
root := &model.ObjectVertex{}
p.dag.AddVertex(root)
- if p.its.Spec.MemberUpdateStrategy == nil {
- return
- }
+ updateConcurrency := getUpdateConcurrency(&p.its)
rolePriorityMap := ComposeRolePriorityMap(p.its.Spec.Roles)
SortPods(p.pods, rolePriorityMap, false)
-	// generate plan by MemberUpdateStrategy
+	// generate plan by update concurrency
- switch *p.its.Spec.MemberUpdateStrategy {
- case workloads.SerialUpdateStrategy:
+ switch updateConcurrency {
+ case workloads.SerialConcurrency:
p.buildSerialUpdatePlan()
- case workloads.ParallelUpdateStrategy:
+ case workloads.ParallelConcurrency:
p.buildParallelUpdatePlan()
- case workloads.BestEffortParallelUpdateStrategy:
+ case workloads.BestEffortParallelConcurrency:
p.buildBestEffortParallelUpdatePlan(rolePriorityMap)
}
}
diff --git a/pkg/controller/instanceset/update_plan_test.go b/pkg/controller/instanceset/update_plan_test.go
index a789130d9d5..8a33a0fdbe0 100644
--- a/pkg/controller/instanceset/update_plan_test.go
+++ b/pkg/controller/instanceset/update_plan_test.go
@@ -121,8 +121,12 @@ var _ = Describe("update plan test.", func() {
It("should work well in a serial plan", func() {
By("build a serial plan")
- strategy := workloads.SerialUpdateStrategy
- its.Spec.MemberUpdateStrategy = &strategy
+ updateConcurrency := workloads.SerialConcurrency
+ its.Spec.UpdateStrategy = &workloads.UpdateStrategy{
+ RollingUpdate: &workloads.RollingUpdate{
+ UpdateConcurrency: &updateConcurrency,
+ },
+ }
expectedPlan := [][]*corev1.Pod{
{pod4},
{pod2},
@@ -137,8 +141,12 @@ var _ = Describe("update plan test.", func() {
It("should work well in a serial plan when pod has no role", func() {
By("build a serial plan")
- strategy := workloads.SerialUpdateStrategy
- its.Spec.MemberUpdateStrategy = &strategy
+ updateConcurrency := workloads.SerialConcurrency
+ its.Spec.UpdateStrategy = &workloads.UpdateStrategy{
+ RollingUpdate: &workloads.RollingUpdate{
+ UpdateConcurrency: &updateConcurrency,
+ },
+ }
expectedPlan := [][]*corev1.Pod{
{pod4},
{pod2},
@@ -153,8 +161,12 @@ var _ = Describe("update plan test.", func() {
It("should work well in a parallel plan", func() {
By("build a parallel plan")
- strategy := workloads.ParallelUpdateStrategy
- its.Spec.MemberUpdateStrategy = &strategy
+ updateConcurrency := workloads.ParallelConcurrency
+ its.Spec.UpdateStrategy = &workloads.UpdateStrategy{
+ RollingUpdate: &workloads.RollingUpdate{
+ UpdateConcurrency: &updateConcurrency,
+ },
+ }
expectedPlan := [][]*corev1.Pod{
{pod0, pod1, pod2, pod3, pod4, pod5, pod6},
}
@@ -163,8 +175,12 @@ var _ = Describe("update plan test.", func() {
It("should work well in a best effort parallel", func() {
By("build a best effort parallel plan")
- strategy := workloads.BestEffortParallelUpdateStrategy
- its.Spec.MemberUpdateStrategy = &strategy
+ updateConcurrency := workloads.BestEffortParallelConcurrency
+ its.Spec.UpdateStrategy = &workloads.UpdateStrategy{
+ RollingUpdate: &workloads.RollingUpdate{
+ UpdateConcurrency: &updateConcurrency,
+ },
+ }
expectedPlan := [][]*corev1.Pod{
{pod2, pod3, pod4, pod6},
{pod1},
@@ -176,8 +192,12 @@ var _ = Describe("update plan test.", func() {
It("should work well with role-less and heterogeneous pods", func() {
By("build a serial plan with role-less and heterogeneous pods")
- strategy := workloads.SerialUpdateStrategy
- its.Spec.MemberUpdateStrategy = &strategy
+ updateConcurrency := workloads.SerialConcurrency
+ its.Spec.UpdateStrategy = &workloads.UpdateStrategy{
+ RollingUpdate: &workloads.RollingUpdate{
+ UpdateConcurrency: &updateConcurrency,
+ },
+ }
its.Spec.Roles = nil
for _, pod := range []*corev1.Pod{pod0, pod1, pod2, pod3, pod4, pod5, pod6} {
labels := pod.Labels
diff --git a/pkg/controller/instanceset/utils.go b/pkg/controller/instanceset/utils.go
index df7110ffa1e..95449a6be13 100644
--- a/pkg/controller/instanceset/utils.go
+++ b/pkg/controller/instanceset/utils.go
@@ -205,3 +205,13 @@ func CalculateConcurrencyReplicas(concurrency *intstr.IntOrString, replicas int)
pValue = integer.IntMax(integer.IntMin(pValue, replicas), 1)
return pValue, nil
}
+
+func getUpdateConcurrency(its *workloads.InstanceSet) workloads.UpdateConcurrency {
+ updateConcurrency := workloads.SerialConcurrency
+ if its.Spec.UpdateStrategy != nil &&
+ its.Spec.UpdateStrategy.RollingUpdate != nil &&
+ its.Spec.UpdateStrategy.RollingUpdate.UpdateConcurrency != nil {
+ updateConcurrency = *its.Spec.UpdateStrategy.RollingUpdate.UpdateConcurrency
+ }
+ return updateConcurrency
+}
diff --git a/pkg/testutil/apps/componentdefinition_factory.go b/pkg/testutil/apps/componentdefinition_factory.go
index 8871c693202..c9e24b95cd3 100644
--- a/pkg/testutil/apps/componentdefinition_factory.go
+++ b/pkg/testutil/apps/componentdefinition_factory.go
@@ -276,8 +276,8 @@ func (f *MockComponentDefinitionFactory) AddSystemAccount(accountName string, in
return f
}
-func (f *MockComponentDefinitionFactory) SetUpdateStrategy(strategy *kbappsv1.UpdateStrategy) *MockComponentDefinitionFactory {
- f.Get().Spec.UpdateStrategy = strategy
+func (f *MockComponentDefinitionFactory) SetUpdateConcurrency(concurrency *kbappsv1.UpdateConcurrency) *MockComponentDefinitionFactory {
+ f.Get().Spec.UpdateConcurrency = concurrency
return f
}
diff --git a/pkg/testutil/apps/constant.go b/pkg/testutil/apps/constant.go
index c18fa35603d..7d1b5714d08 100644
--- a/pkg/testutil/apps/constant.go
+++ b/pkg/testutil/apps/constant.go
@@ -206,7 +206,7 @@ var (
},
},
},
- UpdateStrategy: &[]appsv1.UpdateStrategy{appsv1.BestEffortParallelStrategy}[0],
+ UpdateConcurrency: &[]appsv1.UpdateConcurrency{appsv1.BestEffortParallelConcurrency}[0],
Roles: []appsv1.ReplicaRole{
{
Name: "leader",
diff --git a/pkg/testutil/apps/instance_set_factoy.go b/pkg/testutil/apps/instance_set_factoy.go
index e8a12b99077..b8fb66baef2 100644
--- a/pkg/testutil/apps/instance_set_factoy.go
+++ b/pkg/testutil/apps/instance_set_factoy.go
@@ -20,7 +20,6 @@ along with this program. If not, see .
package apps
import (
- appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -60,8 +59,8 @@ func NewInstanceSetFactory(namespace, name string, clusterName string, component
},
},
},
- UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
- Type: appsv1.OnDeleteStatefulSetStrategyType,
+ UpdateStrategy: &workloads.UpdateStrategy{
+ Type: workloads.OnDeleteStrategyType,
},
},
}, f)
diff --git a/pkg/testutil/apps/shardingdefinition_factory.go b/pkg/testutil/apps/shardingdefinition_factory.go
index d1cbc3e61a4..3f41b75ef82 100644
--- a/pkg/testutil/apps/shardingdefinition_factory.go
+++ b/pkg/testutil/apps/shardingdefinition_factory.go
@@ -40,12 +40,12 @@ func NewShardingDefinitionFactory(name, compDef string) *MockShardingDefinitionF
return f
}
-func (f *MockShardingDefinitionFactory) SetProvisionStrategy(strategy appsv1.UpdateStrategy) *MockShardingDefinitionFactory {
+func (f *MockShardingDefinitionFactory) SetProvisionStrategy(strategy appsv1.UpdateConcurrency) *MockShardingDefinitionFactory {
f.Get().Spec.ProvisionStrategy = &strategy
return f
}
-func (f *MockShardingDefinitionFactory) SetUpdateStrategy(strategy appsv1.UpdateStrategy) *MockShardingDefinitionFactory {
+func (f *MockShardingDefinitionFactory) SetUpdateStrategy(strategy appsv1.UpdateConcurrency) *MockShardingDefinitionFactory {
f.Get().Spec.UpdateStrategy = &strategy
return f
}