diff --git a/docs/user_docs/cli/kbcli_cluster_promote.md b/docs/user_docs/cli/kbcli_cluster_promote.md
index d32b072be..a76bb1548 100644
--- a/docs/user_docs/cli/kbcli_cluster_promote.md
+++ b/docs/user_docs/cli/kbcli_cluster_promote.md
@@ -5,32 +5,25 @@ title: kbcli cluster promote
 Promote a non-primary or non-leader instance as the new primary or leader of the cluster
 
 ```
-kbcli cluster promote NAME [--component=] [--instance ] [flags]
+kbcli cluster promote NAME [--instance ] [flags]
 ```
 
 ### Examples
 
 ```
   # Promote the instance mycluster-mysql-1 as the new primary or leader.
-  kbcli cluster promote mycluster --instance mycluster-mysql-1
-
-  # Promote a non-primary or non-leader instance as the new primary or leader, the new primary or leader is determined by the system.
-  kbcli cluster promote mycluster
-
-  # If the cluster has multiple components, you need to specify a component, otherwise an error will be reported.
-  kbcli cluster promote mycluster --component=mysql --instance mycluster-mysql-1
+  kbcli cluster promote mycluster --candidate mycluster-mysql-1
 ```
 
 ### Options
 
 ```
       --auto-approve                   Skip interactive approval before promote the instance
-      --component string               Specify the component name of the cluster, if the cluster has multiple components, you need to specify a component
+      --candidate string               Specify the instance name as the new primary or leader of the cluster, you can get the instance name by running "kbcli cluster list-instances"
       --dry-run string[="unchanged"]   Must be "client", or "server". If with client strategy, only print the object that would be sent, and no data is actually sent. If with server strategy, submit the server-side request, but no data is persistent. (default "none")
       --edit                           Edit the API resource before creating
       --force                          skip the pre-checks of the opsRequest to run the opsRequest forcibly
  -h, --help                            help for promote
-      --instance string                Specify the instance name as the new primary or leader of the cluster, you can get the instance name by running "kbcli cluster list-instances"
       --name string                    OpsRequest name. if not specified, it will be randomly generated
  -o, --output format                   Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
       --ttlSecondsAfterSucceed int     Time to live after the OpsRequest succeed
diff --git a/docs/user_docs/cli/kbcli_cluster_rebuild-instance.md b/docs/user_docs/cli/kbcli_cluster_rebuild-instance.md
index 2d8598784..fd1a83b6a 100644
--- a/docs/user_docs/cli/kbcli_cluster_rebuild-instance.md
+++ b/docs/user_docs/cli/kbcli_cluster_rebuild-instance.md
@@ -39,6 +39,7 @@ kbcli cluster rebuild-instance NAME [flags]
       --node strings                    specified the target node which rebuilds the instance on the node otherwise will rebuild on a random node. format: insName1=nodeName,insName2=nodeName
  -o, --output format                    Prints the output in the specified format. Allowed values: JSON and YAML (default yaml)
       --restore-env stringArray         provide the necessary env for the 'Restore' operation from the backup. 
                                         format: key1=value, key2=value
+      --source-backup-target string     To rebuild a sharding component instance from a backup, you can specify the name of the source backup target
       --ttlSecondsAfterSucceed int      Time to live after the OpsRequest succeed
 ```
diff --git a/go.mod b/go.mod
index e546499d7..5bcbaceb2 100644
--- a/go.mod
+++ b/go.mod
@@ -8,7 +8,7 @@ require (
     github.com/Masterminds/semver/v3 v3.3.0
     github.com/NimbleMarkets/ntcharts v0.1.2
     github.com/apecloud/dbctl v0.0.0-20240827084000-68a1980b1a46
-    github.com/apecloud/kubeblocks v1.0.0-beta.17
+    github.com/apecloud/kubeblocks v1.0.0-beta.23
     github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
     github.com/briandowns/spinner v1.23.0
     github.com/chaos-mesh/chaos-mesh/api v0.0.0-20230912020346-a5d89c1c90ad
diff --git a/go.sum b/go.sum
index 2c63dd2a6..59ecb474a 100644
--- a/go.sum
+++ b/go.sum
@@ -677,8 +677,8 @@ github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4x
 github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
 github.com/apecloud/dbctl v0.0.0-20240827084000-68a1980b1a46 h1:+Jcc7IjDGxPgIfIkGX2Q5Yxj35U65zgcfjh0B9rDhjo=
 github.com/apecloud/dbctl v0.0.0-20240827084000-68a1980b1a46/go.mod h1:eksJtZ7z1nVcVLqDzAdcN5EfpHwXllIAvHZEks2zWys=
-github.com/apecloud/kubeblocks v1.0.0-beta.17 h1:taNHtwUWyCUBSHbPAx5sY5ltY0Dcf62cr+1HjxlK60w=
-github.com/apecloud/kubeblocks v1.0.0-beta.17/go.mod h1:bQ6uey/6S9gAuDkAJ7T89CdpmeXyxEFJpLw1hV2hANE=
+github.com/apecloud/kubeblocks v1.0.0-beta.23 h1:JrQBB9gJ/jtMD9wHv5js26rdfNQgeoU5+GcUEiaAmFU=
+github.com/apecloud/kubeblocks v1.0.0-beta.23/go.mod h1:b656nTyvHhwRwOuwNpOPG87Q0Lba3ygGRWoSOacPt5o=
 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
 github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
diff --git a/pkg/action/template/cluster_operations_template.cue b/pkg/action/template/cluster_operations_template.cue
index e35c26acb..129187a41 100644
--- a/pkg/action/template/cluster_operations_template.cue
+++ b/pkg/action/template/cluster_operations_template.cue
@@ -30,6 +30,7 @@ options: {
     componentDefinitionName: string
     serviceVersion: string
     component: string
+    componentObjectName: string
     instance: string
     componentNames: [...string]
     instanceTPLNames: [...string]
@@ -38,6 +39,7 @@ options: {
     componentName: string
     backupName?: string
    inPlace?: bool
+    sourceBackupTargetName?: string
     instances: [
         ...{
             name: string
@@ -283,13 +285,9 @@ content: {
     }
     if options.type == "Switchover" {
         switchover: [{
-            componentName: options.component
-            if options.instance == "" {
-                instanceName: "*"
-            }
-            if options.instance != "" {
-                instanceName: options.instance
-            }
+            componentObjectName: options.componentObjectName
+            instanceName: options.instance
+            candidateName: options.instance
         }]
     }
     if options.type == "RebuildInstance" {
diff --git a/pkg/cmd/cluster/operations.go b/pkg/cmd/cluster/operations.go
index fce50953c..c5b9b305d 100755
--- a/pkg/cmd/cluster/operations.go
+++ b/pkg/cmd/cluster/operations.go
@@ -108,14 +108,16 @@ type OperationsOptions struct {
     Services []opsv1alpha1.OpsService `json:"services,omitempty"`
 
     // Switchover options
-    Component           string                        `json:"component"`
-    Instance            string                        `json:"instance"`
-    BackupName          string                        `json:"-"`
-    Inplace             bool                          `json:"-"`
-    InstanceNames       []string                      `json:"-"`
-    Nodes               []string                      `json:"-"`
-    RebuildInstanceFrom []opsv1alpha1.RebuildInstance `json:"rebuildInstanceFrom,omitempty"`
-    Env                 []string                      `json:"-"`
+    Component              string                        `json:"component"`
+    ComponentObjectName    string                        `json:"componentObjectName,omitempty"`
+    Instance               string                        `json:"instance"`
+    BackupName             string                        `json:"-"`
+    Inplace                bool                          `json:"-"`
+    InstanceNames          []string                      `json:"-"`
+    Nodes                  []string                      `json:"-"`
+    RebuildInstanceFrom    []opsv1alpha1.RebuildInstance `json:"rebuildInstanceFrom,omitempty"`
+    Env                    []string                      `json:"-"`
+    SourceBackupTargetName string                        `json:"-"`
 
     // Stop and Start options
     isComponentsFlagOptional bool
@@ -212,10 +214,11 @@ func (o *OperationsOptions) CompleteSwitchoverOps() error {
     }
 
     if o.Component == "" {
-        if len(clusterObj.Spec.ComponentSpecs) > 1 {
+        if len(clusterObj.Spec.ComponentSpecs) == 1 {
+            o.Component = clusterObj.Spec.ComponentSpecs[0].Name
+        } else if len(clusterObj.Spec.ComponentSpecs) > 1 {
             return fmt.Errorf("there are multiple components in cluster, please use --component to specify the component for promote")
         }
-        o.Component = clusterObj.Spec.ComponentSpecs[0].Name
     }
     return nil
 }
@@ -435,23 +438,19 @@ func (o *OperationsOptions) validatePromote(clusterObj *appsv1.Cluster) error {
         podObj     = &corev1.Pod{}
     )
 
-    if o.Component == "" && o.Instance == "" {
-        return fmt.Errorf("at least one of --component or --instance is required")
+    if o.Instance == "" {
+        return fmt.Errorf("--candidate is required")
     }
     // if the instance is not specified, do not need to check the validity of the instance
-    if o.Instance != "" {
-        // checks the validity of the instance whether it belongs to the current component and ensure it is not the primary or leader instance currently.
-        podKey := client.ObjectKey{
-            Namespace: clusterObj.Namespace,
-            Name:      o.Instance,
-        }
-        if err := util.GetResourceObjectFromGVR(types.PodGVR(), podKey, o.Dynamic, podObj); err != nil || podObj == nil {
-            return fmt.Errorf("instance %s not found, please check the validity of the instance using \"kbcli cluster list-instances\"", o.Instance)
-        }
-        if o.Component == "" {
-            o.Component = cluster.GetPodComponentName(podObj)
-        }
+    // checks the validity of the instance whether it belongs to the current component and ensure it is not the primary or leader instance currently.
+    podKey := client.ObjectKey{
+        Namespace: clusterObj.Namespace,
+        Name:      o.Instance,
     }
+    if err := util.GetResourceObjectFromGVR(types.PodGVR(), podKey, o.Dynamic, podObj); err != nil || podObj == nil {
+        return fmt.Errorf("instance %s not found, please check the validity of the instance using \"kbcli cluster list-instances\"", o.Instance)
+    }
+    o.ComponentObjectName = constant.GenerateClusterComponentName(clusterObj.Name, podObj.Labels[constant.KBAppComponentLabelKey])
 
     getAndValidatePod := func(targetRoles ...string) error {
         // if the instance is not specified, do not need to check the validity of the instance
@@ -467,8 +466,8 @@ func (o *OperationsOptions) validatePromote(clusterObj *appsv1.Cluster) error {
                 return fmt.Errorf("instanceName %s cannot be promoted because it is already the targetRole %s instance", o.Instance, targetRole)
             }
         }
-        if cluster.GetPodComponentName(podObj) != o.Component || podObj.Labels[constant.AppInstanceLabelKey] != clusterObj.Name {
-            return fmt.Errorf("instanceName %s does not belong to the current component, please check the validity of the instance using \"kbcli cluster list-instances\"", o.Instance)
+        if podObj.Labels[constant.AppInstanceLabelKey] != clusterObj.Name {
+            return fmt.Errorf("instanceName %s does not belong to the current cluster, please check the validity of the instance using \"kbcli cluster list-instances\"", o.Instance)
         }
         return nil
     }
@@ -492,13 +491,13 @@ func (o *OperationsOptions) validatePromote(clusterObj *appsv1.Cluster) error {
     // check componentDefinition exist
     compDefKey := client.ObjectKey{
         Namespace: "",
-        Name:      cluster.GetComponentSpec(clusterObj, o.Component).ComponentDef,
+        Name:      cluster.GetComponentSpec(clusterObj, cluster.GetPodComponentName(podObj)).ComponentDef,
     }
     if err := util.GetResourceObjectFromGVR(types.CompDefGVR(), compDefKey, o.Dynamic, &compDefObj); err != nil {
         return err
     }
     if compDefObj.Spec.LifecycleActions == nil || compDefObj.Spec.LifecycleActions.Switchover == nil {
-        return fmt.Errorf(`this component "%s does not support switchover, you can define the switchover action in the componentDef "%s"`, o.Component, compDefKey.Name)
+        return fmt.Errorf(`this instance "%s does not support switchover, you can define the switchover action in the componentDef "%s"`, o.Instance, compDefKey.Name)
     }
     targetRole, err := getTargetRole(compDefObj.Spec.Roles)
     if err != nil {
@@ -988,20 +987,14 @@ func NewCancelCmd(f cmdutil.Factory, streams genericiooptions.IOStreams) *cobra.
 
 var promoteExample = templates.Examples(`
     # Promote the instance mycluster-mysql-1 as the new primary or leader.
-    kbcli cluster promote mycluster --instance mycluster-mysql-1
-
-    # Promote a non-primary or non-leader instance as the new primary or leader, the new primary or leader is determined by the system.
-    kbcli cluster promote mycluster
-
-    # If the cluster has multiple components, you need to specify a component, otherwise an error will be reported.
-    kbcli cluster promote mycluster --component=mysql --instance mycluster-mysql-1
+    kbcli cluster promote mycluster --candidate mycluster-mysql-1
 `)
 
 // NewPromoteCmd creates a promote command
 func NewPromoteCmd(f cmdutil.Factory, streams genericiooptions.IOStreams) *cobra.Command {
     o := newBaseOperationsOptions(f, streams, opsv1alpha1.SwitchoverType, false)
     cmd := &cobra.Command{
-        Use:               "promote NAME [--component=] [--instance ]",
+        Use:               "promote NAME [--instance ]",
         Short:             "Promote a non-primary or non-leader instance as the new primary or leader of the cluster",
         Example:           promoteExample,
         ValidArgsFunction: util.ResourceNameCompletionFunc(f, types.ClusterGVR()),
@@ -1015,9 +1008,9 @@ func NewPromoteCmd(f cmdutil.Factory, streams genericiooptions.IOStreams) *cobra
             cmdutil.CheckErr(o.Run())
         },
     }
-    flags.AddComponentFlag(f, cmd, &o.Component, "Specify the component name of the cluster, if the cluster has multiple components, you need to specify a component")
-    cmd.Flags().StringVar(&o.Instance, "instance", "", "Specify the instance name as the new primary or leader of the cluster, you can get the instance name by running \"kbcli cluster list-instances\"")
+    cmd.Flags().StringVar(&o.Instance, "candidate", "", "Specify the instance name as the new primary or leader of the cluster, you can get the instance name by running \"kbcli cluster list-instances\"")
     cmd.Flags().BoolVar(&o.AutoApprove, "auto-approve", false, "Skip interactive approval before promote the instance")
+    _ = cmd.MarkFlagRequired("candidate")
     o.addCommonFlags(cmd, f)
     return cmd
 }
@@ -1327,10 +1320,11 @@ func NewRebuildInstanceCmd(f cmdutil.Factory, streams genericiooptions.IOStreams
             ComponentOps: opsv1alpha1.ComponentOps{
                 ComponentName: compName,
             },
-            Instances:  instances,
-            InPlace:    o.Inplace,
-            BackupName: o.BackupName,
-            RestoreEnv: envVars,
+            Instances:              instances,
+            InPlace:                o.Inplace,
+            BackupName:             o.BackupName,
+            RestoreEnv:             envVars,
+            SourceBackupTargetName: o.SourceBackupTargetName,
         },
     }
     return nil
@@ -1355,6 +1349,7 @@ func NewRebuildInstanceCmd(f cmdutil.Factory, streams genericiooptions.IOStreams
     cmd.Flags().StringVar(&o.BackupName, "backup", "", "instances will be rebuild by the specified backup.")
     cmd.Flags().StringSliceVar(&o.InstanceNames, "instances", nil, "instances which need to rebuild.")
     util.CheckErr(flags.CompletedInstanceFlag(cmd, f, "instances"))
+    cmd.Flags().StringVar(&o.SourceBackupTargetName, "source-backup-target", "", "To rebuild a sharding component instance from a backup, you can specify the name of the source backup target")
    cmd.Flags().StringSliceVar(&o.Nodes, "node", nil, `specified the target node which rebuilds the instance on the node otherwise will rebuild on a random node. format: insName1=nodeName,insName2=nodeName`)
     cmd.Flags().StringArrayVar(&o.Env, "restore-env", []string{}, "provide the necessary env for the 'Restore' operation from the backup. \nformat: key1=value, key2=value")
     return cmd
diff --git a/pkg/cmd/cluster/operations_test.go b/pkg/cmd/cluster/operations_test.go
index 9f420c993..bb4f351c5 100644
--- a/pkg/cmd/cluster/operations_test.go
+++ b/pkg/cmd/cluster/operations_test.go
@@ -370,8 +370,9 @@ var _ = Describe("operations", func() {
 
         By("validate failed because o.Instance does not belong to the current component")
         o.Instance = fmt.Sprintf("%s-%s-%d", clusterName, testing.ComponentName, 1)
+        o.Name = clusterName1
         Expect(o.Validate()).ShouldNot(Succeed())
-        Expect(testing.ContainExpectStrings(o.Validate().Error(), "does not belong to the current component")).Should(BeTrue())
+        Expect(testing.ContainExpectStrings(o.Validate().Error(), "does not belong to the current cluster")).Should(BeTrue())
     })
 
     It("Switchover ops base on component definition", func() {
@@ -402,11 +403,12 @@ var _ = Describe("operations", func() {
         Expect(o.Validate()).ShouldNot(Succeed())
         Expect(testing.ContainExpectStrings(o.Validate().Error(), "cannot be promoted because it is already the targetRole")).Should(BeTrue())
 
-        By("validate failed because o.Instance does not belong to the current component")
+        By("validate failed because o.Instance does not belong to the current cluster")
         o.Instance = fmt.Sprintf("%s-%s-%d", clusterName1, testing.ComponentName, 1)
         o.Component = testing.ComponentName
+        o.Name = clusterName
         Expect(o.Validate()).ShouldNot(Succeed())
-        Expect(testing.ContainExpectStrings(o.Validate().Error(), "does not belong to the current component")).Should(BeTrue())
+        Expect(testing.ContainExpectStrings(o.Validate().Error(), "does not belong to the current cluster")).Should(BeTrue())
     })
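
Below is a brief usage sketch of the flags this patch introduces, based only on the flag registrations above; the cluster, instance, backup, and target names are illustrative placeholders rather than values taken from the patch.

```
# Switchover: promote a specific candidate instance; --candidate is now required.
kbcli cluster promote mycluster --candidate mycluster-mysql-1

# Rebuild a sharding component instance from a backup, selecting the source backup target by name.
kbcli cluster rebuild-instance mycluster --instances mycluster-mysql-1 --backup mybackup --source-backup-target mytarget
```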