diff --git a/apis/core/v1alpha1/cluster_types.go b/apis/core/v1alpha1/cluster_types.go index cd767e5df4..0c4e204dd9 100644 --- a/apis/core/v1alpha1/cluster_types.go +++ b/apis/core/v1alpha1/cluster_types.go @@ -185,17 +185,14 @@ func (c *Cluster) IsTLSClusterEnabled() bool { return c.Spec.TLSCluster != nil && c.Spec.TLSCluster.Enabled } -// TLSClusterSecretName returns the mTLS secret name for a component group. -func (c *Cluster) TLSClusterSecretName(groupName string) string { - return fmt.Sprintf("%s-%s-cluster-secret", c.Name, groupName) -} - // ClusterClientTLSSecretName returns the mTLS secret name for the cluster client. +// TODO: move it to namer pkg func (c *Cluster) ClusterClientTLSSecretName() string { return TLSClusterClientSecretName(c.Name) } // TLSClusterClientSecretName returns the mTLS secret name for the cluster client. +// TODO: move it to namer pkg func TLSClusterClientSecretName(clusterName string) string { return fmt.Sprintf("%s-cluster-client-secret", clusterName) } diff --git a/apis/core/v1alpha1/pd_types.go b/apis/core/v1alpha1/pd_types.go index 7734da07db..0bd9d47ed4 100644 --- a/apis/core/v1alpha1/pd_types.go +++ b/apis/core/v1alpha1/pd_types.go @@ -15,6 +15,8 @@ package v1alpha1 import ( + "strings" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -249,6 +251,30 @@ func (in *PD) GetPeerPort() int32 { return DefaultPDPortPeer } +// NOTE: name prefix is used to generate all names of underlying resources of this instance +func (in *PD) NamePrefixAndSuffix() (prefix, suffix string) { + index := strings.LastIndexByte(in.Name, '-') + // TODO(liubo02): validate name to avoid '-' is not found + if index == -1 { + panic("cannot get name prefix") + } + return in.Name[:index], in.Name[index+1:] +} + +// This name is not only for pod, but also configMap, hostname and almost all underlying resources +// TODO(liubo02): rename to more reasonable one +func (in *PD) PodName() string { + prefix, suffix := in.NamePrefixAndSuffix() + return prefix + "-pd-" + suffix +} + +// TLSClusterSecretName returns the mTLS secret name for a component. 
+// TODO(liubo02): move to namer +func (in *PD) TLSClusterSecretName() string { + prefix, _ := in.NamePrefixAndSuffix() + return prefix + "-pd-cluster-secret" +} + // PDGroupSpec describes the common attributes of a PDGroup type PDGroupSpec struct { Cluster ClusterReference `json:"cluster"` diff --git a/apis/core/v1alpha1/tidb_types.go b/apis/core/v1alpha1/tidb_types.go index b2b8482dfb..aebd8619f5 100644 --- a/apis/core/v1alpha1/tidb_types.go +++ b/apis/core/v1alpha1/tidb_types.go @@ -16,6 +16,7 @@ package v1alpha1 import ( "fmt" + "strings" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -279,6 +280,30 @@ func (in *TiDB) GetStatusPort() int32 { return DefaultTiDBPortStatus } +// NOTE: name prefix is used to generate all names of underlying resources of this instance +func (in *TiDB) NamePrefixAndSuffix() (prefix, suffix string) { + index := strings.LastIndexByte(in.Name, '-') + // TODO(liubo02): validate name to avoid '-' is not found + if index == -1 { + panic("cannot get name prefix") + } + return in.Name[:index], in.Name[index+1:] +} + +// This name is not only for pod, but also configMap, hostname and almost all underlying resources +// TODO(liubo02): rename to more reasonable one +func (in *TiDB) PodName() string { + prefix, suffix := in.NamePrefixAndSuffix() + return prefix + "-tidb-" + suffix +} + +// TLSClusterSecretName returns the mTLS secret name for a component. +// TODO(liubo02): move to namer +func (in *TiDB) TLSClusterSecretName() string { + prefix, _ := in.NamePrefixAndSuffix() + return prefix + "-tidb-cluster-secret" +} + // TiDBGroupSpec describes the common attributes of a TiDBGroup. type TiDBGroupSpec struct { Cluster ClusterReference `json:"cluster"` @@ -481,12 +506,12 @@ func (in *TiDBGroup) IsTLSClientEnabled() bool { // TiDBServerTLSSecretName returns the secret name used in TiDB server for the TLS between TiDB server and MySQL client. func (in *TiDBGroup) TiDBServerTLSSecretName() string { - return fmt.Sprintf("%s-%s-server-secret", in.Spec.Cluster.Name, in.Name) + return fmt.Sprintf("%s-tidb-server-secret", in.Name) } // TiDBClientTLSSecretName returns the secret name used in MySQL client for the TLS between TiDB server and MySQL client. 
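The new `NamePrefixAndSuffix`/`PodName`/`TLSClusterSecretName` helpers above derive every per-instance resource name (pod, ConfigMap, hostname, mTLS secret) from the instance name, which always has the shape `<group name>-<random suffix>`. A minimal standalone sketch of that convention, using a made-up PD instance name `basic-x9k2pz`; the same pattern repeats for TiKV, TiDB, and TiFlash with their own component infixes:

```go
package main

import (
	"fmt"
	"strings"
)

// namePrefixAndSuffix mirrors the helper added in this diff: split the
// instance name at the last '-' into the group-derived prefix and the
// random suffix.
func namePrefixAndSuffix(name string) (prefix, suffix string) {
	i := strings.LastIndexByte(name, '-')
	if i == -1 {
		panic("cannot get name prefix")
	}
	return name[:i], name[i+1:]
}

func main() {
	instance := "basic-x9k2pz" // hypothetical PD instance: "<group>-<6 random chars>"

	prefix, suffix := namePrefixAndSuffix(instance)
	podName := prefix + "-pd-" + suffix        // also the ConfigMap name and hostname
	tlsSecret := prefix + "-pd-cluster-secret" // per-component mTLS secret

	fmt.Println(podName)   // basic-pd-x9k2pz
	fmt.Println(tlsSecret) // basic-pd-cluster-secret
}
```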
func (in *TiDBGroup) TiDBClientTLSSecretName() string { - return fmt.Sprintf("%s-%s-client-secret", in.Spec.Cluster.Name, in.Name) + return fmt.Sprintf("%s-tidb-client-secret", in.Name) } func (in *TiDBGroup) IsBootstrapSQLEnabled() bool { diff --git a/apis/core/v1alpha1/tiflash_types.go b/apis/core/v1alpha1/tiflash_types.go index df8d28b636..9f4b2ad723 100644 --- a/apis/core/v1alpha1/tiflash_types.go +++ b/apis/core/v1alpha1/tiflash_types.go @@ -15,6 +15,8 @@ package v1alpha1 import ( + "strings" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -269,6 +271,30 @@ func (in *TiFlash) GetProxyStatusPort() int32 { return DefaultTiFlashPortProxyStatus } +// NOTE: name prefix is used to generate all names of underlying resources of this instance +func (in *TiFlash) NamePrefixAndSuffix() (prefix, suffix string) { + index := strings.LastIndexByte(in.Name, '-') + // TODO(liubo02): validate name to avoid '-' is not found + if index == -1 { + panic("cannot get name prefix") + } + return in.Name[:index], in.Name[index+1:] +} + +// This name is not only for pod, but also configMap, hostname and almost all underlying resources +// TODO(liubo02): rename to more reasonable one +func (in *TiFlash) PodName() string { + prefix, suffix := in.NamePrefixAndSuffix() + return prefix + "-tiflash-" + suffix +} + +// TLSClusterSecretName returns the mTLS secret name for a component. +// TODO(liubo02): move to namer +func (in *TiFlash) TLSClusterSecretName() string { + prefix, _ := in.NamePrefixAndSuffix() + return prefix + "-tiflash-cluster-secret" +} + type TiFlashGroupSpec struct { Cluster ClusterReference `json:"cluster"` Replicas *int32 `json:"replicas"` diff --git a/apis/core/v1alpha1/tikv_types.go b/apis/core/v1alpha1/tikv_types.go index 5c904c98ce..f043e2211a 100644 --- a/apis/core/v1alpha1/tikv_types.go +++ b/apis/core/v1alpha1/tikv_types.go @@ -15,6 +15,8 @@ package v1alpha1 import ( + "strings" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -259,6 +261,30 @@ func (in *TiKV) GetStatusPort() int32 { return DefaultTiKVPortStatus } +// NOTE: name prefix is used to generate all names of underlying resources of this instance +func (in *TiKV) NamePrefixAndSuffix() (prefix, suffix string) { + index := strings.LastIndexByte(in.Name, '-') + // TODO(liubo02): validate name to avoid '-' is not found + if index == -1 { + panic("cannot get name prefix") + } + return in.Name[:index], in.Name[index+1:] +} + +// This name is not only for pod, but also configMap, hostname and almost all underlying resources +// TODO(liubo02): rename to more reasonable one +func (in *TiKV) PodName() string { + prefix, suffix := in.NamePrefixAndSuffix() + return prefix + "-tikv-" + suffix +} + +// TLSClusterSecretName returns the mTLS secret name for a component. +// TODO(liubo02): move to namer +func (in *TiKV) TLSClusterSecretName() string { + prefix, _ := in.NamePrefixAndSuffix() + return prefix + "-tikv-cluster-secret" +} + // TiKVGroupSpec describes the common attributes of a TiKVGroup type TiKVGroupSpec struct { Cluster ClusterReference `json:"cluster"` diff --git a/pkg/configs/pd/config.go b/pkg/configs/pd/config.go index bb9136813c..ced087b9f3 100644 --- a/pkg/configs/pd/config.go +++ b/pkg/configs/pd/config.go @@ -162,7 +162,7 @@ func getAdvertiseClientURLs(pd *v1alpha1.PD, scheme string) string { if ns == "" { ns = corev1.NamespaceDefault } - host := pd.Name + "." 
+ pd.Spec.Subdomain + "." + ns + host := pd.PodName() + "." + pd.Spec.Subdomain + "." + ns return fmt.Sprintf("%s://%s:%d", scheme, host, pd.GetClientPort()) } @@ -175,7 +175,7 @@ func getAdvertisePeerURLs(pd *v1alpha1.PD, scheme string) string { if ns == "" { ns = corev1.NamespaceDefault } - host := pd.Name + "." + pd.Spec.Subdomain + "." + ns + host := pd.PodName() + "." + pd.Spec.Subdomain + "." + ns return fmt.Sprintf("%s://%s:%d", scheme, host, pd.GetPeerPort()) } diff --git a/pkg/configs/tidb/config.go b/pkg/configs/tidb/config.go index 12f52019c8..7ca488aee4 100644 --- a/pkg/configs/tidb/config.go +++ b/pkg/configs/tidb/config.go @@ -136,7 +136,7 @@ func getAdvertiseAddress(tidb *v1alpha1.TiDB) string { if ns == "" { ns = corev1.NamespaceDefault } - return tidb.Name + "." + tidb.Spec.Subdomain + "." + ns + ".svc" + return tidb.PodName() + "." + tidb.Spec.Subdomain + "." + ns + ".svc" } func removeHTTPPrefix(url string) string { diff --git a/pkg/configs/tiflash/config.go b/pkg/configs/tiflash/config.go index 39e5a276d4..c541d28379 100644 --- a/pkg/configs/tiflash/config.go +++ b/pkg/configs/tiflash/config.go @@ -188,7 +188,7 @@ func GetServiceAddr(tiflash *v1alpha1.TiFlash) string { if ns == "" { ns = corev1.NamespaceDefault } - return fmt.Sprintf("%s.%s.%s:%d", tiflash.Name, tiflash.Spec.Subdomain, ns, tiflash.GetFlashPort()) + return fmt.Sprintf("%s.%s.%s:%d", tiflash.PodName(), tiflash.Spec.Subdomain, ns, tiflash.GetFlashPort()) } func getProxyAddr(tiflash *v1alpha1.TiFlash) string { @@ -200,7 +200,7 @@ func getProxyAdvertiseAddr(tiflash *v1alpha1.TiFlash) string { if ns == "" { ns = corev1.NamespaceDefault } - return fmt.Sprintf("%s.%s.%s:%d", tiflash.Name, tiflash.Spec.Subdomain, ns, tiflash.GetProxyPort()) + return fmt.Sprintf("%s.%s.%s:%d", tiflash.PodName(), tiflash.Spec.Subdomain, ns, tiflash.GetProxyPort()) } func getProxyAdvertiseStatusAddr(tiflash *v1alpha1.TiFlash) string { @@ -208,7 +208,7 @@ func getProxyAdvertiseStatusAddr(tiflash *v1alpha1.TiFlash) string { if ns == "" { ns = corev1.NamespaceDefault } - return fmt.Sprintf("%s.%s.%s:%d", tiflash.Name, tiflash.Spec.Subdomain, ns, tiflash.GetProxyStatusPort()) + return fmt.Sprintf("%s.%s.%s:%d", tiflash.PodName(), tiflash.Spec.Subdomain, ns, tiflash.GetProxyStatusPort()) } func GetServerLogPath(tiflash *v1alpha1.TiFlash) string { diff --git a/pkg/configs/tikv/config.go b/pkg/configs/tikv/config.go index fe80013256..d2f3ba6398 100644 --- a/pkg/configs/tikv/config.go +++ b/pkg/configs/tikv/config.go @@ -157,7 +157,7 @@ func GetAdvertiseClientURLs(tikv *v1alpha1.TiKV) string { if ns == "" { ns = corev1.NamespaceDefault } - return fmt.Sprintf("%s.%s.%s:%d", tikv.Name, tikv.Spec.Subdomain, ns, tikv.GetClientPort()) + return fmt.Sprintf("%s.%s.%s:%d", tikv.PodName(), tikv.Spec.Subdomain, ns, tikv.GetClientPort()) } func getStatusURLs(tikv *v1alpha1.TiKV) string { @@ -169,5 +169,5 @@ func getAdvertiseStatusURLs(tikv *v1alpha1.TiKV) string { if ns == "" { ns = corev1.NamespaceDefault } - return fmt.Sprintf("%s.%s.%s:%d", tikv.Name, tikv.Spec.Subdomain, ns, tikv.GetStatusPort()) + return fmt.Sprintf("%s.%s.%s:%d", tikv.PodName(), tikv.Spec.Subdomain, ns, tikv.GetStatusPort()) } diff --git a/pkg/controllers/cluster/tasks/status.go b/pkg/controllers/cluster/tasks/status.go index ab9f08969c..c6dd23d029 100644 --- a/pkg/controllers/cluster/tasks/status.go +++ b/pkg/controllers/cluster/tasks/status.go @@ -58,8 +58,8 @@ func (t *TaskStatus) Sync(ctx task.Context[ReconcileContext]) task.Result { if rtx.Cluster.IsTLSClusterEnabled() { 
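The config generators above (PD, TiDB, TiFlash, TiKV) now build advertise addresses from `PodName()` instead of the raw instance name, so the DNS name matches the pod's hostname under the group's headless service. A rough sketch of the resulting host string; all concrete values here (instance, subdomain, namespace, port) are illustrative only:

```go
package main

import "fmt"

func main() {
	// Illustrative values; in the operator they come from the TiKV object.
	podName := "basic-tikv-x9k2pz" // tikv.PodName()
	subdomain := "basic-tikv-peer" // tikv.Spec.Subdomain, i.e. the headless service name
	ns := "default"                // tikv.Namespace, falling back to "default" when empty
	clientPort := int32(20160)     // tikv.GetClientPort(); example value

	// Mirrors GetAdvertiseClientURLs after this change: <pod>.<subdomain>.<ns>:<port>.
	fmt.Printf("%s.%s.%s:%d\n", podName, subdomain, ns, clientPort)
	// Output: basic-tikv-x9k2pz.basic-tikv-peer.default:20160
}
```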
scheme = "https" } - pdAddr := fmt.Sprintf("%s://%s-%s.%s:%d", scheme, rtx.Cluster.Name, - rtx.PDGroup.Name, rtx.PDGroup.Namespace, rtx.PDGroup.GetClientPort()) + // TODO(liubo02): extract a common util to get pd addr + pdAddr := fmt.Sprintf("%s://%s-pd.%s:%d", scheme, rtx.PDGroup.Name, rtx.PDGroup.Namespace, rtx.PDGroup.GetClientPort()) if rtx.Cluster.Status.PD != pdAddr { // TODO(csuzhangxc): verify switch between TLS and non-TLS rtx.Cluster.Status.PD = pdAddr needUpdate = true diff --git a/pkg/controllers/pd/tasks/cm.go b/pkg/controllers/pd/tasks/cm.go index a18ec1b1cd..11798b6f65 100644 --- a/pkg/controllers/pd/tasks/cm.go +++ b/pkg/controllers/pd/tasks/cm.go @@ -62,7 +62,7 @@ func TaskConfigMap(ctx *ReconcileContext, _ logr.Logger, c client.Client) task.T func newConfigMap(pd *v1alpha1.PD, data []byte, hash string) *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: ConfigMapName(pd.Name), + Name: pd.PodName(), Namespace: pd.Namespace, Labels: maputil.Merge(pd.Labels, map[string]string{ v1alpha1.LabelKeyInstance: pd.Name, diff --git a/pkg/controllers/pd/tasks/ctx.go b/pkg/controllers/pd/tasks/ctx.go index 63f0f7f63c..f486e7e6ba 100644 --- a/pkg/controllers/pd/tasks/ctx.go +++ b/pkg/controllers/pd/tasks/ctx.go @@ -85,9 +85,11 @@ func (ctx *ReconcileContext) SetCluster(c *v1alpha1.Cluster) { ctx.Cluster = c } -// Pod always uses same namespace and name of PD func (ctx *ReconcileContext) PodKey() types.NamespacedName { - return ctx.Key + return types.NamespacedName{ + Namespace: ctx.PD.Namespace, + Name: ctx.PD.PodName(), + } } func (ctx *ReconcileContext) GetPod() *corev1.Pod { diff --git a/pkg/controllers/pd/tasks/finalizer.go b/pkg/controllers/pd/tasks/finalizer.go index 8acc62752b..17199a9351 100644 --- a/pkg/controllers/pd/tasks/finalizer.go +++ b/pkg/controllers/pd/tasks/finalizer.go @@ -15,7 +15,13 @@ package tasks import ( + "context" + + corev1 "k8s.io/api/core/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" "github.com/pingcap/tidb-operator/pkg/utils/k8s" "github.com/pingcap/tidb-operator/pkg/utils/task/v3" ) @@ -30,19 +36,26 @@ func TaskFinalizerDel(ctx *ReconcileContext, c client.Client) task.Task { return task.Fail().With("cannot delete member: %v", err) } - if err := k8s.EnsureInstanceSubResourceDeleted(ctx, c, - ctx.PD.Namespace, ctx.PD.Name); err != nil { + wait, err := EnsureSubResourcesDeleted(ctx, c, ctx.PD) + if err != nil { return task.Fail().With("cannot delete subresources: %v", err) } + if wait { + return task.Wait().With("wait all subresources deleted") + } + if err := k8s.RemoveFinalizer(ctx, c, ctx.PD); err != nil { return task.Fail().With("cannot remove finalizer: %v", err) } case ctx.IsAvailable: - if err := k8s.EnsureInstanceSubResourceDeleted(ctx, c, - ctx.PD.Namespace, ctx.PD.Name); err != nil { + wait, err := EnsureSubResourcesDeleted(ctx, c, ctx.PD) + if err != nil { return task.Fail().With("cannot delete subresources: %v", err) } + if wait { + return task.Wait().With("wait all subresources deleted") + } if err := k8s.RemoveFinalizer(ctx, c, ctx.PD); err != nil { return task.Fail().With("cannot remove finalizer: %v", err) @@ -64,3 +77,20 @@ func TaskFinalizerAdd(ctx *ReconcileContext, c client.Client) task.Task { return task.Complete().With("finalizer is added") }) } + +func EnsureSubResourcesDeleted(ctx context.Context, c client.Client, pd *v1alpha1.PD) (wait bool, _ error) { + wait1, err := 
k8s.DeleteInstanceSubresource(ctx, c, runtime.FromPD(pd), &corev1.PodList{}) + if err != nil { + return false, err + } + wait2, err := k8s.DeleteInstanceSubresource(ctx, c, runtime.FromPD(pd), &corev1.ConfigMapList{}) + if err != nil { + return false, err + } + wait3, err := k8s.DeleteInstanceSubresource(ctx, c, runtime.FromPD(pd), &corev1.PersistentVolumeClaimList{}) + if err != nil { + return false, err + } + + return wait1 || wait2 || wait3, nil +} diff --git a/pkg/controllers/pd/tasks/pod.go b/pkg/controllers/pd/tasks/pod.go index fa4bc72e1b..6ab993ad93 100644 --- a/pkg/controllers/pd/tasks/pod.go +++ b/pkg/controllers/pd/tasks/pod.go @@ -40,7 +40,7 @@ const ( ) func TaskPod(ctx *ReconcileContext, logger logr.Logger, c client.Client) task.Task { - return task.NameTaskFunc("ConfigMap", func() task.Result { + return task.NameTaskFunc("Pod", func() task.Result { expected := newPod(ctx.Cluster, ctx.PDGroup, ctx.PD, ctx.ConfigHash) if ctx.Pod == nil { // We have to refresh cache of members to make sure a pd without pod is unhealthy. @@ -134,7 +134,7 @@ func newPod(cluster *v1alpha1.Cluster, pdg *v1alpha1.PDGroup, pd *v1alpha1.PD, c VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ LocalObjectReference: corev1.LocalObjectReference{ - Name: ConfigMapName(pd.Name), + Name: pd.PodName(), }, }, }, @@ -158,7 +158,7 @@ func newPod(cluster *v1alpha1.Cluster, pdg *v1alpha1.PDGroup, pd *v1alpha1.PD, c Name: name, VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: PersistentVolumeClaimName(pd.Name, vol.Name), + ClaimName: PersistentVolumeClaimName(pd.PodName(), vol.Name), }, }, }) @@ -169,12 +169,11 @@ func newPod(cluster *v1alpha1.Cluster, pdg *v1alpha1.PDGroup, pd *v1alpha1.PD, c } if cluster.IsTLSClusterEnabled() { - groupName := pd.Labels[v1alpha1.LabelKeyGroup] vols = append(vols, corev1.Volume{ Name: v1alpha1.PDClusterTLSVolumeName, VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: cluster.TLSClusterSecretName(groupName), + SecretName: pd.TLSClusterSecretName(), }, }, }) @@ -207,7 +206,7 @@ func newPod(cluster *v1alpha1.Cluster, pdg *v1alpha1.PDGroup, pd *v1alpha1.PD, c pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: pd.Namespace, - Name: pd.Name, + Name: pd.PodName(), Labels: maputil.Merge(pd.Labels, map[string]string{ v1alpha1.LabelKeyInstance: pd.Name, v1alpha1.LabelKeyConfigHash: configHash, @@ -218,7 +217,7 @@ func newPod(cluster *v1alpha1.Cluster, pdg *v1alpha1.PDGroup, pd *v1alpha1.PD, c }, }, Spec: corev1.PodSpec{ - Hostname: pd.Name, + Hostname: pd.PodName(), Subdomain: pd.Spec.Subdomain, NodeSelector: pd.Spec.Topology, Containers: []corev1.Container{ diff --git a/pkg/controllers/pd/tasks/pvc.go b/pkg/controllers/pd/tasks/pvc.go index c8262d7893..91b9224904 100644 --- a/pkg/controllers/pd/tasks/pvc.go +++ b/pkg/controllers/pd/tasks/pvc.go @@ -47,7 +47,7 @@ func newPVCs(pd *v1alpha1.PD) []*corev1.PersistentVolumeClaim { vol := &pd.Spec.Volumes[i] pvcs = append(pvcs, &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ - Name: PersistentVolumeClaimName(pd.Name, vol.Name), + Name: PersistentVolumeClaimName(pd.PodName(), vol.Name), Namespace: pd.Namespace, Labels: maputil.Merge(pd.Labels, map[string]string{ v1alpha1.LabelKeyInstance: pd.Name, diff --git a/pkg/controllers/pd/tasks/util.go b/pkg/controllers/pd/tasks/util.go index 3a35826811..d0281664b9 100644 --- a/pkg/controllers/pd/tasks/util.go +++ b/pkg/controllers/pd/tasks/util.go @@ -23,16 
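The PD finalizer above switches from a single fire-and-forget deletion helper to `EnsureSubResourcesDeleted`, which reports whether any pod, ConfigMap, or PVC of the instance still exists; the task then returns `task.Wait()` and retries instead of removing the finalizer early. A stripped-down sketch of that control flow, using a stand-in function type instead of the operator's client and task packages:

```go
package main

import (
	"context"
	"fmt"
)

// deleteFn stands in for k8s.DeleteInstanceSubresource: request deletion of one
// kind of subresource and report whether some objects still remain.
type deleteFn func(ctx context.Context) (wait bool, err error)

// ensureSubResourcesDeleted mirrors the pattern added for PD (and repeated for
// TiDB/TiKV/TiFlash): run every deletion, fail fast on errors, and report
// wait=true if anything is not fully gone yet.
func ensureSubResourcesDeleted(ctx context.Context, deletes ...deleteFn) (bool, error) {
	wait := false
	for _, del := range deletes {
		w, err := del(ctx)
		if err != nil {
			return false, err
		}
		wait = wait || w
	}
	return wait, nil
}

func main() {
	ctx := context.Background()
	pods := deleteFn(func(context.Context) (bool, error) { return true, nil })  // pod still terminating
	cms := deleteFn(func(context.Context) (bool, error) { return false, nil })  // ConfigMap already gone
	pvcs := deleteFn(func(context.Context) (bool, error) { return false, nil }) // PVCs already gone

	wait, err := ensureSubResourcesDeleted(ctx, pods, cms, pvcs)
	fmt.Println(wait, err) // true <nil> -> the reconcile task waits and retries later
}
```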
+23,12 @@ import ( "github.com/pingcap/tidb-operator/apis/core/v1alpha1" ) -func ConfigMapName(pdName string) string { - return pdName -} - -func PersistentVolumeClaimName(pdName, volName string) string { +func PersistentVolumeClaimName(podName, volName string) string { // ref: https://github.com/pingcap/tidb-operator/blob/v1.6.0/pkg/apis/pingcap/v1alpha1/helpers.go#L92 if volName == "" { - return "pd-" + pdName + return "pd-" + podName } - return "pd-" + pdName + "-" + volName + return "pd-" + podName + "-" + volName } func LongestHealthPeer(pd *v1alpha1.PD, peers []*v1alpha1.PD) string { diff --git a/pkg/controllers/pdgroup/tasks/svc.go b/pkg/controllers/pdgroup/tasks/svc.go index fafea1b251..20f29640c3 100644 --- a/pkg/controllers/pdgroup/tasks/svc.go +++ b/pkg/controllers/pdgroup/tasks/svc.go @@ -69,7 +69,7 @@ func newHeadlessService(pdg *v1alpha1.PDGroup) *corev1.Service { ipFamilyPolicy := corev1.IPFamilyPolicyPreferDualStack return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: HeadlessServiceName(pdg.Spec.Cluster.Name, pdg.Name), + Name: HeadlessServiceName(pdg.Name), Namespace: pdg.Namespace, Labels: map[string]string{ v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, @@ -114,7 +114,7 @@ func newInternalService(pdg *v1alpha1.PDGroup) *corev1.Service { ipFamilyPolicy := corev1.IPFamilyPolicyPreferDualStack return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s", pdg.Spec.Cluster.Name, pdg.Name), + Name: fmt.Sprintf("%s-pd", pdg.Name), Namespace: pdg.Namespace, Labels: map[string]string{ v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, diff --git a/pkg/controllers/pdgroup/tasks/updater.go b/pkg/controllers/pdgroup/tasks/updater.go index 233fbd015d..68fc7eab9c 100644 --- a/pkg/controllers/pdgroup/tasks/updater.go +++ b/pkg/controllers/pdgroup/tasks/updater.go @@ -165,10 +165,13 @@ func needVersionUpgrade(pdg *v1alpha1.PDGroup) bool { return pdg.Spec.Version != pdg.Status.Version && pdg.Status.Version != "" } +const ( + suffixLen = 6 +) + func PDNewer(pdg *v1alpha1.PDGroup, rev string) updater.NewFactory[*runtime.PD] { return updater.NewFunc[*runtime.PD](func() *runtime.PD { - //nolint:mnd // refactor to use a constant - name := fmt.Sprintf("%s-%s-%s", pdg.Spec.Cluster.Name, pdg.Name, random.Random(6)) + name := fmt.Sprintf("%s-%s", pdg.Name, random.Random(suffixLen)) spec := pdg.Spec.Template.Spec.DeepCopy() var bootAnno map[string]string @@ -202,7 +205,7 @@ func PDNewer(pdg *v1alpha1.PDGroup, rev string) updater.NewFactory[*runtime.PD] Spec: v1alpha1.PDSpec{ Cluster: pdg.Spec.Cluster, Version: pdg.Spec.Version, - Subdomain: HeadlessServiceName(pdg.Spec.Cluster.Name, pdg.Name), + Subdomain: HeadlessServiceName(pdg.Name), PDTemplateSpec: *spec, }, } diff --git a/pkg/controllers/pdgroup/tasks/util.go b/pkg/controllers/pdgroup/tasks/util.go index 9e0cd0c29c..6d946f7f3c 100644 --- a/pkg/controllers/pdgroup/tasks/util.go +++ b/pkg/controllers/pdgroup/tasks/util.go @@ -21,9 +21,10 @@ import ( "github.com/pingcap/tidb-operator/pkg/updater" ) -// TODO: fix length issue -func HeadlessServiceName(clusterName, groupName string) string { - return fmt.Sprintf("%s-%s-peer", clusterName, groupName) +// TODO(liubo02): fix length issue +// TODO(liubo02): extract into common utils +func HeadlessServiceName(groupName string) string { + return fmt.Sprintf("%s-pd-peer", groupName) } func NotLeaderPolicy() updater.PreferPolicy[*runtime.PD] { diff --git a/pkg/controllers/tidb/tasks/cm.go b/pkg/controllers/tidb/tasks/cm.go index 
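With the cluster-name prefix dropped, the PDGroup's headless (peer) service becomes `<group>-pd-peer` and its client service `<group>-pd`, and the headless name is written into each instance's `Subdomain`. A tiny illustration with a made-up group name `basic`; the TiDB, TiKV, and TiFlash groups follow the same pattern later in this diff:

```go
package main

import "fmt"

// These mirror the renamed helpers for a PD group; "basic" is only an example.
func headlessServiceName(groupName string) string { return fmt.Sprintf("%s-pd-peer", groupName) }
func serviceName(groupName string) string         { return fmt.Sprintf("%s-pd", groupName) }

func main() {
	group := "basic"
	fmt.Println(headlessServiceName(group)) // basic-pd-peer, also used as .Spec.Subdomain
	fmt.Println(serviceName(group))         // basic-pd, the internal client service
}
```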
8512a231dc..c570c498ca 100644 --- a/pkg/controllers/tidb/tasks/cm.go +++ b/pkg/controllers/tidb/tasks/cm.go @@ -75,7 +75,7 @@ func (t *TaskConfigMap) Sync(ctx task.Context[ReconcileContext]) task.Result { func newConfigMap(tidb *v1alpha1.TiDB, data []byte, hash string) *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: ConfigMapName(tidb.Name), + Name: tidb.PodName(), Namespace: tidb.Namespace, Labels: maputil.Merge(tidb.Labels, map[string]string{ v1alpha1.LabelKeyInstance: tidb.Name, diff --git a/pkg/controllers/tidb/tasks/cm_test.go b/pkg/controllers/tidb/tasks/cm_test.go index 10ee3b1ec4..887e354fbc 100644 --- a/pkg/controllers/tidb/tasks/cm_test.go +++ b/pkg/controllers/tidb/tasks/cm_test.go @@ -107,7 +107,7 @@ func TestConfigMap(t *testing.T) { expected: task.Complete().With(""), expectedCM: &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-tidb", + Name: "test-tidb-tidb", Labels: map[string]string{ "aaa": "bbb", v1alpha1.LabelKeyInstance: "test-tidb", @@ -126,7 +126,7 @@ func TestConfigMap(t *testing.T) { }, }, Data: map[string]string{ - v1alpha1.ConfigFileName: `advertise-address = 'test-tidb.subdomain.default.svc' + v1alpha1.ConfigFileName: `advertise-address = 'test-tidb-tidb.subdomain.default.svc' graceful-wait-before-shutdown = 30 host = '::' path = 'test-pd.default:2379' @@ -165,7 +165,7 @@ graceful-wait-before-shutdown = 60`), expected: task.Complete().With(""), expectedCM: &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-tidb", + Name: "test-tidb-tidb", Labels: map[string]string{ "aaa": "bbb", v1alpha1.LabelKeyInstance: "test-tidb", @@ -184,7 +184,7 @@ graceful-wait-before-shutdown = 60`), }, }, Data: map[string]string{ - v1alpha1.ConfigFileName: `advertise-address = 'test-tidb.subdomain.default.svc' + v1alpha1.ConfigFileName: `advertise-address = 'test-tidb-tidb.subdomain.default.svc' graceful-wait-before-shutdown = 60 host = '::' path = 'test-pd.default:2379' @@ -230,7 +230,7 @@ slow-query-file = '/var/log/tidb/slowlog' cm := corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: ConfigMapName(ctx.TiDB.Name), + Name: ConfigMapName(ctx.TiDB.PodName()), }, } err := fc.Get(ctx, client.ObjectKeyFromObject(&cm), &cm) diff --git a/pkg/controllers/tidb/tasks/ctx.go b/pkg/controllers/tidb/tasks/ctx.go index 67c3022a24..4b5da308a2 100644 --- a/pkg/controllers/tidb/tasks/ctx.go +++ b/pkg/controllers/tidb/tasks/ctx.go @@ -104,7 +104,7 @@ func TaskContextPod(c client.Client) task.Task[ReconcileContext] { rtx := ctx.Self() var pod corev1.Pod if err := c.Get(ctx, client.ObjectKey{ - Name: rtx.TiDB.Name, + Name: rtx.TiDB.PodName(), Namespace: rtx.TiDB.Namespace, }, &pod); err != nil { if errors.IsNotFound(err) { diff --git a/pkg/controllers/tidb/tasks/finalizer.go b/pkg/controllers/tidb/tasks/finalizer.go index 0ce3227725..96735f6cd3 100644 --- a/pkg/controllers/tidb/tasks/finalizer.go +++ b/pkg/controllers/tidb/tasks/finalizer.go @@ -15,7 +15,13 @@ package tasks import ( + "context" + + corev1 "k8s.io/api/core/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" "github.com/pingcap/tidb-operator/pkg/utils/k8s" "github.com/pingcap/tidb-operator/pkg/utils/task/v2" ) @@ -23,10 +29,13 @@ import ( func TaskFinalizerDel(c client.Client) task.Task[ReconcileContext] { return task.NameTaskFunc("FinalizerDel", func(ctx task.Context[ReconcileContext]) task.Result { rtx := ctx.Self() - if err := 
k8s.EnsureInstanceSubResourceDeleted(ctx, c, - rtx.TiDB.Namespace, rtx.TiDB.Name); err != nil { + wait, err := EnsureSubResourcesDeleted(ctx, c, rtx.TiDB) + if err != nil { return task.Fail().With("cannot delete subresources: %w", err) } + if wait { + return task.Wait().With("wait all subresources deleted") + } if err := k8s.RemoveFinalizer(ctx, c, rtx.TiDB); err != nil { return task.Fail().With("cannot remove finalizer: %w", err) } @@ -45,3 +54,20 @@ func TaskFinalizerAdd(c client.Client) task.Task[ReconcileContext] { return task.Complete().With("finalizer is added") }) } + +func EnsureSubResourcesDeleted(ctx context.Context, c client.Client, db *v1alpha1.TiDB) (wait bool, _ error) { + wait1, err := k8s.DeleteInstanceSubresource(ctx, c, runtime.FromTiDB(db), &corev1.PodList{}) + if err != nil { + return false, err + } + wait2, err := k8s.DeleteInstanceSubresource(ctx, c, runtime.FromTiDB(db), &corev1.ConfigMapList{}) + if err != nil { + return false, err + } + wait3, err := k8s.DeleteInstanceSubresource(ctx, c, runtime.FromTiDB(db), &corev1.PersistentVolumeClaimList{}) + if err != nil { + return false, err + } + + return wait1 || wait2 || wait3, nil +} diff --git a/pkg/controllers/tidb/tasks/pod.go b/pkg/controllers/tidb/tasks/pod.go index e9536c6369..1d3403f1ab 100644 --- a/pkg/controllers/tidb/tasks/pod.go +++ b/pkg/controllers/tidb/tasks/pod.go @@ -115,14 +115,15 @@ func (t *TaskPod) Sync(ctx task.Context[ReconcileContext]) task.Result { } func (*TaskPod) newPod(cluster *v1alpha1.Cluster, dbg *v1alpha1.TiDBGroup, - tidb *v1alpha1.TiDB, gracePeriod int64, configHash string) *corev1.Pod { + tidb *v1alpha1.TiDB, gracePeriod int64, configHash string, +) *corev1.Pod { vols := []corev1.Volume{ { Name: v1alpha1.VolumeNameConfig, VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ LocalObjectReference: corev1.LocalObjectReference{ - Name: ConfigMapName(tidb.Name), + Name: tidb.PodName(), }, }, }, @@ -143,7 +144,7 @@ func (*TaskPod) newPod(cluster *v1alpha1.Cluster, dbg *v1alpha1.TiDBGroup, Name: name, VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: PersistentVolumeClaimName(tidb.Name, vol.Name), + ClaimName: PersistentVolumeClaimName(tidb.PodName(), vol.Name), }, }, }) @@ -174,7 +175,7 @@ func (*TaskPod) newPod(cluster *v1alpha1.Cluster, dbg *v1alpha1.TiDBGroup, Name: v1alpha1.TiDBClusterTLSVolumeName, VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: cluster.TLSClusterSecretName(dbg.Name), + SecretName: tidb.TLSClusterSecretName(), }, }, }) @@ -240,7 +241,7 @@ func (*TaskPod) newPod(cluster *v1alpha1.Cluster, dbg *v1alpha1.TiDBGroup, pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: tidb.Namespace, - Name: tidb.Name, + Name: tidb.PodName(), Labels: maputil.Merge(tidb.Labels, map[string]string{ v1alpha1.LabelKeyInstance: tidb.Name, v1alpha1.LabelKeyConfigHash: configHash, @@ -251,7 +252,7 @@ func (*TaskPod) newPod(cluster *v1alpha1.Cluster, dbg *v1alpha1.TiDBGroup, }, }, Spec: corev1.PodSpec{ - Hostname: tidb.Name, + Hostname: tidb.PodName(), Subdomain: tidb.Spec.Subdomain, NodeSelector: tidb.Spec.Topology, Containers: []corev1.Container{ diff --git a/pkg/controllers/tidb/tasks/pvc.go b/pkg/controllers/tidb/tasks/pvc.go index a546845201..49a572cbdd 100644 --- a/pkg/controllers/tidb/tasks/pvc.go +++ b/pkg/controllers/tidb/tasks/pvc.go @@ -63,7 +63,7 @@ func newPVCs(tidb *v1alpha1.TiDB) []*corev1.PersistentVolumeClaim { vol := &tidb.Spec.Volumes[i] pvcs = 
append(pvcs, &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ - Name: PersistentVolumeClaimName(tidb.Name, vol.Name), + Name: PersistentVolumeClaimName(tidb.PodName(), vol.Name), Namespace: tidb.Namespace, Labels: maputil.Merge(tidb.Labels, map[string]string{ v1alpha1.LabelKeyInstance: tidb.Name, diff --git a/pkg/controllers/tidb/tasks/server_labels.go b/pkg/controllers/tidb/tasks/server_labels.go index 073e82596c..9f23a8fdb3 100644 --- a/pkg/controllers/tidb/tasks/server_labels.go +++ b/pkg/controllers/tidb/tasks/server_labels.go @@ -56,7 +56,7 @@ func (t *TaskServerLabels) Sync(ctx task.Context[ReconcileContext]) task.Result nodeName := rtx.Pod.Spec.NodeName if nodeName == "" { - return task.Fail().With("pod %s/%s has not been scheduled", rtx.TiDB.Namespace, rtx.TiDB.Name) + return task.Fail().With("pod %s/%s has not been scheduled", rtx.Pod.Namespace, rtx.Pod.Name) } var node corev1.Node if err := t.Client.Get(ctx, client.ObjectKey{Name: nodeName}, &node); err != nil { diff --git a/pkg/controllers/tidb/tasks/util.go b/pkg/controllers/tidb/tasks/util.go index 90a536eb4a..0940a5c54c 100644 --- a/pkg/controllers/tidb/tasks/util.go +++ b/pkg/controllers/tidb/tasks/util.go @@ -24,15 +24,15 @@ func ConfigMapName(tidbName string) string { return tidbName } -func PersistentVolumeClaimName(tidbName, volName string) string { +func PersistentVolumeClaimName(podName, volName string) string { // ref: https://github.com/pingcap/tidb-operator/blob/v1.6.0/pkg/apis/pingcap/v1alpha1/helpers.go#L92 if volName == "" { - return "tidb-" + tidbName + return "tidb-" + podName } - return "tidb-" + tidbName + "-" + volName + return "tidb-" + podName + "-" + volName } // TiDBServiceURL returns the service URL of a tidb member. func TiDBServiceURL(tidb *v1alpha1.TiDB, scheme string) string { - return fmt.Sprintf("%s://%s.%s.%s.svc:%d", scheme, tidb.Name, tidb.Spec.Subdomain, tidb.Namespace, tidb.GetStatusPort()) + return fmt.Sprintf("%s://%s.%s.%s.svc:%d", scheme, tidb.PodName(), tidb.Spec.Subdomain, tidb.Namespace, tidb.GetStatusPort()) } diff --git a/pkg/controllers/tidbgroup/tasks/svc.go b/pkg/controllers/tidbgroup/tasks/svc.go index 94e8a40656..e79a44b37e 100644 --- a/pkg/controllers/tidbgroup/tasks/svc.go +++ b/pkg/controllers/tidbgroup/tasks/svc.go @@ -69,7 +69,7 @@ func newHeadlessService(tidbg *v1alpha1.TiDBGroup) *corev1.Service { ipFamilyPolicy := corev1.IPFamilyPolicyPreferDualStack return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: HeadlessServiceName(tidbg.Spec.Cluster.Name, tidbg.Name), + Name: HeadlessServiceName(tidbg.Name), Namespace: tidbg.Namespace, Labels: map[string]string{ v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, @@ -112,7 +112,7 @@ func newService(tidbg *v1alpha1.TiDBGroup) *corev1.Service { return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s", tidbg.Spec.Cluster.Name, tidbg.Name), + Name: tidbg.Name + "-tidb", Namespace: tidbg.Namespace, Labels: map[string]string{ v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, diff --git a/pkg/controllers/tidbgroup/tasks/updater.go b/pkg/controllers/tidbgroup/tasks/updater.go index 46b5d2bfac..5b7c5ada81 100644 --- a/pkg/controllers/tidbgroup/tasks/updater.go +++ b/pkg/controllers/tidbgroup/tasks/updater.go @@ -153,10 +153,13 @@ func needVersionUpgrade(dbg *v1alpha1.TiDBGroup) bool { return dbg.Spec.Version != dbg.Status.Version && dbg.Status.Version != "" } +const ( + suffixLen = 6 +) + func TiDBNewer(dbg *v1alpha1.TiDBGroup, rev string) 
updater.NewFactory[*runtime.TiDB] { return updater.NewFunc[*runtime.TiDB](func() *runtime.TiDB { - //nolint:mnd // refactor to use a constant - name := fmt.Sprintf("%s-%s-%s", dbg.Spec.Cluster.Name, dbg.Name, random.Random(6)) + name := fmt.Sprintf("%s-%s", dbg.Name, random.Random(suffixLen)) spec := dbg.Spec.Template.Spec.DeepCopy() tidb := &v1alpha1.TiDB{ @@ -178,7 +181,7 @@ func TiDBNewer(dbg *v1alpha1.TiDBGroup, rev string) updater.NewFactory[*runtime. Spec: v1alpha1.TiDBSpec{ Cluster: dbg.Spec.Cluster, Version: dbg.Spec.Version, - Subdomain: HeadlessServiceName(dbg.Spec.Cluster.Name, dbg.Name), // same as headless service + Subdomain: HeadlessServiceName(dbg.Name), // same as headless service TiDBTemplateSpec: *spec, }, } diff --git a/pkg/controllers/tidbgroup/tasks/util.go b/pkg/controllers/tidbgroup/tasks/util.go index cb1ec89780..c30620815f 100644 --- a/pkg/controllers/tidbgroup/tasks/util.go +++ b/pkg/controllers/tidbgroup/tasks/util.go @@ -18,6 +18,6 @@ import ( "fmt" ) -func HeadlessServiceName(clusterName, groupName string) string { - return fmt.Sprintf("%s-%s-peer", clusterName, groupName) +func HeadlessServiceName(groupName string) string { + return fmt.Sprintf("%s-tidb-peer", groupName) } diff --git a/pkg/controllers/tiflash/tasks/cm.go b/pkg/controllers/tiflash/tasks/cm.go index 4b2fb74d7e..4919f484f5 100644 --- a/pkg/controllers/tiflash/tasks/cm.go +++ b/pkg/controllers/tiflash/tasks/cm.go @@ -86,7 +86,7 @@ func (t *TaskConfigMap) Sync(ctx task.Context[ReconcileContext]) task.Result { func newConfigMap(tiflash *v1alpha1.TiFlash, flashData, proxyData []byte, hash string) *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: ConfigMapName(tiflash.Name), + Name: tiflash.PodName(), Namespace: tiflash.Namespace, Labels: maputil.Merge(tiflash.Labels, map[string]string{ v1alpha1.LabelKeyInstance: tiflash.Name, diff --git a/pkg/controllers/tiflash/tasks/ctx.go b/pkg/controllers/tiflash/tasks/ctx.go index ad6eaf05df..d923f39f79 100644 --- a/pkg/controllers/tiflash/tasks/ctx.go +++ b/pkg/controllers/tiflash/tasks/ctx.go @@ -140,7 +140,7 @@ func TaskContextPod(c client.Client) task.Task[ReconcileContext] { rtx := ctx.Self() var pod corev1.Pod if err := c.Get(ctx, client.ObjectKey{ - Name: rtx.TiFlash.Name, + Name: rtx.TiFlash.PodName(), Namespace: rtx.TiFlash.Namespace, }, &pod); err != nil { if errors.IsNotFound(err) { diff --git a/pkg/controllers/tiflash/tasks/finalizer.go b/pkg/controllers/tiflash/tasks/finalizer.go index 55048a0497..92d48717c7 100644 --- a/pkg/controllers/tiflash/tasks/finalizer.go +++ b/pkg/controllers/tiflash/tasks/finalizer.go @@ -15,10 +15,14 @@ package tasks import ( + "context" "time" + corev1 "k8s.io/api/core/v1" + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" "github.com/pingcap/tidb-operator/pkg/utils/k8s" "github.com/pingcap/tidb-operator/pkg/utils/task/v2" ) @@ -32,10 +36,15 @@ func TaskFinalizerDel(c client.Client) task.Task[ReconcileContext] { rtx := ctx.Self() switch { case !rtx.Cluster.GetDeletionTimestamp().IsZero(): - if err := k8s.EnsureInstanceSubResourceDeleted(ctx, c, - rtx.TiFlash.Namespace, rtx.TiFlash.Name, client.GracePeriodSeconds(1)); err != nil { + wait, err := EnsureSubResourcesDeleted(ctx, c, rtx.TiFlash) + if err != nil { return task.Fail().With("cannot delete sub resources: %w", err) } + + if wait { + return task.Wait().With("wait all subresources deleted") + } + // whole cluster is deleting if err 
:= k8s.RemoveFinalizer(ctx, c, rtx.TiFlash); err != nil { return task.Fail().With("cannot remove finalizer: %w", err) @@ -46,9 +55,13 @@ func TaskFinalizerDel(c client.Client) task.Task[ReconcileContext] { return task.Retry(removingWaitInterval).With("wait until the store is removed") case rtx.StoreState == v1alpha1.StoreStateRemoved || rtx.StoreID == "": - if err := k8s.EnsureInstanceSubResourceDeleted(ctx, c, - rtx.TiFlash.Namespace, rtx.TiFlash.Name, client.GracePeriodSeconds(1)); err != nil { - return task.Fail().With("cannot delete subresources: %w", err) + wait, err := EnsureSubResourcesDeleted(ctx, c, rtx.TiFlash) + if err != nil { + return task.Fail().With("cannot delete sub resources: %w", err) + } + + if wait { + return task.Wait().With("wait all subresources deleted") } // Store ID is empty may because of tiflash is not initialized // TODO: check whether tiflash is initialized @@ -76,3 +89,20 @@ func TaskFinalizerAdd(c client.Client) task.Task[ReconcileContext] { return task.Complete().With("finalizer is added") }) } + +func EnsureSubResourcesDeleted(ctx context.Context, c client.Client, f *v1alpha1.TiFlash) (wait bool, _ error) { + wait1, err := k8s.DeleteInstanceSubresource(ctx, c, runtime.FromTiFlash(f), &corev1.PodList{}) + if err != nil { + return false, err + } + wait2, err := k8s.DeleteInstanceSubresource(ctx, c, runtime.FromTiFlash(f), &corev1.ConfigMapList{}) + if err != nil { + return false, err + } + wait3, err := k8s.DeleteInstanceSubresource(ctx, c, runtime.FromTiFlash(f), &corev1.PersistentVolumeClaimList{}) + if err != nil { + return false, err + } + + return wait1 || wait2 || wait3, nil +} diff --git a/pkg/controllers/tiflash/tasks/pod.go b/pkg/controllers/tiflash/tasks/pod.go index c0f94ece76..24e20e6a21 100644 --- a/pkg/controllers/tiflash/tasks/pod.go +++ b/pkg/controllers/tiflash/tasks/pod.go @@ -107,7 +107,7 @@ func (*TaskPod) newPod(cluster *v1alpha1.Cluster, _ *v1alpha1.TiFlashGroup, tifl VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ LocalObjectReference: corev1.LocalObjectReference{ - Name: ConfigMapName(tiflash.Name), + Name: tiflash.PodName(), }, }, }, @@ -133,7 +133,7 @@ func (*TaskPod) newPod(cluster *v1alpha1.Cluster, _ *v1alpha1.TiFlashGroup, tifl VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ // the format is "data{i}-tiflash-xxx" to compatible with TiDB Operator v1 - ClaimName: PersistentVolumeClaimName(tiflash.Name, i), + ClaimName: PersistentVolumeClaimName(tiflash.PodName(), i), }, }, }) @@ -148,12 +148,11 @@ func (*TaskPod) newPod(cluster *v1alpha1.Cluster, _ *v1alpha1.TiFlashGroup, tifl } if cluster.IsTLSClusterEnabled() { - groupName := tiflash.Labels[v1alpha1.LabelKeyGroup] vols = append(vols, corev1.Volume{ Name: v1alpha1.TiFlashClusterTLSVolumeName, VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: cluster.TLSClusterSecretName(groupName), + SecretName: tiflash.TLSClusterSecretName(), }, }, }) @@ -167,7 +166,7 @@ func (*TaskPod) newPod(cluster *v1alpha1.Cluster, _ *v1alpha1.TiFlashGroup, tifl pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: tiflash.Namespace, - Name: tiflash.Name, + Name: tiflash.PodName(), Labels: maputil.Merge(tiflash.Labels, map[string]string{ v1alpha1.LabelKeyInstance: tiflash.Name, v1alpha1.LabelKeyConfigHash: configHash, @@ -178,7 +177,7 @@ func (*TaskPod) newPod(cluster *v1alpha1.Cluster, _ *v1alpha1.TiFlashGroup, tifl }, }, Spec: corev1.PodSpec{ - Hostname: tiflash.Name, + Hostname: 
tiflash.PodName(), Subdomain: tiflash.Spec.Subdomain, NodeSelector: tiflash.Spec.Topology, InitContainers: []corev1.Container{ diff --git a/pkg/controllers/tiflash/tasks/pvc.go b/pkg/controllers/tiflash/tasks/pvc.go index ddbfcb4f5e..872dee19fb 100644 --- a/pkg/controllers/tiflash/tasks/pvc.go +++ b/pkg/controllers/tiflash/tasks/pvc.go @@ -64,7 +64,7 @@ func newPVCs(tiflash *v1alpha1.TiFlash) []*corev1.PersistentVolumeClaim { pvcs = append(pvcs, &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ // the format is "data{i}-tiflash-xxx" to compatible with TiDB Operator v1 - Name: PersistentVolumeClaimName(tiflash.Name, i), + Name: PersistentVolumeClaimName(tiflash.PodName(), i), Namespace: tiflash.Namespace, Labels: maputil.Merge(tiflash.Labels, map[string]string{ v1alpha1.LabelKeyInstance: tiflash.Name, diff --git a/pkg/controllers/tiflash/tasks/store_labels.go b/pkg/controllers/tiflash/tasks/store_labels.go index 93c037e34d..3101171b85 100644 --- a/pkg/controllers/tiflash/tasks/store_labels.go +++ b/pkg/controllers/tiflash/tasks/store_labels.go @@ -53,7 +53,7 @@ func (t *TaskStoreLabels) Sync(ctx task.Context[ReconcileContext]) task.Result { nodeName := rtx.Pod.Spec.NodeName if nodeName == "" { - return task.Fail().With("pod %s/%s has not been scheduled", rtx.TiFlash.Namespace, rtx.TiFlash.Name) + return task.Fail().With("pod %s/%s has not been scheduled", rtx.Pod.Namespace, rtx.Pod.Name) } var node corev1.Node diff --git a/pkg/controllers/tiflashgroup/tasks/svc.go b/pkg/controllers/tiflashgroup/tasks/svc.go index ebe4305c41..e8ff42dfcb 100644 --- a/pkg/controllers/tiflashgroup/tasks/svc.go +++ b/pkg/controllers/tiflashgroup/tasks/svc.go @@ -60,20 +60,20 @@ func (t *TaskService) Sync(ctx task.Context[ReconcileContext]) task.Result { return task.Complete().With("headless service of tiflash has been applied") } -func newHeadlessService(flashg *v1alpha1.TiFlashGroup) *corev1.Service { +func newHeadlessService(fg *v1alpha1.TiFlashGroup) *corev1.Service { ipFamilyPolicy := corev1.IPFamilyPolicyPreferDualStack return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: HeadlessServiceName(flashg.Spec.Cluster.Name, flashg.Name), - Namespace: flashg.Namespace, + Name: HeadlessServiceName(fg.Name), + Namespace: fg.Namespace, Labels: map[string]string{ v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash, - v1alpha1.LabelKeyCluster: flashg.Spec.Cluster.Name, - v1alpha1.LabelKeyGroup: flashg.Name, + v1alpha1.LabelKeyCluster: fg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: fg.Name, }, OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(flashg, v1alpha1.SchemeGroupVersion.WithKind("TiFlashGroup")), + *metav1.NewControllerRef(fg, v1alpha1.SchemeGroupVersion.WithKind("TiFlashGroup")), }, }, Spec: corev1.ServiceSpec{ @@ -83,31 +83,31 @@ func newHeadlessService(flashg *v1alpha1.TiFlashGroup) *corev1.Service { Selector: map[string]string{ v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash, - v1alpha1.LabelKeyCluster: flashg.Spec.Cluster.Name, - v1alpha1.LabelKeyGroup: flashg.Name, + v1alpha1.LabelKeyCluster: fg.Spec.Cluster.Name, + v1alpha1.LabelKeyGroup: fg.Name, }, Ports: []corev1.ServicePort{ { Name: v1alpha1.TiFlashPortNameFlash, - Port: flashg.GetFlashPort(), + Port: fg.GetFlashPort(), Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromString(v1alpha1.TiFlashPortNameFlash), }, { Name: v1alpha1.TiFlashPortNameProxy, - Port: 
flashg.GetProxyPort(), + Port: fg.GetProxyPort(), Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromString(v1alpha1.TiFlashPortNameProxy), }, { Name: v1alpha1.TiFlashPortNameMetrics, - Port: flashg.GetMetricsPort(), + Port: fg.GetMetricsPort(), Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromString(v1alpha1.TiFlashPortNameMetrics), }, { Name: v1alpha1.TiFlashPortNameProxyStatus, - Port: flashg.GetProxyStatusPort(), + Port: fg.GetProxyStatusPort(), Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromString(v1alpha1.TiFlashPortNameProxyStatus), }, diff --git a/pkg/controllers/tiflashgroup/tasks/updater.go b/pkg/controllers/tiflashgroup/tasks/updater.go index 6fe7396f77..35e99f5c2d 100644 --- a/pkg/controllers/tiflashgroup/tasks/updater.go +++ b/pkg/controllers/tiflashgroup/tasks/updater.go @@ -150,10 +150,13 @@ func needVersionUpgrade(flashg *v1alpha1.TiFlashGroup) bool { return flashg.Spec.Version != flashg.Status.Version && flashg.Status.Version != "" } +const ( + suffixLen = 6 +) + func TiFlashNewer(fg *v1alpha1.TiFlashGroup, rev string) updater.NewFactory[*runtime.TiFlash] { return updater.NewFunc[*runtime.TiFlash](func() *runtime.TiFlash { - //nolint:mnd // refactor to use a constant - name := fmt.Sprintf("%s-%s-%s", fg.Spec.Cluster.Name, fg.Name, random.Random(6)) + name := fmt.Sprintf("%s-%s", fg.Name, random.Random(suffixLen)) spec := fg.Spec.Template.Spec.DeepCopy() tiflash := &v1alpha1.TiFlash{ @@ -175,7 +178,7 @@ func TiFlashNewer(fg *v1alpha1.TiFlashGroup, rev string) updater.NewFactory[*run Spec: v1alpha1.TiFlashSpec{ Cluster: fg.Spec.Cluster, Version: fg.Spec.Version, - Subdomain: HeadlessServiceName(fg.Spec.Cluster.Name, fg.Name), + Subdomain: HeadlessServiceName(fg.Name), TiFlashTemplateSpec: *spec, }, } diff --git a/pkg/controllers/tiflashgroup/tasks/util.go b/pkg/controllers/tiflashgroup/tasks/util.go index 2b10ada1e2..94cf26869b 100644 --- a/pkg/controllers/tiflashgroup/tasks/util.go +++ b/pkg/controllers/tiflashgroup/tasks/util.go @@ -19,6 +19,6 @@ import ( ) // TODO: fix length issue -func HeadlessServiceName(clusterName, groupName string) string { - return fmt.Sprintf("%s-%s-peer", clusterName, groupName) +func HeadlessServiceName(groupName string) string { + return fmt.Sprintf("%s-tiflash-peer", groupName) } diff --git a/pkg/controllers/tikv/tasks/cm.go b/pkg/controllers/tikv/tasks/cm.go index e2cef72b2a..b4dc0417c6 100644 --- a/pkg/controllers/tikv/tasks/cm.go +++ b/pkg/controllers/tikv/tasks/cm.go @@ -74,7 +74,7 @@ func (t *TaskConfigMap) Sync(ctx task.Context[ReconcileContext]) task.Result { func newConfigMap(tikv *v1alpha1.TiKV, data []byte, hash string) *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: ConfigMapName(tikv.Name), + Name: tikv.PodName(), Namespace: tikv.Namespace, Labels: maputil.Merge(tikv.Labels, map[string]string{ v1alpha1.LabelKeyInstance: tikv.Name, diff --git a/pkg/controllers/tikv/tasks/ctx.go b/pkg/controllers/tikv/tasks/ctx.go index b61c79e3df..963e296d8a 100644 --- a/pkg/controllers/tikv/tasks/ctx.go +++ b/pkg/controllers/tikv/tasks/ctx.go @@ -141,7 +141,7 @@ func TaskContextPod(c client.Client) task.Task[ReconcileContext] { rtx := ctx.Self() var pod corev1.Pod if err := c.Get(ctx, client.ObjectKey{ - Name: rtx.TiKV.Name, + Name: rtx.TiKV.PodName(), Namespace: rtx.TiKV.Namespace, }, &pod); err != nil { if errors.IsNotFound(err) { diff --git a/pkg/controllers/tikv/tasks/finalizer.go b/pkg/controllers/tikv/tasks/finalizer.go index c1f984f90d..cca2333525 100644 --- 
a/pkg/controllers/tikv/tasks/finalizer.go +++ b/pkg/controllers/tikv/tasks/finalizer.go @@ -15,10 +15,14 @@ package tasks import ( + "context" "time" + corev1 "k8s.io/api/core/v1" + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" "github.com/pingcap/tidb-operator/pkg/utils/k8s" "github.com/pingcap/tidb-operator/pkg/utils/task/v2" ) @@ -30,12 +34,19 @@ const ( func TaskFinalizerDel(c client.Client) task.Task[ReconcileContext] { return task.NameTaskFunc("FinalizerDel", func(ctx task.Context[ReconcileContext]) task.Result { rtx := ctx.Self() + regionCount := 0 + if rtx.Store != nil { + regionCount = rtx.Store.RegionCount + } switch { case !rtx.Cluster.GetDeletionTimestamp().IsZero(): - if err := k8s.EnsureInstanceSubResourceDeleted(ctx, c, - rtx.TiKV.Namespace, rtx.TiKV.Name, client.GracePeriodSeconds(1)); err != nil { + wait, err := EnsureSubResourcesDeleted(ctx, c, rtx.TiKV, regionCount) + if err != nil { return task.Fail().With("cannot delete subresources: %w", err) } + if wait { + return task.Wait().With("wait all subresources deleted") + } // whole cluster is deleting if err := k8s.RemoveFinalizer(ctx, c, rtx.TiKV); err != nil { @@ -46,10 +57,13 @@ func TaskFinalizerDel(c client.Client) task.Task[ReconcileContext] { return task.Retry(removingWaitInterval).With("wait until the store is removed") case rtx.StoreState == v1alpha1.StoreStateRemoved || rtx.StoreID == "": - if err := k8s.EnsureInstanceSubResourceDeleted(ctx, c, - rtx.TiKV.Namespace, rtx.TiKV.Name, client.GracePeriodSeconds(1)); err != nil { + wait, err := EnsureSubResourcesDeleted(ctx, c, rtx.TiKV, regionCount) + if err != nil { return task.Fail().With("cannot delete subresources: %w", err) } + if wait { + return task.Wait().With("wait all subresources deleted") + } // Store ID is empty may because of tikv is not initialized // TODO: check whether tikv is initialized if err := k8s.RemoveFinalizer(ctx, c, rtx.TiKV); err != nil { @@ -78,3 +92,21 @@ func TaskFinalizerAdd(c client.Client) task.Task[ReconcileContext] { return task.Complete().With("finalizer is added") }) } + +func EnsureSubResourcesDeleted(ctx context.Context, c client.Client, tikv *v1alpha1.TiKV, regionCount int) (wait bool, _ error) { + gracePeriod := CalcGracePeriod(regionCount) + wait1, err := k8s.DeleteInstanceSubresource(ctx, c, runtime.FromTiKV(tikv), &corev1.PodList{}, client.GracePeriodSeconds(gracePeriod)) + if err != nil { + return false, err + } + wait2, err := k8s.DeleteInstanceSubresource(ctx, c, runtime.FromTiKV(tikv), &corev1.ConfigMapList{}) + if err != nil { + return false, err + } + wait3, err := k8s.DeleteInstanceSubresource(ctx, c, runtime.FromTiKV(tikv), &corev1.PersistentVolumeClaimList{}) + if err != nil { + return false, err + } + + return wait1 || wait2 || wait3, nil +} diff --git a/pkg/controllers/tikv/tasks/pod.go b/pkg/controllers/tikv/tasks/pod.go index d926f31bcc..041e0875ba 100644 --- a/pkg/controllers/tikv/tasks/pod.go +++ b/pkg/controllers/tikv/tasks/pod.go @@ -45,7 +45,11 @@ func TaskPodSuspend(c client.Client) task.Task[ReconcileContext] { if rtx.Pod == nil { return task.Complete().With("pod has been deleted") } - if err := c.Delete(rtx, rtx.Pod); err != nil { + regionCount := 0 + if rtx.Store != nil { + regionCount = rtx.Store.RegionCount + } + if err := DeletePodWithGracePeriod(rtx, c, rtx.Pod, regionCount); err != nil { return task.Fail().With("can't delete pod of tikv: %w", err) } rtx.PodIsTerminating = true @@ -69,7 +73,6 @@ 
func (*TaskPod) Name() string { return "Pod" } -//nolint:gocyclo // refactor if possible func (t *TaskPod) Sync(ctx task.Context[ReconcileContext]) task.Result { rtx := ctx.Self() @@ -85,21 +88,12 @@ func (t *TaskPod) Sync(ctx task.Context[ReconcileContext]) task.Result { // minimize the deletion grace period seconds if !rtx.Pod.GetDeletionTimestamp().IsZero() { - sec := rtx.Pod.GetDeletionGracePeriodSeconds() - regionCount := 0 if rtx.Store != nil { regionCount = rtx.Store.RegionCount } - gracePeriod := int64(regionCount/RegionsPerSecond + 1) - if gracePeriod < MinGracePeriodSeconds { - gracePeriod = MinGracePeriodSeconds - } - - if sec != nil && rtx.Store != nil && *sec > gracePeriod { - if err := t.Client.Delete(ctx, rtx.Pod, client.GracePeriodSeconds(gracePeriod)); err != nil { - return task.Fail().With("cannot minimize the shutdown timeout: %w", err) - } + if err := DeletePodWithGracePeriod(ctx, t.Client, rtx.Pod, regionCount); err != nil { + return task.Fail().With("can't minimize the deletion grace period of pod of tikv: %w", err) } // key will be requeued after the pod is changed @@ -114,8 +108,12 @@ func (t *TaskPod) Sync(ctx task.Context[ReconcileContext]) task.Result { if res == k8s.CompareResultRecreate || (configChanged && rtx.TiKVGroup.Spec.ConfigUpdateStrategy == v1alpha1.ConfigUpdateStrategyRollingUpdate) { t.Logger.Info("will recreate the pod") - if err := t.Client.Delete(rtx, rtx.Pod); err != nil { - return task.Fail().With("can't delete pod of tikv: %w", err) + regionCount := 0 + if rtx.Store != nil { + regionCount = rtx.Store.RegionCount + } + if err := DeletePodWithGracePeriod(ctx, t.Client, rtx.Pod, regionCount); err != nil { + return task.Fail().With("can't minimize the deletion grace period of pod of tikv: %w", err) } rtx.PodIsTerminating = true @@ -140,7 +138,7 @@ func (t *TaskPod) newPod(cluster *v1alpha1.Cluster, kvg *v1alpha1.TiKVGroup, tik VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ LocalObjectReference: corev1.LocalObjectReference{ - Name: ConfigMapName(tikv.Name), + Name: tikv.PodName(), }, }, }, @@ -174,7 +172,7 @@ func (t *TaskPod) newPod(cluster *v1alpha1.Cluster, kvg *v1alpha1.TiKVGroup, tik Name: name, VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: PersistentVolumeClaimName(tikv.Name, vol.Name), + ClaimName: PersistentVolumeClaimName(tikv.PodName(), vol.Name), }, }, }) @@ -185,12 +183,11 @@ func (t *TaskPod) newPod(cluster *v1alpha1.Cluster, kvg *v1alpha1.TiKVGroup, tik } if cluster.IsTLSClusterEnabled() { - groupName := tikv.Labels[v1alpha1.LabelKeyGroup] vols = append(vols, corev1.Volume{ Name: v1alpha1.TiKVClusterTLSVolumeName, VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: cluster.TLSClusterSecretName(groupName), + SecretName: tikv.TLSClusterSecretName(), }, }, }) @@ -225,7 +222,7 @@ func (t *TaskPod) newPod(cluster *v1alpha1.Cluster, kvg *v1alpha1.TiKVGroup, tik pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: tikv.Namespace, - Name: tikv.Name, + Name: tikv.PodName(), Labels: maputil.Merge(tikv.Labels, map[string]string{ v1alpha1.LabelKeyInstance: tikv.Name, v1alpha1.LabelKeyConfigHash: configHash, @@ -239,7 +236,7 @@ func (t *TaskPod) newPod(cluster *v1alpha1.Cluster, kvg *v1alpha1.TiKVGroup, tik // TODO: make the max grace period seconds configurable //nolint:mnd // refactor to use a constant TerminationGracePeriodSeconds: ptr.To[int64](65535), - Hostname: tikv.Name, + Hostname: tikv.PodName(), 
Subdomain: tikv.Spec.Subdomain, NodeSelector: tikv.Spec.Topology, InitContainers: []corev1.Container{ diff --git a/pkg/controllers/tikv/tasks/pvc.go b/pkg/controllers/tikv/tasks/pvc.go index 47fd28a82a..c54adae11d 100644 --- a/pkg/controllers/tikv/tasks/pvc.go +++ b/pkg/controllers/tikv/tasks/pvc.go @@ -63,7 +63,7 @@ func newPVCs(tikv *v1alpha1.TiKV) []*corev1.PersistentVolumeClaim { vol := tikv.Spec.Volumes[i] pvcs = append(pvcs, &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ - Name: PersistentVolumeClaimName(tikv.Name, vol.Name), + Name: PersistentVolumeClaimName(tikv.PodName(), vol.Name), Namespace: tikv.Namespace, Labels: maputil.Merge(tikv.Labels, map[string]string{ v1alpha1.LabelKeyInstance: tikv.Name, diff --git a/pkg/controllers/tikv/tasks/util.go b/pkg/controllers/tikv/tasks/util.go index ed51a327fa..fac5809fe7 100644 --- a/pkg/controllers/tikv/tasks/util.go +++ b/pkg/controllers/tikv/tasks/util.go @@ -14,14 +14,47 @@ package tasks +import ( + "context" + + corev1 "k8s.io/api/core/v1" + + "github.com/pingcap/tidb-operator/pkg/client" +) + func ConfigMapName(tikvName string) string { return tikvName } -func PersistentVolumeClaimName(tikvName, volName string) string { +func PersistentVolumeClaimName(podName, volName string) string { // ref: https://github.com/pingcap/tidb-operator/blob/v1.6.0/pkg/apis/pingcap/v1alpha1/helpers.go#L92 if volName == "" { - return "tikv-" + tikvName + return "tikv-" + podName } - return "tikv-" + tikvName + "-" + volName + return "tikv-" + podName + "-" + volName +} + +func DeletePodWithGracePeriod(ctx context.Context, c client.Client, pod *corev1.Pod, regionCount int) error { + if pod == nil { + return nil + } + sec := pod.GetDeletionGracePeriodSeconds() + gracePeriod := CalcGracePeriod(regionCount) + + if sec == nil || *sec > gracePeriod { + if err := c.Delete(ctx, pod, client.GracePeriodSeconds(gracePeriod)); err != nil { + return err + } + } + + return nil +} + +func CalcGracePeriod(regionCount int) int64 { + gracePeriod := int64(regionCount/RegionsPerSecond + 1) + if gracePeriod < MinGracePeriodSeconds { + gracePeriod = MinGracePeriodSeconds + } + + return gracePeriod } diff --git a/pkg/controllers/tikvgroup/tasks/svc.go b/pkg/controllers/tikvgroup/tasks/svc.go index 46f6022115..a10fffdf5a 100644 --- a/pkg/controllers/tikvgroup/tasks/svc.go +++ b/pkg/controllers/tikvgroup/tasks/svc.go @@ -64,7 +64,7 @@ func newHeadlessService(kvg *v1alpha1.TiKVGroup) *corev1.Service { ipFamilyPolicy := corev1.IPFamilyPolicyPreferDualStack return &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: HeadlessServiceName(kvg.Spec.Cluster.Name, kvg.Name), + Name: HeadlessServiceName(kvg.Name), Namespace: kvg.Namespace, Labels: map[string]string{ v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, diff --git a/pkg/controllers/tikvgroup/tasks/updater.go b/pkg/controllers/tikvgroup/tasks/updater.go index 04fd7ed324..eb760e986d 100644 --- a/pkg/controllers/tikvgroup/tasks/updater.go +++ b/pkg/controllers/tikvgroup/tasks/updater.go @@ -148,10 +148,13 @@ func needVersionUpgrade(kvg *v1alpha1.TiKVGroup) bool { return kvg.Spec.Version != kvg.Status.Version && kvg.Status.Version != "" } +const ( + suffixLen = 6 +) + func TiKVNewer(kvg *v1alpha1.TiKVGroup, rev string) updater.NewFactory[*runtime.TiKV] { return updater.NewFunc[*runtime.TiKV](func()
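`CalcGracePeriod` above scales the deletion grace period with the store's region count, and `DeletePodWithGracePeriod` only ever shortens an existing grace period, never extends it. A small sketch of the arithmetic; `RegionsPerSecond` and `MinGracePeriodSeconds` are defined elsewhere in the package, so the values below are assumptions for illustration:

```go
package main

import "fmt"

// Assumed values for illustration; the real constants live in
// pkg/controllers/tikv/tasks and are not part of this diff hunk.
const (
	regionsPerSecond      = 200
	minGracePeriodSeconds = 10
)

// calcGracePeriod mirrors CalcGracePeriod: regionCount/regionsPerSecond + 1
// seconds, floored at minGracePeriodSeconds.
func calcGracePeriod(regionCount int) int64 {
	gracePeriod := int64(regionCount/regionsPerSecond + 1)
	if gracePeriod < minGracePeriodSeconds {
		gracePeriod = minGracePeriodSeconds
	}
	return gracePeriod
}

func main() {
	for _, regions := range []int{0, 5000, 100000} {
		fmt.Printf("%d regions -> %ds grace period\n", regions, calcGracePeriod(regions))
	}
}
```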
*runtime.TiKV { - //nolint:mnd // refactor to use a constant - name := fmt.Sprintf("%s-%s-%s", kvg.Spec.Cluster.Name, kvg.Name, random.Random(6)) + name := fmt.Sprintf("%s-%s", kvg.Name, random.Random(suffixLen)) spec := kvg.Spec.Template.Spec.DeepCopy() @@ -174,7 +177,7 @@ func TiKVNewer(kvg *v1alpha1.TiKVGroup, rev string) updater.NewFactory[*runtime. Spec: v1alpha1.TiKVSpec{ Cluster: kvg.Spec.Cluster, Version: kvg.Spec.Version, - Subdomain: HeadlessServiceName(kvg.Spec.Cluster.Name, kvg.Name), + Subdomain: HeadlessServiceName(kvg.Name), TiKVTemplateSpec: *spec, }, } diff --git a/pkg/controllers/tikvgroup/tasks/util.go b/pkg/controllers/tikvgroup/tasks/util.go index 2b10ada1e2..447c95eb0e 100644 --- a/pkg/controllers/tikvgroup/tasks/util.go +++ b/pkg/controllers/tikvgroup/tasks/util.go @@ -19,6 +19,6 @@ import ( ) // TODO: fix length issue -func HeadlessServiceName(clusterName, groupName string) string { - return fmt.Sprintf("%s-%s-peer", clusterName, groupName) +func HeadlessServiceName(groupName string) string { + return fmt.Sprintf("%s-tikv-peer", groupName) } diff --git a/pkg/runtime/group.go b/pkg/runtime/group.go index fe6a5be97b..eddc5b4a0e 100644 --- a/pkg/runtime/group.go +++ b/pkg/runtime/group.go @@ -19,8 +19,6 @@ type group interface { SetReplicas(replicas *int32) Replicas() *int32 - Cluster() string - Component() string } type Group interface { diff --git a/pkg/runtime/object.go b/pkg/runtime/object.go index ae3c1697d0..8242857174 100644 --- a/pkg/runtime/object.go +++ b/pkg/runtime/object.go @@ -22,6 +22,8 @@ import ( type object interface { metav1.Object + Cluster() string + Component() string To() client.Object Conditions() []metav1.Condition } diff --git a/pkg/runtime/pd.go b/pkg/runtime/pd.go index e13f15f4e8..3918a50e52 100644 --- a/pkg/runtime/pd.go +++ b/pkg/runtime/pd.go @@ -99,6 +99,14 @@ func (pd *PD) Conditions() []metav1.Condition { return pd.Status.Conditions } +func (pd *PD) Cluster() string { + return pd.Spec.Cluster.Name +} + +func (*PD) Component() string { + return v1alpha1.LabelValComponentPD +} + var _ group = &PDGroup{} func (pdg *PDGroup) DeepCopyObject() runtime.Object { diff --git a/pkg/runtime/tidb.go b/pkg/runtime/tidb.go index 3773d67dcb..eb757b27a6 100644 --- a/pkg/runtime/tidb.go +++ b/pkg/runtime/tidb.go @@ -74,3 +74,11 @@ func (db *TiDB) IsUpToDate() bool { func (db *TiDB) Conditions() []metav1.Condition { return db.Status.Conditions } + +func (db *TiDB) Cluster() string { + return db.Spec.Cluster.Name +} + +func (*TiDB) Component() string { + return v1alpha1.LabelValComponentTiDB +} diff --git a/pkg/runtime/tiflash.go b/pkg/runtime/tiflash.go index 8305f594e5..bed868ceba 100644 --- a/pkg/runtime/tiflash.go +++ b/pkg/runtime/tiflash.go @@ -74,3 +74,11 @@ func (f *TiFlash) IsUpToDate() bool { func (f *TiFlash) Conditions() []metav1.Condition { return f.Status.Conditions } + +func (f *TiFlash) Cluster() string { + return f.Spec.Cluster.Name +} + +func (*TiFlash) Component() string { + return v1alpha1.LabelValComponentTiFlash +} diff --git a/pkg/runtime/tikv.go b/pkg/runtime/tikv.go index 9c968735b1..2e4b0cec7a 100644 --- a/pkg/runtime/tikv.go +++ b/pkg/runtime/tikv.go @@ -74,3 +74,11 @@ func (kv *TiKV) IsUpToDate() bool { func (kv *TiKV) Conditions() []metav1.Condition { return kv.Status.Conditions } + +func (kv *TiKV) Cluster() string { + return kv.Spec.Cluster.Name +} + +func (*TiKV) Component() string { + return v1alpha1.LabelValComponentTiKV +} diff --git a/pkg/timanager/pd/pd.go b/pkg/timanager/pd/pd.go index b7ac37cecd..33a9e82c04 100644 
--- a/pkg/timanager/pd/pd.go +++ b/pkg/timanager/pd/pd.go @@ -129,7 +129,7 @@ func NewUnderlayClientFunc(c client.Client) timanager.NewUnderlayClientFunc[*v1a return nil, fmt.Errorf("cannot find cluster %s: %w", pdg.Spec.Cluster.Name, err) } - host := fmt.Sprintf("%s-%s.%s:%d", pdg.Spec.Cluster.Name, pdg.Name, pdg.Namespace, pdg.GetClientPort()) + host := fmt.Sprintf("%s-pd.%s:%d", pdg.Name, pdg.Namespace, pdg.GetClientPort()) if cluster.IsTLSClusterEnabled() { tlsConfig, err := tlsutil.GetTLSConfigFromSecret(ctx, c, diff --git a/pkg/updater/builder.go b/pkg/updater/builder.go index da7bbec6b4..e366e012b9 100644 --- a/pkg/updater/builder.go +++ b/pkg/updater/builder.go @@ -142,7 +142,12 @@ func (b *builder[PT]) WithUpdatePreferPolicy(ps ...PreferPolicy[PT]) Builder[PT] func split[PT runtime.Instance](all []PT, rev string) (update, outdated []PT) { for _, instance := range all { - if instance.GetUpdateRevision() == rev && instance.GetDeletionTimestamp().IsZero() { + // if the instance is being deleted, just ignore it + // TODO(liubo02): make sure it's ok for PD + if !instance.GetDeletionTimestamp().IsZero() { + continue + } + if instance.GetUpdateRevision() == rev { update = append(update, instance) } else { outdated = append(outdated, instance) diff --git a/pkg/utils/k8s/deletion.go b/pkg/utils/k8s/deletion.go index 7f74e366cc..32d3631857 100644 --- a/pkg/utils/k8s/deletion.go +++ b/pkg/utils/k8s/deletion.go @@ -20,9 +20,11 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" "github.com/pingcap/tidb-operator/apis/core/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" ) // EnsureGroupSubResourceDeleted ensures the sub resources of a group are deleted. @@ -62,6 +64,7 @@ func EnsureGroupSubResourceDeleted(ctx context.Context, cli client.Client, // For pod and configmap, the name of the resource is the same as the instance name. // For pvc, it should contain the instance name as the value of the label "app.kubernetes.io/instance". // TODO: retain policy support +// Deprecated: remove this function, prefer DeleteInstanceSubresource func EnsureInstanceSubResourceDeleted(ctx context.Context, cli client.Client, namespace, name string, podOpts ...client.DeleteOption, ) error { @@ -115,3 +118,55 @@ func EnsureInstanceSubResourceDeleted(ctx context.Context, cli client.Client, return nil } + +// DeleteInstanceSubresource tries to delete a subresource of an instance, e.g.
pods, cms, pvcs +func DeleteInstanceSubresource[T runtime.Instance]( + ctx context.Context, + c client.Client, + instance T, + objs client.ObjectList, + opts ...client.DeleteOption, +) (wait bool, _ error) { + if err := c.List(ctx, objs, client.InNamespace(instance.GetNamespace()), client.MatchingLabels{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyInstance: instance.GetName(), + v1alpha1.LabelKeyCluster: instance.Cluster(), + v1alpha1.LabelKeyComponent: instance.Component(), + }); err != nil { + return false, fmt.Errorf("failed to list %T for instance %s/%s: %w", objs, instance.GetNamespace(), instance.GetName(), err) + } + + if meta.LenList(objs) == 0 { + return false, nil + } + + items, err := meta.ExtractList(objs) + if err != nil { + return false, fmt.Errorf("failed to extract %T for instance %s/%s: %w", objs, instance.GetNamespace(), instance.GetName(), err) + } + for _, item := range items { + obj, ok := item.(client.Object) + if !ok { + return false, fmt.Errorf("unexpected %T for instance %s/%s", item, instance.GetNamespace(), instance.GetName()) + } + if !obj.GetDeletionTimestamp().IsZero() { + wait = true + continue + } + if err := c.Delete(ctx, obj, opts...); err != nil { + if !errors.IsNotFound(err) { + return false, fmt.Errorf("failed to delete sub resource %s/%s of instance %s/%s: %w", + obj.GetNamespace(), + obj.GetName(), + instance.GetNamespace(), + instance.GetName(), + err, + ) + } + continue + } + wait = true + } + + return wait, nil +} diff --git a/tests/e2e/cluster/cluster.go b/tests/e2e/cluster/cluster.go index 6b86389889..b3d4389929 100644 --- a/tests/e2e/cluster/cluster.go +++ b/tests/e2e/cluster/cluster.go @@ -580,7 +580,8 @@ var _ = Describe("TiDB Cluster", func() { By("Recording the pod's UID") listOpts := metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s,%s=%s", - v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, dbg.Name)} + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, dbg.Name), + } podList, err := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) Expect(err).To(BeNil()) Expect(len(podList.Items)).To(Equal(1)) @@ -657,7 +658,8 @@ var _ = Describe("TiDB Cluster", func() { By("Checking the pvc size") listOpts := metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s,%s=%s", - v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, kvg.Name)} + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, kvg.Name), + } pvcList, err := clientSet.CoreV1().PersistentVolumeClaims(tc.Namespace).List(ctx, listOpts) Expect(err).To(BeNil()) Expect(len(pvcList.Items)).To(Equal(1)) @@ -713,7 +715,8 @@ var _ = Describe("TiDB Cluster", func() { By("Recording the pod's UID") listOpts := metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s,%s=%s", - v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, dbg.Name)} + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, dbg.Name), + } podList, err := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) Expect(err).To(BeNil()) Expect(len(podList.Items)).To(Equal(1)) @@ -765,7 +768,8 @@ var _ = Describe("TiDB Cluster", func() { By("Checking the number of ControllerRevisions, and revision names in tidb group's status") listOpts := metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s,%s=%s", - v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, dbg.Name)} + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, dbg.Name), + } crList, err := clientSet.AppsV1().ControllerRevisions(tc.Namespace).List(ctx, listOpts) 
Expect(err).To(BeNil()) Expect(len(crList.Items)).To(Equal(1)) @@ -931,7 +935,8 @@ var _ = Describe("TiDB Cluster", func() { By("Checking the config") listOpts := metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s,%s=%s", - v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, kvg.Name)} + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, kvg.Name), + } cms, err := clientSet.CoreV1().ConfigMaps(tc.Namespace).List(ctx, listOpts) Expect(err).To(BeNil()) Expect(len(cms.Items)).To(Equal(3)) @@ -1004,7 +1009,8 @@ var _ = Describe("TiDB Cluster", func() { By("Checking the logic of rolling update for " + groupName) listOpts := metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s,%s=%s", - v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, groupName)} + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, groupName), + } By("collecting the events of pods for verifying rolling update") watchCtx, cancel := context.WithCancel(outerCtx) @@ -1284,7 +1290,8 @@ var _ = Describe("TiDB Cluster", func() { checkComponent := func(groupName, componentName string, expectedReplicas *int32) { listOptions := metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s,%s=%s", - v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, groupName)} + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, groupName), + } podList, err := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOptions) g.Expect(err).To(BeNil()) g.Expect(len(podList.Items)).To(Equal(int(*expectedReplicas))) @@ -1296,7 +1303,8 @@ var _ = Describe("TiDB Cluster", func() { Name: fmt.Sprintf("%s%s-tls", v1alpha1.NamePrefix, componentName), VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: tc.TLSClusterSecretName(groupName), + // TODO: extract to a common utils + SecretName: groupName + "-" + componentName + "-cluster-secret", //nolint:mnd // easy to understand DefaultMode: ptr.To(int32(420)), }, @@ -1332,7 +1340,7 @@ var _ = Describe("TiDB Cluster", func() { Name: v1alpha1.TiDBServerTLSVolumeName, VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: fmt.Sprintf("%s-%s-server-secret", tc.Name, groupName), + SecretName: dbg.TiDBServerTLSSecretName(), //nolint:mnd // easy to understand DefaultMode: ptr.To(int32(420)), }, @@ -1518,7 +1526,8 @@ location-labels = ["region", "zone", "host"]` }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) By("Checking the store labels and server labels") - svcName := fmt.Sprintf("%s-%s", tc.Name, dbg.Name) + // TODO: extract it to a common utils + svcName := dbg.Name + "-tidb" dsn, cancel, err := utiltidb.PortForwardAndGetTiDBDSN(fw, tc.Namespace, svcName, "root", "", "test", "charset=utf8mb4") Expect(err).To(BeNil()) defer cancel() @@ -1642,7 +1651,8 @@ location-labels = ["region", "zone", "host"]` }).WithTimeout(createClusterTimeout).WithPolling(createClusterPolling).Should(Succeed()) By("Connect to the TiDB cluster to run transactions") - svcName := fmt.Sprintf("%s-%s", tc.Name, dbg.Name) + // TODO: extract it to a common utils + svcName := dbg.Name + "-tidb" dsn, cancel, err := utiltidb.PortForwardAndGetTiDBDSN(fw, tc.Namespace, svcName, "root", "", "test", "charset=utf8mb4") Expect(err).To(BeNil()) defer cancel() @@ -1724,7 +1734,8 @@ location-labels = ["region", "zone", "host"]` By("Recording the terminationGracePeriodSeconds before overlay") listOpts := metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s,%s=%s", - v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, 
dbg.Name)} + v1alpha1.LabelKeyCluster, tc.Name, v1alpha1.LabelKeyGroup, dbg.Name), + } pods, err := clientSet.CoreV1().Pods(tc.Namespace).List(ctx, listOpts) Expect(err).To(BeNil()) Expect(len(pods.Items)).To(Equal(1)) diff --git a/tests/e2e/cluster/tls.go b/tests/e2e/cluster/tls.go index 6ace719ae8..840e545d4b 100644 --- a/tests/e2e/cluster/tls.go +++ b/tests/e2e/cluster/tls.go @@ -59,10 +59,10 @@ var tidbCertificatesTmpl = ` apiVersion: cert-manager.io/v1 kind: Certificate metadata: - name: {{ .ClusterName }}-{{ .TiDBGroupName}}-server-secret + name: {{ .TiDBGroupName}}-tidb-server-secret namespace: {{ .Namespace }} spec: - secretName: {{ .ClusterName }}-{{ .TiDBGroupName}}-server-secret + secretName: {{ .TiDBGroupName}}-tidb-server-secret duration: 8760h # 365d renewBefore: 360h # 15d subject: @@ -72,12 +72,12 @@ spec: usages: - server auth dnsNames: - - "{{ .ClusterName }}-{{ .TiDBGroupName}}" - - "{{ .ClusterName }}-{{ .TiDBGroupName}}.{{ .Namespace }}" - - "{{ .ClusterName }}-{{ .TiDBGroupName}}.{{ .Namespace }}.svc" - - "*.{{ .ClusterName }}-{{ .TiDBGroupName}}" - - "*.{{ .ClusterName }}-{{ .TiDBGroupName}}.{{ .Namespace }}" - - "*.{{ .ClusterName }}-{{ .TiDBGroupName}}.{{ .Namespace }}.svc" + - "{{ .TiDBGroupName}}-tidb" + - "{{ .TiDBGroupName}}-tidb.{{ .Namespace }}" + - "{{ .TiDBGroupName}}-tidb.{{ .Namespace }}.svc" + - "*.{{ .TiDBGroupName}}-tidb" + - "*.{{ .TiDBGroupName}}-tidb.{{ .Namespace }}" + - "*.{{ .TiDBGroupName}}-tidb.{{ .Namespace }}.svc" ipAddresses: - 127.0.0.1 - ::1 @@ -89,10 +89,10 @@ spec: apiVersion: cert-manager.io/v1 kind: Certificate metadata: - name: {{ .ClusterName }}-{{ .TiDBGroupName}}-client-secret + name: {{ .TiDBGroupName}}-tidb-client-secret namespace: {{ .Namespace }} spec: - secretName: {{ .ClusterName }}-{{ .TiDBGroupName}}-client-secret + secretName: {{ .TiDBGroupName}}-tidb-client-secret duration: 8760h # 365d renewBefore: 360h # 15d subject: @@ -111,10 +111,10 @@ var tidbComponentsCertificatesTmpl = ` apiVersion: cert-manager.io/v1 kind: Certificate metadata: - name: {{ .ClusterName }}-{{ .PDGroupName }}-cluster-secret + name: {{ .PDGroupName }}-pd-cluster-secret namespace: {{ .Namespace }} spec: - secretName: {{ .ClusterName }}-{{ .PDGroupName }}-cluster-secret + secretName: {{ .PDGroupName }}-pd-cluster-secret duration: 8760h # 365d renewBefore: 360h # 15d subject: @@ -125,15 +125,15 @@ spec: - server auth - client auth dnsNames: - - "{{ .ClusterName }}-{{ .PDGroupName }}" - - "{{ .ClusterName }}-{{ .PDGroupName }}.{{ .Namespace }}" - - "{{ .ClusterName }}-{{ .PDGroupName }}.{{ .Namespace }}.svc" - - "{{ .ClusterName }}-{{ .PDGroupName }}-peer" - - "{{ .ClusterName }}-{{ .PDGroupName }}-peer.{{ .Namespace }}" - - "{{ .ClusterName }}-{{ .PDGroupName }}-peer.{{ .Namespace }}.svc" - - "*.{{ .ClusterName }}-{{ .PDGroupName }}-peer" - - "*.{{ .ClusterName }}-{{ .PDGroupName }}-peer.{{ .Namespace }}" - - "*.{{ .ClusterName }}-{{ .PDGroupName }}-peer.{{ .Namespace }}.svc" + - "{{ .PDGroupName }}-pd" + - "{{ .PDGroupName }}-pd.{{ .Namespace }}" + - "{{ .PDGroupName }}-pd.{{ .Namespace }}.svc" + - "{{ .PDGroupName }}-pd-peer" + - "{{ .PDGroupName }}-pd-peer.{{ .Namespace }}" + - "{{ .PDGroupName }}-pd-peer.{{ .Namespace }}.svc" + - "*.{{ .PDGroupName }}-pd-peer" + - "*.{{ .PDGroupName }}-pd-peer.{{ .Namespace }}" + - "*.{{ .PDGroupName }}-pd-peer.{{ .Namespace }}.svc" ipAddresses: - 127.0.0.1 - ::1 @@ -145,10 +145,10 @@ spec: apiVersion: cert-manager.io/v1 kind: Certificate metadata: - name: {{ .ClusterName }}-{{ .TiKVGroupName }}-cluster-secret + 
name: {{ .TiKVGroupName }}-tikv-cluster-secret namespace: {{ .Namespace }} spec: - secretName: {{ .ClusterName }}-{{ .TiKVGroupName }}-cluster-secret + secretName: {{ .TiKVGroupName }}-tikv-cluster-secret duration: 8760h # 365d renewBefore: 360h # 15d subject: @@ -159,15 +159,15 @@ spec: - server auth - client auth dnsNames: - - "{{ .ClusterName }}-{{ .TiKVGroupName }}" - - "{{ .ClusterName }}-{{ .TiKVGroupName }}.{{ .Namespace }}" - - "{{ .ClusterName }}-{{ .TiKVGroupName }}.{{ .Namespace }}.svc" - - "{{ .ClusterName }}-{{ .TiKVGroupName }}-peer" - - "{{ .ClusterName }}-{{ .TiKVGroupName }}-peer.{{ .Namespace }}" - - "{{ .ClusterName }}-{{ .TiKVGroupName }}-peer.{{ .Namespace }}.svc" - - "*.{{ .ClusterName }}-{{ .TiKVGroupName }}-peer" - - "*.{{ .ClusterName }}-{{ .TiKVGroupName }}-peer.{{ .Namespace }}" - - "*.{{ .ClusterName }}-{{ .TiKVGroupName }}-peer.{{ .Namespace }}.svc" + - "{{ .TiKVGroupName }}-tikv" + - "{{ .TiKVGroupName }}-tikv.{{ .Namespace }}" + - "{{ .TiKVGroupName }}-tikv.{{ .Namespace }}.svc" + - "{{ .TiKVGroupName }}-tikv-peer" + - "{{ .TiKVGroupName }}-tikv-peer.{{ .Namespace }}" + - "{{ .TiKVGroupName }}-tikv-peer.{{ .Namespace }}.svc" + - "*.{{ .TiKVGroupName }}-tikv-peer" + - "*.{{ .TiKVGroupName }}-tikv-peer.{{ .Namespace }}" + - "*.{{ .TiKVGroupName }}-tikv-peer.{{ .Namespace }}.svc" ipAddresses: - 127.0.0.1 - ::1 @@ -179,10 +179,10 @@ spec: apiVersion: cert-manager.io/v1 kind: Certificate metadata: - name: {{ .ClusterName }}-{{ .TiDBGroupName }}-cluster-secret + name: {{ .TiDBGroupName }}-tidb-cluster-secret namespace: {{ .Namespace }} spec: - secretName: {{ .ClusterName }}-{{ .TiDBGroupName }}-cluster-secret + secretName: {{ .TiDBGroupName }}-tidb-cluster-secret duration: 8760h # 365d renewBefore: 360h # 15d subject: @@ -193,15 +193,15 @@ spec: - server auth - client auth dnsNames: - - "{{ .ClusterName }}-{{ .TiDBGroupName }}" - - "{{ .ClusterName }}-{{ .TiDBGroupName }}.{{ .Namespace }}" - - "{{ .ClusterName }}-{{ .TiDBGroupName }}.{{ .Namespace }}.svc" - - "{{ .ClusterName }}-{{ .TiDBGroupName }}-peer" - - "{{ .ClusterName }}-{{ .TiDBGroupName }}-peer.{{ .Namespace }}" - - "{{ .ClusterName }}-{{ .TiDBGroupName }}-peer.{{ .Namespace }}.svc" - - "*.{{ .ClusterName }}-{{ .TiDBGroupName }}-peer" - - "*.{{ .ClusterName }}-{{ .TiDBGroupName }}-peer.{{ .Namespace }}" - - "*.{{ .ClusterName }}-{{ .TiDBGroupName }}-peer.{{ .Namespace }}.svc" + - "{{ .TiDBGroupName }}-tidb" + - "{{ .TiDBGroupName }}-tidb.{{ .Namespace }}" + - "{{ .TiDBGroupName }}-tidb.{{ .Namespace }}.svc" + - "{{ .TiDBGroupName }}-tidb-peer" + - "{{ .TiDBGroupName }}-tidb-peer.{{ .Namespace }}" + - "{{ .TiDBGroupName }}-tidb-peer.{{ .Namespace }}.svc" + - "*.{{ .TiDBGroupName }}-tidb-peer" + - "*.{{ .TiDBGroupName }}-tidb-peer.{{ .Namespace }}" + - "*.{{ .TiDBGroupName }}-tidb-peer.{{ .Namespace }}.svc" ipAddresses: - 127.0.0.1 - ::1 @@ -213,10 +213,10 @@ spec: apiVersion: cert-manager.io/v1 kind: Certificate metadata: - name: {{ .ClusterName }}-{{ .TiFlashGroupName }}-cluster-secret + name: {{ .TiFlashGroupName }}-tiflash-cluster-secret namespace: {{ .Namespace }} spec: - secretName: {{ .ClusterName }}-{{ .TiFlashGroupName }}-cluster-secret + secretName: {{ .TiFlashGroupName }}-tiflash-cluster-secret duration: 8760h # 365d renewBefore: 360h # 15d subject: @@ -227,15 +227,15 @@ spec: - server auth - client auth dnsNames: - - "{{ .ClusterName }}-{{ .TiFlashGroupName }}" - - "{{ .ClusterName }}-{{ .TiFlashGroupName }}.{{ .Namespace }}" - - "{{ .ClusterName }}-{{ .TiFlashGroupName }}.{{ .Namespace }}.svc" 
- - "{{ .ClusterName }}-{{ .TiFlashGroupName }}-peer" - - "{{ .ClusterName }}-{{ .TiFlashGroupName }}-peer.{{ .Namespace }}" - - "{{ .ClusterName }}-{{ .TiFlashGroupName }}-peer.{{ .Namespace }}.svc" - - "*.{{ .ClusterName }}-{{ .TiFlashGroupName }}-peer" - - "*.{{ .ClusterName }}-{{ .TiFlashGroupName }}-peer.{{ .Namespace }}" - - "*.{{ .ClusterName }}-{{ .TiFlashGroupName }}-peer.{{ .Namespace }}.svc" + - "{{ .TiFlashGroupName }}-tiflash" + - "{{ .TiFlashGroupName }}-tiflash.{{ .Namespace }}" + - "{{ .TiFlashGroupName }}-tiflash.{{ .Namespace }}.svc" + - "{{ .TiFlashGroupName }}-tiflash-peer" + - "{{ .TiFlashGroupName }}-tiflash-peer.{{ .Namespace }}" + - "{{ .TiFlashGroupName }}-tiflash-peer.{{ .Namespace }}.svc" + - "*.{{ .TiFlashGroupName }}-tiflash-peer" + - "*.{{ .TiFlashGroupName }}-tiflash-peer.{{ .Namespace }}" + - "*.{{ .TiFlashGroupName }}-tiflash-peer.{{ .Namespace }}.svc" ipAddresses: - 127.0.0.1 - ::1 @@ -280,14 +280,17 @@ func installTiDBIssuer(ctx context.Context, yamlApplier *k8s.YAMLApplier, ns, cl func installTiDBCertificates(ctx context.Context, yamlApplier *k8s.YAMLApplier, ns, clusterName, tidbGroupName string) error { return installCert(ctx, yamlApplier, tidbCertificatesTmpl, tcTmplMeta{ - Namespace: ns, ClusterName: clusterName, TiDBGroupName: tidbGroupName}) + Namespace: ns, ClusterName: clusterName, TiDBGroupName: tidbGroupName, + }) } func installTiDBComponentsCertificates(ctx context.Context, yamlApplier *k8s.YAMLApplier, ns, clusterName string, - pdGroupName, tikvGroupName, tidbGroupName, tiFlashGroupName string) error { + pdGroupName, tikvGroupName, tidbGroupName, tiFlashGroupName string, +) error { return installCert(ctx, yamlApplier, tidbComponentsCertificatesTmpl, tcTmplMeta{ Namespace: ns, ClusterName: clusterName, - PDGroupName: pdGroupName, TiKVGroupName: tikvGroupName, TiDBGroupName: tidbGroupName, TiFlashGroupName: tiFlashGroupName}) + PDGroupName: pdGroupName, TiKVGroupName: tikvGroupName, TiDBGroupName: tidbGroupName, TiFlashGroupName: tiFlashGroupName, + }) } func installCert(ctx context.Context, yamlApplier *k8s.YAMLApplier, tmplStr string, tp any) error {
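Reviewer note: the TiKV shutdown path in pkg/controllers/tikv/tasks/util.go sizes the deletion grace period from the store's region count via CalcGracePeriod, and DeletePodWithGracePeriod only re-issues a Delete when the pod's current deletionGracePeriodSeconds is unset or larger than that value, so a reconcile can shrink an in-flight termination but never extend it. The self-contained sketch below mirrors that formula for quick inspection; the real RegionsPerSecond and MinGracePeriodSeconds constants live elsewhere in the tasks package and are not part of this patch, so the values used here are illustrative placeholders only.

package main

import "fmt"

// Illustrative placeholders: the operator's actual RegionsPerSecond and
// MinGracePeriodSeconds constants are defined in pkg/controllers/tikv/tasks
// and are not shown in this patch.
const (
	regionsPerSecond      = 200
	minGracePeriodSeconds = int64(10)
)

// calcGracePeriod mirrors CalcGracePeriod from the patch: scale the grace
// period with the number of regions on the store, never going below the
// configured minimum.
func calcGracePeriod(regionCount int) int64 {
	gracePeriod := int64(regionCount/regionsPerSecond + 1)
	if gracePeriod < minGracePeriodSeconds {
		gracePeriod = minGracePeriodSeconds
	}
	return gracePeriod
}

func main() {
	for _, regions := range []int{0, 5000, 200000} {
		fmt.Printf("regions=%d -> grace period %ds\n", regions, calcGracePeriod(regions))
	}
}

With these placeholder values an empty store would be deleted after the 10-second floor, while a store holding 200000 regions would get roughly 1001 seconds; pod.go still sets TerminationGracePeriodSeconds to 65535 as the ceiling for any deletion that is not shortened through this path.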