Skip to content

Commit

Permalink
K8SPXC-1366: Add suspendedDeadlineSeconds
Browse files Browse the repository at this point in the history
suspendedDeadlineSeconds allows user to configure maximum duration that
backup job can wait in suspended state. This field is optional and can
be configured in two separate places:

1. PerconaXtraDBClusterBackup.spec.suspendedDeadlineSeconds: Always used if defined.
2. PerconaXtraDBCluster.spec.backup.suspendedDeadlineSeconds: Used only if not defined in PerconaXtraDBClusterBackup.

Commit 5a07740 introduced startingDeadlineSeconds to fail the backup if it
doesn't start before the configured deadline. This commit also allows the user
to configure it globally, just like suspendedDeadlineSeconds.

startingDeadlineSeconds is an optional field and can be configured in two
separate places:

1. PerconaXtraDBClusterBackup.spec.startingDeadlineSeconds: Always used if defined.
2. PerconaXtraDBCluster.spec.backup.startingDeadlineSeconds: Used only if not defined in PerconaXtraDBClusterBackup.
  • Loading branch information
egegunes committed Feb 7, 2025
1 parent 8088760 commit 2a0baf2
Show file tree
Hide file tree
Showing 11 changed files with 158 additions and 50 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -151,6 +151,9 @@ spec:
type: integer
storageName:
type: string
suspendedDeadlineSeconds:
format: int64
type: integer
type: object
status:
properties:
Expand Down
3 changes: 3 additions & 0 deletions config/crd/bases/pxc.percona.com_perconaxtradbclusters.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1083,6 +1083,9 @@ spec:
type: object
type: object
type: object
suspendedDeadlineSeconds:
format: int64
type: integer
type: object
crVersion:
type: string
Expand Down
1 change: 1 addition & 0 deletions deploy/backup/backup.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ spec:
storageName: fs-pvc
# activeDeadlineSeconds: 3600
# startingDeadlineSeconds: 300
# suspendedDeadlineSeconds: 1200
# containerOptions:
# env:
# - name: VERIFY_TLS
Expand Down
6 changes: 6 additions & 0 deletions deploy/bundle.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,9 @@ spec:
type: integer
storageName:
type: string
suspendedDeadlineSeconds:
format: int64
type: integer
type: object
status:
properties:
Expand Down Expand Up @@ -1997,6 +2000,9 @@ spec:
type: object
type: object
type: object
suspendedDeadlineSeconds:
format: int64
type: integer
type: object
crVersion:
type: string
Expand Down
1 change: 1 addition & 0 deletions deploy/cr.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -606,6 +606,7 @@ spec:
# backoffLimit: 6
# activeDeadlineSeconds: 3600
# startingDeadlineSeconds: 300
# suspendedDeadlineSeconds: 1200
# serviceAccountName: percona-xtradb-cluster-operator
# imagePullSecrets:
# - name: private-registry-credentials
Expand Down
6 changes: 6 additions & 0 deletions deploy/crd.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,9 @@ spec:
type: integer
storageName:
type: string
suspendedDeadlineSeconds:
format: int64
type: integer
type: object
status:
properties:
Expand Down Expand Up @@ -1997,6 +2000,9 @@ spec:
type: object
type: object
type: object
suspendedDeadlineSeconds:
format: int64
type: integer
type: object
crVersion:
type: string
Expand Down
6 changes: 6 additions & 0 deletions deploy/cw-bundle.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,9 @@ spec:
type: integer
storageName:
type: string
suspendedDeadlineSeconds:
format: int64
type: integer
type: object
status:
properties:
Expand Down Expand Up @@ -1997,6 +2000,9 @@ spec:
type: object
type: object
type: object
suspendedDeadlineSeconds:
format: int64
type: integer
type: object
crVersion:
type: string
Expand Down
11 changes: 6 additions & 5 deletions pkg/apis/pxc/v1/pxc_backup_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,11 +47,12 @@ type PerconaXtraDBClusterBackup struct {
}

// PXCBackupSpec defines the desired state of an on-demand (or scheduled)
// PerconaXtraDBClusterBackup. The three *DeadlineSeconds fields are optional;
// when nil here, the operator falls back to the cluster-wide values in
// PXCScheduledBackup (see the controller's deadline checks).
type PXCBackupSpec struct {
	// PXCCluster is the name of the PerconaXtraDBCluster to back up (required).
	PXCCluster string `json:"pxcCluster"`
	// StorageName selects a storage entry from the cluster's backup storages.
	StorageName string `json:"storageName,omitempty"`
	// ContainerOptions customizes the backup container (env, args, ...).
	ContainerOptions *BackupContainerOptions `json:"containerOptions,omitempty"`
	// ActiveDeadlineSeconds bounds how long the backup job may run once started.
	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
	// StartingDeadlineSeconds fails the backup if it doesn't start in time.
	StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"`
	// SuspendedDeadlineSeconds fails the backup if its job stays suspended too long.
	SuspendedDeadlineSeconds *int64 `json:"suspendedDeadlineSeconds,omitempty"`
}

type PXCBackupStatus struct {
Expand Down
25 changes: 13 additions & 12 deletions pkg/apis/pxc/v1/pxc_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -160,18 +160,19 @@ const (
)

// PXCScheduledBackup holds the cluster-wide backup configuration
// (PerconaXtraDBCluster.spec.backup). Its *DeadlineSeconds fields act as
// defaults for individual PerconaXtraDBClusterBackup objects that do not set
// their own values.
type PXCScheduledBackup struct {
	// AllowParallel permits multiple backup jobs to run at the same time.
	AllowParallel *bool `json:"allowParallel,omitempty"`
	// Image is the backup container image.
	Image            string                        `json:"image,omitempty"`
	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
	ImagePullPolicy  corev1.PullPolicy             `json:"imagePullPolicy,omitempty"`
	// Schedule lists cron-style scheduled backups.
	Schedule []PXCScheduledBackupSchedule `json:"schedule,omitempty"`
	// Storages maps storage names to their backend configuration.
	Storages           map[string]*BackupStorageSpec `json:"storages,omitempty"`
	ServiceAccountName string                        `json:"serviceAccountName,omitempty"`
	Annotations        map[string]string             `json:"annotations,omitempty"`
	// PITR configures point-in-time recovery (binlog uploading).
	PITR PITRSpec `json:"pitr,omitempty"`
	// BackoffLimit is the number of retries for a failed backup job.
	BackoffLimit *int32 `json:"backoffLimit,omitempty"`
	// ActiveDeadlineSeconds is the default run-time bound for backup jobs.
	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
	// StartingDeadlineSeconds is the default deadline for a backup to start.
	StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"`
	// SuspendedDeadlineSeconds is the default deadline for a suspended job to resume.
	SuspendedDeadlineSeconds *int64 `json:"suspendedDeadlineSeconds,omitempty"`
}

func (b *PXCScheduledBackup) GetAllowParallel() bool {
Expand Down
14 changes: 12 additions & 2 deletions pkg/apis/pxc/v1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

132 changes: 101 additions & 31 deletions pkg/controller/pxcbackup/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -151,14 +151,6 @@ func (r *ReconcilePerconaXtraDBClusterBackup) Reconcile(ctx context.Context, req
return rr, nil
}

if err := r.checkStartingDeadline(ctx, cr); err != nil {
if err := r.setFailedStatus(ctx, cr, err); err != nil {
return rr, errors.Wrap(err, "update status")
}

return reconcile.Result{}, nil
}

cluster, err := r.getCluster(ctx, cr)
if err != nil {
return reconcile.Result{}, errors.Wrap(err, "get cluster")
Expand Down Expand Up @@ -187,6 +179,14 @@ func (r *ReconcilePerconaXtraDBClusterBackup) Reconcile(ctx context.Context, req
return reconcile.Result{}, err
}

if err := r.checkDeadlines(ctx, cluster, cr); err != nil {
if err := r.setFailedStatus(ctx, cr, err); err != nil {
return rr, errors.Wrap(err, "update status")
}

return reconcile.Result{}, nil
}

if err := r.reconcileBackupJob(ctx, cr, cluster); err != nil {
return rr, errors.Wrap(err, "reconcile backup job")
}
Expand Down Expand Up @@ -661,24 +661,86 @@ func (r *ReconcilePerconaXtraDBClusterBackup) updateJobStatus(
return nil
}

func (r *ReconcilePerconaXtraDBClusterBackup) checkStartingDeadline(ctx context.Context, cr *api.PerconaXtraDBClusterBackup) error {
// checkDeadlines runs both backup deadline checks in order: first the
// starting deadline (backup never left the New state), then the suspended
// deadline (backup job stuck in a suspended condition). A non-nil error
// means the backup should be failed by the caller.
func (r *ReconcilePerconaXtraDBClusterBackup) checkDeadlines(ctx context.Context, cluster *api.PerconaXtraDBCluster, cr *api.PerconaXtraDBClusterBackup) error {
	if err := checkStartingDeadline(ctx, cluster, cr); err != nil {
		return err
	}
	return r.checkSuspendedDeadline(ctx, cluster, cr)
}

func checkStartingDeadline(ctx context.Context, cluster *api.PerconaXtraDBCluster, cr *api.PerconaXtraDBClusterBackup) error {
log := logf.FromContext(ctx)

since := time.Since(cr.CreationTimestamp.Time).Seconds()
if cr.Status.State != api.BackupNew {
return nil
}

deadlineSeconds := new(int64)
if cr.Spec.StartingDeadlineSeconds != nil {
deadlineSeconds = cr.Spec.StartingDeadlineSeconds
} else if cluster.Spec.Backup.StartingDeadlineSeconds != nil {
deadlineSeconds = cluster.Spec.Backup.StartingDeadlineSeconds
}

if cr.Spec.StartingDeadlineSeconds == nil {
if deadlineSeconds == nil {
return nil
}

if since < float64(*cr.Spec.StartingDeadlineSeconds) {
since := time.Since(cr.CreationTimestamp.Time).Seconds()
if since < float64(*deadlineSeconds) {
return nil
}

log.Info("Backup didn't start in startingDeadlineSeconds, failing the backup",
"startingDeadlineSeconds", *deadlineSeconds,
"passedSeconds", since)

return errors.New("starting deadline seconds exceeded")
}

func (r *ReconcilePerconaXtraDBClusterBackup) checkSuspendedDeadline(
ctx context.Context,
cluster *api.PerconaXtraDBCluster,
cr *api.PerconaXtraDBClusterBackup,
) error {
log := logf.FromContext(ctx)

job, err := r.getBackupJob(ctx, cluster, cr)
if err != nil {
if k8sErrors.IsNotFound(err) {
return nil
}

return err
}

deadlineSeconds := new(int64)
if cr.Spec.SuspendedDeadlineSeconds != nil {
deadlineSeconds = cr.Spec.SuspendedDeadlineSeconds
} else if cluster.Spec.Backup.SuspendedDeadlineSeconds != nil {
deadlineSeconds = cluster.Spec.Backup.SuspendedDeadlineSeconds
}

if deadlineSeconds == nil {
return nil
}

if cr.Status.State == api.BackupNew {
log.Info("Backup didn't start in startingDeadlineSeconds, failing the backup",
"startingDeadlineSeconds", *cr.Spec.StartingDeadlineSeconds,
"passedSeconds", since)
return errors.New("starting deadline seconds exceeded")
for _, cond := range job.Status.Conditions {
if cond.Type != batchv1.JobSuspended || cond.Status != corev1.ConditionTrue {
continue
}

if since := time.Since(cond.LastTransitionTime.Time).Seconds(); since > float64(*deadlineSeconds) {
log.Info("Backup didn't resume in suspendedDeadlineSeconds, failing the backup",
"suspendedDeadlineSeconds", *deadlineSeconds,
"passedSeconds", since)
return errors.New("suspended deadline seconds exceeded")
}
}

return nil
Expand Down Expand Up @@ -726,13 +788,8 @@ func (r *ReconcilePerconaXtraDBClusterBackup) suspendJobIfNeeded(

log := logf.FromContext(ctx)

labelKeyBackupType := naming.GetLabelBackupType(cluster)
jobName := naming.BackupJobName(cr.Name, cr.Labels[labelKeyBackupType] == "cron")

err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
job := new(batchv1.Job)

err := r.client.Get(ctx, types.NamespacedName{Namespace: cr.Namespace, Name: jobName}, job)
job, err := r.getBackupJob(ctx, cluster, cr)
if err != nil {
if k8sErrors.IsNotFound(err) {
return nil
Expand All @@ -752,7 +809,7 @@ func (r *ReconcilePerconaXtraDBClusterBackup) suspendJobIfNeeded(
}

log.Info("Suspending backup job",
"job", jobName,
"job", job.Name,
"clusterStatus", cluster.Status.Status,
"readyPXC", cluster.Status.PXC.Ready)

Expand Down Expand Up @@ -785,13 +842,8 @@ func (r *ReconcilePerconaXtraDBClusterBackup) resumeJobIfNeeded(

log := logf.FromContext(ctx)

labelKeyBackupType := naming.GetLabelBackupType(cluster)
jobName := naming.BackupJobName(cr.Name, cr.Labels[labelKeyBackupType] == "cron")

err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
job := new(batchv1.Job)

err := r.client.Get(ctx, types.NamespacedName{Namespace: cr.Namespace, Name: jobName}, job)
job, err := r.getBackupJob(ctx, cluster, cr)
if err != nil {
if k8sErrors.IsNotFound(err) {
return nil
Expand All @@ -811,7 +863,7 @@ func (r *ReconcilePerconaXtraDBClusterBackup) resumeJobIfNeeded(
}

log.Info("Resuming backup job",
"job", jobName,
"job", job.Name,
"clusterStatus", cluster.Status.Status,
"readyPXC", cluster.Status.PXC.Ready)

Expand All @@ -838,3 +890,21 @@ func (r *ReconcilePerconaXtraDBClusterBackup) reconcileBackupJob(

return nil
}

// getBackupJob fetches the Kubernetes Job backing this backup from the API
// server. The job name depends on whether the backup was created by the
// scheduler (labeled "cron") or on demand. Returns the client's error
// unchanged (including NotFound) so callers can branch on it.
func (r *ReconcilePerconaXtraDBClusterBackup) getBackupJob(
	ctx context.Context,
	cluster *api.PerconaXtraDBCluster,
	cr *api.PerconaXtraDBClusterBackup,
) (*batchv1.Job, error) {
	// Scheduled (cron) backups use a different job-name format than on-demand ones.
	typeLabel := naming.GetLabelBackupType(cluster)
	name := naming.BackupJobName(cr.Name, cr.Labels[typeLabel] == "cron")

	job := &batchv1.Job{}
	nn := types.NamespacedName{Namespace: cr.Namespace, Name: name}
	if err := r.client.Get(ctx, nn, job); err != nil {
		return nil, err
	}

	return job, nil
}

0 comments on commit 2a0baf2

Please sign in to comment.