diff --git a/Dockerfile.sidecar b/Dockerfile.sidecar
index 4eac67be..39339044 100644
--- a/Dockerfile.sidecar
+++ b/Dockerfile.sidecar
@@ -43,13 +43,20 @@ RUN set -ex; \
 ARG XTRABACKUP_PKG=percona-xtrabackup-24
 RUN set -ex; \
     apt-get update; \
-    apt-get install -y --no-install-recommends gnupg2 wget lsb-release curl bc; \
+    apt-get install -y --no-install-recommends gnupg2 wget lsb-release curl bc fuse jq openssh-server; \
     wget -P /tmp --no-check-certificate https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb; \
     dpkg -i /tmp/percona-release_latest.$(lsb_release -sc)_all.deb; \
     apt-get update; \
     apt-get install -y --no-install-recommends ${XTRABACKUP_PKG}; \
     rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
-
+# ADD http://mirrors.woqutech.com/download/qfusion/files/bin/juicefs-1.0.0-rc1-linux-amd64 /usr/local/bin/juicefs
+# COPY juicefs/juicefs /usr/local/bin/juicefs
+RUN wget --no-check-certificate "https://d.juicefs.com/juicefs/releases/download/v1.0.2/juicefs-1.0.2-linux-amd64.tar.gz" && tar -zxf "juicefs-1.0.2-linux-amd64.tar.gz"; \
+    mv juicefs /usr/local/bin/juicefs; \
+    chmod +x /usr/local/bin/juicefs; mkdir -p /run/sshd; \
+    mkdir -p /root/.ssh; \
+    chmod 700 /root/.ssh
 WORKDIR /
 COPY --from=builder /workspace/bin/sidecar /usr/local/bin/sidecar
-ENTRYPOINT ["sidecar"]
+COPY script/*.sh /
+CMD [ "sidecar" ]
diff --git a/api/v1alpha1/backup_types.go b/api/v1alpha1/backup_types.go
index 3fc018a2..d177fbde 100644
--- a/api/v1alpha1/backup_types.go
+++ b/api/v1alpha1/backup_types.go
@@ -21,6 +21,14 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
+type JuiceOpt struct {
+	// Metadata store URL: sqlite or redis.
+	JuiceMeta string `json:"juiceMeta"`
+	// BackupSecret name for S3.
+	BackupSecretName string `json:"backupSecretName"`
+	JuiceName        string `json:"juiceName"`
+}
+
 // This is the backup Job CRD.
 // BackupSpec defines the desired state of Backup
 type BackupSpec struct {
@@ -40,6 +48,9 @@ type BackupSpec struct {
 	// +optional
 	NFSServerAddress string `json:"nfsServerAddress,omitempty"`
 
+	// Represents the juicefs parameters needed for backup.
+	// +optional
+	JuiceOpt *JuiceOpt `json:"juiceOpt,omitempty"`
 	// ClusterName represents the cluster name to backup
 	ClusterName string `json:"clusterName"`
 
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index db850e7c..444caadf 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -104,6 +104,11 @@ func (in *BackupList) DeepCopyObject() runtime.Object {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
 	*out = *in
+	if in.JuiceOpt != nil {
+		in, out := &in.JuiceOpt, &out.JuiceOpt
+		*out = new(JuiceOpt)
+		**out = **in
+	}
 	if in.HistoryLimit != nil {
 		in, out := &in.HistoryLimit, &out.HistoryLimit
 		*out = new(int32)
@@ -174,6 +179,21 @@ func (in *ClusterCondition) DeepCopy() *ClusterCondition {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JuiceOpt) DeepCopyInto(out *JuiceOpt) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JuiceOpt.
+func (in *JuiceOpt) DeepCopy() *JuiceOpt {
+	if in == nil {
+		return nil
+	}
+	out := new(JuiceOpt)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *MetricsOpts) DeepCopyInto(out *MetricsOpts) {
 	*out = *in
diff --git a/backup/syncer/job.go b/backup/syncer/job.go
index b1e7e8a2..54d7fda4 100644
--- a/backup/syncer/job.go
+++ b/backup/syncer/job.go
@@ -17,13 +17,16 @@ limitations under the License.
 package syncer
 
 import (
+	"context"
 	"fmt"
+	"strings"
 
 	"github.com/presslabs/controller-util/pkg/syncer"
 	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	v1alpha1 "github.com/radondb/radondb-mysql-kubernetes/api/v1alpha1"
@@ -33,6 +36,7 @@ import (
 )
 
 type jobSyncer struct {
+	client client.Client
 	job    *batchv1.Job
 	backup *backup.Backup
 }
@@ -50,6 +54,7 @@ func NewJobSyncer(c client.Client, backup *backup.Backup) syncer.Interface {
 	}
 
 	sync := &jobSyncer{
+		client: c,
 		job:    obj,
 		backup: backup,
 	}
@@ -174,6 +179,10 @@ func (s *jobSyncer) ensurePodSpec(in corev1.PodSpec) corev1.PodSpec {
 				MountPath: utils.XtrabckupLocal,
 			},
 		}
+	} else if s.backup.Spec.JuiceOpt != nil {
+		// Build the pod spec for a juicefs-based backup.
+		s.buildJuicefsBackPod(&in)
+
 	} else {
 		// in.Containers[0].ImagePullPolicy = s.opt.ImagePullPolicy
 		in.Containers[0].Args = []string{
@@ -238,3 +247,84 @@ func (s *jobSyncer) ensurePodSpec(in corev1.PodSpec) corev1.PodSpec {
 	}
 	return in
 }
+
+func (s *jobSyncer) buildJuicefsBackPod(in *corev1.PodSpec) error {
+	// Add the ssh-key volume backed by the cluster's ssh secret.
+	var defMode int32 = 0600
+	var err error
+	var cmdstr string
+	in.Volumes = []corev1.Volume{
+		{
+			Name: utils.SShVolumnName,
+			VolumeSource: corev1.VolumeSource{
+				Secret: &corev1.SecretVolumeSource{
+					SecretName:  fmt.Sprintf("%s-ssh-key", s.backup.Spec.ClusterName),
+					DefaultMode: &defMode,
+				},
+			},
+		},
+	}
+
+	in.Containers[0].VolumeMounts = []corev1.VolumeMount{
+		{
+			Name:      utils.SShVolumnName,
+			MountPath: utils.SshVolumnPath,
+		},
+	}
+
+	// PodName.clusterName-mysql.Namespace
+	// e.g. sample-mysql-0.sample-mysql.default
+	hostname := fmt.Sprintf("%s.%s-mysql.%s", s.backup.Spec.HostName, s.backup.Spec.ClusterName, s.backup.Namespace)
+	if cmdstr, err = s.buildJuicefsCmd(s.backup.Spec.JuiceOpt.BackupSecretName); err != nil {
+		return err
+	}
+
+	in.Containers[0].Command = []string{"bash", "-c", "--", `cp /etc/secret-ssh/* /root/.ssh
+chmod 600 /root/.ssh/authorized_keys ;` +
+		strings.Join([]string{
+			"ssh", "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", hostname, cmdstr,
+		}, " ")}
+
+	return nil
+}
+
+func (s *jobSyncer) buildJuicefsCmd(secName string) (string, error) {
+	juiceopt := s.backup.Spec.JuiceOpt
+	secret := &corev1.Secret{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "Secret",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      secName,
+			Namespace: s.backup.Namespace,
+		},
+	}
+	err := s.client.Get(context.TODO(),
+		types.NamespacedName{Namespace: s.backup.Namespace,
+			Name: secName}, secret)
+
+	if err != nil {
+		return "", err
+	}
+	url, bucket := secret.Data["s3-endpoint"], secret.Data["s3-bucket"]
+	accesskey, secretkey := secret.Data["s3-access-key"], secret.Data["s3-secret-key"]
+	juicebucket := utils.InstallBucket(string(url), string(bucket))
+	cmdstr := fmt.Sprintf(`juicefs format --storage s3 \
+	--bucket %s \
+	--access-key %s \
+	--secret-key %s \
+	%s \
+	%s
+juicefs mount -d %s /%s/
+export CLUSTER_NAME=%s
+source /backup.sh
+backup
+juicefs umount /%s/`, juicebucket, accesskey, secretkey, juiceopt.JuiceMeta, juiceopt.JuiceName,
+		juiceopt.JuiceMeta, juiceopt.JuiceName, s.backup.Spec.ClusterName, juiceopt.JuiceName)
+
+	return cmdstr, nil
+}
diff --git a/docs/en-us/juicefs_backup_and_restore.md b/docs/en-us/juicefs_backup_and_restore.md
new file mode 100644
--- /dev/null
+++ b/docs/en-us/juicefs_backup_and_restore.md
+English | [简体中文](../zh-cn/juicefs_backup_and_restore.md)
+
+# Enable juiceopt backup
+## Prerequisites
+ 1. Prepare S3 storage (for other storage types, see the juicefs documentation) and obtain its access-key and secret-key. This article uses a minio installation: the instance is minio-1668754867, and the URL for the bucket named test is http://test.minio-1668754867.minio:9000/. Adjust these to your own environment; see the official juicefs documentation.
+
+ 2. Install redis. Although juicefs also supports sqlite as the metadata store, sqlite requires downloading the metadata file from the S3 metadata backup every time, so it is not recommended. The redis URL has the form:
+    ```
+    redis://<redis-server-name>:<port>/<database-number>
+    ```
+    In the example of this article, the redis server name is redis-leader and the database number is 1, so the redis URL is `redis://redis-leader:6379/1`. (If you still need a Redis instance, see the sketch after this list.)
+
+ 3. Verify that it works: suppose the backup directory is juicefs; you can log in to the Pod's backup container and execute the following commands:
+
+    ```
+    juicefs format --storage s3 \
+    --bucket http://test.minio-1668754867.minio:9000/ \
+    --access-key <access-key> \
+    --secret-key <secret-key> \
+    redis://redis-leader:6379/1 \
+    juicefs
+    ```
+    then execute:
+    `juicefs mount -d redis://redis-leader:6379/1 /juicefs`
+
+Check whether the juicefs directory now exists, write some files, and check whether the S3 storage has changed.
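+If you do not yet have a Redis instance to hold the juicefs metadata, a minimal test deployment can be created as below. This is only a sketch: the `redis-leader` name and the `redis:6.2` image are assumptions for this example, and a production setup should use properly persisted Redis.
+
+```
+kubectl create deployment redis-leader --image=redis:6.2
+kubectl expose deployment redis-leader --port=6379
+```
+The exposed service name (`redis-leader`) and port make up the metadata URL used above, `redis://redis-leader:6379/1`.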
+
+## Fill in the backup CRD's yaml configs
+In the backup CRD's yaml file, such as samples/mysql_v1alpha1_backup.yaml, add the following fields under spec:
+
+```
+  juiceOpt:
+    juiceMeta: <your redis url>
+    backupSecretName: <your backup secret name>
+    juiceName: <your juicefs volume name>
+```
+for example:
+```
+  juiceOpt:
+    juiceMeta: "redis://redis-leader:6379/1"
+    backupSecretName: sample-backup-secret
+    juiceName: juicefs
+```
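+A complete Backup object would then look roughly as follows. This is a sketch: it assumes the operator's usual `mysql.radondb.com/v1alpha1` API group, `hostName` and `clusterName` are the pre-existing Backup CRD fields, and the metadata name is arbitrary.
+
+```
+apiVersion: mysql.radondb.com/v1alpha1
+kind: Backup
+metadata:
+  name: backup-sample
+spec:
+  hostName: sample-mysql-0
+  clusterName: sample
+  juiceOpt:
+    juiceMeta: "redis://redis-leader:6379/1"
+    backupSecretName: sample-backup-secret
+    juiceName: juicefs
+```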
+For the other fields, refer to [backup and restore config](./backup_and_restoration_s3.md).
+
+## Run the backup
+
+Use `kubectl apply -f <your backup yaml>`, for example:
+
+```
+ kubectl apply -f config/samples/mysql_v1alpha1_backup.yaml
+```
+
+# Restore
+## Prerequisites for restore
+ Suppose the cluster you want to restore is `sample2`.
+### Add the `config map`
+ 1. First give the `config map` a name of the form <restore-cluster-name>-restore. This article assumes the cluster to restore is sample2, so the `config map` name is `sample2-restore`.
+ 2. Create the config map (a quick verification sketch follows this list):
+    * Prepare the juiceopt parameters:
+    build a yaml file named `juiceopt.yaml` and fill it with:
+    ```
+    juiceMeta: <your redis url>
+    backupSecretName: <your backup secret name>
+    juiceName: <your juicefs volume name>
+    ```
+    For example, in this article juiceopt.yaml is:
+    ```
+    juiceMeta: "redis://redis-leader:6379/1"
+    backupSecretName: sample-backup-secret
+    juiceName: juicefs
+    ```
+    * Use `kubectl create configmap` to create the configmap.
+    The configmap requires two keys, `from` and `juice.opt`, which indicate the already backed-up cluster to restore from and the juicefs parameters, respectively. The `date` key is optional: it indicates the point in time to restore to (format: "2006-01-02 09:07:41"); if it is absent, the restore targets the current time. Use a command such as:
+    `kubectl create configmap sample2-restore --from-literal=from=sample --from-file="juice.opt"=./juiceopt.yaml`
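+You can double-check the created configmap before applying the cluster:
+
+```
+kubectl get configmap sample2-restore -o yaml
+```
+The output should show the `from` key and the `juice.opt` yaml exactly as prepared above (plus `date` if you set one).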
+
+### Configure the mysql cluster's yaml
+ In the example of this article, the cluster to restore is sample2; for how to configure it, refer to [radondb cluster configuration](./deploy_radondb-mysql_operator_on_k8s.md).
+### Apply the yaml with kubectl
+ Use `kubectl apply` to apply the yaml file; for this example:
+
+ `kubectl apply -f config/samples/mysql_v1alpha1_mysqlcluster.yaml`
\ No newline at end of file
diff --git a/docs/zh-cn/juicefs_backup_and_restore.md b/docs/zh-cn/juicefs_backup_and_restore.md
new file mode 100644
index 00000000..8f2dab40
--- /dev/null
+++ b/docs/zh-cn/juicefs_backup_and_restore.md
@@ -0,0 +1,99 @@
+[English](../en-us/juicefs_backup_and_restore.md) | Simplified Chinese
+
+Contents
+=============
+ * [Enable juiceopt backup](#enable-juiceopt-backup)
+   * [Prerequisites](#prerequisites)
+   * [Fill in the configs](#fill-in-the-backup-crds-yaml-config)
+   * [Run the backup](#run-the-backup)
+
+* [Restore](#restore)
+  * [Prerequisites](#prerequisites-for-restoring-a-cluster)
+  * [Add the configmap](#add-the-config-map-settings)
+  * [Configure the mysql cluster](#configure-the-mysql-clusters-yaml)
+  * [Apply the new cluster yaml](#apply-the-yaml-with-kubectl-apply)
+
+# Enable juiceopt backup
+## Prerequisites
+ 1. Prepare S3 storage (for other storage types, see the juicefs documentation) and obtain the access-key and secret-key. This example uses a minio installation: the instance is minio-1668754867, and the URL for the bucket named test is http://test.minio-1668754867.minio:9000/. Adjust to your situation; see the official juicefs documentation.
+
+ 2. Install redis. Although juicefs also supports sqlite as the metadata store, sqlite requires downloading the metadata file from the S3 metadata backup every time, so it is not recommended as the metadata database. The redis URL is composed as follows:
+    ```
+    redis://<redis-server-name>:<port>/<database-number>
+    ```
+    In this document the redis service name is redis-leader and the database number is 1, so the redis URL is `redis://redis-leader:6379/1`
+
+ 3. Verify availability: suppose the backup directory is juicefs; you can log in to the cluster Pod's backup container directly and run the following commands:
+
+    ```
+    juicefs format --storage s3 \
+    --bucket http://test.minio-1668754867.minio:9000/ \
+    --access-key <access-key> \
+    --secret-key <secret-key> \
+    redis://redis-leader:6379/1 \
+    juicefs
+    ```
+    then run:
+    `juicefs mount -d redis://redis-leader:6379/1 /juicefs`
+
+Check whether the juicefs directory now exists, write some files, and check whether the S3 storage has changed.
+
+## Fill in the backup CRD's yaml config
+In the backup CRD's yaml file, such as samples/mysql_v1alpha1_backup.yaml, add the following under the spec field:
+
+```
+  juiceOpt:
+    juiceMeta: <your redis url>
+    backupSecretName: <your backup secret name>
+    juiceName: <your juicefs volume name>
+```
+Example:
+```
+  juiceOpt:
+    juiceMeta: "redis://redis-leader:6379/1"
+    backupSecretName: sample-backup-secret
+    juiceName: juicefs
+```
+For other fields, see [backup and restore configuration](./backup_and_restoration_s3.md)
+
+## Run the backup
+
+Use kubectl apply -f <your backup yaml>, for example:
+
+```
+ kubectl apply -f config/samples/mysql_v1alpha1_backup.yaml
+```
+
+# Restore
+## Prerequisites for restoring a cluster
+ Suppose the cluster to restore is named sample2.
+### Add the config map settings
+ 1. First give the config map a name of the form <restore-cluster-name>-restore. This document assumes the cluster to restore is sample2, so the config map name is sample2-restore.
+ 2. Create the config map
+    * Prepare the juiceopt parameters:
+    prepare a yaml file named juiceopt.yaml with the following content:
+    ```
+    juiceMeta: <your redis url>
+    backupSecretName: <your backup secret name>
+    juiceName: <your juicefs volume name>
+    ```
+    For example, in this document juiceopt.yaml is:
+    ```
+    juiceMeta: "redis://redis-leader:6379/1"
+    backupSecretName: sample-backup-secret
+    juiceName: juicefs
+    ```
+    * Use `kubectl create configmap` to create the configmap.
+    The two required keys in the config map are `from` and `juice.opt`, indicating the name of the already backed-up cluster and the juicefs backup parameters, respectively. The `date` key is optional and indicates the point in time to restore to (format: "2006-01-02 09:07:41"); if omitted, the restore is based on the current time. Run:
+    `kubectl create configmap sample2-restore --from-literal=from=sample --from-file="juice.opt"=./juiceopt.yaml`
+
+
+### Configure the mysql cluster's yaml
+ In this example the cluster to restore is sample2; for configuration, see [radondb cluster configuration](./deploy_radondb-mysql_operator_on_k8s.md)
+### Apply the yaml with kubectl apply
+ Use `kubectl apply` to apply the yaml file; in this example:
+
+ `kubectl apply -f config/samples/mysql_v1alpha1_mysqlcluster.yaml`
\ No newline at end of file
diff --git a/mysqlcluster/container/backup.go b/mysqlcluster/container/backup.go
index 8b8e0519..a3e14ad8 100644
--- a/mysqlcluster/container/backup.go
+++ b/mysqlcluster/container/backup.go
@@ -38,7 +38,7 @@ func (c *backupSidecar) getImage() string {
 }
 
 func (c *backupSidecar) getCommand() []string {
-	return []string{"sidecar", "http"}
+	return []string{"sh", "-c", "/sshd.sh ; sidecar http"}
 }
 
 func (c *backupSidecar) getEnvVars() []corev1.EnvVar {
@@ -147,5 +147,13 @@ func (c *backupSidecar) getVolumeMounts() []corev1.VolumeMount {
 			Name:      utils.SysLocalTimeZone,
 			MountPath: utils.SysLocalTimeZoneMountPath,
 		},
+		{
+			Name:      utils.SysFuseVolume,
+			MountPath: utils.SysFuseVolumnMountPath,
+		},
+		{
+			Name:      utils.SShVolumnName,
+			MountPath: utils.SshVolumnPath,
+		},
 	}
 }
diff --git a/mysqlcluster/container/container.go b/mysqlcluster/container/container.go
index 5f347390..a61bd6d8 100644
--- a/mysqlcluster/container/container.go
+++ b/mysqlcluster/container/container.go
@@ -63,6 +63,7 @@ func getStartupProbe(name string) *corev1.Probe {
 
 // EnsureContainer ensure a container by the giving name.
 func EnsureContainer(name string, c *mysqlcluster.MysqlCluster) corev1.Container {
 	var ctr container
+	var security *corev1.SecurityContext = nil
 	switch name {
 	case utils.ContainerInitSidecarName:
 		ctr = &initSidecar{c, name}
@@ -80,6 +81,14 @@ func EnsureContainer(name string, c *mysqlcluster.MysqlCluster) corev1.Container
 		ctr = &auditLog{c, name}
 	case utils.ContainerBackupName:
 		ctr = &backupSidecar{c, name}
+		// juicefs needs /dev/fuse, so the backup sidecar runs privileged.
+		needAdmin := true
+		security = &corev1.SecurityContext{
+			Privileged: &needAdmin,
+			Capabilities: &corev1.Capabilities{
+				Add: []corev1.Capability{"CAP_SYS_ADMIN",
+					"DAC_READ_SEARCH",
+				},
+			}}
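+		// A sketch of the securityContext this renders into:
+		//
+		//	securityContext:
+		//	  privileged: true
+		//	  capabilities:
+		//	    add: ["CAP_SYS_ADMIN", "DAC_READ_SEARCH"]
+		//
+		// Privileged mode is what actually unlocks /dev/fuse for
+		// `juicefs mount`; once privileged is true the added
+		// capabilities are redundant, but they mirror the intent.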
 	}
 
 	return corev1.Container{
@@ -95,5 +104,6 @@ func EnsureContainer(name string, c *mysqlcluster.MysqlCluster) corev1.Container
 		ReadinessProbe:  ctr.getReadinessProbe(),
 		StartupProbe:    getStartupProbe(name),
 		VolumeMounts:    ctr.getVolumeMounts(),
+		SecurityContext: security,
 	}
 }
diff --git a/mysqlcluster/mysqlcluster.go b/mysqlcluster/mysqlcluster.go
index 88f00b30..9a9babf9 100644
--- a/mysqlcluster/mysqlcluster.go
+++ b/mysqlcluster/mysqlcluster.go
@@ -180,7 +180,7 @@ func (c *MysqlCluster) EnsureVolumes() []corev1.Volume {
 			},
 		)
 	}
-
+	var defMode int32 = 0600
 	volumes = append(volumes,
 		corev1.Volume{
 			Name: utils.MysqlConfVolumeName,
@@ -246,6 +246,23 @@ func (c *MysqlCluster) EnsureVolumes() []corev1.Volume {
 				},
 			},
 		},
+		corev1.Volume{
+			Name: utils.SysFuseVolume,
+			VolumeSource: corev1.VolumeSource{
+				HostPath: &corev1.HostPathVolumeSource{
+					Path: "/dev/fuse",
+				},
+			},
+		},
+		corev1.Volume{
+			Name: utils.SShVolumnName,
+			VolumeSource: corev1.VolumeSource{
+				Secret: &corev1.SecretVolumeSource{
+					SecretName:  c.GetNameForResource(utils.SShKey),
+					DefaultMode: &defMode,
+				},
+			},
+		},
 	)
 	// add the nfs volumn mount
 	if len(c.Spec.NFSServerAddress) != 0 {
@@ -328,8 +345,12 @@ func (c *MysqlCluster) GetNameForResource(name utils.ResourceName) string {
 		return fmt.Sprintf("%s-metrics", c.Name)
 	case utils.Secret:
 		return fmt.Sprintf("%s-secret", c.Name)
+	case utils.SShKey:
+		return fmt.Sprintf("%s-ssh-key", c.Name)
 	case utils.XenonMetaData:
 		return fmt.Sprintf("%s-xenon", c.Name)
+	case utils.RestoreCMN:
+		return fmt.Sprintf("%s-restore", c.Name)
 	case utils.ConfigMap:
 		if template := c.Spec.MysqlOpts.MysqlConfTemplate; template != "" {
 			return template
diff --git a/mysqlcluster/mysqlcluster_test.go b/mysqlcluster/mysqlcluster_test.go
index 8efe01c2..1b9cb0fb 100644
--- a/mysqlcluster/mysqlcluster_test.go
+++ b/mysqlcluster/mysqlcluster_test.go
@@ -253,6 +253,7 @@ func TestGetPodHostName(t *testing.T) {
 }
 
 func TestEnsureVolumes(t *testing.T) {
+	var defMode int32 = 0600
 	volume := []corev1.Volume{
 		{
 			Name: utils.MysqlConfVolumeName,
@@ -318,6 +319,25 @@ func TestEnsureVolumes(t *testing.T) {
 			},
 		},
+		// host fuse
+		{
+			Name: utils.SysFuseVolume,
+			VolumeSource: corev1.VolumeSource{
+				HostPath: &corev1.HostPathVolumeSource{
+					Path: utils.SysFuseVolumnMountPath,
+				},
+			},
+		},
+		// ssh key
+		{
+			Name: utils.SShVolumnName,
+			VolumeSource: corev1.VolumeSource{
+				Secret: &corev1.SecretVolumeSource{
+					SecretName:  "sample-ssh-key",
+					DefaultMode: &defMode,
+				},
+			},
+		},
 	}
 	// when disable Persistence
 	{
diff --git a/mysqlcluster/syncer/headless_service.go b/mysqlcluster/syncer/headless_service.go
index 4de862c8..36501629 100644
--- a/mysqlcluster/syncer/headless_service.go
+++ b/mysqlcluster/syncer/headless_service.go
@@ -57,8 +57,8 @@ func NewHeadlessSVCSyncer(cli client.Client, c *mysqlcluster.MysqlCluster) synce
 		// Use `publishNotReadyAddresses` to be able to access pods even if the pod is not ready.
 		service.Spec.PublishNotReadyAddresses = true
 
-		if len(service.Spec.Ports) != 2 {
-			service.Spec.Ports = make([]corev1.ServicePort, 2)
+		if len(service.Spec.Ports) != 3 {
+			service.Spec.Ports = make([]corev1.ServicePort, 3)
 		}
 		service.Spec.Ports[0].Name = utils.MysqlPortName
@@ -68,6 +68,10 @@ func NewHeadlessSVCSyncer(cli client.Client, c *mysqlcluster.MysqlCluster) synce
 		service.Spec.Ports[1].Name = utils.XBackupPortName
 		service.Spec.Ports[1].Port = utils.XBackupPort
 		service.Spec.Ports[1].TargetPort = intstr.FromInt(utils.XBackupPort)
+		// ssh port
+		service.Spec.Ports[2].Name = utils.SshPortName
+		service.Spec.Ports[2].Port = utils.SshPort
+		service.Spec.Ports[2].TargetPort = intstr.FromInt(utils.SshPort)
 		return nil
 	})
 }
diff --git a/mysqlcluster/syncer/pdb.go b/mysqlcluster/syncer/pdb.go
index 1c0a2afc..0e1ab8ea 100644
--- a/mysqlcluster/syncer/pdb.go
+++ b/mysqlcluster/syncer/pdb.go
@@ -18,7 +18,7 @@ package syncer
 
 import (
 	"github.com/presslabs/controller-util/pkg/syncer"
-	policyv1beta1 "k8s.io/api/policy/v1beta1"
+	policyv1beta1 "k8s.io/api/policy/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/mysqlcluster/syncer/sfsRestoreJob.go b/mysqlcluster/syncer/sfsRestoreJob.go
new file mode 100644
index 00000000..13ebdaa6
--- /dev/null
+++ b/mysqlcluster/syncer/sfsRestoreJob.go
@@ -0,0 +1,319 @@
+/*
+Copyright 2021 RadonDB.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package syncer
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"time"
+
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/yaml"
+
+	"github.com/pkg/errors"
+	apiv1alpha1 "github.com/radondb/radondb-mysql-kubernetes/api/v1alpha1"
+	"github.com/radondb/radondb-mysql-kubernetes/utils"
+)
+
+/** Algorithm
+1. Check whether the restore configmap exists.
+2. If it exists, go to 3; otherwise go to 7.
+3. Create the PVCs.
+4. Create the jobs that restore into the PVCs.
+5. Set the PVC owners.
+6. Delete the configmap.
+7. Exit, then create the statefulset.
+*/
+
+// 1. Check whether the restore configmap exists.
+func (s *StatefulSetSyncer) checkConfigMap(ctx context.Context) (*corev1.ConfigMap, *apiv1alpha1.JuiceOpt, error) {
+	// Fetch the configmap.
+	cm := &corev1.ConfigMap{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "ConfigMap",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      s.GetNameForResource(utils.RestoreCMN),
+			Namespace: s.Namespace,
+			Labels:    s.GetLabels(),
+		},
+	}
+	err := s.cli.Get(ctx,
+		types.NamespacedName{Namespace: s.Namespace,
+			Name: s.GetNameForResource(utils.RestoreCMN)}, cm)
+
+	if err != nil {
+		return nil, nil, err
+	}
+	if f, ok := cm.Data["juice.opt"]; ok {
+		juiceopt := &apiv1alpha1.JuiceOpt{}
+		if err := yaml.Unmarshal([]byte(f), juiceopt); err != nil {
+			return cm, nil, err
+		}
+		return cm, juiceopt, nil
+	}
+	return nil, nil, fmt.Errorf("configmap %s has no juice.opt key", cm.Name)
+}
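+
+// For reference, a restore configmap as created in the docs renders
+// roughly like this (values are the docs' sample values):
+//
+//	apiVersion: v1
+//	kind: ConfigMap
+//	metadata:
+//	  name: sample2-restore
+//	data:
+//	  from: sample
+//	  juice.opt: |
+//	    juiceMeta: "redis://redis-leader:6379/1"
+//	    backupSecretName: sample-backup-secret
+//	    juiceName: juicefs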
+
+// 3. Create the PVCs that the restore jobs fill.
+func (s *StatefulSetSyncer) createPvcs(ctx context.Context) error {
+	logger := s.log
+	replicas := *s.Spec.Replicas
+	var i int32
+	for i = 0; i < replicas; i++ {
+		pvc := s.CreateOnePVC(fmt.Sprintf("data-%s-mysql-%d", s.Name, i))
+		s.setPvcOwner(ctx, pvc)
+		// Create the PVC only if it does not exist yet.
+		err := s.cli.Get(ctx, types.NamespacedName{Name: pvc.Name, Namespace: pvc.Namespace}, pvc)
+		if err != nil && k8sErrors.IsNotFound(err) {
+			logger.Info("Creating a new volume for restore", "Namespace", pvc.Namespace, "Name", pvc.Name)
+			err = s.cli.Create(ctx, pvc)
+			if err != nil {
+				return errors.Wrap(err, "create restore pvc")
+			}
+		} else if err != nil {
+			return errors.Wrap(err, "get restore pvc")
+		}
+	}
+
+	return nil
+}
+
+// genCmdStr reads the backup secret and generates the restore command string.
+func (s *StatefulSetSyncer) genCmdStr(ctx context.Context, from, fromDate string, juiceopt *apiv1alpha1.JuiceOpt) (string, error) {
+	secret := &corev1.Secret{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "Secret",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      juiceopt.BackupSecretName,
+			Namespace: s.Namespace,
+		},
+	}
+	err := s.cli.Get(ctx,
+		types.NamespacedName{Namespace: s.Namespace,
+			Name: juiceopt.BackupSecretName}, secret)
+
+	if err != nil {
+		return "", err
+	}
+	url, bucket := secret.Data["s3-endpoint"], secret.Data["s3-bucket"]
+	accesskey, secretkey := secret.Data["s3-access-key"], secret.Data["s3-secret-key"]
+	juicebucket := utils.InstallBucket(string(url), string(bucket))
+	cmdstr := fmt.Sprintf(`
+	rm -rf /var/lib/mysql/*
+	juicefs format --storage s3 \
+	--bucket %s \
+	--access-key %s \
+	--secret-key %s \
+	%s \
+	%s`, juicebucket, accesskey, secretkey, juiceopt.JuiceMeta, juiceopt.JuiceName)
+	cmdstr += fmt.Sprintf(`
+	juicefs mount -d %s /%s/
+	`, juiceopt.JuiceMeta, juiceopt.JuiceName)
+	cmdstr += fmt.Sprintf(`
+	export CLUSTER_NAME=%s
+	source /backup.sh
+	restore "%s" %s
+	juicefs umount /%s/
+	touch /var/lib/mysql/restore-file
+	chown -R mysql.mysql /var/lib/mysql
+`, from, fromDate, from, juiceopt.JuiceName)
+
+	return cmdstr, nil
+}
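+
+// With the docs' sample values (source cluster "sample", juicefs volume
+// "juicefs", the example minio bucket; keys and date are placeholders),
+// genCmdStr renders roughly:
+//
+//	rm -rf /var/lib/mysql/*
+//	juicefs format --storage s3 \
+//	--bucket http://test.minio-1668754867.minio:9000/ \
+//	--access-key <access-key> \
+//	--secret-key <secret-key> \
+//	redis://redis-leader:6379/1 \
+//	juicefs
+//	juicefs mount -d redis://redis-leader:6379/1 /juicefs/
+//	export CLUSTER_NAME=sample
+//	source /backup.sh
+//	restore "2022-11-18 10:00:00" sample
+//	juicefs umount /juicefs/
+//	touch /var/lib/mysql/restore-file
+//	chown -R mysql.mysql /var/lib/mysql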
+ "Cluster": s.Name, + } + + Containers := make([]corev1.Container, 1) + Containers[0].Name = "restore" + Containers[0].Image = s.Spec.PodPolicy.SidecarImage + Volumes := []corev1.Volume{ + { + Name: fmt.Sprintf("data-%s-mysql-%d", s.Name, i), + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: fmt.Sprintf("data-%s-mysql-%d", s.Name, i), + }, + }, + }, + { + Name: utils.SysFuseVolume, + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/dev/fuse", + }, + }, + }, + } + Containers[0].VolumeMounts = []corev1.VolumeMount{ + + { + Name: fmt.Sprintf("data-%s-mysql-%d", s.Name, i), + MountPath: utils.DataVolumeMountPath, + }, + { + Name: utils.SysFuseVolume, + MountPath: utils.SysFuseVolumnMountPath, + }, + } + Containers[0].Env = envs + Containers[0].Command = []string{"bash", "-c", "-x", cmds} + Containers[0].SecurityContext = &corev1.SecurityContext{ + Privileged: func(i bool) *bool { return &i }(true), + Capabilities: &corev1.Capabilities{ + Add: []corev1.Capability{"CAP_SYS_ADMIN", + "DAC_READ_SEARCH", + }, + }} + + job.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyNever + job.Spec.Template.Spec.ServiceAccountName = s.Name + job.Spec.Template.Spec.Containers = Containers + job.Spec.Template.Spec.Volumes = Volumes + + ownerRefs := s.sfs.GetOwnerReferences() + job.SetOwnerReferences(ownerRefs) + err := s.cli.Create(context.TODO(), job) + if err != nil && !k8sErrors.IsAlreadyExists(err) { + return errors.Wrap(err, "create restore job") + } else if err == nil { + jobarr = append(jobarr, job) + s.log.Info("Created a new restore job", "Namespace", job.Namespace, "Name", job.Name) + } + } + // Wait all job complete + count := 0 +retry: + for _, job := range jobarr { + if err := s.cli.Get(context.TODO(), + types.NamespacedName{Name: job.Name, + Namespace: job.Namespace}, job); err != nil { + return errors.Wrap(err, "get restore job") + } + switch { + case job.Status.Active == 1: + // it is running + s.log.Info("job is running", "Namespace", job.Namespace, "Name", job.Name) + time.Sleep(2 * time.Second) + goto retry + case job.Status.Succeeded == 1: + count++ + case job.Status.Failed >= 1: + return fmt.Errorf("restore job %s fail", job.Name) + } + } + return nil +} + +// 5. set pvc owner. +func (s *StatefulSetSyncer) setPvcOwner(ctx context.Context, pvc *corev1.PersistentVolumeClaim) error { + ownerRefs := s.sfs.GetOwnerReferences() + pvc.SetOwnerReferences(ownerRefs) + return nil +} + +func (s *StatefulSetSyncer) CreateOnePVC(name string) *corev1.PersistentVolumeClaim { + return &corev1.PersistentVolumeClaim{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "PersistentVolumeClaim", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: s.Namespace, + }, + Spec: s.sfs.Spec.VolumeClaimTemplates[0].Spec, + } +} diff --git a/mysqlcluster/syncer/sshSecret.go b/mysqlcluster/syncer/sshSecret.go new file mode 100644 index 00000000..0744ba64 --- /dev/null +++ b/mysqlcluster/syncer/sshSecret.go @@ -0,0 +1,59 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package syncer
+
+import (
+	"github.com/presslabs/controller-util/pkg/syncer"
+	"github.com/radondb/radondb-mysql-kubernetes/mysqlcluster"
+	"github.com/radondb/radondb-mysql-kubernetes/utils"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// NewSShKeySyncer returns a syncer for the cluster's ssh key secret.
+func NewSShKeySyncer(cli client.Client, c *mysqlcluster.MysqlCluster) syncer.Interface {
+	secret := &corev1.Secret{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "Secret",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      c.GetNameForResource(utils.SShKey),
+			Namespace: c.Namespace,
+		},
+	}
+
+	return syncer.NewObjectSyncer("Secret", c.Unwrap(), secret, cli, func() error {
+		if secret.Data == nil {
+			secret.Data = make(map[string][]byte)
+		}
+
+		// Generate the key pair only once.
+		if len(secret.Data["id_ecdsa"]) == 0 {
+			pub, priv, err := GenSSHKey()
+			if err != nil {
+				return err
+			}
+			secret.Data["id_ecdsa"] = priv
+			secret.Data["authorized_keys"] = pub
+		}
+
+		return nil
+	})
+}
diff --git a/mysqlcluster/syncer/sshkey.go b/mysqlcluster/syncer/sshkey.go
new file mode 100644
index 00000000..80d1afdc
--- /dev/null
+++ b/mysqlcluster/syncer/sshkey.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2021 RadonDB.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package syncer
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/x509"
+	"encoding/pem"
+	"log"
+
+	"golang.org/x/crypto/ssh"
+)
+
+// GenSSHKey generates an ECDSA key pair; the keys are used with
+// ssh -o UserKnownHostsFile=/dev/null.
+func GenSSHKey() (pubkey, privkey []byte, err error) {
+	bitSize := 4096 // retained from the RSA version; unused for ECDSA P-256
+
+	privateKey, err := generatePrivateKey(bitSize)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	publicKeyBytes, err := generatePublicKey(&privateKey.PublicKey)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	privateKeyBytes := encodePrivateKeyToPEM(privateKey)
+
+	return publicKeyBytes, privateKeyBytes, err
+}
+
+// generatePrivateKey creates an ECDSA private key on curve P-256;
+// the bitSize argument is ignored.
+func generatePrivateKey(bitSize int) (*ecdsa.PrivateKey, error) {
+	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		return nil, err
+	}
+
+	log.Println("Private Key generated")
+	return privateKey, nil
+}
+
+// encodePrivateKeyToPEM encodes the ECDSA private key to PEM format.
+func encodePrivateKeyToPEM(privateKey *ecdsa.PrivateKey) []byte {
+	// Get ASN.1 DER format.
+	privDER, err := x509.MarshalECPrivateKey(privateKey)
+	if err != nil {
+		panic(err)
+	}
+	privBlock := pem.Block{
+		Type:    "EC PRIVATE KEY",
+		Headers: nil,
+		Bytes:   privDER,
+	}
+
+	// Private key in PEM format.
+	privatePEM := pem.EncodeToMemory(&privBlock)
+
+	return privatePEM
+}
+
+// generatePublicKey takes an ecdsa.PublicKey and returns bytes suitable for
+// writing to an authorized_keys/.pub file, in the format "ecdsa-sha2-nistp256 ..."
+func generatePublicKey(pub *ecdsa.PublicKey) ([]byte, error) {
+	publicKey, err := ssh.NewPublicKey(pub)
+	if err != nil {
+		return nil, err
+	}
+
+	pubKeyBytes := ssh.MarshalAuthorizedKey(publicKey)
+
+	log.Println("Public key generated")
+	return pubKeyBytes, nil
+}
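+
+// For manual debugging, an equivalent key pair can be produced with the
+// standard ssh-keygen tool (assumed available locally; not used by the
+// operator itself):
+//
+//	ssh-keygen -t ecdsa -b 256 -f id_ecdsa -N ""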
diff --git a/mysqlcluster/syncer/statefulset.go b/mysqlcluster/syncer/statefulset.go
index c885fdcd..b8a7b066 100644
--- a/mysqlcluster/syncer/statefulset.go
+++ b/mysqlcluster/syncer/statefulset.go
@@ -262,7 +262,10 @@ func (s *StatefulSetSyncer) createOrUpdate(ctx context.Context) (controllerutil.
 	if err = s.mutate(); err != nil {
 		return controllerutil.OperationResultNone, err
 	}
-
+	// Run the restore jobs first if a restore configmap exists.
+	if err = s.createRestoreJob(ctx); err != nil {
+		s.log.Info("no restore needed for the pvcs", "reason", err)
+	}
 	if err = s.cli.Create(ctx, s.sfs); err != nil {
 		return controllerutil.OperationResultNone, err
 	} else {
diff --git a/mysqlcluster/syncer/statefulset_test.go b/mysqlcluster/syncer/statefulset_test.go
index abb8f4db..23c6e571 100644
--- a/mysqlcluster/syncer/statefulset_test.go
+++ b/mysqlcluster/syncer/statefulset_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package syncer
 
 import (
+	"fmt"
 	"testing"
 
 	appsv1 "k8s.io/api/apps/v1"
@@ -145,3 +146,9 @@ func TestStatefulSetSyncer_sfsUpdated(t *testing.T) {
 		})
 	}
 }
+
+func TestSecretKey(t *testing.T) {
+	pub, priv, _ := GenSSHKey()
+	fmt.Println(string(pub))
+	fmt.Println(string(priv))
+}
diff --git a/script/backup.sh b/script/backup.sh
new file mode 100755
index 00000000..43688779
--- /dev/null
+++ b/script/backup.sh
@@ -0,0 +1,158 @@
+# CLUSTER_NAME=sample
+BASE=/juicefs
+JSONFILE=$BASE/$CLUSTER_NAME-backup.json
+
+function checkfile() {
+    if ! [ -r $JSONFILE ] ; then
+        jq -n --arg cluster $CLUSTER_NAME --arg namespace $NAMESPACE '{"cluster_name": $cluster, "namespace": $namespace,"backup_chains": []}' >$JSONFILE
+    else
+        echo "json file already exists"
+    fi
+}
+
+# NOTE: shadows the bash builtin read; safe here because the builtin is never
+# used in this script. Prints the highest incremental-backup index.
+function read() {
+    max=0
+    IFS_OLD=$IFS
+    IFS=$(echo -en "\n\b")
+    for i in $(jq -c '.backup_chains[]' $JSONFILE);
+    do
+        val=$(echo $i | jq '."target-dir"|match("\\d+")|.string|tonumber')
+        # the base entry has no digits in target-dir, so val may be empty
+        if [ -n "$val" ] && [ $max -lt $val ] ; then
+            max=$val
+        fi
+    done
+    IFS=$IFS_OLD
+    echo $max
+}
+
+function getDate() {
+    date '+%Y-%m-%d %H:%M:%S'
+}
+
+function parseDateToUnix() {
+    local t=$1
+    echo date -d $t '+%s'|sh
+}
+
+# checkTime prints the index of the last backup taken before the given time.
+function checkTime() {
+    local time=$1
+    val=0
+    IFS_OLD=$IFS
+    IFS=$(echo -en "\n\b")
+    for i in $(jq -c '.backup_chains[]' $JSONFILE);
+    do
+        traw=$(echo $i|jq '."time"')
+        val=$(echo $i | jq '."target-dir"|match("\\d+")|.string|tonumber')
+        t=$(echo date -d $traw '+%s'|sh)
+        cmptime=$(echo date -d "\"$time\"" '+%s'|sh)
+        if [ $t -ge $cmptime ]; then
+            break
+        fi
+    done
+    IFS=$IFS_OLD
+    echo $val
+}
+
+function appendinc() {
+    num=$1
+    incbase="$BASE/backups/base"
+    if ! [ $num -eq 0 ]; then
+        incbase=$BASE/backups/inc$num
+    fi
+    jq ".backup_chains += [{\"type\": \"incr-backup\", \"time\": \"$(getDate)\", \"target-dir\": \"$BASE/backups/inc$(echo $num + 1|bc)\",
+    \"incremental-basedir\": \"$incbase\" }]" $JSONFILE >"tmp.json" && mv ./tmp.json $JSONFILE
+}
+
+function appendbase() {
+    jq ".backup_chains += [{\"type\": \"full-backup\", \"time\": \"$(getDate)\", \"target-dir\": \"$BASE/backups/base\"}]" $JSONFILE >"tmp.json" && mv ./tmp.json $JSONFILE
+    sleep 2
+}
+
+function fullbackup() {
+    mkdir -p /$BASE/backups/base
+    xtrabackup --backup --host=127.0.0.1 --user=root --password='' --datadir=/var/lib/mysql/ --target-dir=/$BASE/backups/base
+    success=$?
+    if [ $success -eq 0 ]; then
+        appendbase
+    fi
+}
+
+function incrbackup() {
+    num=$1
+    incbase="$BASE/backups/base"
+    if ! [ $num -eq 0 ]; then
+        incbase=$BASE/backups/inc$num
+    fi
+    xtrabackup --backup --host=127.0.0.1 --user=root --password='' --datadir=/var/lib/mysql/ --target-dir=$BASE/backups/inc$(echo $num + 1|bc) \
+    --incremental-basedir=$incbase
+    success=$?
+    if [ $success -eq 0 ]; then
+        appendinc $num
+    fi
+}
+
+function backup() {
+    if ! [ -r $JSONFILE ] ; then
+        jq -n --arg cluster $CLUSTER_NAME --arg namespace $NAMESPACE '{"cluster_name": $cluster, "namespace": $namespace,"backup_chains": []}' >$JSONFILE
+        sleep 3
+        echo "now doing the full backup"
+        fullbackup
+    else
+        num=$(read)
+        incrbackup $num
+    fi
+}
+
+function restore() {
+    local restoreTime=$1
+    local from=$2
+    jsonfile=$BASE/$from-backup.json
+    if [ $# -ne 2 ] ; then
+        echo "usage: restore <date> <cluster-from>"
+    fi
+    local total=$(checkTime $restoreTime)
+    for index in $(seq 0 $total); do
+        # during restore, the base is always /backups/base
+        base=$(jq -c ".backup_chains[0][\"target-dir\"]" $jsonfile)
+        type=$(jq -c ".backup_chains[$index][\"type\"]" $jsonfile)
+        inc=$(jq -c ".backup_chains[$index][\"target-dir\"]" $jsonfile)
+        cmd=""
+        case $type in
+        "\"full-backup\"")
+            cmd=$(echo xtrabackup --prepare --apply-log-only --target-dir=$base)
+            echo $cmd
+            echo $cmd|sh
+            ;;
+        "\"incr-backup\"")
+            if [ $index -eq $total ]; then
+                cmd=$(echo xtrabackup --prepare --target-dir=$base --incremental-dir=$inc)
+            else
+                cmd=$(echo xtrabackup --prepare --apply-log-only --target-dir=$base --incremental-dir=$inc)
+            fi
+            echo $cmd
+            echo $cmd|sh
+            ;;
+        *)
+            echo nothing
+            ;;
+        esac
+    done
+    # check that /var/lib/mysql is empty before copying back
+    if ! [ -d "/var/lib/mysql/mysql" ]; then
+        base=$(jq -c ".backup_chains[0][\"target-dir\"]" $JSONFILE)
+        cmd=$(echo xtrabackup --copy-back --target-dir=$base --datadir=/var/lib/mysql)
+        echo $cmd
+        echo $cmd|sh
+        chown -R mysql.mysql /var/lib/mysql
+    else
+        echo "the dir is not empty, cannot copy back"
+    fi
+}
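+
+# For orientation, after one full and one incremental backup the state file
+# /juicefs/sample-backup.json looks roughly like this (timestamps are
+# placeholders):
+#
+#  {
+#    "cluster_name": "sample",
+#    "namespace": "default",
+#    "backup_chains": [
+#      {"type": "full-backup", "time": "2022-11-18 10:00:00",
+#       "target-dir": "/juicefs/backups/base"},
+#      {"type": "incr-backup", "time": "2022-11-19 10:00:00",
+#       "target-dir": "/juicefs/backups/inc1",
+#       "incremental-basedir": "/juicefs/backups/base"}
+#    ]
+#  }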
diff --git a/script/sshd.sh b/script/sshd.sh
new file mode 100755
index 00000000..b0a49bbc
--- /dev/null
+++ b/script/sshd.sh
@@ -0,0 +1,4 @@
+cp /etc/secret-ssh/* /root/.ssh
+chmod 600 /root/.ssh/authorized_keys
+/usr/sbin/sshd -D -e -f /etc/ssh/sshd_config &
+echo "start..."
diff --git a/sidecar/init.go b/sidecar/init.go
index 4ae83767..1e6278a0 100644
--- a/sidecar/init.go
+++ b/sidecar/init.go
@@ -127,6 +127,8 @@ func runCloneAndInit(cfg *Config) (bool, error) {
 		return hasInitialized, nil
 	}
 	log.Info("no leader or follower found")
+	// If a restore job prepared this data directory, the restore-file marker exists; treat the node as initialized.
+	hasInitialized, _ = checkIfPathExists(path.Join(dataPath, xrestorefile))
 	return hasInitialized, nil
 }
 
@@ -260,6 +262,16 @@ func runInitCommand(cfg *Config, hasInitialized bool) error {
 	if err = ioutil.WriteFile(initSqlPath, cfg.buildInitSql(hasInitialized), 0644); err != nil {
 		return fmt.Errorf("failed to write init.sql: %s", err)
 	}
+	// If the restore-file marker exists, remove it.
+	if exist, _ := checkIfPathExists(path.Join(dataPath, xrestorefile)); exist {
+		cmd := exec.Command("rm", "-rf", path.Join(dataPath, xrestorefile))
+		cmd.Stderr = os.Stderr
+		if err := cmd.Run(); err != nil {
+			return fmt.Errorf("failed to remove restore-file : %s", err)
+		}
+	}
+
 	// build xenon.json.
 	xenonFilePath := path.Join(xenonPath, "xenon.json")
 	if err = ioutil.WriteFile(xenonFilePath, cfg.buildXenonConf(), 0644); err != nil {
diff --git a/sidecar/util.go b/sidecar/util.go
index f48c710b..f75f2bde 100644
--- a/sidecar/util.go
+++ b/sidecar/util.go
@@ -69,6 +69,9 @@ var (
 
 	// xcloudCommand is the upload tool file name.
 	xcloudCommand = "xbcloud"
+
+	// xrestorefile marks a data directory prepared by a restore job.
+	xrestorefile = "restore-file"
 )
 
 // copyFile the src file to dst.
diff --git a/utils/common.go b/utils/common.go
index 0efd9c45..85386514 100644
--- a/utils/common.go
+++ b/utils/common.go
@@ -159,3 +159,7 @@ func ParseIPAndPath(nfsaddr string) (string, string) {
 		return res[0], "/"
 	}
 }
+
+// InstallBucket rewrites an S3 endpoint into the virtual-hosted-style bucket URL that juicefs expects.
+func InstallBucket(url, bucket string) string {
+	return strings.Join(strings.Split(url, "//"), "//"+bucket+".")
+}
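+
+// A sketch of the rewrite, written as a test:
+//
+//	func TestInstallBucket(t *testing.T) {
+//		got := InstallBucket("http://minio-1668754867.minio:9000/", "test")
+//		want := "http://test.minio-1668754867.minio:9000/"
+//		if got != want {
+//			t.Fatalf("InstallBucket() = %q, want %q", got, want)
+//		}
+//	}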
diff --git a/utils/constants.go b/utils/constants.go
index 8cab16bf..e08422cb 100644
--- a/utils/constants.go
+++ b/utils/constants.go
@@ -88,6 +88,12 @@ const (
 	LogsVolumeName = "logs"
 	DataVolumeName = "data"
 	SysVolumeName  = "host-sys"
+
+	// just for juicefs
+	SysFuseVolume = "host-fuse"
+	SshPortName   = "ssh"
+	SshPort       = 22
+
 	ScriptsVolumeName   = "scripts"
 	XenonConfVolumeName = "xenon-conf"
 	InitFileVolumeName  = "init-mysql"
@@ -100,6 +106,8 @@ const (
 	LogsVolumeMountPath = "/var/log/mysql"
 	DataVolumeMountPath = "/var/lib/mysql"
 	SysVolumeMountPath  = "/host-sys"
+
+	SysFuseVolumnMountPath = "/dev/fuse"
 	ScriptsVolumeMountPath   = "/scripts"
 	XenonConfVolumeMountPath = "/etc/xenon"
 	InitFileVolumeMountPath  = "/docker-entrypoint-initdb.d"
@@ -129,6 +137,10 @@ const (
 	TlsVolumeName = "tls"
 	// TlsMountPath is the volume mount path for tls
 	TlsMountPath = "/etc/mysql-ssl"
+
+	// ssh key volume and its mount path
+	SShVolumnName = "ssh-key"
+	SshVolumnPath = "/etc/secret-ssh"
 )
 
 // ResourceName is the type for aliasing resources that will be created.
@@ -165,6 +177,10 @@ const (
 	JobAnonationDate = "backupDate"
 	// Job Annonations type
 	JobAnonationType = "backupType"
+	// SSH key secret name suffix
+	SShKey = "ssh"
+	// restore configmap name suffix
+	RestoreCMN = "restore"
 )
 
 // JobType