From 77a61398521af56e21267cca0924f69f0c24cacd Mon Sep 17 00:00:00 2001
From: Dweb Fan
Date: Mon, 13 May 2024 22:29:10 -0700
Subject: [PATCH] add storage class option

Signed-off-by: Dweb Fan
---
 README.md                 |  5 +++--
 clients/upload.go         |  6 ++++--
 cmd/lomob/main.go         | 35 ++++++++++++++++++++++++++++-------
 cmd/lomob/upload-files.go |  7 ++++++-
 cmd/lomob/upload-iso.go   | 21 +++++++++++++--------
 5 files changed, 54 insertions(+), 20 deletions(-)

diff --git a/README.md b/README.md
index 8238bf7..dcd66ae 100644
--- a/README.md
+++ b/README.md
@@ -56,8 +56,8 @@ Features:
 - :heavy_check_mark: pack all photos/videos into multiple ISOs and upload to S3
 - :heavy_check_mark: metadata to track which file is in which iso
 - :heavy_check_mark: backup files not in ISO to staging station, Google drive
-- [ ] pack all photos/videos into multiple ISOs and upload to Glancier
-- [ ] encrypt iso files before upload to Glacier, Google drive
+- :heavy_check_mark: pack all photos/videos into multiple ISOs and upload to Glacier
+- :heavy_check_mark: encrypt iso files before upload to Glacier, Google drive
 - [ ] metadata to track which files are in staging station
 - [ ] daemon running mode to watch folder change only, avoid scanning all folder daily
 - [ ] daily consistency check on staging station
@@ -81,6 +81,7 @@ Also welcome to try our free Photo backup applications. https://lomorage.com.
 - Checksum validation during upload
 - Self define iso size
 - On the fly encryption all files as iso file size may be big, and we want to avoid intermittent file in order to save time and not require extra disks
+- Original file hash and encrypted file hash are kept in cloud for future consistency check
 
 # Security Model
 The security model is from repository [filecrypt](https://github.com/kisom/filecrypt). Refer book [Practical Cryptography With Go](https://leanpub.com/gocrypto/read) for more detail.
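The PutObject change below rides on S3's ChecksumSHA256 field, which expects the base64 encoding of the raw SHA-256 digest rather than the usual hex string. A minimal sketch of producing that value with only the standard library (the package and function names here are hypothetical; in this repository the role is played by lomohash.CalculateHashBase64):

package checksum

import (
	"crypto/sha256"
	"encoding/base64"
	"io"
)

// sha256Base64 returns the base64-encoded raw SHA-256 digest of r,
// the format S3 expects in the ChecksumSHA256 field.
func sha256Base64(r io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, r); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
}

S3 recomputes the digest server-side and rejects the request on mismatch, which is what the README means by checksum validation during upload.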
diff --git a/clients/upload.go b/clients/upload.go
index e7f78a2..b1788dc 100644
--- a/clients/upload.go
+++ b/clients/upload.go
@@ -135,7 +135,7 @@ func (ac *AWSClient) createBucketIfNotExist(bucket string) error {
 	return nil
 }
 
-func (ac *AWSClient) PutObject(bucket, remotePath, checksum, fileType string, reader io.ReadSeeker) error {
+func (ac *AWSClient) PutObject(bucket, remotePath, checksum, fileType, storageClass string, reader io.ReadSeeker) error {
 	err := ac.createBucketIfNotExist(bucket)
 	if err != nil {
 		return err
@@ -147,12 +147,13 @@ func (ac *AWSClient) PutObject(bucket, remotePath, checksum, fileType string, re
 		ContentType:       aws.String(fileType),
 		ChecksumAlgorithm: &checksumAlgorithm,
 		ChecksumSHA256:    aws.String(checksum),
+		StorageClass:      aws.String(storageClass),
 	}
 	_, err = ac.svc.PutObject(input)
 	return err
 }
 
-func (ac *AWSClient) CreateMultipartUpload(bucket, remotePath, fileType string) (*UploadRequest, error) {
+func (ac *AWSClient) CreateMultipartUpload(bucket, remotePath, fileType, storageClass string) (*UploadRequest, error) {
 	err := ac.createBucketIfNotExist(bucket)
 	if err != nil {
 		return nil, err
@@ -163,6 +164,7 @@ func (ac *AWSClient) CreateMultipartUpload(bucket, remotePath, fileType string)
 		Key:               &remotePath,
 		ContentType:       &fileType,
 		ChecksumAlgorithm: &checksumAlgorithm,
+		StorageClass:      &storageClass,
 	}
 
 	resp, err := ac.svc.CreateMultipartUpload(input)
diff --git a/cmd/lomob/main.go b/cmd/lomob/main.go
index 773f0dc..1126061 100644
--- a/cmd/lomob/main.go
+++ b/cmd/lomob/main.go
@@ -2,6 +2,7 @@ package main
 
 import (
 	"errors"
+	"fmt"
 	"os"
 	"sync"
@@ -179,10 +180,15 @@ func main() {
 					Usage: "Save multiparts locally for debug",
 				},
 				cli.StringFlag{
-					Name:   "encryt-key, k",
+					Name:   "encrypt-key, k",
 					Usage:  "Master key to encrypt current upload file",
 					EnvVar: "LOMOB_MASTER_KEY",
 				},
+				cli.StringFlag{
+					Name:  "storage-class",
+					Usage: "The type of storage to use for the object. Valid choices are: DEEP_ARCHIVE | GLACIER | GLACIER_IR | INTELLIGENT_TIERING | ONEZONE_IA | REDUCED_REDUNDANCY | STANDARD | STANDARD_IA.",
+					Value: "GLACIER_IR",
+				},
 			},
 		},
 		{
@@ -206,7 +212,7 @@
 					Value: defaultBucket,
 				},
 				cli.StringFlag{
-					Name:   "encryt-key, k",
+					Name:   "encrypt-key, k",
 					Usage:  "Master key to encrypt current upload file",
 					EnvVar: "LOMOB_MASTER_KEY",
 				},
@@ -245,7 +251,7 @@
 					Value: defaultBucket,
 				},
 				cli.StringFlag{
-					Name:   "encryt-key, k",
+					Name:   "encrypt-key, k",
 					Usage:  "Master key to encrypt current upload file",
 					EnvVar: "LOMOB_MASTER_KEY",
 				},
@@ -268,7 +274,7 @@
 					Value: "gdrive-token.json",
 				},
 				cli.StringFlag{
-					Name:   "encryt-key, k",
+					Name:   "encrypt-key, k",
 					Usage:  "Master key to encrypt current upload file",
 					EnvVar: "LOMOB_MASTER_KEY",
 				},
@@ -354,7 +360,7 @@
 			ArgsUsage: "Usage: [input filename] [[output filename]]. If output filename is not given, it will be .enc",
 			Flags: []cli.Flag{
 				cli.StringFlag{
-					Name:   "encryt-key, k",
+					Name:   "encrypt-key, k",
 					Usage:  "Master key to encrypt current upload file",
 					EnvVar: "LOMOB_MASTER_KEY",
 				},
@@ -367,7 +373,7 @@
 			ArgsUsage: "[filename]",
 			Flags: []cli.Flag{
 				cli.StringFlag{
-					Name:   "encryt-key, k",
+					Name:   "encrypt-key, k",
 					Usage:  "Master key to encrypt current upload file",
 					EnvVar: "LOMOB_MASTER_KEY",
 				},
@@ -480,10 +486,15 @@
 					Value: defaultBucket,
 				},
 				cli.StringFlag{
-					Name:   "encryt-key, k",
+					Name:   "encrypt-key, k",
 					Usage:  "Master key to encrypt current upload file",
 					EnvVar: "LOMOB_MASTER_KEY",
 				},
+				cli.StringFlag{
+					Name:  "storage-class",
+					Usage: "The type of storage to use for the object. Valid choices are: DEEP_ARCHIVE | GLACIER | GLACIER_IR | INTELLIGENT_TIERING | ONEZONE_IA | REDUCED_REDUNDANCY | STANDARD | STANDARD_IA.",
+					Value: "GLACIER_IR",
+				},
 			},
 		},
 		{
@@ -551,3 +562,13 @@ func initDB(dbname string) (err error) {
 	db, err = dbx.OpenDB(dbname)
 	return err
 }
+
+func getAWSStorageClass(ctx *cli.Context) (string, error) {
+	c := ctx.String("storage-class")
+	switch c {
+	case "DEEP_ARCHIVE", "GLACIER", "GLACIER_IR", "INTELLIGENT_TIERING",
+		"ONEZONE_IA", "REDUCED_REDUNDANCY", "STANDARD", "STANDARD_IA":
+		return c, nil
+	}
+	return "", fmt.Errorf("invalid storage class: %s", c)
+}
diff --git a/cmd/lomob/upload-files.go b/cmd/lomob/upload-files.go
index 215fdc1..9e1a161 100644
--- a/cmd/lomob/upload-files.go
+++ b/cmd/lomob/upload-files.go
@@ -201,6 +201,11 @@ func uploadFileToS3(ctx *cli.Context) error {
 	region := ctx.String("awsBucketRegion")
 	bucket := ctx.String("awsBucketName")
 
+	storageClass, err := getAWSStorageClass(ctx)
+	if err != nil {
+		return err
+	}
+
 	cli, err := clients.NewAWSClient(accessKeyID, accessKey, region)
 	if err != nil {
 		return err
@@ -265,7 +270,7 @@
 	}
 
 	fmt.Printf("Uploading file %s\n", remoteFilename)
-	err = cli.PutObject(bucket, remoteFilename, lomohash.CalculateHashBase64(hash), metaContentType, tmpFile)
+	err = cli.PutObject(bucket, remoteFilename, lomohash.CalculateHashBase64(hash), metaContentType, storageClass, tmpFile)
 	if err != nil {
 		fmt.Printf("Uploading file %s fail: %s\n", remoteFilename, err)
 	} else {
diff --git a/cmd/lomob/upload-iso.go b/cmd/lomob/upload-iso.go
index 14afb5e..cc73dcb 100644
--- a/cmd/lomob/upload-iso.go
+++ b/cmd/lomob/upload-iso.go
@@ -111,7 +111,7 @@ func prepareUploadParts(isoFilename string, partSize int) (*os.File, *types.ISOI
 	return isoFile, isoInfo, parts, db.UpdateIsoBase64Hash(isoInfo.ID, isoInfo.HashBase64)
 }
 
-func prepareUploadRequest(cli *clients.AWSClient, region, bucket string,
+func prepareUploadRequest(cli *clients.AWSClient, region, bucket, storageClass string,
 	isoInfo *types.ISOInfo) (*clients.UploadRequest, error) {
 	isoFilename := filepath.Base(isoInfo.Name)
 	remoteInfo, err := cli.HeadObject(bucket, isoFilename)
@@ -143,7 +143,7 @@
 	}
 
 	// create new upload
-	request, err := cli.CreateMultipartUpload(bucket, isoFilename, isoContentType)
+	request, err := cli.CreateMultipartUpload(bucket, isoFilename, isoContentType, storageClass)
 	if err != nil {
 		return nil, err
 	}
@@ -191,7 +191,7 @@ func validateISOMetafile(metaFilename string, tree []byte) error {
 	return os.WriteFile(metaFilename, tree, 0644)
 }
 
-func uploadISOMetafile(cli *clients.AWSClient, bucket, isoFilename string) error {
+func uploadISOMetafile(cli *clients.AWSClient, bucket, storageClass, isoFilename string) error {
 	// TODO: create meta file if it is zero or not exist
 	tree, err := genTreeInIso(isoFilename)
 	if err != nil {
@@ -233,7 +233,7 @@ func uploadISOMetafile(cli *clients.AWSClient, bucket, isoFilename string) error
 	}
 
 	fmt.Printf("Uploading metadata file %s\n", metaFilename)
-	err = cli.PutObject(bucket, filepath.Base(metaFilename), hashBase64, metaContentType,
+	err = cli.PutObject(bucket, filepath.Base(metaFilename), hashBase64, metaContentType, storageClass,
 		bytes.NewReader(treeBuf))
 	if err != nil {
 		fmt.Printf("Uploading metadata file %s fail: %s\n", metaFilename, err)
@@ -243,7 +243,7 @@ func uploadISOMetafile(cli *clients.AWSClient, bucket, isoFilename string) error
 	return err
 }
 
-func uploadISO(accessKeyID, accessKey, region, bucket, isoFilename string,
+func uploadISO(accessKeyID, accessKey, region, bucket, storageClass, isoFilename string,
 	partSize int, saveParts bool) error {
 	cli, err := clients.NewAWSClient(accessKeyID, accessKey, region)
 	if err != nil {
@@ -251,7 +251,7 @@ func uploadISO(accessKeyID, accessKey, region, bucket, isoFilename string,
 	}
 
 	// check metadata file firstly
-	err = uploadISOMetafile(cli, bucket, isoFilename)
+	err = uploadISOMetafile(cli, bucket, storageClass, isoFilename)
 	if err != nil {
 		return err
 	}
@@ -262,7 +262,7 @@ func uploadISO(accessKeyID, accessKey, region, bucket, isoFilename string,
 	}
 	defer isoFile.Close()
 
-	request, err := prepareUploadRequest(cli, region, bucket, isoInfo)
+	request, err := prepareUploadRequest(cli, region, bucket, storageClass, isoInfo)
 	if err != nil {
 		return err
 	}
@@ -368,8 +368,13 @@ func uploadISOs(ctx *cli.Context) error {
 		return errors.New("Please supply one iso file name at least, or -a to upload all files not uploaded")
 	}
 
+	storageClass, err := getAWSStorageClass(ctx)
+	if err != nil {
+		return err
+	}
+
 	for _, isoFilename := range ctx.Args() {
-		err = uploadISO(accessKeyID, secretAccessKey, region, bucket,
+		err = uploadISO(accessKeyID, secretAccessKey, region, bucket, storageClass,
 			isoFilename, int(partSize), saveParts)
 		if err != nil {
 			return err
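The getAWSStorageClass switch above whitelists the storage classes lomob supports. For comparison, recent versions of aws-sdk-go v1 generate the full enum list as s3.StorageClass_Values(), so the check could also be derived from the SDK. A sketch under that assumption (validStorageClass is a hypothetical name, and the SDK list is broader than lomob's, e.g. it also includes OUTPOSTS):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

// validStorageClass reports whether c is a storage class the SDK knows,
// e.g. STANDARD, GLACIER_IR, DEEP_ARCHIVE.
func validStorageClass(c string) bool {
	for _, v := range s3.StorageClass_Values() {
		if v == c {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(validStorageClass("GLACIER_IR"))  // true
	fmt.Println(validStorageClass("ONE-ZONE_IA")) // false; the real constant is ONEZONE_IA
}

Keeping the explicit switch, as the patch does, has the advantage of rejecting classes the tool has not been tested against, so the SDK-derived check is an option rather than a correction.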