Skip to content

Commit

Permalink
chore: be more strict about storage limits
Browse files Browse the repository at this point in the history
  • Loading branch information
neurosnap committed Jan 24, 2024
1 parent 1370580 commit 39f3aaa
Show file tree
Hide file tree
Showing 5 changed files with 142 additions and 118 deletions.
97 changes: 0 additions & 97 deletions filehandlers/assets/asset.go

This file was deleted.

145 changes: 124 additions & 21 deletions filehandlers/assets/handler.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package uploadassets

import (
"bytes"
"encoding/binary"
"fmt"
"io"
Expand All @@ -21,7 +22,7 @@ import (
type ctxUserKey struct{}
type ctxFeatureFlagKey struct{}
type ctxBucketKey struct{}
type ctxBucketQuotaKey struct{}
type ctxStorageSizeKey struct{}
type ctxProjectKey struct{}

func getProject(s ssh.Session) *db.Project {
Expand Down Expand Up @@ -49,8 +50,15 @@ func getFeatureFlag(s ssh.Session) (*db.FeatureFlag, error) {
return ff, nil
}

func getBucketQuota(s ssh.Session) uint64 {
return s.Context().Value(ctxBucketQuotaKey{}).(uint64)
// getStorageSize reads the cached total bucket size (in bytes) that
// Validate stored in the session context. Panics if Validate has not
// run for this session, since the key would be absent.
func getStorageSize(s ssh.Session) uint64 {
	cached := s.Context().Value(ctxStorageSizeKey{})
	return cached.(uint64)
}

// incrementStorageSize adds fileSize to the session's cached storage
// total, writes the new total back into the session context, and
// returns it so callers can report usage without re-reading the store.
func incrementStorageSize(s ssh.Session, fileSize uint64) uint64 {
	updated := getStorageSize(s) + fileSize
	s.Context().SetValue(ctxStorageSizeKey{}, updated)
	return updated
}

func getUser(s ssh.Session) (*db.User, error) {
Expand All @@ -63,11 +71,12 @@ func getUser(s ssh.Session) (*db.User, error) {

type FileData struct {
*utils.FileEntry
Text []byte
User *db.User
Bucket storage.Bucket
BucketQuota uint64
FeatureFlag *db.FeatureFlag
Text []byte
User *db.User
Bucket storage.Bucket
StorageSize uint64
FeatureFlag *db.FeatureFlag
DeltaFileSize uint64
}

type UploadAssetHandler struct {
Expand Down Expand Up @@ -200,12 +209,12 @@ func (h *UploadAssetHandler) Validate(s ssh.Session) error {
}
s.Context().SetValue(ctxBucketKey{}, bucket)

totalFileSize, err := h.Storage.GetBucketQuota(bucket)
totalStorageSize, err := h.Storage.GetBucketQuota(bucket)
if err != nil {
return err
}
s.Context().SetValue(ctxBucketQuotaKey{}, totalFileSize)
h.Cfg.Logger.Infof("(%s) bucket size is current (%d bytes)", user.Name, totalFileSize)
s.Context().SetValue(ctxStorageSizeKey{}, totalStorageSize)
h.Cfg.Logger.Infof("(%s) bucket size is current (%d bytes)", user.Name, totalStorageSize)

s.Context().SetValue(ctxUserKey{}, user)
h.Cfg.Logger.Infof("(%s) attempting to upload files to (%s)", user.Name, h.Cfg.Space)
Expand Down Expand Up @@ -262,40 +271,134 @@ func (h *UploadAssetHandler) Write(s ssh.Session, entry *utils.FileEntry) (strin
s.Context().SetValue(ctxProjectKey{}, project)
}

bucketQuota := getBucketQuota(s)
storageSize := getStorageSize(s)
featureFlag, err := getFeatureFlag(s)
if err != nil {
return "", err
}
// calculate the file-size difference between the same file already
// stored and the updated file being uploaded
assetFilename := shared.GetAssetFileName(entry)
curFileSize, _ := h.Storage.GetFileSize(bucket, assetFilename)
deltaFileSize := curFileSize - entry.Size

data := &FileData{
FileEntry: entry,
User: user,
Text: origText,
Bucket: bucket,
BucketQuota: bucketQuota,
FeatureFlag: featureFlag,
FileEntry: entry,
User: user,
Text: origText,
Bucket: bucket,
StorageSize: storageSize,
FeatureFlag: featureFlag,
DeltaFileSize: uint64(deltaFileSize),
}
err = h.writeAsset(data)
if err != nil {
h.Cfg.Logger.Error(err)
return "", err
}
nextStorageSize := incrementStorageSize(s, uint64(deltaFileSize))

url := h.Cfg.AssetURL(
user.Name,
projectName,
strings.Replace(data.Filepath, "/"+projectName+"/", "", 1),
)

totalFileSize := bucketQuota
maxSize := int(featureFlag.Data.StorageMax)
str := fmt.Sprintf(
"%s (space: %.2f/%.2fGB, %.2f%%)",
url,
shared.BytesToGB(int(totalFileSize)),
shared.BytesToGB(int(nextStorageSize)),
shared.BytesToGB(maxSize),
(float32(totalFileSize)/float32(maxSize))*100,
(float32(nextStorageSize)/float32(maxSize))*100,
)

return str, nil
}

// validateAsset reports whether the uploaded file may be written to storage.
// It enforces, in order: the user's total storage quota, project naming
// rules, the per-file size limit, the ".well-known" text-file allowance,
// the "_redirects" special case, and the allowed-extension list. It returns
// false plus a user-facing error describing the first failed check.
func (h *UploadAssetHandler) validateAsset(data *FileData) (bool, error) {
	storageMax := data.FeatureFlag.Data.StorageMax
	// reject the upload if it would push the bucket past the user's quota
	if data.StorageSize+data.DeltaFileSize >= storageMax {
		return false, fmt.Errorf(
			"ERROR: user (%s) has exceeded (%d bytes) max (%d bytes)",
			data.User.Name,
			data.StorageSize,
			storageMax,
		)
	}

	projectName := shared.GetProjectName(data.FileEntry)
	if projectName == "" || projectName == "/" || projectName == "." {
		return false, fmt.Errorf("ERROR: invalid project name, you must copy files to a non-root folder (e.g. pgs.sh:/project-name)")
	}

	fileSize := uint64(data.Size)
	fname := filepath.Base(data.Filepath)
	fileMax := data.FeatureFlag.Data.FileMax
	if fileSize > fileMax {
		return false, fmt.Errorf("ERROR: file (%s) has exceeded maximum file size (%d bytes)", fname, fileMax)
	}

	// ".well-known" is a special case: any utf-8 text file is allowed there.
	// BUG FIX: this must inspect the full path — fname is filepath.Base()
	// output and can never contain a "/" separator, so the previous check
	// against fname was dead code and the allowance never fired.
	if strings.Contains(data.Filepath, "/.well-known/") {
		if shared.IsTextFile(string(data.Text)) {
			return true, nil
		}
		return false, fmt.Errorf("(%s) not a utf-8 text file", data.Filepath)
	}

	// special file we use for custom routing
	if fname == "_redirects" {
		return true, nil
	}

	if !shared.IsExtAllowed(fname, h.Cfg.AllowedExt) {
		extStr := strings.Join(h.Cfg.AllowedExt, ",")
		err := fmt.Errorf(
			"ERROR: (%s) invalid file, format must be (%s), skipping",
			fname,
			extStr,
		)
		return false, err
	}

	return true, nil
}

// writeAsset validates the incoming file, then either removes it from the
// bucket (a zero-byte upload signals deletion) or uploads its contents.
func (h *UploadAssetHandler) writeAsset(data *FileData) error {
	valid, err := h.validateAsset(data)
	if !valid {
		return err
	}

	assetFilename := shared.GetAssetFileName(data.FileEntry)

	// an empty payload is treated as a request to delete the file
	if data.Size == 0 {
		return h.Storage.DeleteFile(data.Bucket, assetFilename)
	}

	h.Cfg.Logger.Infof(
		"(%s) uploading to (bucket: %s) (%s)",
		data.User.Name,
		data.Bucket.Name,
		assetFilename,
	)

	contents := bytes.NewReader(data.Text)
	if _, err := h.Storage.PutFile(
		data.Bucket,
		assetFilename,
		utils.NopReaderAtCloser(contents),
		data.FileEntry,
	); err != nil {
		return err
	}

	return nil
}
9 changes: 9 additions & 0 deletions shared/storage/fs.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,15 @@ func (s *StorageFS) DeleteBucket(bucket Bucket) error {
return os.RemoveAll(bucket.Path)
}

// GetFileSize returns the size in bytes of the file at fpath within the
// bucket's directory on the local filesystem.
func (s *StorageFS) GetFileSize(bucket Bucket, fpath string) (int64, error) {
	info, err := os.Stat(filepath.Join(bucket.Path, fpath))
	if err != nil {
		return 0, err
	}
	return info.Size(), nil
}

func (s *StorageFS) GetFile(bucket Bucket, fpath string) (utils.ReaderAtCloser, int64, time.Time, error) {
dat, err := os.Open(filepath.Join(bucket.Path, fpath))
if err != nil {
Expand Down
8 changes: 8 additions & 0 deletions shared/storage/minio.go
Original file line number Diff line number Diff line change
Expand Up @@ -152,6 +152,14 @@ func (s *StorageMinio) DeleteBucket(bucket Bucket) error {
return s.Client.RemoveBucket(context.TODO(), bucket.Name)
}

// GetFileSize returns the size in bytes of the object stored at fpath in
// the given minio bucket, via a StatObject metadata call.
func (s *StorageMinio) GetFileSize(bucket Bucket, fpath string) (int64, error) {
	stat, err := s.Client.StatObject(
		context.Background(),
		bucket.Name,
		fpath,
		minio.StatObjectOptions{},
	)
	if err != nil {
		return 0, err
	}
	return stat.Size, nil
}

func (s *StorageMinio) GetFile(bucket Bucket, fpath string) (utils.ReaderAtCloser, int64, time.Time, error) {
modTime := time.Time{}

Expand Down
1 change: 1 addition & 0 deletions shared/storage/storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ type ObjectStorage interface {

DeleteBucket(bucket Bucket) error
GetBucketQuota(bucket Bucket) (uint64, error)
GetFileSize(bucket Bucket, fpath string) (int64, error)
GetFile(bucket Bucket, fpath string) (utils.ReaderAtCloser, int64, time.Time, error)
ServeFile(bucket Bucket, fpath string, ratio *Ratio, original bool, useProxy bool) (io.ReadCloser, string, error)
PutFile(bucket Bucket, fpath string, contents utils.ReaderAtCloser, entry *utils.FileEntry) (string, error)
Expand Down

0 comments on commit 39f3aaa

Please sign in to comment.