Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add option for disabling multipart uploads to S3 #583

Merged
merged 1 commit into from
May 19, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
* New datastore option to ignore Redis cache when downloading media served by a `publicBaseUrl`. This can help ensure more requests get redirected to the CDN.
* `HEAD /download` is now supported, as per [MSC4120](https://github.com/matrix-org/matrix-spec-proposals/pull/4120).
* S3 datastores can now specify a `prefixLength` to improve S3 performance on some providers. See `config.sample.yaml` for details.
* Added a `multipartUploads` flag (defaults to `true`; set to `false` to disable multipart uploads) for running MMR against *unsupported* S3 providers. See `config.sample.yaml` for details.

### Fixed

Expand Down
6 changes: 6 additions & 0 deletions config.sample.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -216,6 +216,12 @@ datastores:
# help improve download speeds in some S3 providers. Should not be set to higher than
# 16 to avoid future incompatibilities with MMR. Defaults to zero (no prefix).
#prefixLength: 3
# Set to false to disable multipart uploads. This may be required for some *unsupported*
# S3 providers. Note that performance may be significantly degraded with this option set
# to false - installations with multipart uploads disabled will not receive support,
# particularly for performance concerns. If you are using AWS, DigitalOcean Spaces, or
# MinIO, you do not need to set or change this option - your environment is supported.
#multipartUploads: true

# Options for controlling archives. Archives are exports of a particular user's content for
# the purpose of GDPR or moving media to a different server.
Expand Down
8 changes: 8 additions & 0 deletions datastores/s3.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ type s3 struct {
publicBaseUrl string
redirectWhenCached bool
prefixLength int
multipartUploads bool
}

func ResetS3Clients() {
Expand All @@ -44,6 +45,7 @@ func getS3(ds config.DatastoreConfig) (*s3, error) {
publicBaseUrl := ds.Options["publicBaseUrl"]
redirectWhenCachedStr, hasRedirectWhenCached := ds.Options["redirectWhenCached"]
prefixLengthStr, hasPrefixLength := ds.Options["prefixLength"]
useMultipartStr, hasMultipart := ds.Options["multipartUploads"]

if !hasStorageClass {
storageClass = "STANDARD"
Expand All @@ -54,6 +56,11 @@ func getS3(ds config.DatastoreConfig) (*s3, error) {
useSsl, _ = strconv.ParseBool(useSslStr)
}

useMultipart := true
if hasMultipart && useMultipartStr != "" {
useMultipart, _ = strconv.ParseBool(useMultipartStr)
}

redirectWhenCached := false
if hasRedirectWhenCached && redirectWhenCachedStr != "" {
redirectWhenCached, _ = strconv.ParseBool(redirectWhenCachedStr)
Expand Down Expand Up @@ -88,6 +95,7 @@ func getS3(ds config.DatastoreConfig) (*s3, error) {
publicBaseUrl: publicBaseUrl,
redirectWhenCached: redirectWhenCached,
prefixLength: prefixLength,
multipartUploads: useMultipart,
}
s3clients.Store(ds.Id, s3c)
return s3c, nil
Expand Down
6 changes: 5 additions & 1 deletion datastores/upload.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,11 @@ func Upload(ctx rcontext.RequestContext, ds config.DatastoreConfig, data io.Read

metrics.S3Operations.With(prometheus.Labels{"operation": "PutObject"}).Inc()
var info minio.UploadInfo
info, err = s3c.client.PutObject(ctx.Context, s3c.bucket, objectName, tee, size, minio.PutObjectOptions{StorageClass: s3c.storageClass, ContentType: contentType})
info, err = s3c.client.PutObject(ctx.Context, s3c.bucket, objectName, tee, size, minio.PutObjectOptions{
StorageClass: s3c.storageClass,
ContentType: contentType,
DisableMultipart: !s3c.multipartUploads,
})
uploadedBytes = info.Size
} else if ds.Type == "file" {
basePath := ds.Options["path"]
Expand Down