From ddd0c99a2e2e59689f8bb68156771740e8a9a114 Mon Sep 17 00:00:00 2001 From: dantb Date: Tue, 28 Nov 2023 16:29:25 +0100 Subject: [PATCH] Add support for bulk copying files to another project --- .../delta/plugins/storage/files/Files.scala | 92 +++++++++++++--- .../files/model/CopyFileDestination.scala | 27 +++++ .../storage/files/model/FileAttributes.scala | 2 +- .../plugins/storage/files/model/FileId.scala | 1 + .../storage/files/model/FileRejection.scala | 20 ++++ .../files/routes/CopyFilePayload.scala | 36 ++++++ .../storage/files/routes/FilesRoutes.scala | 103 ++++++++++++------ .../storage/storages/model/Storage.scala | 8 +- .../storages/model/StorageRejection.scala | 5 +- .../storages/operations/CopyFile.scala | 25 +++++ .../operations/StorageFileRejection.scala | 16 +++ .../operations/disk/DiskStorageCopyFile.scala | 32 ++++++ .../remote/RemoteDiskStorageCopyFile.scala | 33 ++++++ .../client/RemoteDiskStorageClient.scala | 34 ++++++ .../errors/tag-and-rev-copy-error.json | 5 + .../plugins/storage/files/FileFixtures.scala | 62 ++++++----- .../plugins/storage/files/FilesSpec.scala | 64 ++++++++++- .../files/routes/FilesRoutesSpec.scala | 78 ++++++++++++- .../client/RemoteStorageClientSpec.scala | 4 +- .../docs/delta/api/assets/files/copy-put.json | 34 ++++++ .../docs/delta/api/assets/files/copy-put.sh | 10 ++ .../main/paradox/docs/delta/api/files-api.md | 58 +++++++++- .../bluebrain/nexus/tests/HttpClient.scala | 40 +++++-- .../nexus/tests/kg/CopyFileSpec.scala | 52 +++++++++ .../nexus/tests/kg/DiskStorageSpec.scala | 4 +- .../nexus/tests/kg/RemoteStorageSpec.scala | 4 +- .../nexus/tests/kg/S3StorageSpec.scala | 2 +- .../tests/kg/SearchConfigIndexingSpec.scala | 6 +- .../nexus/tests/kg/StorageSpec.scala | 12 +- 29 files changed, 748 insertions(+), 121 deletions(-) create mode 100644 delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/CopyFileDestination.scala create mode 100644 
delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/CopyFilePayload.scala create mode 100644 delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/CopyFile.scala create mode 100644 delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskStorageCopyFile.scala create mode 100644 delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteDiskStorageCopyFile.scala create mode 100644 delta/plugins/storage/src/test/resources/errors/tag-and-rev-copy-error.json create mode 100644 docs/src/main/paradox/docs/delta/api/assets/files/copy-put.json create mode 100644 docs/src/main/paradox/docs/delta/api/assets/files/copy-put.sh create mode 100644 tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/CopyFileSpec.scala diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala index fb575da2b3..3f0dcc81ca 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala @@ -3,7 +3,7 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.files import akka.actor.typed.ActorSystem import akka.actor.{ActorSystem => ClassicActorSystem} import akka.http.scaladsl.model.ContentTypes.`application/octet-stream` -import akka.http.scaladsl.model.{ContentType, HttpEntity, Uri} +import akka.http.scaladsl.model.{BodyPartEntity, ContentType, HttpEntity, Uri} import cats.effect.{Clock, IO} import cats.syntax.all._ import ch.epfl.bluebrain.nexus.delta.kernel.cache.LocalCache @@ -19,9 +19,9 @@ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection._ import 
ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.schemas.{files => fileSchema} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.{RemoteDiskStorageConfig, StorageTypeConfig} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.{StorageFetchRejection, StorageIsDeprecated} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.{DifferentStorageType, InvalidStorageType, StorageFetchRejection, StorageIsDeprecated} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{DigestAlgorithm, Storage, StorageRejection, StorageType} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.{FetchAttributeRejection, FetchFileRejection, SaveFileRejection} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.{CopyFileRejection, FetchAttributeRejection, FetchFileRejection, SaveFileRejection} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{Storages, StoragesStatistics} @@ -195,6 +195,57 @@ final class Files( } yield res }.span("createLink") + /** + * Create a file from a source file potentially in a different organization + * @param sourceId + * File lookup id for the source file + * @param dest + * Project, storage and file details for the file we're creating + */ + def copyTo( + sourceId: FileId, + dest: CopyFileDestination + )(implicit c: Caller): IO[FileResource] = { + for { + file <- fetchSourceFile(sourceId) + (pc, destStorageRef, destStorage) <- fetchDestinationStorage(dest) + _ <- validateStorageTypeForCopy(file.storageType, destStorage) + space <- fetchStorageAvailableSpace(destStorage) + _ <- IO.raiseWhen(space.exists(_ < 
file.attributes.bytes))( + FileTooLarge(destStorage.storageValue.maxFileSize, space) + ) + iri <- dest.fileId.fold(generateId(pc))(FileId(_, dest.project).expandIri(fetchContext.onCreate).map(_._1)) + destinationDesc <- FileDescription(dest.filename.getOrElse(file.attributes.filename), file.attributes.mediaType) + attributes <- CopyFile(destStorage, remoteDiskStorageClient).apply(file.attributes, destinationDesc).adaptError { + case r: CopyFileRejection => CopyRejection(file.id, file.storage.iri, destStorage.id, r) + } + res <- eval(CreateFile(iri, dest.project, destStorageRef, destStorage.tpe, attributes, c.subject, dest.tag)) + } yield res + }.span("copyFile") + + private def fetchSourceFile(id: FileId)(implicit c: Caller) = + for { + file <- fetch(id) + sourceStorage <- storages.fetch(file.value.storage, id.project) + _ <- validateAuth(id.project, sourceStorage.value.storageValue.readPermission) + } yield file.value + + private def fetchDestinationStorage(dest: CopyFileDestination)(implicit c: Caller) = + for { + pc <- fetchContext.onCreate(dest.project) + (destStorageRef, destStorage) <- fetchActiveStorage(dest.storage, dest.project, pc) + } yield (pc, destStorageRef, destStorage) + + private def validateStorageTypeForCopy(source: StorageType, destination: Storage): IO[Unit] = + IO.raiseWhen(source == StorageType.S3Storage)( + WrappedStorageRejection( + InvalidStorageType(destination.id, source, Set(StorageType.DiskStorage, StorageType.RemoteDiskStorage)) + ) + ) >> + IO.raiseUnless(source == destination.tpe)( + WrappedStorageRejection(DifferentStorageType(destination.id, found = destination.tpe, expected = source)) + ) + /** * Update an existing file * @@ -456,20 +507,31 @@ final class Files( private def extractFileAttributes(iri: Iri, entity: HttpEntity, storage: Storage): IO[FileAttributes] = for { - storageAvailableSpace <- storage.storageValue.capacity.fold(IO.none[Long]) { capacity => - storagesStatistics - .get(storage.id, storage.project) - .redeem( - _ 
=> Some(capacity), - stat => Some(capacity - stat.spaceUsed) - ) - } - (description, source) <- formDataExtractor(iri, entity, storage.storageValue.maxFileSize, storageAvailableSpace) - attributes <- SaveFile(storage, remoteDiskStorageClient, config) - .apply(description, source) - .adaptError { case e: SaveFileRejection => SaveRejection(iri, storage.id, e) } + (description, source) <- extractFormData(iri, storage, entity) + attributes <- saveFile(iri, storage, description, source) } yield attributes + private def extractFormData(iri: Iri, storage: Storage, entity: HttpEntity): IO[(FileDescription, BodyPartEntity)] = + for { + storageAvailableSpace <- fetchStorageAvailableSpace(storage) + (description, source) <- formDataExtractor(iri, entity, storage.storageValue.maxFileSize, storageAvailableSpace) + } yield (description, source) + + private def saveFile(iri: Iri, storage: Storage, description: FileDescription, source: BodyPartEntity) = + SaveFile(storage, remoteDiskStorageClient, config) + .apply(description, source) + .adaptError { case e: SaveFileRejection => SaveRejection(iri, storage.id, e) } + + private def fetchStorageAvailableSpace(storage: Storage): IO[Option[Long]] = + storage.storageValue.capacity.fold(IO.none[Long]) { capacity => + storagesStatistics + .get(storage.id, storage.project) + .redeem( + _ => Some(capacity), + stat => Some(capacity - stat.spaceUsed) + ) + } + private def expandStorageIri(segment: IdSegment, pc: ProjectContext): IO[Iri] = Storages.expandIri(segment, pc).adaptError { case s: StorageRejection => WrappedStorageRejection(s) diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/CopyFileDestination.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/CopyFileDestination.scala new file mode 100644 index 0000000000..8cce82a5b6 --- /dev/null +++ 
b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/CopyFileDestination.scala @@ -0,0 +1,27 @@ +package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model + +import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegment +import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef +import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag + +/** + * Details of the file we're creating in the copy + * + * @param project + * Organization and project for the new file + * @param fileId + * Optional identifier for the new file + * @param storage + * Optional storage for the new file which must have the same type as the source file's storage + * @param tag + * Optional tag to create the new file with + * @param filename + * Optional filename for the new file. If omitted, the source filename will be used + */ +final case class CopyFileDestination( + project: ProjectRef, + fileId: Option[IdSegment], + storage: Option[IdSegment], + tag: Option[UserTag], + filename: Option[String] +) diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileAttributes.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileAttributes.scala index 426d9334e2..65c61683a4 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileAttributes.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileAttributes.scala @@ -28,7 +28,7 @@ import scala.annotation.nowarn * @param mediaType * the optional media type of the file * @param bytes - * the size of the file file in bytes + * the size of the file in bytes * @param digest * the digest information of the file * @param origin diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileId.scala
b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileId.scala index 0af0c34bd0..a55f658f1f 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileId.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileId.scala @@ -19,6 +19,7 @@ object FileId { def apply(ref: ResourceRef, project: ProjectRef): FileId = FileId(IdSegmentRef(ref), project) def apply(id: IdSegment, tag: UserTag, project: ProjectRef): FileId = FileId(IdSegmentRef(id, tag), project) def apply(id: IdSegment, rev: Int, project: ProjectRef): FileId = FileId(IdSegmentRef(id, rev), project) + def apply(id: IdSegment, project: ProjectRef): FileId = FileId(IdSegmentRef(id), project) val iriExpander: ExpandIri[InvalidFileId] = new ExpandIri(InvalidFileId.apply) } diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileRejection.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileRejection.scala index 2c6a60f187..b54d2efa1f 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileRejection.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileRejection.scala @@ -16,6 +16,7 @@ import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.encoder.JsonLdEncoder import ch.epfl.bluebrain.nexus.delta.sdk.marshalling.HttpResponseFields import ch.epfl.bluebrain.nexus.delta.sdk.marshalling.RdfRejectionHandler.all._ +import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegment import ch.epfl.bluebrain.nexus.delta.sdk.projects.FetchContext import ch.epfl.bluebrain.nexus.delta.sdk.syntax.httpResponseFieldsSyntax import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef @@ -147,6 
+148,12 @@ object FileRejection { s"Linking a file '$id' cannot be performed without a 'filename' or a 'path' that does not end with a filename." ) + /** + * Rejection returned when attempting to fetch a file and including both the target tag and revision. + */ + final case class InvalidFileLookup(id: IdSegment) + extends FileRejection(s"Only one of 'tag' and 'rev' can be used to lookup file '$id'.") + /** * Rejection returned when attempting to create/update a file with a Multipart/Form-Data payload that does not * contain a ''file'' fieldName @@ -235,6 +242,19 @@ object FileRejection { final case class LinkRejection(id: Iri, storageId: Iri, rejection: StorageFileRejection) extends FileRejection(s"File '$id' could not be linked using storage '$storageId'", Some(rejection.loggedDetails)) + /** + * Rejection returned when interacting with the storage operations bundle to copy a file already in storage + */ + final case class CopyRejection( + sourceId: Iri, + sourceStorageId: Iri, + destStorageId: Iri, + rejection: StorageFileRejection + ) extends FileRejection( + s"File '$sourceId' could not be copied from storage '$sourceStorageId' to storage '$destStorageId'", + Some(rejection.loggedDetails) + ) + /** * Signals a rejection caused when interacting with other APIs when fetching a resource */ diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/CopyFilePayload.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/CopyFilePayload.scala new file mode 100644 index 0000000000..6ef65bed38 --- /dev/null +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/CopyFilePayload.scala @@ -0,0 +1,36 @@ +package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes + +import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileId +import 
ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection.InvalidFileLookup +import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegment +import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef +import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag +import io.circe.Decoder + +final case class CopyFilePayload( + destFilename: Option[String], + sourceProj: ProjectRef, + sourceFile: IdSegment, + sourceTag: Option[UserTag], + sourceRev: Option[Int] +) { + def toSourceFileId: Either[InvalidFileLookup, FileId] = (sourceTag, sourceRev) match { + case (Some(tag), None) => Right(FileId(sourceFile, tag, sourceProj)) + case (None, Some(rev)) => Right(FileId(sourceFile, rev, sourceProj)) + case (None, None) => Right(FileId(sourceFile, sourceProj)) + case (Some(_), Some(_)) => Left(InvalidFileLookup(sourceFile)) + } +} + +object CopyFilePayload { + + implicit val dec: Decoder[CopyFilePayload] = Decoder.instance { cur => + for { + destFilename <- cur.get[Option[String]]("destinationFilename") + sourceProj <- cur.get[ProjectRef]("sourceProjectRef") + sourceFileId <- cur.get[String]("sourceFileId").map(IdSegment(_)) + sourceTag <- cur.get[Option[UserTag]]("sourceTag") + sourceRev <- cur.get[Option[Int]]("sourceRev") + } yield CopyFilePayload(destFilename, sourceProj, sourceFileId, sourceTag, sourceRev) + } +} diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutes.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutes.scala index 7686829d3a..f44a284601 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutes.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutes.scala @@ -5,10 +5,11 @@ import akka.http.scaladsl.model.Uri.Path import akka.http.scaladsl.model.headers.Accept import 
akka.http.scaladsl.model.{ContentType, MediaRange} import akka.http.scaladsl.server._ +import cats.data.EitherT import cats.effect.IO import cats.syntax.all._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection._ -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{File, FileId, FileRejection} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{CopyFileDestination, File, FileId, FileRejection} import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.permissions.{read => Read, write => Write} import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.FilesRoutes._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.{schemas, FileResource, Files} @@ -27,6 +28,7 @@ import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller import ch.epfl.bluebrain.nexus.delta.sdk.implicits._ import ch.epfl.bluebrain.nexus.delta.sdk.model.routes.Tag import ch.epfl.bluebrain.nexus.delta.sdk.model.{BaseUri, IdSegment} +import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag import io.circe.Decoder import io.circe.generic.extras.Configuration @@ -71,9 +73,9 @@ final class FilesRoutes( (baseUriPrefix(baseUri.prefix) & replaceUri("files", schemas.files)) { pathPrefix("files") { extractCaller { implicit caller => - resolveProjectRef.apply { ref => + resolveProjectRef.apply { projectRef => implicit class IndexOps(io: IO[FileResource]) { - def index(m: IndexingMode): IO[FileResource] = io.flatTap(self.index(ref, _, m)) + def index(m: IndexingMode): IO[FileResource] = io.flatTap(self.index(projectRef, _, m)) } concat( @@ -87,30 +89,68 @@ final class FilesRoutes( emit( Created, files - .createLink(storage, ref, filename, mediaType, path, tag) + .createLink(storage, projectRef, filename, mediaType, path, tag) .index(mode) .attemptNarrow[FileRejection] ) }, + // Create a file by copying from another project, without id segment + 
entity(as[CopyFilePayload]) { c: CopyFilePayload => + val copyTo = CopyFileDestination(projectRef, None, storage, tag, c.destFilename) + + emit(Created, copyFile(projectRef, mode, c, copyTo)) + }, // Create a file without id segment extractRequestEntity { entity => emit( Created, - files.create(storage, ref, entity, tag).index(mode).attemptNarrow[FileRejection] + files.create(storage, projectRef, entity, tag).index(mode).attemptNarrow[FileRejection] ) } ) } }, (idSegment & indexingMode) { (id, mode) => - val fileId = FileId(id, ref) + val fileId = FileId(id, projectRef) concat( pathEndOrSingleSlash { operationName(s"$prefixSegment/files/{org}/{project}/{id}") { concat( (put & pathEndOrSingleSlash) { - parameters("rev".as[Int].?, "storage".as[IdSegment].?, "tag".as[UserTag].?) { - case (None, storage, tag) => + concat( + // Create a file by copying from another project + parameters("storage".as[IdSegment].?, "tag".as[UserTag].?) { case (destStorage, destTag) => + entity(as[CopyFilePayload]) { c: CopyFilePayload => + val copyTo = + CopyFileDestination(projectRef, Some(id), destStorage, destTag, c.destFilename) + + emit(Created, copyFile(projectRef, mode, c, copyTo)) + } + }, + parameters("rev".as[Int], "storage".as[IdSegment].?, "tag".as[UserTag].?) { + case (rev, storage, tag) => + concat( + // Update a Link + entity(as[LinkFile]) { case LinkFile(filename, mediaType, path) => + emit( + files + .updateLink(fileId, storage, filename, mediaType, path, rev, tag) + .index(mode) + .attemptNarrow[FileRejection] + ) + }, + // Update a file + extractRequestEntity { entity => + emit( + files + .update(fileId, storage, rev, entity, tag) + .index(mode) + .attemptNarrow[FileRejection] + ) + } + ) + }, + parameters("storage".as[IdSegment].?, "tag".as[UserTag].?) 
{ case (storage, tag) => concat( // Link a file with id segment entity(as[LinkFile]) { case LinkFile(filename, mediaType, path) => @@ -126,36 +166,19 @@ final class FilesRoutes( extractRequestEntity { entity => emit( Created, - files.create(fileId, storage, entity, tag).index(mode).attemptNarrow[FileRejection] - ) - } - ) - case (Some(rev), storage, tag) => - concat( - // Update a Link - entity(as[LinkFile]) { case LinkFile(filename, mediaType, path) => - emit( - files - .updateLink(fileId, storage, filename, mediaType, path, rev, tag) - .index(mode) - .attemptNarrow[FileRejection] - ) - }, - // Update a file - extractRequestEntity { entity => - emit( files - .update(fileId, storage, rev, entity, tag) + .create(fileId, storage, entity, tag) .index(mode) .attemptNarrow[FileRejection] ) } ) - } + } + ) }, // Deprecate a file (delete & parameter("rev".as[Int])) { rev => - authorizeFor(ref, Write).apply { + authorizeFor(projectRef, Write).apply { emit( files .deprecate(fileId, rev) @@ -168,7 +191,7 @@ final class FilesRoutes( // Fetch a file (get & idSegmentRef(id)) { id => - emitOrFusionRedirect(ref, id, fetch(FileId(id, ref))) + emitOrFusionRedirect(projectRef, id, fetch(FileId(id, projectRef))) } ) } @@ -177,9 +200,9 @@ final class FilesRoutes( operationName(s"$prefixSegment/files/{org}/{project}/{id}/tags") { concat( // Fetch a file tags - (get & idSegmentRef(id) & pathEndOrSingleSlash & authorizeFor(ref, Read)) { id => + (get & idSegmentRef(id) & pathEndOrSingleSlash & authorizeFor(projectRef, Read)) { id => emit( - fetchMetadata(FileId(id, ref)) + fetchMetadata(FileId(id, projectRef)) .map(_.value.tags) .attemptNarrow[FileRejection] .rejectOn[FileNotFound] @@ -187,7 +210,7 @@ final class FilesRoutes( }, // Tag a file (post & parameter("rev".as[Int]) & pathEndOrSingleSlash) { rev => - authorizeFor(ref, Write).apply { + authorizeFor(projectRef, Write).apply { entity(as[Tag]) { case Tag(tagRev, tag) => emit( Created, @@ -198,7 +221,7 @@ final class FilesRoutes( }, 
// Delete a tag (tagLabel & delete & parameter("rev".as[Int]) & pathEndOrSingleSlash & authorizeFor( - ref, + projectRef, Write )) { (tag, rev) => emit( @@ -213,7 +236,7 @@ final class FilesRoutes( } }, (pathPrefix("undeprecate") & put & parameter("rev".as[Int])) { rev => - authorizeFor(ref, Write).apply { + authorizeFor(projectRef, Write).apply { emit( files .undeprecate(fileId, rev) @@ -231,6 +254,16 @@ final class FilesRoutes( } } + private def copyFile(projectRef: ProjectRef, mode: IndexingMode, c: CopyFilePayload, copyTo: CopyFileDestination)( + implicit caller: Caller + ): IO[Either[FileRejection, FileResource]] = + (for { + _ <- EitherT.right(aclCheck.authorizeForOr(c.sourceProj, Read)(AuthorizationFailed(c.sourceProj.project, Read))) + sourceFileId <- EitherT.fromEither[IO](c.toSourceFileId) + result <- EitherT(files.copyTo(sourceFileId, copyTo).attemptNarrow[FileRejection]) + _ <- EitherT.right[FileRejection](index(projectRef, result, mode)) + } yield result).value + def fetch(id: FileId)(implicit caller: Caller): Route = (headerValueByType(Accept) & varyAcceptHeaders) { case accept if accept.mediaRanges.exists(metadataMediaRanges.contains) => diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala index 35a256462b..d22d6f816e 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala @@ -4,11 +4,11 @@ import akka.actor.ActorSystem import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.StorageTypeConfig import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.Metadata import 
ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, RemoteDiskStorageValue, S3StorageValue} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk.{DiskStorageFetchFile, DiskStorageSaveFile} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk.{DiskStorageCopyFile, DiskStorageFetchFile, DiskStorageSaveFile} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3.{S3StorageFetchFile, S3StorageLinkFile, S3StorageSaveFile} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.{FetchAttributes, FetchFile, LinkFile, SaveFile} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{contexts, Storages} import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.ContextValue @@ -89,6 +89,7 @@ object Storage { def saveFile(implicit as: ActorSystem): SaveFile = new DiskStorageSaveFile(this) + def copyFile: CopyFile = new DiskStorageCopyFile(this) } /** @@ -138,6 +139,9 @@ object Storage { def linkFile(client: RemoteDiskStorageClient): LinkFile = new RemoteDiskStorageLinkFile(this, client) + def copyFile(client: RemoteDiskStorageClient): CopyFile = + new RemoteDiskStorageCopyFile(this, client) + def fetchComputedAttributes(client: RemoteDiskStorageClient): FetchAttributes = new RemoteStorageFetchAttributes(value, client) } diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageRejection.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageRejection.scala index 0858b82946..a253b45156 100644 --- 
a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageRejection.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageRejection.scala @@ -143,10 +143,11 @@ object StorageRejection { extends StorageRejection(s"Storage ${id.fold("")(id => s"'$id'")} has invalid JSON-LD payload.") /** - * Rejection returned when attempting to create a storage with an id that already exists. + * Signals an attempt to update/create a storage based on a previous revision with a different storage type * * @param id - * the storage identifier + * @param found + * @param expected */ final case class DifferentStorageType(id: Iri, found: StorageType, expected: StorageType) extends StorageRejection(s"Storage '$id' is of type '$found' and can't be updated to be a '$expected' .") diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/CopyFile.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/CopyFile.scala new file mode 100644 index 0000000000..c23c4976c7 --- /dev/null +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/CopyFile.scala @@ -0,0 +1,25 @@ +package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations + +import cats.effect.IO +import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes, FileDescription} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{Storage, StorageType} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.CopyFileRejection +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient + +trait CopyFile { + def apply(source: FileAttributes, dest: FileDescription): IO[FileAttributes] +} + +object CopyFile { + + def apply(storage: 
Storage, client: RemoteDiskStorageClient): CopyFile = + storage match { + case storage: Storage.DiskStorage => storage.copyFile + case storage: Storage.S3Storage => unsupported(storage.tpe) + case storage: Storage.RemoteDiskStorage => storage.copyFile(client) + } + + private def unsupported(storageType: StorageType): CopyFile = + (_, _) => IO.raiseError(CopyFileRejection.UnsupportedOperation(storageType)) + +} diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/StorageFileRejection.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/StorageFileRejection.scala index b8f6122b90..a3511184cc 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/StorageFileRejection.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/StorageFileRejection.scala @@ -65,6 +65,22 @@ object StorageFileRejection { extends FetchAttributeRejection(rejection.loggedDetails) } + /** + * Rejection returned when a storage cannot copy a file + */ + sealed abstract class CopyFileRejection(loggedDetails: String) extends StorageFileRejection(loggedDetails) + + object CopyFileRejection { + + /** + * Rejection returned when performing this operation because the storage does not support it + */ + final case class UnsupportedOperation(tpe: StorageType) + extends CopyFileRejection( + s"Copying a file attributes is not supported for storages of type '${tpe.iri}'" + ) + } + /** + * Rejection returned when a storage cannot save a file + */ diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskStorageCopyFile.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskStorageCopyFile.scala new file mode 100644 index 0000000000..fbe4ffe1dc 
--- /dev/null +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskStorageCopyFile.scala @@ -0,0 +1,32 @@ +package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk + +import akka.http.scaladsl.model.Uri +import cats.effect.IO +import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes, FileDescription} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.DiskStorage +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.CopyFile +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk.DiskStorageSaveFile.initLocation + +import java.net.URI +import java.nio.file.{Paths, StandardCopyOption} +import scala.annotation.nowarn + +class DiskStorageCopyFile(storage: DiskStorage) extends CopyFile { + @nowarn + override def apply(source: FileAttributes, dest: FileDescription): IO[FileAttributes] = { + val sourcePath = Paths.get(URI.create(s"file://${source.location.path}")) + for { + (destPath, destRelativePath) <- initLocation(storage.project, storage.value, dest.uuid, dest.filename) + _ <- fs2.io.file.copy[IO](sourcePath, destPath, Seq(StandardCopyOption.COPY_ATTRIBUTES)) + } yield FileAttributes( + uuid = dest.uuid, + location = Uri(destPath.toUri.toString), + path = Uri.Path(destRelativePath.toString), + filename = dest.filename, + mediaType = source.mediaType, + bytes = source.bytes, + digest = source.digest, + origin = source.origin + ) + } +} diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteDiskStorageCopyFile.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteDiskStorageCopyFile.scala new file mode 100644 index 0000000000..71236dbae9 --- /dev/null +++ 
b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteDiskStorageCopyFile.scala @@ -0,0 +1,33 @@ +package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote + +import akka.http.scaladsl.model.Uri +import cats.effect.IO +import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes, FileDescription} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.RemoteDiskStorage +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.CopyFile +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.SaveFile.intermediateFolders +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient + +class RemoteDiskStorageCopyFile( + storage: RemoteDiskStorage, + client: RemoteDiskStorageClient +) extends CopyFile { + + def apply(source: FileAttributes, description: FileDescription): IO[FileAttributes] = { + val destinationPath = Uri.Path(intermediateFolders(storage.project, description.uuid, description.filename)) + client.copyFile(storage.value.folder, source.location.path, destinationPath)(storage.value.endpoint).as { + FileAttributes( + uuid = description.uuid, + location = source.location, // TODO what's the destination absolute path? 
+ path = destinationPath, + filename = description.filename, + mediaType = description.mediaType, + bytes = source.bytes, + digest = source.digest, + origin = source.origin + ) + } + + } + +} diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteDiskStorageClient.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteDiskStorageClient.scala index a14bb20a2c..cf2336fbc7 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteDiskStorageClient.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteDiskStorageClient.scala @@ -175,6 +175,40 @@ final class RemoteDiskStorageClient(client: HttpClient, getAuthToken: AuthTokenP } } + /** + * Copies a path from the provided ''sourceRelativePath'' to ''destRelativePath'' inside the nexus folder.
+ * + * @param bucket + * the storage bucket name + * @param sourceRelativePath + * the source relative path location + * @param destRelativePath + * the destination relative path location inside the nexus folder + */ + def copyFile( + bucket: Label, + sourceRelativePath: Path, + destRelativePath: Path + )(implicit baseUri: BaseUri): IO[Unit] = { + getAuthToken(credentials).flatMap { authToken => + val endpoint = baseUri.endpoint / "buckets" / bucket.value / "files" / destRelativePath + val payload = Json.obj("source" -> sourceRelativePath.toString.asJson) + client + .discardBytes(Post(endpoint, payload).withCredentials(authToken), ()) + .adaptError { + // TODO update error + case error @ HttpClientStatusError(_, `NotFound`, _) if !bucketNotFoundType(error) => + MoveFileRejection.FileNotFound(sourceRelativePath.toString) + case error @ HttpClientStatusError(_, `BadRequest`, _) if pathContainsLinksType(error) => + MoveFileRejection.PathContainsLinks(destRelativePath.toString) + case HttpClientStatusError(_, `Conflict`, _) => + MoveFileRejection.ResourceAlreadyExists(destRelativePath.toString) + case error: HttpClientError => + UnexpectedMoveError(sourceRelativePath.toString, destRelativePath.toString, error.asString) + } + } + } + private def bucketNotFoundType(error: HttpClientError): Boolean = error.jsonBody.fold(false)(_.hcursor.get[String](keywords.tpe).toOption.contains("BucketNotFound")) diff --git a/delta/plugins/storage/src/test/resources/errors/tag-and-rev-copy-error.json b/delta/plugins/storage/src/test/resources/errors/tag-and-rev-copy-error.json new file mode 100644 index 0000000000..111259dff1 --- /dev/null +++ b/delta/plugins/storage/src/test/resources/errors/tag-and-rev-copy-error.json @@ -0,0 +1,5 @@ +{ + "@context" : "https://bluebrain.github.io/nexus/contexts/error.json", + "@type" : "InvalidFileLookup", + "reason" : "Only one of 'tag' and 'rev' can be used to lookup file '{{fileId}}'." 
+} diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileFixtures.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileFixtures.scala index 729dd5e4f8..219d674c4d 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileFixtures.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileFixtures.scala @@ -2,6 +2,7 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.files import akka.http.scaladsl.model.ContentTypes.`text/plain(UTF-8)` import akka.http.scaladsl.model.{HttpEntity, MessageEntity, Multipart, Uri} +import cats.effect.unsafe.implicits.global import cats.effect.{IO, Ref} import ch.epfl.bluebrain.nexus.delta.kernel.utils.{UUIDF, UrlUtils} import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.ComputedDigest @@ -11,37 +12,39 @@ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{AbsolutePat import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv import ch.epfl.bluebrain.nexus.delta.sdk.generators.ProjectGen import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.ApiMappings -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label +import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef} import ch.epfl.bluebrain.nexus.testkit.scalatest.EitherValues -import ch.epfl.bluebrain.nexus.testkit.scalatest.ce.CatsIOValues import org.scalatest.Suite import java.nio.file.{Files => JavaFiles} import java.util.{Base64, UUID} -trait FileFixtures extends EitherValues with CatsIOValues { +trait FileFixtures extends EitherValues { self: Suite => - val uuid = UUID.fromString("8249ba90-7cc6-4de5-93a1-802c04200dcc") - val uuid2 = UUID.fromString("12345678-7cc6-4de5-93a1-802c04200dcc") - val ref = Ref.of[IO, UUID](uuid).accepted - implicit val uuidF: UUIDF = UUIDF.fromRef(ref) - val org = Label.unsafe("org") - val orgDeprecated = 
Label.unsafe("org-deprecated") - val project = ProjectGen.project("org", "proj", base = nxv.base, mappings = ApiMappings("file" -> schemas.files)) - val deprecatedProject = ProjectGen.project("org", "proj-deprecated") - val projectWithDeprecatedOrg = ProjectGen.project("org-deprecated", "other-proj") - val projectRef = project.ref - val diskId2 = nxv + "disk2" - val file1 = nxv + "file1" - val file2 = nxv + "file2" - val fileTagged = nxv + "fileTagged" - val fileTagged2 = nxv + "fileTagged2" - val file1Encoded = UrlUtils.encode(file1.toString) - val encodeId = (id: String) => UrlUtils.encode((nxv + id).toString) - val generatedId = project.base.iri / uuid.toString - val generatedId2 = project.base.iri / uuid2.toString + val uuid = UUID.fromString("8249ba90-7cc6-4de5-93a1-802c04200dcc") + val uuid2 = UUID.fromString("12345678-7cc6-4de5-93a1-802c04200dcc") + val uuidOrg2 = UUID.fromString("66666666-7cc6-4de5-93a1-802c04200dcc") + val ref = Ref.of[IO, UUID](uuid).unsafeRunSync() + implicit val uuidF: UUIDF = UUIDF.fromRef(ref) + val org = Label.unsafe("org") + val org2 = Label.unsafe("org2") + val project = ProjectGen.project(org.value, "proj", base = nxv.base, mappings = ApiMappings("file" -> schemas.files)) + val project2 = + ProjectGen.project(org2.value, "proj2", base = nxv.base, mappings = ApiMappings("file" -> schemas.files)) + val deprecatedProject = ProjectGen.project("org", "proj-deprecated") + val projectRef = project.ref + val projectRefOrg2 = project2.ref + val diskId2 = nxv + "disk2" + val file1 = nxv + "file1" + val file2 = nxv + "file2" + val fileTagged = nxv + "fileTagged" + val fileTagged2 = nxv + "fileTagged2" + val file1Encoded = UrlUtils.encode(file1.toString) + val encodeId = (id: String) => UrlUtils.encode((nxv + id).toString) + val generatedId = project.base.iri / uuid.toString + val generatedId2 = project.base.iri / uuid2.toString val content = "file content" val path = AbsolutePath(JavaFiles.createTempDirectory("files")).rightValue @@ -50,16 
+53,21 @@ trait FileFixtures extends EitherValues with CatsIOValues { def withUUIDF[T](id: UUID)(test: => T): T = (for { old <- ref.getAndSet(id) - t <- IO.delay(test).onError(_ => ref.set(old)) + t <- IO(test).onError(_ => ref.set(old)) _ <- ref.set(old) - } yield t).accepted + } yield t).unsafeRunSync() - def attributes(filename: String = "file.txt", size: Long = 12, id: UUID = uuid): FileAttributes = { + def attributes( + filename: String = "file.txt", + size: Long = 12, + id: UUID = uuid, + projRef: ProjectRef = projectRef + ): FileAttributes = { val uuidPathSegment = id.toString.take(8).mkString("/") FileAttributes( id, - s"file://$path/org/proj/$uuidPathSegment/$filename", - Uri.Path(s"org/proj/$uuidPathSegment/$filename"), + s"file://$path/${projRef.toString}/$uuidPathSegment/$filename", + Uri.Path(s"${projRef.toString}/$uuidPathSegment/$filename"), filename, Some(`text/plain(UTF-8)`), size, diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala index 9f6e2aa4bc..6ed0ab682c 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala @@ -11,8 +11,8 @@ import ch.epfl.bluebrain.nexus.delta.plugins.storage.RemoteContextResolutionFixt import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.NotComputedDigest import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Storage import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection._ -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes, FileId, FileRejection} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.StorageNotFound +import 
ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{CopyFileDestination, FileAttributes, FileId, FileRejection} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.{DifferentStorageType, StorageNotFound} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageType.{RemoteDiskStorage => RemoteStorageType} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{StorageRejection, StorageStatEntry, StorageType} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.AkkaSourceHelpers @@ -26,7 +26,7 @@ import ch.epfl.bluebrain.nexus.delta.sdk.acls.model.AclAddress import ch.epfl.bluebrain.nexus.delta.sdk.auth.{AuthTokenProvider, Credentials} import ch.epfl.bluebrain.nexus.delta.sdk.directives.FileResponse import ch.epfl.bluebrain.nexus.delta.sdk.error.ServiceError.AuthorizationFailed -import ch.epfl.bluebrain.nexus.delta.sdk.http.{HttpClient, HttpClientConfig} +import ch.epfl.bluebrain.nexus.delta.sdk.http.HttpClient import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.{Caller, ServiceAccount} import ch.epfl.bluebrain.nexus.delta.sdk.implicits._ import ch.epfl.bluebrain.nexus.delta.sdk.model._ @@ -62,9 +62,8 @@ class FilesSpec(docker: RemoteStorageDocker) private val alice = User("Alice", realm) "The Files operations bundle" when { - implicit val hcc: HttpClientConfig = httpClientConfig implicit val typedSystem: typed.ActorSystem[Nothing] = system.toTyped - implicit val httpClient: HttpClient = HttpClient() + implicit val httpClient: HttpClient = HttpClient()(httpClientConfig, system) implicit val caller: Caller = Caller(bob, Set(bob, Group("mygroup", realm), Authenticated(realm))) implicit val authTokenProvider: AuthTokenProvider = AuthTokenProvider.anonymousForTest val remoteDiskStorageClient = new RemoteDiskStorageClient(httpClient, authTokenProvider, Credentials.Anonymous) @@ -92,13 +91,14 @@ class FilesSpec(docker: RemoteStorageDocker) val storage: IdSegment = nxv + 
"other-storage" val fetchContext = FetchContextDummy( - Map(project.ref -> project.context), + Map(project.ref -> project.context, project2.ref -> project2.context), Set(deprecatedProject.ref) ) val aclCheck = AclSimpleCheck( (Anonymous, AclAddress.Root, Set(Permissions.resources.read)), (bob, AclAddress.Project(projectRef), Set(diskFields.readPermission.value, diskFields.writePermission.value)), + (bob, AclAddress.Project(projectRefOrg2), Set(diskFields.readPermission.value, diskFields.writePermission.value)), (alice, AclAddress.Project(projectRef), Set(otherRead, otherWrite)) ).accepted @@ -153,10 +153,12 @@ class FilesSpec(docker: RemoteStorageDocker) "create storages for files" in { val payload = diskFieldsJson deepMerge json"""{"capacity": 320, "maxFileSize": 300, "volume": "$path"}""" storages.create(diskId, projectRef, payload).accepted + storages.create(diskId, projectRefOrg2, payload).accepted val payload2 = json"""{"@type": "RemoteDiskStorage", "endpoint": "${docker.hostConfig.endpoint}", "folder": "${RemoteStorageDocker.BucketName}", "readPermission": "$otherRead", "writePermission": "$otherWrite", "maxFileSize": 300, "default": false}""" storages.create(remoteId, projectRef, payload2).accepted + storages.create(remoteId, projectRefOrg2, payload2).accepted } "succeed with the id passed" in { @@ -438,6 +440,56 @@ class FilesSpec(docker: RemoteStorageDocker) } } + "copying a file" should { + + "succeed from disk storage based on a tag" in { + val newFileId = genString() + val destination = CopyFileDestination(projectRefOrg2, Some(newFileId), None, None, None) + val expectedFilename = "myfile.txt" + val expectedAttr = attributes(filename = expectedFilename, projRef = projectRefOrg2) + val expected = mkResource(nxv + newFileId, projectRefOrg2, diskRev, expectedAttr) + + val actual = files.copyTo(FileId("file1", tag, projectRef), destination).accepted + actual shouldEqual expected + + val fetched = files.fetch(FileId(newFileId, projectRefOrg2)).accepted + 
fetched shouldEqual expected + } + + "succeed from disk storage based on a rev and should tag the new file" in { + val (newFileId, newTag) = (genString(), UserTag.unsafe(genString())) + val destination = + CopyFileDestination(projectRefOrg2, Some(newFileId), None, Some(newTag), None) + val expectedFilename = "file.txt" + val expectedAttr = attributes(filename = expectedFilename, projRef = projectRefOrg2) + val expected = mkResource(nxv + newFileId, projectRefOrg2, diskRev, expectedAttr, tags = Tags(newTag -> 1)) + + val actual = files.copyTo(FileId("file1", 2, projectRef), destination).accepted + actual shouldEqual expected + + val fetchedByTag = files.fetch(FileId(newFileId, newTag, projectRefOrg2)).accepted + fetchedByTag shouldEqual expected + } + + "reject if the source file doesn't exist" in { + val destination = CopyFileDestination(projectRefOrg2, None, None, None, None) + files.copyTo(fileIdIri(nxv + "other"), destination).rejectedWith[FileNotFound] + } + + "reject if the destination storage doesn't exist" in { + val destination = CopyFileDestination(projectRefOrg2, None, Some(storage), None, None) + files.copyTo(fileId("file1"), destination).rejected shouldEqual + WrappedStorageRejection(StorageNotFound(storageIri, projectRefOrg2)) + } + + "reject if copying between different storage types" in { + val expectedError = DifferentStorageType(remoteIdIri, StorageType.RemoteDiskStorage, StorageType.DiskStorage) + val destination = CopyFileDestination(projectRefOrg2, None, Some(remoteId), None, None) + files.copyTo(FileId("file1", projectRef), destination).rejected shouldEqual + WrappedStorageRejection(expectedError) + } + } + "deleting a tag" should { "succeed" in { val expected = mkResource(file1, projectRef, diskRev, attributes(), rev = 4) diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala 
b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala index cabb839c63..86a15f625f 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala @@ -43,8 +43,11 @@ import ch.epfl.bluebrain.nexus.testkit.ce.IOFromMap import ch.epfl.bluebrain.nexus.testkit.errors.files.FileErrors.{fileAlreadyExistsError, fileIsNotDeprecatedError} import ch.epfl.bluebrain.nexus.testkit.scalatest.ce.CatsIOValues import io.circe.Json +import io.circe.syntax.KeyOps import org.scalatest._ +import java.util.UUID + class FilesRoutesSpec extends BaseRouteSpec with CancelAfterFailure @@ -88,7 +91,7 @@ class FilesRoutesSpec private val asWriter = addCredentials(OAuth2BearerToken("writer")) private val asS3Writer = addCredentials(OAuth2BearerToken("s3writer")) - private val fetchContext = FetchContextDummy(Map(project.ref -> project.context)) + private val fetchContext = FetchContextDummy(Map(project.ref -> project.context, project2.ref -> project2.context)) private val s3Read = Permission.unsafe("s3/read") private val s3Write = Permission.unsafe("s3/write") @@ -137,8 +140,11 @@ class FilesRoutesSpec clock )(uuidF, typedSystem) private val groupDirectives = - DeltaSchemeDirectives(fetchContext, ioFromMap(uuid -> projectRef.organization), ioFromMap(uuid -> projectRef)) - + DeltaSchemeDirectives( + fetchContext, + ioFromMap(uuid -> projectRef.organization, uuidOrg2 -> projectRefOrg2.organization), + ioFromMap(uuid -> projectRef, uuidOrg2 -> projectRefOrg2) + ) private lazy val routes = routesWithIdentities(identities) private def routesWithIdentities(identities: Identities) = Route.seal(FilesRoutes(stCfg, identities, aclCheck, files, groupDirectives, IndexingAction.noop)) @@ -166,6 +172,12 @@ class FilesRoutesSpec .create(dId, projectRef, 
diskFieldsJson deepMerge defaults deepMerge json"""{"capacity":5000}""")(callerWriter) .void .accepted + storages + .create(dId, projectRefOrg2, diskFieldsJson deepMerge defaults deepMerge json"""{"capacity":5000}""")( + callerWriter + ) + .void + .accepted } "File routes" should { @@ -352,6 +364,60 @@ class FilesRoutesSpec } } + "copy a file" in { + givenAFileInProject(projectRef.toString) { oldFileId => + val newFileId = genString() + val json = Json.obj("sourceProjectRef" := projectRef, "sourceFileId" := oldFileId) + + Put(s"/v1/files/${projectRefOrg2.toString}/$newFileId", json.toEntity) ~> asWriter ~> routes ~> check { + status shouldEqual StatusCodes.Created + val expectedId = project2.base.iri / newFileId + val attr = attributes(filename = oldFileId) + response.asJson shouldEqual fileMetadata(projectRefOrg2, expectedId, attr, diskIdRev) + } + } + } + + "copy a file with generated new Id" in { + val fileCopyUUId = UUID.randomUUID() + withUUIDF(fileCopyUUId) { + givenAFileInProject(projectRef.toString) { oldFileId => + val json = Json.obj("sourceProjectRef" := projectRef, "sourceFileId" := oldFileId) + + Post(s"/v1/files/${projectRefOrg2.toString}/", json.toEntity) ~> asWriter ~> routes ~> check { + status shouldEqual StatusCodes.Created + val expectedId = project2.base.iri / fileCopyUUId.toString + val attr = attributes(filename = oldFileId, id = fileCopyUUId) + response.asJson shouldEqual fileMetadata(projectRefOrg2, expectedId, attr, diskIdRev) + } + } + } + } + + "reject file copy request if tag and rev are present simultaneously" in { + givenAFileInProject(projectRef.toString) { oldFileId => + val json = Json.obj( + "sourceProjectRef" := projectRef, + "sourceFileId" := oldFileId, + "sourceTag" := "mytag", + "sourceRev" := 3 + ) + + val requests = List( + Put(s"/v1/files/${projectRefOrg2.toString}/${genString()}", json.toEntity), + Post(s"/v1/files/${projectRefOrg2.toString}/", json.toEntity) + ) + + forAll(requests) { req => + req ~> asWriter ~> routes ~> 
check { + status shouldEqual StatusCodes.BadRequest + response.asJson shouldEqual + jsonContentOf("/errors/tag-and-rev-copy-error.json", "fileId" -> oldFileId) + } + } + } + } + "deprecate a file" in { givenAFile { id => Delete(s"/v1/files/org/proj/$id?rev=1") ~> asWriter ~> routes ~> check { @@ -632,9 +698,11 @@ class FilesRoutesSpec } } - def givenAFile(test: String => Assertion): Assertion = { + def givenAFile(test: String => Assertion): Assertion = givenAFileInProject("org/proj")(test) + + def givenAFileInProject(projRef: String)(test: String => Assertion): Assertion = { val id = genString() - Put(s"/v1/files/org/proj/$id", entity(s"$id")) ~> asWriter ~> routes ~> check { + Put(s"/v1/files/$projRef/$id", entity(s"$id")) ~> asWriter ~> routes ~> check { status shouldEqual StatusCodes.Created } test(id) diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteStorageClientSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteStorageClientSpec.scala index 365c8f1011..aba6c62fa8 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteStorageClientSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteStorageClientSpec.scala @@ -86,7 +86,9 @@ class RemoteStorageClientSpec(docker: RemoteStorageDocker) } "move a file" in { - client.moveFile(bucket, Uri.Path("my/file-1.txt"), Uri.Path("other/file-1.txt"))(baseUri).accepted shouldEqual + client + .moveFile(bucket, Uri.Path("my/file-1.txt"), Uri.Path("other/file-1.txt"))(baseUri) + .accepted shouldEqual attributes.copy( location = s"file:///app/$BucketName/nexus/other/file-1.txt", digest = NotComputedDigest diff --git a/docs/src/main/paradox/docs/delta/api/assets/files/copy-put.json 
b/docs/src/main/paradox/docs/delta/api/assets/files/copy-put.json new file mode 100644 index 0000000000..f4f6a287c1 --- /dev/null +++ b/docs/src/main/paradox/docs/delta/api/assets/files/copy-put.json @@ -0,0 +1,34 @@ +{ + "@context": [ + "https://bluebrain.github.io/nexus/contexts/files.json", + "https://bluebrain.github.io/nexus/contexts/metadata.json" + ], + "@id": "http://localhost:8080/v1/resources/myorg/myproject/_/newfileid", + "@type": "File", + "_bytes": 5963969, + "_constrainedBy": "https://bluebrain.github.io/nexus/schemas/files.json", + "_createdAt": "2021-05-12T07:30:54.576Z", + "_createdBy": "http://localhost:8080/v1/anonymous", + "_deprecated": false, + "_digest": { + "_algorithm": "SHA-256", + "_value": "d14a7cb4602a2c6e1e7035809aa319d07a6d3c58303ecce7804d2e481cd4965f" + }, + "_filename": "newfile.pdf", + "_incoming": "http://localhost:8080/v1/files/myorg/myproject/newfileid/incoming", + "_location": "file:///tmp/test/nexus/myorg/myproject/c/b/5/c/4/d/8/e/newfile.pdf", + "_mediaType": "application/pdf", + "_origin": "Client", + "_outgoing": "http://localhost:8080/v1/files/myorg/myproject/newfileid/outgoing", + "_project": "http://localhost:8080/v1/projects/myorg/myproject", + "_rev": 1, + "_self": "http://localhost:8080/v1/files/myorg/myproject/newfileid", + "_storage": { + "@id": "http://localhost:8080/v1/resources/myorg/myproject/_/remote", + "@type": "RemoteDiskStorage", + "_rev": 1 + }, + "_updatedAt": "2021-05-12T07:30:54.576Z", + "_updatedBy": "http://localhost:8080/v1/anonymous", + "_uuid": "cb5c4d8e-0189-49ab-b761-c92b2d4f49d2" +} \ No newline at end of file diff --git a/docs/src/main/paradox/docs/delta/api/assets/files/copy-put.sh b/docs/src/main/paradox/docs/delta/api/assets/files/copy-put.sh new file mode 100644 index 0000000000..1de7f91d50 --- /dev/null +++ b/docs/src/main/paradox/docs/delta/api/assets/files/copy-put.sh @@ -0,0 +1,10 @@ +curl -X PUT \ + -H "Content-Type: application/json" \ + 
"http://localhost:8080/v1/files/myorg/myproject/newfileid?storage=remote" -d \ + '{ + "destinationFilename": "newfile.pdf", + "sourceProjectRef": "otherorg/otherproj", + "sourceFileId": "oldfileid", + "sourceTag": "mytag", + "sourceRev": null + }' \ No newline at end of file diff --git a/docs/src/main/paradox/docs/delta/api/files-api.md b/docs/src/main/paradox/docs/delta/api/files-api.md index 1d326b3a7b..450877ac2e 100644 --- a/docs/src/main/paradox/docs/delta/api/files-api.md +++ b/docs/src/main/paradox/docs/delta/api/files-api.md @@ -89,6 +89,62 @@ Request Response : @@snip [created-put.json](assets/files/created-put.json) +## Create copy using POST or PUT + +Create a file copy based on a source file potentially in a different organization. No `MIME` details are necessary since this is not a file upload. Metadata such as the size and digest of the source file are preserved. + +The caller must have the following permissions: +- `files/read` on the source project. +- `storages/write` on the storage in the destination project. + +Either `POST` or `PUT` can be used to copy a file, as with other creation operations. These REST resources are in the context of the **destination** file; the one being created. +- `POST` will generate a new UUID for the file: + ``` + POST /v1/files/{org_label}/{project_label}?storage={storageId}&tag={tagName} + ``` +- `PUT` accepts a `{file_id}` from the user: + ``` + PUT /v1/files/{org_label}/{project_label}/{file_id}?storage={storageId}&tag={tagName} + ``` + +... where +- `{storageId}` optionally selects a specific storage backend for the new file. The `@type` of this storage must be `DiskStorage` or `RemoteDiskStorage`. + If omitted, the default storage of the project is used. The request will be rejected if there's not enough space on the storage. +- `{tagName}` an optional label given to the new file on its first revision. 
+ +Both requests accept the following JSON payload: +```json +{ + "destinationFilename": "{destinationFilename}", + "sourceProjectRef": "{sourceOrg}/{sourceProj}", + "sourceFileId": "{sourceFileId}", + "sourceTag": "{sourceTagName}", + "sourceRev": "{sourceRev}" +} +``` + +... where +- `{destinationFilename}` the optional filename for the new file. If omitted, the source filename will be used. +- `{sourceOrg}` the organization label of the source file. +- `{sourceProj}` the project label of the source file. +- `{sourceFileId}` the unique identifier of the source file. +- `{sourceTagName}` the optional source tag to be fetched. +- `{sourceRev}` the optional source revision to be fetched. + +Notes: + +- The storage type of `sourceFileId` must match that of the destination file. For example, if the destination `storageId` is omitted, the source storage must be of type `DiskStorage` (the default storage type). +- `sourceTagName` and `sourceRev` cannot be simultaneously present. If neither are present, the latest revision of the source file will be used. + +**Example** + +Request +: @@snip [copy-put.sh](assets/files/copy-put.sh) + +Response +: @@snip [copy-put.json](assets/files/copy-put.json) + + ## Link using POST Brings a file existing in a storage to Nexus Delta as a file resource. This operation is supported for files using `S3Storage` and `RemoteDiskStorage`. @@ -106,7 +162,7 @@ POST /v1/files/{org_label}/{project_label}?storage={storageId}&tag={tagName} When not specified, the default storage of the project is used. - `{path}`: String - the relative location (from the point of view of storage folder) on the remote storage where the file exists. - `(unknown)`: String - the name that will be given to the file during linking. This field is optional. When not specified, the original filename is retained. -- `{mediaType}`: String - the MediaType fo the file. This field is optional.
When not specified, Nexus Delta will attempt to detectput +- `{mediaType}`: String - the MediaType of the file. This field is optional. When not specified, Nexus Delta will attempt to detect it. - `{tagName}` an optional label given to the linked file resource on its first revision. **Example** diff --git a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/HttpClient.scala b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/HttpClient.scala index cf1247141f..bfe91b256e 100644 --- a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/HttpClient.scala +++ b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/HttpClient.scala @@ -66,6 +66,11 @@ class HttpClient private (baseUrl: Uri, httpExt: HttpExt)(implicit )(implicit um: FromEntityUnmarshaller[A]): IO[Assertion] = requestAssert(PUT, url, Some(body), identity, extraHeaders)(assertResponse) + def putAndReturn[A](url: String, body: Json, identity: Identity, extraHeaders: Seq[HttpHeader] = jsonHeaders)( + assertResponse: (A, HttpResponse) => (A, Assertion) + )(implicit um: FromEntityUnmarshaller[A]): IO[A] = + requestAssertAndReturn(PUT, url, Some(body), identity, extraHeaders)(assertResponse).map(_._1) + def putIO[A](url: String, body: IO[Json], identity: Identity, extraHeaders: Seq[HttpHeader] = jsonHeaders)( assertResponse: (A, HttpResponse) => Assertion )(implicit um: FromEntityUnmarshaller[A]): IO[Assertion] = { @@ -151,25 +156,25 @@ class HttpClient private (baseUrl: Uri, httpExt: HttpExt)(implicit )(implicit um: FromEntityUnmarshaller[A]): IO[Assertion] = requestAssert(DELETE, url, None, identity, extraHeaders)(assertResponse) - def requestAssert[A]( + def requestAssertAndReturn[A]( method: HttpMethod, url: String, body: Option[Json], identity: Identity, extraHeaders: Seq[HttpHeader] = jsonHeaders - )(assertResponse: (A, HttpResponse) => Assertion)(implicit um: FromEntityUnmarshaller[A]): IO[Assertion] = { + )(assertResponse: (A, HttpResponse) => (A, Assertion))(implicit um: FromEntityUnmarshaller[A]): IO[(A,
Assertion)] = { def buildClue(a: A, response: HttpResponse) = s""" - |Endpoint: ${method.value} $url - |Identity: $identity - |Token: ${Option(tokensMap.get(identity)).map(_.credentials.token()).getOrElse("None")} - |Status code: ${response.status} - |Body: ${body.getOrElse("None")} - |Response: - |$a - |""".stripMargin - - requestJson( + |Endpoint: ${method.value} $url + |Identity: $identity + |Token: ${Option(tokensMap.get(identity)).map(_.credentials.token()).getOrElse("None")} + |Status code: ${response.status} + |Body: ${body.getOrElse("None")} + |Response: + |$a + |""".stripMargin + + requestJson[A, (A, Assertion)]( method, url, body, @@ -179,6 +184,17 @@ class HttpClient private (baseUrl: Uri, httpExt: HttpExt)(implicit ) } + def requestAssert[A]( + method: HttpMethod, + url: String, + body: Option[Json], + identity: Identity, + extraHeaders: Seq[HttpHeader] = jsonHeaders + )(assertResponse: (A, HttpResponse) => Assertion)(implicit um: FromEntityUnmarshaller[A]): IO[Assertion] = + requestAssertAndReturn[A](method, url, body, identity, extraHeaders) { (a, resp) => + (a, assertResponse(a, resp)) + }.map(_._2) + def sparqlQuery[A](url: String, query: String, identity: Identity, extraHeaders: Seq[HttpHeader] = Nil)( assertResponse: (A, HttpResponse) => Assertion )(implicit um: FromEntityUnmarshaller[A]): IO[Assertion] = { diff --git a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/CopyFileSpec.scala b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/CopyFileSpec.scala new file mode 100644 index 0000000000..19b4c60858 --- /dev/null +++ b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/CopyFileSpec.scala @@ -0,0 +1,52 @@ +package ch.epfl.bluebrain.nexus.tests.kg + +import akka.http.scaladsl.model._ +import akka.util.ByteString +import cats.effect.IO +import ch.epfl.bluebrain.nexus.delta.kernel.utils.UrlUtils +import ch.epfl.bluebrain.nexus.tests.HttpClient._ +import ch.epfl.bluebrain.nexus.tests.Identity.storages.Coyote +import 
ch.epfl.bluebrain.nexus.tests.Optics +import io.circe.Json +import io.circe.syntax.KeyOps +import org.scalatest.Assertion + +trait CopyFileSpec { self: StorageSpec => + + "Copying a json file to a different organization" should { + + def givenAProjectWithStorage(test: String => IO[Assertion]): IO[Assertion] = { + val (proj, org) = (genId(), genId()) + val projRef = s"$org/$proj" + createProjects(Coyote, org, proj) >> + createStorages(projRef) >> + test(projRef) + } + + "succeed" in { + givenAProjectWithStorage { destProjRef => + val sourceFileId = "attachment.json" + val destFileId = "attachment2.json" + val destFilename = genId() + + val payload = Json.obj( + "destinationFilename" := destFilename, + "sourceProjectRef" := self.projectRef, + "sourceFileId" := sourceFileId + ) + val uri = s"/files/$destProjRef/$destFileId?storage=nxv:$storageId" + + for { + json <- deltaClient.putAndReturn[Json](uri, payload, Coyote) { (json, response) => + (json, expectCreated(json, response)) + } + returnedId = Optics.`@id`.getOption(json).getOrElse(fail("could not find @id of created resource")) + assertion <- + deltaClient.get[ByteString](s"/files/$destProjRef/${UrlUtils.encode(returnedId)}", Coyote, acceptAll) { + expectDownload(destFilename, ContentTypes.`application/json`, updatedJsonFileContent) + } + } yield assertion + } + } + } +} diff --git a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/DiskStorageSpec.scala b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/DiskStorageSpec.scala index f4f3b64d69..1ae4a01753 100644 --- a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/DiskStorageSpec.scala +++ b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/DiskStorageSpec.scala @@ -8,7 +8,7 @@ import ch.epfl.bluebrain.nexus.tests.iam.types.Permission import io.circe.Json import org.scalatest.Assertion -class DiskStorageSpec extends StorageSpec { +class DiskStorageSpec extends StorageSpec with CopyFileSpec { override def storageName: String = "disk" @@ -32,7 
+32,7 @@ class DiskStorageSpec extends StorageSpec { ): _* ) - override def createStorages: IO[Assertion] = { + override def createStorages(projectRef: String): IO[Assertion] = { val payload = jsonContentOf("kg/storages/disk.json") val payload2 = jsonContentOf("kg/storages/disk-perms.json") diff --git a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/RemoteStorageSpec.scala b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/RemoteStorageSpec.scala index 52abb4c436..ae9a9f0853 100644 --- a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/RemoteStorageSpec.scala +++ b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/RemoteStorageSpec.scala @@ -17,7 +17,7 @@ import org.scalatest.Assertion import scala.annotation.nowarn import scala.sys.process._ -class RemoteStorageSpec extends StorageSpec { +class RemoteStorageSpec extends StorageSpec with CopyFileSpec { override def storageName: String = "external" @@ -60,7 +60,7 @@ class RemoteStorageSpec extends StorageSpec { ): _* ) - override def createStorages: IO[Assertion] = { + override def createStorages(projectRef: String): IO[Assertion] = { val payload = jsonContentOf( "kg/storages/remote-disk.json", "endpoint" -> externalEndpoint, diff --git a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/S3StorageSpec.scala b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/S3StorageSpec.scala index 51af5b062c..d461207fa7 100644 --- a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/S3StorageSpec.scala +++ b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/S3StorageSpec.scala @@ -82,7 +82,7 @@ class S3StorageSpec extends StorageSpec { ): _* ) - override def createStorages: IO[Assertion] = { + override def createStorages(projectRef: String): IO[Assertion] = { val payload = jsonContentOf( "kg/storages/s3.json", "storageId" -> s"https://bluebrain.github.io/nexus/vocabulary/$storageId", diff --git a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/SearchConfigIndexingSpec.scala 
b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/SearchConfigIndexingSpec.scala index 94f62bddaf..bfee666011 100644 --- a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/SearchConfigIndexingSpec.scala +++ b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/SearchConfigIndexingSpec.scala @@ -846,7 +846,7 @@ class SearchConfigIndexingSpec extends BaseIntegrationSpec { } "aggregate presynaptic brain regions" in { - val query = jsonContentOf("kg/search/synapse-agg.json") + val query = jsonContentOf("kg/search/synapse-agg.json") val preSynapticBrainRegionAgg = json"""{ "preSynapticBrainRegions" : { @@ -877,10 +877,10 @@ class SearchConfigIndexingSpec extends BaseIntegrationSpec { * Defines an ES query that searches for the document with the provided id and limits the resulting source to just * the requested field */ - private def queryField(id: String, field: String) = + private def queryField(id: String, field: String) = jsonContentOf("kg/search/id-query-single-field.json", "id" -> id, "field" -> field) - private def queryDocument(id: String) = + private def queryDocument(id: String) = jsonContentOf("kg/search/id-query.json", "id" -> id) private def aggregationIn(json: Json): Option[Json] = diff --git a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/StorageSpec.scala b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/StorageSpec.scala index 630e4260db..42e3104295 100644 --- a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/StorageSpec.scala +++ b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/StorageSpec.scala @@ -39,7 +39,7 @@ abstract class StorageSpec extends BaseIntegrationSpec { def locationPrefix: Option[String] - def createStorages: IO[Assertion] + def createStorages(projectRef: String): IO[Assertion] protected def fileSelf(project: String, id: String): String = { val uri = Uri(s"${config.deltaUri}/files/$project") @@ -48,6 +48,9 @@ abstract class StorageSpec extends BaseIntegrationSpec { private[tests] val 
fileSelfPrefix = fileSelf(projectRef, attachmentPrefix) + val jsonFileContent = """{ "initial": ["is", "a", "test", "file"] }""" + val updatedJsonFileContent = """{ "updated": ["is", "a", "test", "file"] }""" + override def beforeAll(): Unit = { super.beforeAll() createProjects(Coyote, orgId, projId).accepted @@ -55,7 +58,7 @@ abstract class StorageSpec extends BaseIntegrationSpec { "Creating a storage" should { s"succeed for a $storageName storage" in { - createStorages + createStorages(projectRef) } "wait for storages to be indexed" in { @@ -91,9 +94,6 @@ abstract class StorageSpec extends BaseIntegrationSpec { "A json file" should { - val jsonFileContent = """{ "initial": ["is", "a", "test", "file"] }""" - val updatedJsonFileContent = """{ "updated": ["is", "a", "test", "file"] }""" - "be uploaded" in { deltaClient.uploadFile[Json]( s"/files/$projectRef/attachment.json?storage=nxv:$storageId", @@ -424,7 +424,7 @@ abstract class StorageSpec extends BaseIntegrationSpec { s"=?UTF-8?B?$encodedFilename?=" } - private def expectDownload( + protected def expectDownload( expectedFilename: String, expectedContentType: ContentType, expectedContent: String,