From f2c390310089bd95b2dd9a98c9c77760765183c3 Mon Sep 17 00:00:00 2001
From: Simon
Date: Tue, 3 Dec 2024 10:41:07 +0100
Subject: [PATCH] Remove remote storage implementation (#5258)

* Remove remote storage implementation

---------

Co-authored-by: Simon Dumas
---
 .../delta/routes/VersionRoutesSpec.scala      |   4 +-
 .../BlazegraphServiceDependency.scala         |   7 +-
 .../ElasticSearchServiceDependency.scala      |   4 +-
 .../src/main/resources/contexts/files.json    |   1 -
 .../resources/contexts/storages-metadata.json |   1 -
 .../storage/src/main/resources/storage.conf   |  23 -
 .../RemoteStorageServiceDependency.scala      |  15 -
 .../plugins/storage/StoragePluginConfig.scala |   5 +-
 .../plugins/storage/StoragePluginModule.scala |  55 +--
 .../files/FileAttributesUpdateStream.scala    | 118 -----
 .../delta/plugins/storage/files/Files.scala   | 146 +-----
 .../files/model/ComputedFileAttributes.scala  |  15 -
 .../storage/files/model/FileCommand.scala     |  29 --
 .../storage/files/model/FileEvent.scala       |   4 +-
 .../storage/files/model/FileMetadata.scala    |  11 -
 .../storage/files/model/FileRejection.scala   |  57 +--
 .../storage/files/routes/FilesRoutes.scala    |  69 +--
 .../StorageDecoderConfiguration.scala         |   1 -
 .../storages/StorageDeletionTask.scala        |  10 +-
 .../plugins/storage/storages/Storages.scala   |   3 +-
 .../storage/storages/StoragesConfig.scala     |  61 +--
 .../storages/access/RemoteStorageAccess.scala |  28 --
 .../storages/access/StorageAccess.scala       |   9 +-
 .../storage/storages/model/Storage.scala      |  15 +-
 .../storages/model/StorageFields.scala        |  46 +-
 .../storage/storages/model/StorageState.scala |  11 +-
 .../storage/storages/model/StorageValue.scala |  49 +-
 .../storages/operations/FileOperations.scala  |  39 +-
 .../operations/StorageFileRejection.scala     |  53 +--
 .../storages/operations/UploadingFile.scala   |  15 +-
 .../remote/RemoteDiskFileOperations.scala     |  85 ----
 .../client/RemoteDiskStorageClient.scala      | 262 -----------
 .../RemoteDiskStorageFileAttributes.scala     |  58 ---
 .../database/remote-storage-created.json      |  33 --
 .../database/remote-storage-updated.json      |  33 --
 .../storages/remote-storage-expanded.json     |  49 --
 .../storages/remote-storage-fetched.json      |  30 --
 .../resources/storages/remote-storage.json    |  13 -
 .../storages/s3-storage-fetched.json          |   2 +-
 .../storages/sse/remote-storage-created.json  |  28 --
 .../storages/sse/remote-storage-updated.json  |  28 --
 .../storages/storage-remote-state.json        |  43 --
 .../storages-list-not-deprecated.json         |  37 --
 .../resources/storages/storages-list.json     |  29 +-
 .../FileAttributesUpdateStreamSuite.scala     |  95 ----
 .../plugins/storage/files/FileFixtures.scala  |   2 +
 .../plugins/storage/files/FilesSpec.scala     | 335 ++++---------
 .../plugins/storage/files/FilesStmSpec.scala  |  48 +-
 .../files/RemoteStorageFilesSpec.scala        |   3 -
 .../files/mocks/FileOperationsMock.scala      |  13 +-
 .../files/routes/FilesRoutesSpec.scala        |  57 +--
 .../RemoteStorageClientFixtures.scala         |  75 ---
 .../RemoteStorageContainer.scala              |  21 -
 .../storages/StorageDeletionTaskSuite.scala   |   4 +-
 .../storage/storages/StorageFixtures.scala    |  28 +-
 .../storage/storages/StoragesSpec.scala       |  27 +-
 .../storage/storages/StoragesStmSpec.scala    |  90 ++--
 .../access/RemoteDiskStorageAccessSpec.scala  |  33 --
 .../storages/model/StorageFieldsSpec.scala    |  24 -
 .../model/StorageSerializationSuite.scala     |  12 +-
 .../storage/storages/model/StorageSpec.scala  |  26 +-
 .../disk/DiskStorageSaveFileSpec.scala        |   4 +-
 .../remote/RemoteStorageLinkFileSpec.scala    |  75 ---
 .../RemoteStorageSaveAndFetchFileSpec.scala   |  79 ----
 .../operations/remote/RemoteStorageSpec.scala |  18 -
 .../client/RemoteStorageClientSpec.scala      |  86 ----
 .../storages/routes/StoragesRoutesSpec.scala  |  97 ++--
 .../delta/sdk/auth/AuthTokenProvider.scala    |   2 +-
 docs/src/main/paradox/docs/releases/index.md  |   2 +-
 .../docs/releases/v1.11-release-notes.md      |   6 +-
 ship/src/main/resources/ship-default.conf     |  23 -
 .../nexus/ship/files/FileWiring.scala         |  12 +-
 tests/docker/config/delta-postgres.conf       |  11 -
 tests/docker/config/storage.conf              |  27 --
 tests/docker/docker-compose.yml               |  16 -
 .../resources/kg/files/linking-metadata.json  |  31 --
 .../resources/kg/files/linking-notfound.json  |   6 -
 .../kg/files/linking-notsupported.json        |   6 -
 .../resources/kg/files/remote-linked.json     |  31 --
 .../kg/files/remote-updated-linked.json       |  30 --
 .../kg/storages/remote-disk-response.json     |  26 --
 .../resources/kg/storages/remote-disk.json    |   9 -
 .../nexus/tests/kg/VersionSpec.scala          |   3 +-
 .../tests/kg/files/DiskStorageSpec.scala      |  15 -
 .../tests/kg/files/RemoteStorageSpec.scala    | 440 ------------------
 .../nexus/tests/kg/files/S3StorageSpec.scala  |  14 -
 .../nexus/tests/kg/files/StoragesDsl.scala    |  41 --
 87 files changed, 297 insertions(+), 3340 deletions(-)
 delete mode 100644 delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/RemoteStorageServiceDependency.scala
 delete mode 100644 delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileAttributesUpdateStream.scala
 delete mode 100644 delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/ComputedFileAttributes.scala
 delete mode 100644 delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileMetadata.scala
 delete mode 100644 delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/access/RemoteStorageAccess.scala
 delete mode 100644 delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteDiskFileOperations.scala
 delete mode 100644 delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteDiskStorageClient.scala
 delete mode 100644 delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/model/RemoteDiskStorageFileAttributes.scala
 delete mode 100644 delta/plugins/storage/src/test/resources/storages/database/remote-storage-created.json
 delete mode 100644 delta/plugins/storage/src/test/resources/storages/database/remote-storage-updated.json
 delete mode 100644 delta/plugins/storage/src/test/resources/storages/remote-storage-expanded.json
 delete mode 100644 delta/plugins/storage/src/test/resources/storages/remote-storage-fetched.json
 delete mode 100644 delta/plugins/storage/src/test/resources/storages/remote-storage.json
 delete mode 100644 delta/plugins/storage/src/test/resources/storages/sse/remote-storage-created.json
 delete mode 100644 delta/plugins/storage/src/test/resources/storages/sse/remote-storage-updated.json
 delete mode 100644 delta/plugins/storage/src/test/resources/storages/storage-remote-state.json
 delete mode 100644 delta/plugins/storage/src/test/resources/storages/storages-list-not-deprecated.json
 delete mode 100644 delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileAttributesUpdateStreamSuite.scala
 delete mode 100644 delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/RemoteStorageFilesSpec.scala
 delete mode 100644 delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/remotestorage/RemoteStorageClientFixtures.scala
 delete mode 100644 delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/remotestorage/RemoteStorageContainer.scala
 delete mode 100644 delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/access/RemoteDiskStorageAccessSpec.scala
 delete mode 100644 delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageLinkFileSpec.scala
 delete mode 100644 delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageSaveAndFetchFileSpec.scala
 delete mode 100644 delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageSpec.scala
 delete mode 100644 delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteStorageClientSpec.scala
 delete mode 100644 tests/docker/config/storage.conf
 delete mode 100644 tests/src/test/resources/kg/files/linking-metadata.json
 delete mode 100644 tests/src/test/resources/kg/files/linking-notfound.json
 delete mode 100644 tests/src/test/resources/kg/files/linking-notsupported.json
 delete mode 100644 tests/src/test/resources/kg/files/remote-linked.json
 delete mode 100644 tests/src/test/resources/kg/files/remote-updated-linked.json
 delete mode 100644 tests/src/test/resources/kg/storages/remote-disk-response.json
 delete mode 100644 tests/src/test/resources/kg/storages/remote-disk.json
 delete mode 100644 tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/RemoteStorageSpec.scala

diff --git a/delta/app/src/test/scala/ch/epfl/bluebrain/nexus/delta/routes/VersionRoutesSpec.scala b/delta/app/src/test/scala/ch/epfl/bluebrain/nexus/delta/routes/VersionRoutesSpec.scala
index 0d6ec9776a..8d3d99a489 100644
--- a/delta/app/src/test/scala/ch/epfl/bluebrain/nexus/delta/routes/VersionRoutesSpec.scala
+++ b/delta/app/src/test/scala/ch/epfl/bluebrain/nexus/delta/routes/VersionRoutesSpec.scala
@@ -31,7 +31,7 @@ class VersionRoutesSpec extends BaseRouteSpec {
   }
 
   private val dependency2 = new ServiceDependency {
-    override def serviceDescription: IO[ServiceDescription] = IO.pure(ServiceDescription("remoteStorage", "1.0.0"))
+    override def serviceDescription: IO[ServiceDescription] = IO.pure(ServiceDescription("blazegraph", "1.0.0"))
   }
 
   private val descriptionConfig = DescriptionConfig(Name.unsafe("delta"), Name.unsafe("dev"))
@@ -82,7 +82,7 @@ class VersionRoutesSpec extends BaseRouteSpec {
         "delta": "${descriptionConfig.version}",
         "dependencies": {
           "elasticsearch": "unknown",
-          "remoteStorage": "1.0.0"
+          "blazegraph": "1.0.0"
         },
         "plugins": {
           "pluginA": "1.0",
diff --git a/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/BlazegraphServiceDependency.scala b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/BlazegraphServiceDependency.scala
index c14dbf27fa..de20d1af9c 100644
--- a/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/BlazegraphServiceDependency.scala
+++ b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/BlazegraphServiceDependency.scala
@@ -6,11 +6,10 @@ import ch.epfl.bluebrain.nexus.delta.kernel.dependency.ServiceDependency
 import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.client.BlazegraphClient
 
 /**
- * Describes the remote storage [[ServiceDependency]] providing a way to extract the [[ServiceDescription]] from a
- * remote storage calling its ''/version'' endpoint
+ * Describes the Blazegraph [[ServiceDependency]] providing a way to extract the [[ServiceDescription]] from its
+ * ''/status'' endpoint
  */
 class BlazegraphServiceDependency(client: BlazegraphClient) extends ServiceDependency {
 
-  override def serviceDescription: IO[ServiceDescription] =
-    client.serviceDescription
+  override def serviceDescription: IO[ServiceDescription] = client.serviceDescription
 }

diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchServiceDependency.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchServiceDependency.scala
index 648ab13510..019a96b519 100644
--- a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchServiceDependency.scala
+++ b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchServiceDependency.scala
@@ -6,8 +6,8 @@ import ch.epfl.bluebrain.nexus.delta.kernel.dependency.ServiceDependency
 import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.client.ElasticSearchClient
 
 /**
- * Describes the remote storage [[ServiceDependency]] providing a way to extract the [[ServiceDescription]] from a
- * remote storage calling its ''/version'' endpoint
+ * Describes the Elasticsearch [[ServiceDependency]] providing a way to extract the [[ServiceDescription]] from
+ * Elasticsearch
  */
 class ElasticSearchServiceDependency(client: ElasticSearchClient) extends ServiceDependency {
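[Editor's note: a minimal sketch of the `ServiceDependency` contract that the two doc fixes above describe. The trait shape and the `ServiceDescription` constructor are taken from this patch; `PostgresServiceDependency` and its fixed version string are hypothetical, not code from this repository.]

```scala
import cats.effect.IO
import ch.epfl.bluebrain.nexus.delta.kernel.dependency.ComponentDescription.ServiceDescription
import ch.epfl.bluebrain.nexus.delta.kernel.dependency.ServiceDependency

// Hypothetical dependency: each implementation resolves a name/version pair,
// typically by calling the underlying service's status or version endpoint.
class PostgresServiceDependency extends ServiceDependency {
  override def serviceDescription: IO[ServiceDescription] =
    IO.pure(ServiceDescription("postgres", "15.0"))
}
```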
diff --git a/delta/plugins/storage/src/main/resources/storage.conf b/delta/plugins/storage/src/main/resources/storage.conf
index 2438c16ab3..ae1365006f 100644
--- a/delta/plugins/storage/src/main/resources/storage.conf
+++ b/delta/plugins/storage/src/main/resources/storage.conf
@@ -47,29 +47,6 @@ plugins.storage {
     default-bucket = "default-bucket-override-me"
     default-bucket = ${?DEFAULT_BUCKET}
   }
-  # Remote disk storage configuration
-  remote-disk {
-    # to enable remote storage
-    enabled = false
-    # the default endpoint
-    default-endpoint = "http://localhost:8084/v1"
-    # the default credentials for the endpoint
-    credentials {
-      type: "anonymous"
-    }
-    # the default digest algorithm
-    digest-algorithm = "SHA-256"
-    # the default permission required in order to download a file from a remote disk storage
-    default-read-permission = "resources/read"
-    # the default permission required in order to upload a file to a remote disk storage
-    default-write-permission = "files/write"
-    # flag to decide whether or not to show the absolute location of the files in the metadata response
-    show-location = true
-    # the default maximum allowed file size (in bytes) for uploaded files. 10 GB
-    default-max-file-size = 10737418240
-    # Retry delay for digest computation
-    digest-computation-retry-delay = 5s
-  }
   # the storages event log configuration
   event-log = ${app.defaults.event-log}
   # the storages pagination config

diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/RemoteStorageServiceDependency.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/RemoteStorageServiceDependency.scala
deleted file mode 100644
index 25fbea5a6a..0000000000
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/RemoteStorageServiceDependency.scala
+++ /dev/null
@@ -1,15 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.plugins.storage
-
-import cats.effect.IO
-import ch.epfl.bluebrain.nexus.delta.kernel.dependency.ComponentDescription.ServiceDescription
-import ch.epfl.bluebrain.nexus.delta.kernel.dependency.ServiceDependency
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient
-
-/**
- * Describes the remote storage [[ServiceDependency]] providing a way to extract the [[ServiceDescription]] from a
- * remote storage calling its ''/version'' endpoint
- */
-class RemoteStorageServiceDependency(remoteClient: RemoteDiskStorageClient) extends ServiceDependency {
-
-  override def serviceDescription: IO[ServiceDescription] = remoteClient.serviceDescription
-}

diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginConfig.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginConfig.scala
index 0261fa55be..0e569c0028 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginConfig.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginConfig.scala
@@ -34,10 +34,7 @@ object StoragePluginConfig {
       .flatTap { config =>
         IO.whenA(config.storages.storageTypeConfig.amazon.isDefined) {
           logger.info("Amazon S3 storage is enabled")
-        } >>
-          IO.whenA(config.storages.storageTypeConfig.remoteDisk.isDefined) {
-            logger.info("Remote-disk storage is enabled")
-          }
+        }
       }
 
   implicit final val storagePluginConfig: ConfigReader[StoragePluginConfig] =
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginModule.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginModule.scala
index d9b6afb9aa..66abb9c07d 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginModule.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginModule.scala
@@ -4,7 +4,6 @@ import akka.actor.ActorSystem
 import akka.http.scaladsl.model.Uri.Path
 import akka.http.scaladsl.server.Directives.concat
 import cats.effect.{Clock, IO}
-import ch.epfl.bluebrain.nexus.delta.kernel.dependency.ServiceDependency
 import ch.epfl.bluebrain.nexus.delta.kernel.utils.{ClasspathResourceLoader, UUIDF}
 import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.client.ElasticSearchClient
 import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.config.ElasticSearchViewsConfig
@@ -13,31 +12,27 @@ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.contexts.{files => fi
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model._
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.{DelegateFilesRoutes, FilesRoutes, LinkFilesRoutes}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.schemas.{files => filesSchemaId}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.{FileAttributesUpdateStream, Files, FormDataExtractor, MediaTypeDetector}
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.{Files, FormDataExtractor, MediaTypeDetector}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.{ShowFileLocation, StorageTypeConfig}
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages._
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.access.{S3StorageAccess, StorageAccess}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.contexts.{storages => storageCtxId, storagesMetadata => storageMetaCtxId}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model._
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.{FileOperations, LinkFileAction}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk.DiskFileOperations
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.RemoteDiskFileOperations
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3.client.S3StorageClient
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3.{S3FileOperations, S3LocationGenerator}
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.{FileOperations, LinkFileAction}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.routes.StoragesRoutes
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.schemas.{storage => storagesSchemaId}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages._
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.access.{RemoteStorageAccess, S3StorageAccess, StorageAccess}
 import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.api.JsonLdApi
 import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.{ContextValue, RemoteContextResolution}
 import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering
 import ch.epfl.bluebrain.nexus.delta.sdk.IndexingAction.AggregateIndexingAction
 import ch.epfl.bluebrain.nexus.delta.sdk._
 import ch.epfl.bluebrain.nexus.delta.sdk.acls.AclCheck
-import ch.epfl.bluebrain.nexus.delta.sdk.auth.AuthTokenProvider
 import ch.epfl.bluebrain.nexus.delta.sdk.deletion.ProjectDeletionTask
 import ch.epfl.bluebrain.nexus.delta.sdk.directives.DeltaSchemeDirectives
 import ch.epfl.bluebrain.nexus.delta.sdk.fusion.FusionConfig
-import ch.epfl.bluebrain.nexus.delta.kernel.http.HttpClient
 import ch.epfl.bluebrain.nexus.delta.sdk.identities.Identities
 import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.ServiceAccount
 import ch.epfl.bluebrain.nexus.delta.sdk.jws.JWSPayloadHelper
@@ -49,7 +44,6 @@ import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.ApiMappings
 import ch.epfl.bluebrain.nexus.delta.sdk.resolvers.ResolverContextResolution
 import ch.epfl.bluebrain.nexus.delta.sdk.sse.SseEncoder
 import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label
-import ch.epfl.bluebrain.nexus.delta.sourcing.stream.Supervisor
 import ch.epfl.bluebrain.nexus.delta.sourcing.{ScopedEventLog, Transactors}
 import com.typesafe.config.Config
 import izumi.distage.model.definition.{Id, ModuleDef}
@@ -67,10 +61,6 @@ class StoragePluginModule(priority: Int) extends ModuleDef {
 
   make[ShowFileLocation].from { cfg: StorageTypeConfig => cfg.showFileLocation }
 
-  make[HttpClient].named("storage").from { (as: ActorSystem) =>
-    HttpClient.noRetry(compression = false)(as)
-  }
-
   make[S3StorageClient].fromResource { (cfg: StoragePluginConfig) =>
     S3StorageClient.resource(cfg.storages.storageTypeConfig.amazon)
   }
@@ -80,9 +70,7 @@ class StoragePluginModule(priority: Int) extends ModuleDef {
     new S3LocationGenerator(prefix)
   }
 
-  make[StorageAccess].from { (remoteClient: RemoteDiskStorageClient, s3Client: S3StorageClient) =>
-    StorageAccess(RemoteStorageAccess(remoteClient), S3StorageAccess(s3Client))
-  }
+  make[StorageAccess].from { (s3Client: S3StorageClient) => StorageAccess(S3StorageAccess(s3Client)) }
 
   make[Storages]
     .fromEffect {
@@ -179,17 +167,13 @@ class StoragePluginModule(priority: Int) extends ModuleDef {
     DiskFileOperations.mk(as, uuidF)
   }
 
-  make[RemoteDiskFileOperations].from { (client: RemoteDiskStorageClient, uuidF: UUIDF) =>
-    RemoteDiskFileOperations.mk(client)(uuidF)
-  }
-
   make[S3FileOperations].from {
     (client: S3StorageClient, locationGenerator: S3LocationGenerator, uuidF: UUIDF, as: ActorSystem) =>
      S3FileOperations.mk(client, locationGenerator)(as, uuidF)
   }
 
-  make[FileOperations].from { (disk: DiskFileOperations, remoteDisk: RemoteDiskFileOperations, s3: S3FileOperations) =>
-    FileOperations.apply(disk, remoteDisk, s3)
+  make[FileOperations].from { (disk: DiskFileOperations, s3: S3FileOperations) =>
+    FileOperations.apply(disk, s3)
   }
 
   make[MediaTypeDetector].from { (cfg: StoragePluginConfig) =>
@@ -226,11 +210,6 @@ class StoragePluginModule(priority: Int) extends ModuleDef {
     )(uuidF)
   }
 
-  make[FileAttributesUpdateStream].fromEffect {
-    (files: Files, storages: Storages, storageTypeConfig: StorageTypeConfig, supervisor: Supervisor) =>
-      FileAttributesUpdateStream.start(files, storages, storageTypeConfig.remoteDisk, supervisor)
-  }
-
   make[FilesRoutes].from {
     (
       showLocation: ShowFileLocation,
@@ -302,26 +281,6 @@ class StoragePluginModule(priority: Int) extends ModuleDef {
 
   many[ResourceShift[_, _, _]].ref[File.Shift]
 
-  make[RemoteDiskStorageClient].from {
-    (
-      client: HttpClient @Id("storage"),
-      as: ActorSystem,
-      authTokenProvider: AuthTokenProvider,
-      cfg: StorageTypeConfig
-    ) =>
-      RemoteDiskStorageClient(client, authTokenProvider, cfg.remoteDisk)(as)
-  }
-
-  many[ServiceDependency].addSet {
-    (
-      cfg: StorageTypeConfig,
-      remoteStorageClient: RemoteDiskStorageClient
-    ) =>
-      cfg.remoteDisk.fold(Set.empty[ServiceDependency]) { _ =>
-        Set(new RemoteStorageServiceDependency(remoteStorageClient))
-      }
-  }
-
   many[ScopeInitialization].addSet { (storages: Storages, serviceAccount: ServiceAccount, cfg: StoragePluginConfig) =>
     Option.when(cfg.enableDefaultCreation)(StorageScopeInitialization(storages, serviceAccount, cfg.defaults)).toSet
   }
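[Editor's note: the `StoragePluginModule` hunks above rewire the distage object graph so `FileOperations` is assembled from disk and S3 components only. A self-contained sketch of the binding pattern, with toy types standing in for the plugin's components; the `make[X].from { (a, b) => ... }` shape mirrors the patch itself.]

```scala
import izumi.distage.model.definition.ModuleDef

trait DiskOps
trait S3Ops
final class LocalDiskOps extends DiskOps
final class BucketS3Ops extends S3Ops
final case class FileOps(disk: DiskOps, s3: S3Ops)

object ToyStorageModule extends ModuleDef {
  // Each `make` declares how one component is built; distage resolves the
  // graph from the lambdas' parameter types, so dropping the remote-disk
  // support only required deleting its bindings and shrinking the aggregate.
  make[DiskOps].from[LocalDiskOps]
  make[S3Ops].from[BucketS3Ops]
  make[FileOps].from { (disk: DiskOps, s3: S3Ops) => FileOps(disk, s3) }
}
```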
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileAttributesUpdateStream.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileAttributesUpdateStream.scala
deleted file mode 100644
index bb9b18d4c4..0000000000
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileAttributesUpdateStream.scala
+++ /dev/null
@@ -1,118 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.plugins.storage.files
-
-import cats.effect.IO
-import ch.epfl.bluebrain.nexus.delta.kernel.Logger
-import ch.epfl.bluebrain.nexus.delta.kernel.cache.LocalCache
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection._
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileState
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.Storages
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.RemoteDiskStorageConfig
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{Storage, StorageType}
-import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegmentRef
-import ch.epfl.bluebrain.nexus.delta.sourcing.model.{ProjectRef, ResourceRef, SuccessElemStream}
-import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset
-import ch.epfl.bluebrain.nexus.delta.sourcing.stream._
-import fs2.Stream
-import retry.RetryPolicies.constantDelay
-import retry.implicits.retrySyntaxError
-
-import scala.concurrent.duration.FiniteDuration
-
-/**
- * Stream that attempts to update file attributes asynchronously for linked files in remote storages
- */
-sealed trait FileAttributesUpdateStream
-
-object FileAttributesUpdateStream {
-  private val logger = Logger[FileAttributesUpdateStream]
-
-  private val metadata: ProjectionMetadata = ProjectionMetadata("system", "file-attributes-update", None, None)
-
-  final object Disabled extends FileAttributesUpdateStream
-
-  final class Impl(
-      streamFiles: Offset => SuccessElemStream[FileState],
-      fetchStorage: (ProjectRef, ResourceRef.Revision) => IO[Storage],
-      updateAttributes: (FileState, Storage) => IO[Unit],
-      retryDelay: FiniteDuration
-  ) extends FileAttributesUpdateStream {
-
-    def run(offset: Offset): Stream[IO, Elem[Unit]] =
-      streamFiles(offset).evalMap {
-        _.evalMapFilter {
-          processFile
-        }
-      }
-
-    private[files] def processFile(file: FileState) = {
-      if (file.storageType == StorageType.RemoteDiskStorage && !file.attributes.digest.computed && !file.deprecated) {
-        for {
-          _       <- logger.debug(s"Attempt to update attributes for file ${file.id} in ${file.project}")
-          storage <- fetchStorage(file.project, file.storage)
-          _       <- updateAttributes(file, storage)
-          _       <- logger.info(s"Attributes for file ${file.id} in ${file.project} have been updated.")
-        } yield Some(())
-      } else IO.none
-    }.retryingOnAllErrors(
-      constantDelay(retryDelay),
-      {
-        case (_: DigestNotComputed, details) =>
-          IO.whenA(details.retriesSoFar % 10 == 0)(
-            logger.info(
-              s"Digest for file '${file.id}' in '${file.project}' is not yet completed after '${details.cumulativeDelay}'."
-            )
-          )
-        case (error, details)                =>
-          logger.error(error)(
-            s"Digest for file '${file.id}' in '${file.project}' ended in an error at attempt '${details.retriesSoFar}' and after '${details.cumulativeDelay}'."
-          )
-      }
-    )
-  }
-
-  def apply(
-      files: Files,
-      storages: Storages,
-      configOpt: Option[RemoteDiskStorageConfig]
-  ): IO[FileAttributesUpdateStream] =
-    configOpt match {
-      case Some(config) =>
-        LocalCache[(ProjectRef, ResourceRef.Revision), Storage]().map { storageCache =>
-          def fetchStorage(project: ProjectRef, id: ResourceRef.Revision) =
-            storageCache.getOrElseUpdate(
-              (project, id),
-              storages
-                .fetch(IdSegmentRef(id), project)
-                .map(_.value)
-            )
-
-          new Impl(
-            files.states,
-            fetchStorage,
-            files.updateAttributes,
-            config.digestComputationRetryDelay
-          )
-        }
-      case None         => IO.pure(Disabled)
-    }
-
-  def start(
-      files: Files,
-      storages: Storages,
-      configOpt: Option[RemoteDiskStorageConfig],
-      supervisor: Supervisor
-  ): IO[FileAttributesUpdateStream] =
-    apply(files, storages, configOpt).flatTap {
-      case enabled: Impl    =>
-        supervisor
-          .run(
-            CompiledProjection.fromStream(
-              metadata,
-              ExecutionStrategy.PersistentSingleNode,
-              enabled.run
-            )
-          )
-      case _: Disabled.type =>
-        logger.debug("Remote storage is disabled, the update attributes task is disabled too.")
-    }
-}
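[Editor's note: the deleted stream above drove its polling loop with cats-retry. A condensed sketch of that mechanism, retrying an arbitrary `IO` action at a constant interval and logging every 10th attempt, as the removed code did; the action, interval, and messages are placeholders.]

```scala
import cats.effect.IO
import retry.RetryPolicies.constantDelay
import retry.implicits._

import scala.concurrent.duration._

object DigestPolling {
  // constantDelay never gives up, matching the removed stream's behaviour of
  // polling until the digest is finally computed (or a different error occurs).
  def pollUntilComputed(fetchAttributes: IO[Unit]): IO[Unit] =
    fetchAttributes.retryingOnAllErrors(
      constantDelay[IO](5.seconds),
      (error: Throwable, details: retry.RetryDetails) =>
        IO.whenA(details.retriesSoFar % 10 == 0)(
          IO.println(s"still waiting after ${details.cumulativeDelay}: $error")
        )
    )
}
```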
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala
index df448a2952..1fb143fb85 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala
@@ -4,6 +4,7 @@ import akka.http.scaladsl.model.ContentTypes.`application/octet-stream`
 import akka.http.scaladsl.model.Uri
 import cats.effect.{Clock, IO}
 import cats.syntax.all._
+import ch.epfl.bluebrain.nexus.delta.kernel.AkkaSource
 import ch.epfl.bluebrain.nexus.delta.kernel.kamon.KamonMetricComponent
 import ch.epfl.bluebrain.nexus.delta.kernel.utils.UUIDF
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.Files._
@@ -16,12 +17,11 @@ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection._
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model._
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.schemas.{files => fileSchema}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{DigestAlgorithm, Storage, StorageType}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.{FetchAttributeRejection, FetchFileRejection, SaveFileRejection}
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.{FetchFileRejection, SaveFileRejection}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations._
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{FetchStorage, Storages}
 import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri
 import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.ContextValue
-import ch.epfl.bluebrain.nexus.delta.kernel.AkkaSource
 import ch.epfl.bluebrain.nexus.delta.sdk.directives.FileResponse
 import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller
 import ch.epfl.bluebrain.nexus.delta.sdk.implicits._
@@ -115,59 +115,6 @@ final class Files(
     } yield res
   }.span("createFile")
 
-  /**
-   * Create a new file linking where the id is self generated
-   *
-   * @param storageId
-   *   the optional storage identifier to expand as the id of the storage. When None, the default storage is used
-   * @param projectRef
-   *   the project where the file will belong
-   * @param path
-   *   the path where the file is located inside the storage
-   * @param tag
-   *   the optional tag this file link is being created with, attached to the current revision
-   */
-  def createLegacyLink(
-      storageId: Option[IdSegment],
-      projectRef: ProjectRef,
-      description: FileDescription,
-      path: Uri.Path,
-      tag: Option[UserTag]
-  )(implicit caller: Caller): IO[FileResource] = {
-    for {
-      pc         <- fetchContext.onCreate(projectRef)
-      iri        <- generateId(pc)
-      storageIri <- storageId.traverse(expandStorageIri(_, pc))
-      res        <- createLegacyLink(iri, projectRef, storageIri, description, path, tag)
-    } yield res
-  }.span("createLink")
-
-  /**
-   * Create a new file linking it from an existing file in a storage
-   *
-   * @param id
-   *   the file identifier to expand as the iri of the file
-   * @param storageId
-   *   the optional storage identifier to expand as the id of the storage. When None, the default storage is used
-   * @param path
-   *   the path where the file is located inside the storage
-   * @param tag
-   *   the optional tag this file link is being created with, attached to the current revision
-   */
-  def createLegacyLink(
-      id: FileId,
-      storageId: Option[IdSegment],
-      description: FileDescription,
-      path: Uri.Path,
-      tag: Option[UserTag]
-  )(implicit caller: Caller): IO[FileResource] = {
-    for {
-      (iri, pc)  <- id.expandIri(fetchContext.onCreate)
-      storageIri <- storageId.traverse(expandStorageIri(_, pc))
-      res        <- createLegacyLink(iri, id.project, storageIri, description, path, tag)
-    } yield res
-  }.span("createLink")
-
   /**
    * Grants a delegation to create the physical file on the given storage
    * @param id
@@ -305,37 +252,6 @@ final class Files(
     } yield res
   }.span("updateLinkedFile")
 
-  /**
-   * Update a new file linking it from an existing file in a storage
-   *
-   * @param id
-   *   the file identifier to expand as the iri of the file
-   * @param storageId
-   *   the optional storage identifier to expand as the id of the storage. When None, the default storage is used
-   * @param rev
-   *   the current revision of the file
-   * @param path
-   *   the path where the file is located inside the storage
-   */
-  def updateLegacyLink(
-      id: FileId,
-      storageId: Option[IdSegment],
-      description: FileDescription,
-      path: Uri.Path,
-      rev: Int,
-      tag: Option[UserTag]
-  )(implicit caller: Caller): IO[FileResource] = {
-    for {
-      (iri, pc)             <- id.expandIri(fetchContext.onModify)
-      storageIri            <- storageId.traverse(expandStorageIri(_, pc))
-      _                     <- test(UpdateFile(iri, id.project, testStorageRef, testStorageType, testAttributes, rev, caller.subject, tag))
-      (storageRef, storage) <- fetchStorage.onWrite(storageIri, id.project)
-      metadata              <- legacyLinkFile(storage, path, description.filename, iri)
-      attributes            = FileAttributes.from(description.filename, description.mediaType, description.metadata, metadata)
-      res                   <- eval(UpdateFile(iri, id.project, storageRef, storage.tpe, attributes, rev, caller.subject, tag))
-    } yield res
-  }.span("updateLink")
-
   /**
    * Add a tag to an existing file
    *
@@ -459,33 +375,6 @@ final class Files(
     }
   }
 
-  private def createLegacyLink(
-      iri: Iri,
-      project: ProjectRef,
-      storageIri: Option[Iri],
-      description: FileDescription,
-      path: Uri.Path,
-      tag: Option[UserTag]
-  )(implicit caller: Caller): IO[FileResource] =
-    for {
-      _                     <- test(CreateFile(iri, project, testStorageRef, testStorageType, testAttributes, caller.subject, tag))
-      (storageRef, storage) <- fetchStorage.onWrite(storageIri, project)
-      storageMetadata       <- legacyLinkFile(storage, path, description.filename, iri)
-      fileAttributes        =
-        FileAttributes.from(description.filename, description.mediaType, description.metadata, storageMetadata)
-      res                   <- eval(CreateFile(iri, project, storageRef, storage.tpe, fileAttributes, caller.subject, tag))
-    } yield res
-
-  private def legacyLinkFile(
-      storage: Storage,
-      path: Uri.Path,
-      filename: String,
-      fileId: Iri
-  ): IO[FileStorageMetadata] =
-    fileOperations.legacyLink(storage, path, filename).adaptError { case e: StorageFileRejection =>
-      LinkRejection(fileId, storage.id, e)
-    }
-
   private def eval(cmd: FileCommand): IO[FileResource] = FilesLog.eval(log)(cmd)
 
   private def test(cmd: FileCommand) = log.dryRun(cmd.project, cmd.id, cmd)
@@ -502,23 +391,6 @@ final class Files(
 
   def states(offset: Offset): SuccessElemStream[FileState] = log.states(Scope.root, offset)
 
-  private[files] def updateAttributes(f: FileState, storage: Storage): IO[Unit] = {
-    val attr = f.attributes
-    for {
-      _         <- IO.raiseWhen(f.attributes.digest.computed)(DigestAlreadyComputed(f.id))
-      newAttr   <- fetchAttributes(storage, attr, f.id)
-      mediaType = attr.mediaType orElse Some(newAttr.mediaType)
-      command   = UpdateFileAttributes(f.id, f.project, mediaType, newAttr.bytes, newAttr.digest, f.rev, f.updatedBy)
-      _         <- log.evaluate(f.project, f.id, command)
-    } yield ()
-  }
-
-  private def fetchAttributes(storage: Storage, attr: FileAttributes, fileId: Iri): IO[ComputedFileAttributes] = {
-    fileOperations
-      .fetchAttributes(storage, attr)
-      .adaptError { case e: FetchAttributeRejection => FetchAttributesRejection(fileId, storage.id, e) }
-  }
-
   def cancelEvent(command: CancelEvent): IO[Unit] = log.evaluate(command.project, command.id, command).void
 }
@@ -624,19 +496,6 @@ object Files {
         .map(FileUpdated(c.id, c.project, c.storage, c.storageType, c.attributes, s.rev + 1, _, c.subject, c.tag))
     }
 
-    def updateAttributes(c: UpdateFileAttributes) = state match {
-      case None                                    => IO.raiseError(FileNotFound(c.id, c.project))
-      case Some(s) if s.rev != c.rev               => IO.raiseError(IncorrectRev(c.rev, s.rev))
-      case Some(s) if s.deprecated                 => IO.raiseError(FileIsDeprecated(c.id))
-      case Some(s) if s.attributes.digest.computed => IO.raiseError(DigestAlreadyComputed(s.id))
-      case Some(s) if !c.digest.computed           => IO.raiseError(DigestNotComputed(s.id))
-      case Some(s)                                 =>
-        // format: off
-        clock.realTimeInstant
-          .map(FileAttributesUpdated(c.id, c.project, s.storage, s.storageType, c.mediaType, c.bytes, c.digest, s.rev + 1, _, c.subject))
-        // format: on
-    }
-
     def updateCustomMetadata(c: UpdateFileCustomMetadata) = state match {
       case None                      => IO.raiseError(FileNotFound(c.id, c.project))
       case Some(s) if s.rev != c.rev => IO.raiseError(IncorrectRev(c.rev, s.rev))
@@ -706,7 +565,6 @@ object Files {
     cmd match {
       case c: CreateFile               => create(c)
       case c: UpdateFile               => update(c)
-      case c: UpdateFileAttributes     => updateAttributes(c)
      case c: UpdateFileCustomMetadata => updateCustomMetadata(c)
       case c: TagFile                  => tag(c)
       case c: DeleteFileTag            => deleteTag(c)
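[Editor's note: the `updateAttributes` handler removed above follows the same guard chain as the surviving commands. A toy sketch of this optimistic-concurrency pattern; `State`, `Cmd`, and the errors are simplified stand-ins, not the project's types.]

```scala
import cats.effect.IO

object ToyStateMachine {
  final case class State(rev: Int, deprecated: Boolean)
  final case class Cmd(rev: Int)
  final case class IncorrectRev(provided: Int, expected: Int)
      extends Exception(s"expected rev $expected, got $provided")

  def evaluate(state: Option[State], cmd: Cmd): IO[State] = state match {
    case None                        => IO.raiseError(new NoSuchElementException("not found"))
    case Some(s) if s.rev != cmd.rev => IO.raiseError(IncorrectRev(cmd.rev, s.rev)) // stale client
    case Some(s) if s.deprecated     => IO.raiseError(new IllegalStateException("deprecated"))
    case Some(s)                     => IO.pure(s.copy(rev = s.rev + 1)) // accept and bump the revision
  }
}
```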
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/ComputedFileAttributes.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/ComputedFileAttributes.scala
deleted file mode 100644
index 45184a010b..0000000000
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/ComputedFileAttributes.scala
+++ /dev/null
@@ -1,15 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model
-
-import akka.http.scaladsl.model.ContentType
-
-/**
- * Holds metadata information related to the file computed attributes.
- *
- * @param mediaType
- *   the media type of the file
- * @param bytes
- *   the size of the file file in bytes
- * @param digest
- *   the digest information of the file
- */
-final case class ComputedFileAttributes(mediaType: ContentType, bytes: Long, digest: Digest)

diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileCommand.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileCommand.scala
index 0acca69f2a..cb3d537bf4 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileCommand.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileCommand.scala
@@ -1,6 +1,5 @@
 package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model
 
-import akka.http.scaladsl.model.ContentType
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageType
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageWrite
 import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri
@@ -155,34 +154,6 @@ object FileCommand {
       tag: Option[UserTag]
   ) extends FileCommand
 
-  /**
-   * Command to update an asynchronously computed file attributes. This command gets issued when linking a file using a
-   * ''RemoteDiskStorage''. Since the attributes cannot be computed synchronously, ''NotComputedDigest'' and wrong size
-   * are returned
-   *
-   * @param id
-   *   the file identifier
-   * @param project
-   *   the project the file belongs to
-   * @param mediaType
-   *   the optional media type of the file
-   * @param bytes
-   *   the size of the file file in bytes
-   * @param digest
-   *   the digest information of the file
-   * @param subject
-   *   the identity associated to this command
-   */
-  final case class UpdateFileAttributes(
-      id: Iri,
-      project: ProjectRef,
-      mediaType: Option[ContentType],
-      bytes: Long,
-      digest: Digest,
-      rev: Int,
-      subject: Subject
-  ) extends FileCommand
-
   /**
    * Command to tag a file
    *

diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileEvent.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileEvent.scala
index e0deae55e0..d2614d0219 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileEvent.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileEvent.scala
@@ -105,7 +105,7 @@ object FileEvent {
    * @param project
    *   the project the file belongs to
    * @param storage
-   *   the reference to the remote storage used
+   *   the reference to the storage used
    * @param storageType
    *   the type of storage
    * @param attributes
@@ -139,7 +139,7 @@ object FileEvent {
    * @param project
    *   the project the file belongs to
    * @param storage
-   *   the reference to the remote storage used
+   *   the reference to the storage used
    * @param storageType
    *   the type of storage
    * @param metadata

diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileMetadata.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileMetadata.scala
deleted file mode 100644
index 1342c50446..0000000000
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileMetadata.scala
+++ /dev/null
@@ -1,11 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model
-
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin
-
-case class FileMetadata(bytes: Long, digest: Digest, origin: FileAttributesOrigin)
-
-object FileMetadata {
-  def from(attributes: FileAttributes): FileMetadata = {
-    FileMetadata(attributes.bytes, attributes.digest, attributes.origin)
-  }
-}

diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileRejection.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileRejection.scala
index 1ff3b9be3e..d0e6923d5a 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileRejection.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/model/FileRejection.scala
@@ -92,15 +92,6 @@ object FileRejection {
         s"The digest computation for the current file '$id' is not yet complete; the file cannot be updated"
       )
 
-  /**
-   * Signals that the digest of the file has already been computed
-   *
-   * @param id
-   *   the file identifier
-   */
-  final case class DigestAlreadyComputed(id: Iri)
-      extends FileRejection(s"The digest computation for the current file '$id' has already been completed")
-
   /**
    * Rejection returned when a subject intends to perform an operation on the current file, but either provided an
    * incorrect revision or a concurrent update won over this attempt.
@@ -168,22 +159,6 @@ object FileRejection {
       Some(rejection.loggedDetails)
     )
 
-  /**
-   * Rejection returned when interacting with the storage operations bundle to fetch a file attributes from a storage
-   *
-   * @param id
-   *   the file id
-   * @param storageId
-   *   the storage id
-   * @param rejection
-   *   the rejection which occurred with the storage
-   */
-  final case class FetchAttributesRejection(
-      id: Iri,
-      storageId: Iri,
-      rejection: StorageFileRejection.FetchAttributeRejection
-  ) extends FileRejection(s"Attributes of file '$id' could not be fetched using storage '$storageId'")
-
   /**
    * Rejection returned when interacting with the storage operations bundle to save a file in a storage
    *
@@ -197,22 +172,6 @@ object FileRejection {
   final case class SaveRejection(id: Iri, storageId: Iri, rejection: StorageFileRejection.SaveFileRejection)
       extends FileRejection(s"File '$id' could not be saved using storage '$storageId'", Some(rejection.loggedDetails))
 
-  /**
-   * Rejection returned when interacting with the storage operations bundle to move a file in a storage
-   *
-   * @param id
-   *   the file id
-   * @param storageId
-   *   the storage id
-   * @param rejection
-   *   the rejection which occurred with the storage
-   */
-  final case class LinkRejection(id: Iri, storageId: Iri, rejection: StorageFileRejection)
-      extends FileRejection(
-        s"File '$id' could not be linked using storage '$storageId'",
-        Some(rejection.loggedDetails)
-      )
-
   /**
    * Rejection returned when attempting to link a file without providing a filename or a path that ends with a
    * filename.
@@ -227,18 +186,14 @@ object FileRejection {
     val tpe = ClassUtils.simpleName(r)
     val obj = JsonObject(keywords.tpe -> tpe.asJson, "reason" -> r.reason.asJson)
     r match {
-      case WrappedAkkaRejection(rejection)           => rejection.asJsonObject
-      case SaveRejection(_, _, rejection)            =>
-        obj.add(keywords.tpe, ClassUtils.simpleName(rejection).asJson).add("details", rejection.loggedDetails.asJson)
-      case FetchRejection(_, _, rejection)           =>
-        obj.add(keywords.tpe, ClassUtils.simpleName(rejection).asJson).add("details", rejection.loggedDetails.asJson)
-      case FetchAttributesRejection(_, _, rejection) =>
+      case WrappedAkkaRejection(rejection)  => rejection.asJsonObject
+      case SaveRejection(_, _, rejection)   =>
         obj.add(keywords.tpe, ClassUtils.simpleName(rejection).asJson).add("details", rejection.loggedDetails.asJson)
-      case LinkRejection(_, _, rejection)            =>
+      case FetchRejection(_, _, rejection)  =>
         obj.add(keywords.tpe, ClassUtils.simpleName(rejection).asJson).add("details", rejection.loggedDetails.asJson)
-      case IncorrectRev(provided, expected)          => obj.add("provided", provided.asJson).add("expected", expected.asJson)
-      case _: FileNotFound                           => obj.add(keywords.tpe, "ResourceNotFound".asJson)
-      case _                                         => obj
+      case IncorrectRev(provided, expected) => obj.add("provided", provided.asJson).add("expected", expected.asJson)
+      case _: FileNotFound                  => obj.add(keywords.tpe, "ResourceNotFound".asJson)
+      case _                                => obj
     }
   }
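[Editor's note: the encoder hunk above keeps the pattern of starting from a base `JsonObject` and enriching it per rejection. A simplified, self-contained sketch of that circe pattern using plain field names in place of the project's JSON-LD keywords; the `Rejection` hierarchy here is illustrative.]

```scala
import io.circe.syntax._
import io.circe.{Encoder, JsonObject}

object RejectionJson {
  sealed abstract class Rejection(val reason: String)
  final case class NotFound(id: String)                 extends Rejection(s"'$id' not found")
  final case class BadRev(provided: Int, expected: Int) extends Rejection("incorrect revision")

  implicit val rejectionEncoder: Encoder.AsObject[Rejection] =
    Encoder.AsObject.instance { r =>
      // Every rejection carries a type discriminator and a human-readable reason
      val base = JsonObject("@type" -> r.getClass.getSimpleName.asJson, "reason" -> r.reason.asJson)
      r match {
        // Specific rejections add extra fields, as the file rejections above do
        case BadRev(p, e) => base.add("provided", p.asJson).add("expected", e.asJson)
        case _            => base
      }
    }
}
```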
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutes.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutes.scala
index 31f6356760..d8b2789679 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutes.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutes.scala
@@ -1,9 +1,8 @@
 package ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes
 
+import akka.http.scaladsl.model.MediaRange
 import akka.http.scaladsl.model.StatusCodes.Created
-import akka.http.scaladsl.model.Uri.Path
 import akka.http.scaladsl.model.headers.{`Content-Length`, Accept}
-import akka.http.scaladsl.model.{ContentType, MediaRange}
 import akka.http.scaladsl.server.Directives.{extractRequestEntity, optionalHeaderValueByName, provide, reject}
 import akka.http.scaladsl.server._
 import cats.effect.IO
@@ -13,7 +12,6 @@ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection._
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model._
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.permissions.{read => Read, write => Write}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.FileUriDirectives._
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.FilesRoutes.LinkFileRequest.{fileDescriptionFromRequest, linkFileDecoder}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.routes.FilesRoutes._
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.{schemas, FileResource, Files}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragePluginExceptionHandler.handleStorageExceptions
@@ -31,9 +29,7 @@ import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller
 import ch.epfl.bluebrain.nexus.delta.sdk.implicits._
 import ch.epfl.bluebrain.nexus.delta.sdk.model.BaseUri
 import ch.epfl.bluebrain.nexus.delta.sdk.model.routes.Tag
-import io.circe.generic.extras.Configuration
-import io.circe.generic.extras.semiauto.deriveConfiguredDecoder
-import io.circe.{parser, Decoder}
+import io.circe.parser
 
 /**
  * The files routes
@@ -77,18 +73,6 @@ final class FilesRoutes(
             concat(
               (pathEndOrSingleSlash & post & noRev & storageParam & indexingMode & tagParam) { (storage, mode, tag) =>
                 concat(
-                  // Link a file without id segment
-                  entity(as[LinkFileRequest]) { linkRequest =>
-                    emit(
-                      Created,
-                      fileDescriptionFromRequest(linkRequest)
-                        .flatMap { desc =>
-                          files
-                            .createLegacyLink(storage, project, desc, linkRequest.path, tag)
-                            .index(mode)
-                        }
-                    )
-                  },
                   // Create a file without id segment
                   uploadRequest { request =>
                     emit(
@@ -107,24 +91,6 @@ final class FilesRoutes(
                     concat(
                       (revParam & storageParam & tagParam) { case (rev, storage, tag) =>
                         concat(
-                          // Update a Link
-                          entity(as[LinkFileRequest]) { linkRequest =>
-                            emit(
-                              fileDescriptionFromRequest(linkRequest)
-                                .flatMap { description =>
-                                  files
-                                    .updateLegacyLink(
-                                      fileId,
-                                      storage,
-                                      description,
-                                      linkRequest.path,
-                                      rev,
-                                      tag
-                                    )
-                                    .index(mode)
-                                }
-                            )
-                          },
                           // Update a file
                           (requestEntityPresent & uploadRequest) { request =>
                             emit(
@@ -145,18 +111,6 @@ final class FilesRoutes(
                       },
                       (storageParam & tagParam) { case (storage, tag) =>
                         concat(
-                          // Link a file with id segment
-                          entity(as[LinkFileRequest]) { linkRequest =>
-                            emit(
-                              Created,
-                              fileDescriptionFromRequest(linkRequest)
-                                .flatMap { description =>
-                                  files
-                                    .createLegacyLink(fileId, storage, description, linkRequest.path, tag)
-                                    .index(mode)
-                                }
-                            )
-                          },
                           // Create a file with id segment
                           uploadRequest { request =>
                             emit(
@@ -312,23 +266,4 @@ object FilesRoutes {
       case (entity, customMetadata, contentLength) =>
         provide(FileUploadRequest(entity, customMetadata, contentLength))
     }
-
-  final case class LinkFileRequest(
-      path: Path,
-      filename: Option[String],
-      mediaType: Option[ContentType],
-      metadata: Option[FileCustomMetadata]
-  )
-
-  object LinkFileRequest {
-
-    implicit private val config: Configuration             = Configuration.default
-    implicit val linkFileDecoder: Decoder[LinkFileRequest] = deriveConfiguredDecoder[LinkFileRequest]
-
-    def fileDescriptionFromRequest(f: LinkFileRequest): IO[FileDescription] =
-      f.filename.orElse(f.path.lastSegment) match {
-        case Some(value) => IO.pure(FileDescription(value, f.mediaType, f.metadata))
-        case None        => IO.raiseError(InvalidFilePath)
-      }
-  }
 }

diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageDecoderConfiguration.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageDecoderConfiguration.scala
index 26a4330240..0a5f686ea9 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageDecoderConfiguration.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageDecoderConfiguration.scala
@@ -16,7 +16,6 @@ private[storages] object StorageDecoderConfiguration {
     val ctx = jsonLdContext
       .addAlias("DiskStorageFields", StorageType.DiskStorage.iri)
       .addAlias("S3StorageFields", StorageType.S3Storage.iri)
-      .addAlias("RemoteDiskStorageFields", StorageType.RemoteDiskStorage.iri)
     Configuration(ctx, "id")
   }
 }

diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageDeletionTask.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageDeletionTask.scala
index d9f11e7e1b..63248013a9 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageDeletionTask.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageDeletionTask.scala
@@ -4,7 +4,7 @@ import cats.effect.IO
 import ch.epfl.bluebrain.nexus.delta.kernel.Logger
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StorageDeletionTask.{init, logger}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, RemoteDiskStorageValue, S3StorageValue}
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, S3StorageValue}
 import ch.epfl.bluebrain.nexus.delta.sdk.deletion.ProjectDeletionTask
 import ch.epfl.bluebrain.nexus.delta.sdk.deletion.model.ProjectDeletionReport
 import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.Subject
@@ -25,13 +25,9 @@ final class StorageDeletionTask(currentStorages: ProjectRef => Stream[IO, Storag
   private def run(project: ProjectRef) =
     currentStorages(project)
       .evalScan(init) {
-        case (acc, disk: DiskStorageValue)         =>
+        case (acc, disk: DiskStorageValue) =>
           deleteRecursively(project, disk).map(acc ++ _)
-        case (acc, remote: RemoteDiskStorageValue) =>
-          val message =
-            s"Deletion of files for remote storages is yet to be implemented. Files in folder '${remote.folder}' will remain."
-          logger.warn(message).as(acc ++ message)
-        case (acc, s3: S3StorageValue)             =>
+        case (acc, s3: S3StorageValue)     =>
           val message =
             s"Deletion of files for S3 storages is yet to be implemented. Files in bucket '${s3.bucket}' will remain."
           logger.warn(message).as(acc ++ message)

diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala
index c260b66bae..2bd15aa6a5 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala
@@ -384,8 +384,7 @@ object Storages {
 
     val allowedStorageTypes: Set[StorageType] =
       Set(StorageType.DiskStorage) ++
-        config.amazon.as(StorageType.S3Storage) ++
-        config.remoteDisk.as(StorageType.RemoteDiskStorage)
+        config.amazon.as(StorageType.S3Storage)
 
     def validateAndReturnValue(id: Iri, fields: StorageFields): IO[StorageValue] =
       for {

diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesConfig.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesConfig.scala
index 5e9ec45aee..a9846db5ad 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesConfig.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesConfig.scala
@@ -6,8 +6,6 @@ import cats.implicits._
 import ch.epfl.bluebrain.nexus.delta.kernel.Secret
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.StorageTypeConfig
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{AbsolutePath, DigestAlgorithm, StorageType}
-import ch.epfl.bluebrain.nexus.delta.sdk.auth.Credentials
-import ch.epfl.bluebrain.nexus.delta.sdk.model.BaseUri
 import ch.epfl.bluebrain.nexus.delta.sdk.model.search.PaginationConfig
 import ch.epfl.bluebrain.nexus.delta.sdk.permissions.model.Permission
 import ch.epfl.bluebrain.nexus.delta.sourcing.config.EventLogConfig
@@ -16,8 +14,6 @@ import pureconfig.error.{CannotConvert, ConfigReaderFailures, ConvertFailure, Fa
 import pureconfig.generic.auto._
 import pureconfig.{ConfigConvert, ConfigReader}
 
-import scala.concurrent.duration.FiniteDuration
-
 /**
  * Configuration for the Storages module.
  */
@@ -55,19 +51,15 @@ object StoragesConfig {
    *   configuration for the disk storage
    * @param amazon
    *   configuration for the s3 compatible storage
-   * @param remoteDisk
-   *   configuration for the remote disk storage
    */
   final case class StorageTypeConfig(
       disk: DiskStorageConfig,
-      amazon: Option[S3StorageConfig],
-      remoteDisk: Option[RemoteDiskStorageConfig]
+      amazon: Option[S3StorageConfig]
   ) {
     def showFileLocation: ShowFileLocation = {
-      val diskType   = if (disk.showLocation) Set(StorageType.DiskStorage) else Set()
-      val remoteType = if (remoteDisk.exists(_.showLocation)) Set(StorageType.RemoteDiskStorage) else Set()
-      val s3Type     = if (amazon.exists(_.showLocation)) Set(StorageType.S3Storage) else Set()
-      ShowFileLocation(diskType ++ remoteType ++ s3Type)
+      val diskType = if (disk.showLocation) Set(StorageType.DiskStorage) else Set()
+      val s3Type   = if (amazon.exists(_.showLocation)) Set(StorageType.S3Storage) else Set()
+      ShowFileLocation(diskType ++ s3Type)
     }
   }
 
@@ -81,28 +73,22 @@ object StoragesConfig {
   implicit val storageTypeConfigReader: ConfigReader[StorageTypeConfig] = ConfigReader.fromCursor { cursor =>
     for {
-      obj          <- cursor.asObjectCursor
-      diskCursor   <- obj.atKey("disk")
-      disk         <- ConfigReader[DiskStorageConfig].from(diskCursor)
-      _            <-
+      obj                 <- cursor.asObjectCursor
+      diskCursor          <- obj.atKey("disk")
+      disk                <- ConfigReader[DiskStorageConfig].from(diskCursor)
+      _                   <-
         Option
           .when(disk.allowedVolumes.contains(disk.defaultVolume))(())
           .toRight(
             ConfigReaderFailures(ConvertFailure(WrongAllowedKeys(disk.defaultVolume), None, "disk.allowed-volumes"))
           )
-
       amazonCursor        <- obj.atKeyOrUndefined("amazon").asObjectCursor
       amazonEnabledCursor <- amazonCursor.atKey("enabled")
       amazonEnabled       <- amazonEnabledCursor.asBoolean
       amazon              <- ConfigReader[S3StorageConfig].from(amazonCursor)
-      remoteCursor        <- obj.atKeyOrUndefined("remote-disk").asObjectCursor
-      remoteEnabledCursor <- remoteCursor.atKey("enabled")
-      remoteEnabled       <- remoteEnabledCursor.asBoolean
-      remote              <- ConfigReader[RemoteDiskStorageConfig].from(remoteCursor)
     } yield StorageTypeConfig(
       disk,
-      Option.when(amazonEnabled)(amazon),
-      Option.when(remoteEnabled)(remote)
+      Option.when(amazonEnabled)(amazon)
     )
   }
@@ -187,35 +173,6 @@ object StoragesConfig {
     val prefixPath: Path = prefix.getOrElse(Path.Empty)
   }
 
-  /**
-   * Remote Disk storage configuration
-   *
-   * @param defaultEndpoint
-   *   the default endpoint of the current storage
-   * @param defaultCredentials
-   *   the default credentials for the defaul endpoint of the current storage
-   * @param defaultReadPermission
-   *   the default permission required in order to download a file from a remote disk storage
-   * @param defaultWritePermission
-   *   the default permission required in order to upload a file to a remote disk storage
-   * @param showLocation
-   *   flag to decide whether or not to show the absolute location of the files in the metadata response
-   * @param defaultMaxFileSize
-   *   the default maximum allowed file size (in bytes) for uploaded files
-   * @param digestComputationRetryDelay
-   *   retry configuration for the digest computation task
-   */
-  final case class RemoteDiskStorageConfig(
-      digestAlgorithm: DigestAlgorithm,
-      defaultEndpoint: BaseUri,
-      credentials: Credentials,
-      defaultReadPermission: Permission,
-      defaultWritePermission: Permission,
-      showLocation: Boolean,
-      defaultMaxFileSize: Long,
-      digestComputationRetryDelay: FiniteDuration
-  ) extends StorageTypeEntryConfig
-
   implicit private val uriConverter: ConfigConvert[Uri] =
     ConfigConvert.viaString[Uri](catchReadError(Uri(_)), _.toString)
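[Editor's note: with the remote-disk branch gone, the cursor-based reader above gates only the `amazon` section on its `enabled` flag. A condensed, self-contained sketch of that pureconfig pattern; the `Toy*` types are illustrative, while the cursor calls mirror the diff's own code.]

```scala
import pureconfig.ConfigReader
import pureconfig.generic.auto._

object ToyConfig {
  final case class ToyS3Config(enabled: Boolean, defaultBucket: String)
  final case class ToyStorageConfig(amazon: Option[ToyS3Config])

  implicit val toyReader: ConfigReader[ToyStorageConfig] =
    ConfigReader.fromCursor { cursor =>
      for {
        obj           <- cursor.asObjectCursor
        amazonCursor  <- obj.atKeyOrUndefined("amazon").asObjectCursor
        enabledCursor <- amazonCursor.atKey("enabled")
        enabled       <- enabledCursor.asBoolean
        amazon        <- ConfigReader[ToyS3Config].from(amazonCursor)
      } yield ToyStorageConfig(Option.when(enabled)(amazon)) // keep the section only when enabled
    }
}
```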
_.toString) diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/access/RemoteStorageAccess.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/access/RemoteStorageAccess.scala deleted file mode 100644 index 63e9f91243..0000000000 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/access/RemoteStorageAccess.scala +++ /dev/null @@ -1,28 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.access - -import cats.syntax.all._ -import cats.effect.IO -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.StorageNotAccessible -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient -import ch.epfl.bluebrain.nexus.delta.kernel.http.HttpClientError -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label - -trait RemoteStorageAccess { - - def checkFolderExists(folder: Label): IO[Unit] - -} - -object RemoteStorageAccess { - - def apply(client: RemoteDiskStorageClient): RemoteStorageAccess = - (folder: Label) => - client - .exists(folder) - .adaptError { case err: HttpClientError => - StorageNotAccessible( - err.details.fold(s"Folder '$folder' does not exist")(d => s"${err.reason}: $d") - ) - } - -} diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/access/StorageAccess.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/access/StorageAccess.scala index 6e2ed5a583..73d27ad420 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/access/StorageAccess.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/access/StorageAccess.scala @@ -2,7 +2,7 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.access import cats.effect.IO import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, RemoteDiskStorageValue, S3StorageValue} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, S3StorageValue} trait StorageAccess { @@ -18,10 +18,9 @@ trait StorageAccess { object StorageAccess { - def apply(remoteAccess: RemoteStorageAccess, s3Access: S3StorageAccess): StorageAccess = { - case d: DiskStorageValue => DiskStorageAccess.checkVolumeExists(d.volume) - case s: RemoteDiskStorageValue => remoteAccess.checkFolderExists(s.folder) - case s: S3StorageValue => s3Access.checkBucketExists(s.bucket) + def apply(s3Access: S3StorageAccess): StorageAccess = { + case d: DiskStorageValue => DiskStorageAccess.checkVolumeExists(d.volume) + case s: S3StorageValue => s3Access.checkBucketExists(s.bucket) } } diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala index 2ff24fcd9a..4e3cc4c2d7 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala @@ -1,7 +1,7 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model import 
ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.Metadata -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, RemoteDiskStorageValue, S3StorageValue} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, S3StorageValue} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{contexts, Storages} import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.ContextValue @@ -83,19 +83,6 @@ object Storage { override val storageValue: StorageValue = value } - /** - * A storage that stores and fetches files from a remote volume using a well-defined API - */ - final case class RemoteDiskStorage( - id: Iri, - project: ProjectRef, - value: RemoteDiskStorageValue, - source: Json - ) extends Storage { - override val default: Boolean = value.default - override val storageValue: StorageValue = value - } - /** * Storage metadata. * diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFields.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFields.scala index 8f6439ee21..d53e23223c 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFields.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFields.scala @@ -1,13 +1,12 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.StorageTypeConfig -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, RemoteDiskStorageValue, S3StorageValue} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, S3StorageValue} import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.decoder.configuration.semiauto.deriveConfigJsonLdDecoder import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.decoder.{Configuration => JsonLdConfiguration, JsonLdDecoder} import ch.epfl.bluebrain.nexus.delta.sdk.permissions.model.Permission -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label import io.circe.{Encoder, Json} import io.circe.generic.extras.Configuration import io.circe.generic.extras.semiauto.deriveConfiguredEncoder @@ -153,49 +152,6 @@ object StorageFields { } } - /** - * Necessary values to create/update a Remote disk storage - * - * @param default - * ''true'' if this store is the project's default, ''false'' otherwise - * @param folder - * the rootFolder for this storage - * @param readPermission - * the permission required in order to download a file from this storage - * @param writePermission - * the permission required in order to upload a file to this storage - * @param maxFileSize - * the maximum allowed file size (in bytes) for uploaded files - */ - final case class RemoteDiskStorageFields( - name: Option[String], - description: Option[String], - default: Boolean, - folder: Label, - readPermission: Option[Permission], - writePermission: Option[Permission], - maxFileSize: Option[Long] - ) extends StorageFields { - - override val tpe: StorageType = StorageType.RemoteDiskStorage - - override type Value = RemoteDiskStorageValue - - override def 
toValue(config: StorageTypeConfig): Option[Value] = - config.remoteDisk.map { cfg => - RemoteDiskStorageValue( - name, - description, - default, - cfg.digestAlgorithm, - folder, - readPermission.getOrElse(cfg.defaultReadPermission), - writePermission.getOrElse(cfg.defaultWritePermission), - computeMaxFileSize(maxFileSize, cfg.defaultMaxFileSize) - ) - } - } - implicit private[model] val storageFieldsEncoder: Encoder.AsObject[StorageFields] = { implicit val config: Configuration = Configuration.default.withDiscriminator(keywords.tpe) diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageState.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageState.scala index 337659b23f..e05ef0f2d5 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageState.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageState.scala @@ -1,7 +1,7 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.{DiskStorage, RemoteDiskStorage, S3Storage} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, RemoteDiskStorageValue, S3StorageValue} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.{DiskStorage, S3Storage} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, S3StorageValue} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{schemas, StorageResource} import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri import ch.epfl.bluebrain.nexus.delta.sdk.model.{ResourceF, ResourceUris} @@ -27,8 +27,6 @@ import java.time.Instant * additional fields to configure the storage * @param source * the representation of the storage as posted by the subject - * @param tags - * the collection of tag aliases * @param rev * the current state revision * @param deprecated @@ -61,9 +59,8 @@ final case class StorageState( def storage: Storage = value match { - case value: DiskStorageValue => DiskStorage(id, project, value, source) - case value: S3StorageValue => S3Storage(id, project, value, source) - case value: RemoteDiskStorageValue => RemoteDiskStorage(id, project, value, source) + case value: DiskStorageValue => DiskStorage(id, project, value, source) + case value: S3StorageValue => S3Storage(id, project, value, source) } def toResource: StorageResource = diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageValue.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageValue.scala index 1e8eb61bf6..fc3c837024 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageValue.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageValue.scala @@ -2,7 +2,7 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords import ch.epfl.bluebrain.nexus.delta.sdk.permissions.model.Permission -import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef} +import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef import 
io.circe.generic.extras.Configuration import io.circe.generic.extras.semiauto.{deriveConfiguredCodec, deriveConfiguredEncoder} import io.circe.syntax._ @@ -149,53 +149,6 @@ object StorageValue { ) } - /** - * Resolved values to create/update a Remote disk storage - * - * @see - * [[StorageFields.RemoteDiskStorageFields]] - */ - final case class RemoteDiskStorageValue( - name: Option[String] = None, - description: Option[String] = None, - default: Boolean, - algorithm: DigestAlgorithm, - folder: Label, - readPermission: Permission, - writePermission: Permission, - maxFileSize: Long - ) extends StorageValue { - - override val tpe: StorageType = StorageType.RemoteDiskStorage - - } - - object RemoteDiskStorageValue { - - /** - * @return - * a RemoteDiskStorageValue without name or description - */ - def apply( - default: Boolean, - algorithm: DigestAlgorithm, - folder: Label, - readPermission: Permission, - writePermission: Permission, - maxFileSize: Long - ): RemoteDiskStorageValue = - RemoteDiskStorageValue( - None, - None, - default, - algorithm, - folder, - readPermission, - writePermission, - maxFileSize - ) - } - implicit private[model] val storageValueEncoder: Encoder.AsObject[StorageValue] = { implicit val config: Configuration = Configuration.default.withDiscriminator(keywords.tpe) diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/FileOperations.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/FileOperations.scala index 904d209c24..4d59ad5fbc 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/FileOperations.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/FileOperations.scala @@ -1,17 +1,15 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations -import akka.http.scaladsl.model.Uri import cats.effect.IO +import ch.epfl.bluebrain.nexus.delta.kernel.AkkaSource import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.UploadedFileInformation -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{ComputedFileAttributes, FileAttributes, FileDelegationRequest, FileStorageMetadata} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes, FileDelegationRequest, FileStorageMetadata} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.{DiskStorage, RemoteDiskStorage, S3Storage} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.{DelegateFileOperation, FetchAttributeRejection, MoveFileRejection} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.UploadingFile.{DiskUploadingFile, RemoteUploadingFile, S3UploadingFile} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.{DiskStorage, S3Storage} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.DelegateFileOperation +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.UploadingFile.{DiskUploadingFile, S3UploadingFile} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk.DiskFileOperations -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.RemoteDiskFileOperations import 
ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3.S3FileOperations -import ch.epfl.bluebrain.nexus.delta.kernel.AkkaSource import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef import java.util.UUID @@ -25,17 +23,12 @@ trait FileOperations { def fetch(storage: Storage, attributes: FileAttributes): IO[AkkaSource] - def legacyLink(storage: Storage, sourcePath: Uri.Path, filename: String): IO[FileStorageMetadata] - - def fetchAttributes(storage: Storage, attributes: FileAttributes): IO[ComputedFileAttributes] - def delegate(storage: Storage, filename: String): IO[FileDelegationRequest.TargetLocation] } object FileOperations { def apply( diskFileOps: DiskFileOperations, - remoteDiskFileOps: RemoteDiskFileOperations, s3FileOps: S3FileOperations ): FileOperations = new FileOperations { @@ -45,29 +38,15 @@ object FileOperations { contentLength: Option[Long] ): IO[FileStorageMetadata] = IO.fromEither(UploadingFile(storage, info, contentLength)).flatMap { - case d: DiskUploadingFile => diskFileOps.save(d) - case r: RemoteUploadingFile => remoteDiskFileOps.save(r) - case s: S3UploadingFile => s3FileOps.save(s) + case d: DiskUploadingFile => diskFileOps.save(d) + case s: S3UploadingFile => s3FileOps.save(s) } override def fetch(storage: Storage, attributes: FileAttributes): IO[AkkaSource] = storage match { - case _: DiskStorage => diskFileOps.fetch(attributes.location.path) - case s: S3Storage => s3FileOps.fetch(s.value.bucket, attributes.path) - case s: RemoteDiskStorage => remoteDiskFileOps.fetch(s.value.folder, attributes.path) + case _: DiskStorage => diskFileOps.fetch(attributes.location.path) + case s: S3Storage => s3FileOps.fetch(s.value.bucket, attributes.path) } - override def legacyLink(storage: Storage, sourcePath: Uri.Path, filename: String): IO[FileStorageMetadata] = - storage match { - case storage: RemoteDiskStorage => remoteDiskFileOps.legacyLink(storage, sourcePath, filename) - case s => IO.raiseError(MoveFileRejection.UnsupportedOperation(s.tpe)) - } - - override def fetchAttributes(storage: Storage, attributes: FileAttributes): IO[ComputedFileAttributes] = - storage match { - case s: RemoteDiskStorage => remoteDiskFileOps.fetchAttributes(s.value.folder, attributes.path) - case s => IO.raiseError(FetchAttributeRejection.UnsupportedOperation(s.tpe)) - } - override def delegate(storage: Storage, filename: String): IO[FileDelegationRequest.TargetLocation] = storage match { case s: S3Storage => diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/StorageFileRejection.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/StorageFileRejection.scala index ea06d1f0ce..721b32aa7c 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/StorageFileRejection.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/StorageFileRejection.scala @@ -59,12 +59,6 @@ object StorageFileRejection { extends FetchAttributeRejection( s"Fetching a file's attributes is not supported for storages of type '${tpe.iri}'" ) - - /** - * Rejection returned when a storage cannot fetch a file - */ - final case class WrappedFetchRejection(rejection: FetchFileRejection) - extends FetchAttributeRejection(rejection.loggedDetails) } /** @@ -118,51 +112,6 @@ object StorageFileRejection { extends SaveFileRejection(s"Access denied to bucket $bucket at key $key") } 
- /** - * Rejection returned when a storage cannot move a file - */ - sealed abstract class MoveFileRejection(loggedDetails: String) extends StorageFileRejection(loggedDetails) - - object MoveFileRejection { - - /** - * Rejection returned when a file is not found - */ - final case class FileNotFound(sourcePath: String) - extends MoveFileRejection(s"File could not be moved from expected path '$sourcePath'.") - - /** - * Rejection returned when a storage cannot move a file because it already exists on its destination location - */ - final case class ResourceAlreadyExists(destinationPath: String) - extends MoveFileRejection( - s"File cannot be moved because it already exists on its destination path '$destinationPath'." - ) - - /** - * Rejection returned when a path to be moved contains links - */ - final case class PathContainsLinks(path: String) - extends MoveFileRejection( - s"File could not be moved from path '$path' because the path contains links." - ) - - /** - * Rejection returned when a storage cannot move a file due to an unexpected reason - */ - final case class UnexpectedMoveError(sourcePath: String, destinationPath: String, details: String) - extends MoveFileRejection( - s"File cannot be moved from path '$sourcePath' to '$destinationPath' for unexpected reasons. Details '$details'" - ) - - /** - * Rejection returned when the storage does not support this operation - */ - final case class UnsupportedOperation(tpe: StorageType) - extends MoveFileRejection(s"Moving a file is not supported for storages of type '${tpe.iri}'") - - } - sealed abstract class LinkFileRejection(loggedDetails: String) extends StorageFileRejection(loggedDetails) object LinkFileRejection { @@ -173,7 +122,7 @@ object StorageFileRejection { extends LinkFileRejection(s"An S3 path must contain at least the filename.
Path was $path") final case class UnsupportedOperation(tpe: StorageType) - extends MoveFileRejection(s"Linking a file is not supported for storages of type '${tpe.iri}'") + extends LinkFileRejection(s"Linking a file is not supported for storages of type '${tpe.iri}'") } sealed abstract class DelegateFileOperation(loggedDetails: String) extends StorageFileRejection(loggedDetails) diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/UploadingFile.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/UploadingFile.scala index c880142752..f8c96a176c 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/UploadingFile.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/UploadingFile.scala @@ -2,11 +2,11 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations import akka.http.scaladsl.model.{BodyPartEntity, ContentType} import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.UploadedFileInformation -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.{DiskStorage, RemoteDiskStorage, S3Storage} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.{DiskStorage, S3Storage} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{AbsolutePath, DigestAlgorithm, Storage} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.SaveFileRejection import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.SaveFileRejection.FileContentLengthIsMissing -import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef} +import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef /** * Represents a file being uploaded with one implementing by storage type @@ -42,9 +42,6 @@ object UploadingFile { entity: BodyPartEntity ) extends UploadingFile - final case class RemoteUploadingFile(project: ProjectRef, folder: Label, filename: String, entity: BodyPartEntity) - extends UploadingFile - final case class S3UploadingFile( project: ProjectRef, bucket: String, @@ -58,13 +55,11 @@ object UploadingFile { storage: Storage, info: UploadedFileInformation, contentLengthOpt: Option[Long] - ): Either[SaveFileRejection.FileContentLengthIsMissing.type, UploadingFile] = + ): Either[SaveFileRejection, UploadingFile] = storage match { - case s: DiskStorage => + case s: DiskStorage => Right(DiskUploadingFile(s.project, s.value.volume, s.value.algorithm, info.filename, info.contents)) - case s: RemoteDiskStorage => - Right(RemoteUploadingFile(s.project, s.value.folder, info.filename, info.contents)) - case s: S3Storage => + case s: S3Storage => contentLengthOpt.toRight(FileContentLengthIsMissing).map { contentLength => S3UploadingFile( s.project, diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteDiskFileOperations.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteDiskFileOperations.scala deleted file mode 100644 index 921ec1f5cf..0000000000 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteDiskFileOperations.scala +++ /dev/null @@ -1,85 +0,0 @@ -package 
ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote - -import akka.http.scaladsl.model.Uri -import cats.effect.IO -import cats.syntax.all._ -import ch.epfl.bluebrain.nexus.delta.kernel.utils.UUIDF -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{ComputedFileAttributes, FileStorageMetadata} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.RemoteDiskStorage -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.FileOperations.intermediateFolders -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.FetchAttributeRejection.WrappedFetchRejection -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.FetchFileRejection -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.UploadingFile.RemoteUploadingFile -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.model.RemoteDiskStorageFileAttributes -import ch.epfl.bluebrain.nexus.delta.kernel.AkkaSource -import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef} - -import java.util.UUID - -trait RemoteDiskFileOperations { - - def legacyLink(storage: RemoteDiskStorage, sourcePath: Uri.Path, filename: String): IO[FileStorageMetadata] - - def fetch(folder: Label, path: Uri.Path): IO[AkkaSource] - - def save(uploading: RemoteUploadingFile): IO[FileStorageMetadata] - - def fetchAttributes(folder: Label, path: Uri.Path): IO[ComputedFileAttributes] -} - -object RemoteDiskFileOperations { - - def mk(client: RemoteDiskStorageClient)(implicit uuidf: UUIDF): RemoteDiskFileOperations = - new RemoteDiskFileOperations { - override def fetch(folder: Label, path: Uri.Path): IO[AkkaSource] = client.getFile(folder, path) - - override def save(uploading: RemoteUploadingFile): IO[FileStorageMetadata] = - for { - (uuid, destinationPath) <- generateRandomPath(uploading.project, uploading.filename) - attr <- client.createFile(uploading.folder, destinationPath, uploading.entity) - } yield metadataFromAttributes(attr, uuid, destinationPath, FileAttributesOrigin.Client) - - override def legacyLink( - storage: RemoteDiskStorage, - sourcePath: Uri.Path, - filename: String - ): IO[FileStorageMetadata] = - for { - (uuid, destinationPath) <- generateRandomPath(storage.project, filename) - attr <- client.moveFile(storage.value.folder, sourcePath, destinationPath) - } yield metadataFromAttributes(attr, uuid, destinationPath, FileAttributesOrigin.Storage) - - private def metadataFromAttributes( - attr: RemoteDiskStorageFileAttributes, - uuid: UUID, - destinationPath: Uri.Path, - origin: FileAttributesOrigin - ) = - FileStorageMetadata( - uuid = uuid, - bytes = attr.bytes, - digest = attr.digest, - origin = origin, - location = attr.location, - path = destinationPath - ) - - private def generateRandomPath(project: ProjectRef, filename: String) = uuidf().map { uuid => - val path = Uri.Path(intermediateFolders(project, uuid, filename)) - (uuid, path) - } - - override def fetchAttributes(folder: Label, path: Uri.Path): IO[ComputedFileAttributes] = - client - .getAttributes(folder, path) - .map { case RemoteDiskStorageFileAttributes(_, bytes, digest, mediaType) => - ComputedFileAttributes(mediaType, bytes, digest) - } - .adaptError { case e: 
FetchFileRejection => - WrappedFetchRejection(e) - } - } - -} diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteDiskStorageClient.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteDiskStorageClient.scala deleted file mode 100644 index 0bdf95dbb5..0000000000 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteDiskStorageClient.scala +++ /dev/null @@ -1,262 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client - -import akka.actor.ActorSystem -import akka.http.scaladsl.client.RequestBuilding._ -import akka.http.scaladsl.model.BodyPartEntity -import akka.http.scaladsl.model.Multipart.FormData -import akka.http.scaladsl.model.Multipart.FormData.BodyPart -import akka.http.scaladsl.model.StatusCodes._ -import akka.http.scaladsl.model.Uri.Path -import cats.effect.IO -import cats.implicits._ -import ch.epfl.bluebrain.nexus.delta.kernel.dependency.ComponentDescription.ServiceDescription -import ch.epfl.bluebrain.nexus.delta.kernel.dependency.ComponentDescription.ServiceDescription.ResolvedServiceDescription -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.RemoteDiskStorageConfig -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.FetchFileRejection.UnexpectedFetchError -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.MoveFileRejection.UnexpectedMoveError -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.{FetchFileRejection, MoveFileRejection, SaveFileRejection} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.model.RemoteDiskStorageFileAttributes -import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords -import ch.epfl.bluebrain.nexus.delta.kernel.AkkaSource -import ch.epfl.bluebrain.nexus.delta.sdk.auth.{AuthTokenProvider, Credentials} -import ch.epfl.bluebrain.nexus.delta.kernel.circe.CirceMarshalling._ -import ch.epfl.bluebrain.nexus.delta.sdk.error.ServiceError.FeatureDisabled -import ch.epfl.bluebrain.nexus.delta.kernel.http.HttpClientError._ -import ch.epfl.bluebrain.nexus.delta.kernel.http.{HttpClient, HttpClientError} -import ch.epfl.bluebrain.nexus.delta.sdk.model.BaseUri -import ch.epfl.bluebrain.nexus.delta.sdk.syntax._ -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label -import io.circe.generic.semiauto.deriveDecoder -import io.circe.syntax._ -import io.circe.{Decoder, Json} - -import scala.concurrent.duration._ - -/** - * The client to communicate with the remote storage service - */ -trait RemoteDiskStorageClient { - - /** - * Fetches the service description information (name and version) - */ - def serviceDescription: IO[ServiceDescription] - - /** - * Checks that the provided storage bucket exists and it is readable/writable. - * - * @param bucket - * the storage bucket name - */ - def exists(bucket: Label): IO[Unit] - - /** - * Creates a file with the provided metadata and ''source'' on the provided ''relativePath''. 
- * - * @param bucket - * the storage bucket name - * @param relativePath - * the relative path location - * @param entity - * the file content - */ - def createFile( - bucket: Label, - relativePath: Path, - entity: BodyPartEntity - ): IO[RemoteDiskStorageFileAttributes] - - /** - * Retrieves the file as a Source. - * - * @param bucket - * the storage bucket name - * @param relativePath - * the relative path to the file location - */ - def getFile(bucket: Label, relativePath: Path): IO[AkkaSource] - - /** - * Retrieves the file attributes. - * - * @param bucket - * the storage bucket name - * @param relativePath - * the relative path to the file location - */ - def getAttributes( - bucket: Label, - relativePath: Path - ): IO[RemoteDiskStorageFileAttributes] - - /** - * Moves a path from the provided ''sourceRelativePath'' to ''destRelativePath'' inside the nexus folder. - * - * @param bucket - * the storage bucket name - * @param sourceRelativePath - * the source relative path location - * @param destRelativePath - * the destination relative path location inside the nexus folder - */ - def moveFile( - bucket: Label, - sourceRelativePath: Path, - destRelativePath: Path - ): IO[RemoteDiskStorageFileAttributes] -} - -object RemoteDiskStorageClient { - - final class RemoteDiskStorageClientImpl( - client: HttpClient, - getAuthToken: AuthTokenProvider, - baseUri: BaseUri, - credentials: Credentials - )(implicit as: ActorSystem) - extends RemoteDiskStorageClient { - - import as.dispatcher - - private val serviceName = "remoteStorage" - - def serviceDescription: IO[ServiceDescription] = - client - .fromJsonTo[ResolvedServiceDescription](Get(baseUri.base)) - .map(_.copy(name = serviceName)) - .widen[ServiceDescription] - .timeout(1.second) - .recover(_ => ServiceDescription.unresolved(serviceName)) - - def exists(bucket: Label): IO[Unit] = { - getAuthToken(credentials).flatMap { authToken => - val endpoint = baseUri.endpoint / "buckets" / bucket.value - val req = Head(endpoint).withCredentials(authToken) - client(req) { - case resp if resp.status.isSuccess() => IO.delay(resp.discardEntityBytes()).void - } - } - } - - def createFile( - bucket: Label, - relativePath: Path, - entity: BodyPartEntity - ): IO[RemoteDiskStorageFileAttributes] = { - getAuthToken(credentials).flatMap { authToken => - val endpoint = baseUri.endpoint / "buckets" / bucket.value / "files" / relativePath - val filename = relativePath.lastSegment.getOrElse("filename") - val multipartForm = FormData(BodyPart("file", entity, Map("filename" -> filename))).toEntity() - client - .fromJsonTo[RemoteDiskStorageFileAttributes](Put(endpoint, multipartForm).withCredentials(authToken)) - .adaptError { - case HttpClientStatusError(_, `Conflict`, _, _) => - SaveFileRejection.ResourceAlreadyExists(relativePath.toString) - case error: HttpClientError => - SaveFileRejection.UnexpectedSaveError(relativePath.toString, error.asString) - } - } - } - - def getFile(bucket: Label, relativePath: Path): IO[AkkaSource] = { - getAuthToken(credentials).flatMap { authToken => - val endpoint = baseUri.endpoint / "buckets" / bucket.value / "files" / relativePath - client - .toDataBytes(Get(endpoint).withCredentials(authToken)) - .adaptError { - case error @ HttpClientStatusError(_, `NotFound`, _, _) if !bucketNotFoundType(error) => - FetchFileRejection.FileNotFound(relativePath.toString) - case error: HttpClientError => - UnexpectedFetchError(relativePath.toString, error.asString) - } - } - } - - def getAttributes( - bucket: Label, - relativePath: Path - ): 
IO[RemoteDiskStorageFileAttributes] = { - getAuthToken(credentials).flatMap { authToken => - val endpoint = baseUri.endpoint / "buckets" / bucket.value / "attributes" / relativePath - client.fromJsonTo[RemoteDiskStorageFileAttributes](Get(endpoint).withCredentials(authToken)).adaptError { - case error @ HttpClientStatusError(_, `NotFound`, _, _) if !bucketNotFoundType(error) => - FetchFileRejection.FileNotFound(relativePath.toString) - case error: HttpClientError => - UnexpectedFetchError(relativePath.toString, error.asString) - } - } - } - - def moveFile( - bucket: Label, - sourceRelativePath: Path, - destRelativePath: Path - ): IO[RemoteDiskStorageFileAttributes] = { - getAuthToken(credentials).flatMap { authToken => - val endpoint = baseUri.endpoint / "buckets" / bucket.value / "files" / destRelativePath - val payload = Json.obj("source" -> sourceRelativePath.toString.asJson) - client - .fromJsonTo[RemoteDiskStorageFileAttributes](Put(endpoint, payload).withCredentials(authToken)) - .adaptError { - case error @ HttpClientStatusError(_, `NotFound`, _, _) if !bucketNotFoundType(error) => - MoveFileRejection.FileNotFound(sourceRelativePath.toString) - case error @ HttpClientStatusError(_, `BadRequest`, _, _) if pathContainsLinksType(error) => - MoveFileRejection.PathContainsLinks(destRelativePath.toString) - case HttpClientStatusError(_, `Conflict`, _, _) => - MoveFileRejection.ResourceAlreadyExists(destRelativePath.toString) - case error: HttpClientError => - UnexpectedMoveError(sourceRelativePath.toString, destRelativePath.toString, error.asString) - } - } - } - - private def bucketNotFoundType(error: HttpClientError): Boolean = - error.jsonBody.fold(false)(_.hcursor.get[String](keywords.tpe).toOption.contains("BucketNotFound")) - - private def pathContainsLinksType(error: HttpClientError): Boolean = - error.jsonBody.fold(false)(_.hcursor.get[String](keywords.tpe).toOption.contains("PathContainsLinks")) - - implicit private val resolvedServiceDescriptionDecoder: Decoder[ResolvedServiceDescription] = - deriveDecoder[ResolvedServiceDescription] - - } - - final object RemoteDiskStorageClientDisabled extends RemoteDiskStorageClient { - - private val disabledError = IO.raiseError(FeatureDisabled("Remote storage is disabled")) - - override def serviceDescription: IO[ServiceDescription] = disabledError - - override def exists(bucket: Label): IO[Unit] = disabledError - - override def createFile( - bucket: Label, - relativePath: Path, - entity: BodyPartEntity - ): IO[RemoteDiskStorageFileAttributes] = disabledError - - override def getFile(bucket: Label, relativePath: Path): IO[AkkaSource] = disabledError - - override def getAttributes(bucket: Label, relativePath: Path): IO[RemoteDiskStorageFileAttributes] = disabledError - - override def moveFile( - bucket: Label, - sourceRelativePath: Path, - destRelativePath: Path - ): IO[RemoteDiskStorageFileAttributes] = disabledError - } - - def apply(client: HttpClient, authTokenProvider: AuthTokenProvider, configOpt: Option[RemoteDiskStorageConfig])( - implicit as: ActorSystem - ): RemoteDiskStorageClient = - configOpt - .map { config => - new RemoteDiskStorageClientImpl( - client, - authTokenProvider, - config.defaultEndpoint, - config.credentials - )(as) - } - .getOrElse(RemoteDiskStorageClientDisabled) -} diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/model/RemoteDiskStorageFileAttributes.scala 
b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/model/RemoteDiskStorageFileAttributes.scala deleted file mode 100644 index af1bab49bd..0000000000 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/model/RemoteDiskStorageFileAttributes.scala +++ /dev/null @@ -1,58 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.model - -import akka.http.scaladsl.model.{ContentType, Uri} -import cats.syntax.all._ -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.{ComputedDigest, NotComputedDigest} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.DigestAlgorithm -import ch.epfl.bluebrain.nexus.delta.sdk.implicits._ -import io.circe.{Decoder, DecodingFailure} -import io.circe.generic.extras.Configuration -import io.circe.generic.extras.semiauto._ - -// $COVERAGE-OFF$ -/** - * Holds all the metadata information related to the file. - * - * @param location - * the file location - * @param bytes - * the size of the file in bytes - * @param digest - * the digest information of the file - * @param mediaType - * the media type of the file - */ -final case class RemoteDiskStorageFileAttributes( - location: Uri, - bytes: Long, - digest: Digest, - mediaType: ContentType -) - -object RemoteDiskStorageFileAttributes { - - implicit private val config: Configuration = - Configuration.default - .copy(transformMemberNames = { - case "@context" => "@context" - case key => s"_$key" - }) - - implicit val fileAttrDecoder: Decoder[RemoteDiskStorageFileAttributes] = { - - implicit val computedDigestDecoder: Decoder[Digest] = Decoder.instance { hc => - (hc.get[String]("_value"), hc.get[String]("_algorithm")).mapN { - case ("", "") => - Right(NotComputedDigest) - case (value, algorithm) => - DigestAlgorithm(algorithm) - .map(ComputedDigest(_, value)) - .toRight(DecodingFailure(s"wrong DigestAlgorithm '$algorithm'", hc.history)) - }.flatten - } - - deriveConfiguredDecoder[RemoteDiskStorageFileAttributes] - } -} -// $COVERAGE-ON$ diff --git a/delta/plugins/storage/src/test/resources/storages/database/remote-storage-created.json b/delta/plugins/storage/src/test/resources/storages/database/remote-storage-created.json deleted file mode 100644 index ef0f949b6b..0000000000 --- a/delta/plugins/storage/src/test/resources/storages/database/remote-storage-created.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "id" : "https://bluebrain.github.io/nexus/vocabulary/remote-disk-storage", - "project" : "myorg/myproj", - "value" : { - "default" : true, - "name": "remoteName", - "description": "remoteDescription", - "algorithm": "SHA-256", - "folder" : "myfolder", - "readPermission" : "remote/read", - "writePermission" : "remote/write", - "maxFileSize" : 52, - "@type" : "RemoteDiskStorageValue" - }, - "source" : { - "@type" : "RemoteDiskStorage", - "name": "remoteName", - "description": "remoteDescription", - "default" : true, - "folder" : "myfolder", - "readPermission" : "remote/read", - "writePermission" : "remote/write", - "maxFileSize" : 52 - }, - "rev" : 1, - "instant" : "1970-01-01T00:00:00Z", - "subject" : { - "subject" : "username", - "realm" : "myrealm", - "@type" : "User" - }, - "@type" : "StorageCreated" -} diff --git a/delta/plugins/storage/src/test/resources/storages/database/remote-storage-updated.json
b/delta/plugins/storage/src/test/resources/storages/database/remote-storage-updated.json deleted file mode 100644 index 885e2919f0..0000000000 --- a/delta/plugins/storage/src/test/resources/storages/database/remote-storage-updated.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "id" : "https://bluebrain.github.io/nexus/vocabulary/remote-disk-storage", - "project" : "myorg/myproj", - "value" : { - "default" : true, - "name": "remoteName", - "description": "remoteDescription", - "algorithm": "SHA-256", - "folder" : "myfolder2", - "readPermission" : "remote/read", - "writePermission" : "remote/write", - "maxFileSize" : 42, - "@type" : "RemoteDiskStorageValue" - }, - "source" : { - "@type" : "RemoteDiskStorage", - "name": "remoteName", - "description": "remoteDescription", - "default" : true, - "folder" : "myfolder", - "readPermission" : "remote/read", - "writePermission" : "remote/write", - "maxFileSize" : 52 - }, - "rev" : 2, - "instant" : "1970-01-01T00:00:00Z", - "subject" : { - "subject" : "username", - "realm" : "myrealm", - "@type" : "User" - }, - "@type" : "StorageUpdated" -} diff --git a/delta/plugins/storage/src/test/resources/storages/remote-storage-expanded.json b/delta/plugins/storage/src/test/resources/storages/remote-storage-expanded.json deleted file mode 100644 index 17136a304d..0000000000 --- a/delta/plugins/storage/src/test/resources/storages/remote-storage-expanded.json +++ /dev/null @@ -1,49 +0,0 @@ -[ - { - "@id": "https://bluebrain.github.io/nexus/vocabulary/remote", - "@type": [ - "https://bluebrain.github.io/nexus/vocabulary/Storage", - "https://bluebrain.github.io/nexus/vocabulary/RemoteDiskStorage" - ], - "http://schema.org/name": [ - { - "@value": "remoteName" - } - ], - "http://schema.org/description": [ - { - "@value": "remoteDescription" - } - ], - "https://bluebrain.github.io/nexus/vocabulary/default": [ - { - "@value": true - } - ], - "https://bluebrain.github.io/nexus/vocabulary/algorithm": [ - { - "@value": "SHA-256" - } - ], - "https://bluebrain.github.io/nexus/vocabulary/folder": [ - { - "@value": "myfolder" - } - ], - "https://bluebrain.github.io/nexus/vocabulary/maxFileSize": [ - { - "@value": 52 - } - ], - "https://bluebrain.github.io/nexus/vocabulary/readPermission": [ - { - "@value": "remote/read" - } - ], - "https://bluebrain.github.io/nexus/vocabulary/writePermission": [ - { - "@value": "remote/write" - } - ] - } -] \ No newline at end of file diff --git a/delta/plugins/storage/src/test/resources/storages/remote-storage-fetched.json b/delta/plugins/storage/src/test/resources/storages/remote-storage-fetched.json deleted file mode 100644 index 1e9c1d902f..0000000000 --- a/delta/plugins/storage/src/test/resources/storages/remote-storage-fetched.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "@context" : [ - "https://bluebrain.github.io/nexus/contexts/storages.json", - "https://bluebrain.github.io/nexus/contexts/metadata.json" - ], - "@id" : "https://bluebrain.github.io/nexus/vocabulary/remote-disk-storage", - "@type" : [ - "Storage", - "RemoteDiskStorage" - ], - "_constrainedBy" : "https://bluebrain.github.io/nexus/schemas/storages.json", - "_createdAt" : "1970-01-01T00:00:00Z", - "_createdBy" : "http://localhost/v1/realms/wonderland/users/writer", - "_deprecated" : false, - "_incoming" : "{{self}}/incoming", - "_outgoing" : "{{self}}/outgoing", - "_project" : "http://localhost/v1/projects/myorg/myproject", - "_rev" : 1, - "_self" : "{{self}}", - "_updatedAt" : "1970-01-01T00:00:00Z", - "_updatedBy" : "http://localhost/v1/realms/wonderland/users/writer", - 
"_algorithm" : "SHA-256", - "default" : true, - "name": "remoteName", - "description": "remoteDescription", - "folder" : "myfolder", - "maxFileSize" : 52, - "readPermission" : "remote/read", - "writePermission" : "remote/write" -} diff --git a/delta/plugins/storage/src/test/resources/storages/remote-storage.json b/delta/plugins/storage/src/test/resources/storages/remote-storage.json deleted file mode 100644 index adbeb65b13..0000000000 --- a/delta/plugins/storage/src/test/resources/storages/remote-storage.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "@context" : "https://bluebrain.github.io/nexus/contexts/storages.json", - "@id" : "https://bluebrain.github.io/nexus/vocabulary/remote", - "@type": "RemoteDiskStorage", - "default": true, - "_algorithm" : "SHA-256", - "name": "remoteName", - "description": "remoteDescription", - "folder": "myfolder", - "readPermission": "remote/read", - "writePermission": "remote/write", - "maxFileSize": 52 -} \ No newline at end of file diff --git a/delta/plugins/storage/src/test/resources/storages/s3-storage-fetched.json b/delta/plugins/storage/src/test/resources/storages/s3-storage-fetched.json index 4bddd08a14..1a2dfa5299 100644 --- a/delta/plugins/storage/src/test/resources/storages/s3-storage-fetched.json +++ b/delta/plugins/storage/src/test/resources/storages/s3-storage-fetched.json @@ -15,7 +15,7 @@ "_incoming" : "{{self}}/incoming", "_outgoing" : "{{self}}/outgoing", "_project" : "http://localhost/v1/projects/myorg/myproject", - "_rev" : 5, + "_rev" : 4, "_self" : "{{self}}", "_updatedAt" : "1970-01-01T00:00:00Z", "_updatedBy" : "http://localhost/v1/realms/wonderland/users/writer", diff --git a/delta/plugins/storage/src/test/resources/storages/sse/remote-storage-created.json b/delta/plugins/storage/src/test/resources/storages/sse/remote-storage-created.json deleted file mode 100644 index c3387bc6f2..0000000000 --- a/delta/plugins/storage/src/test/resources/storages/sse/remote-storage-created.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "@context": [ - "https://bluebrain.github.io/nexus/contexts/metadata.json", - "https://bluebrain.github.io/nexus/contexts/storages.json" - ], - "@type": "StorageCreated", - "_constrainedBy": "https://bluebrain.github.io/nexus/schemas/storages.json", - "_instant": "1970-01-01T00:00:00Z", - "_project": "http://localhost/v1/projects/myorg/myproj", - "_resourceId": "https://bluebrain.github.io/nexus/vocabulary/remote-disk-storage", - "_rev": 1, - "_source": { - "@type": "RemoteDiskStorage", - "name": "remoteName", - "description": "remoteDescription", - "default": true, - "folder": "myfolder", - "maxFileSize": 52, - "readPermission": "remote/read", - "writePermission": "remote/write" - }, - "_storageId": "https://bluebrain.github.io/nexus/vocabulary/remote-disk-storage", - "_subject": "http://localhost/v1/realms/myrealm/users/username", - "_types": [ - "https://bluebrain.github.io/nexus/vocabulary/Storage", - "https://bluebrain.github.io/nexus/vocabulary/RemoteDiskStorage" - ] -} \ No newline at end of file diff --git a/delta/plugins/storage/src/test/resources/storages/sse/remote-storage-updated.json b/delta/plugins/storage/src/test/resources/storages/sse/remote-storage-updated.json deleted file mode 100644 index 8b2092fc90..0000000000 --- a/delta/plugins/storage/src/test/resources/storages/sse/remote-storage-updated.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "@context": [ - "https://bluebrain.github.io/nexus/contexts/metadata.json", - "https://bluebrain.github.io/nexus/contexts/storages.json" - ], - "@type": "StorageUpdated", - 
"_constrainedBy": "https://bluebrain.github.io/nexus/schemas/storages.json", - "_instant": "1970-01-01T00:00:00Z", - "_project": "http://localhost/v1/projects/myorg/myproj", - "_resourceId": "https://bluebrain.github.io/nexus/vocabulary/remote-disk-storage", - "_rev": 2, - "_source": { - "@type": "RemoteDiskStorage", - "name": "remoteName", - "description": "remoteDescription", - "default": true, - "folder": "myfolder", - "maxFileSize": 52, - "readPermission": "remote/read", - "writePermission": "remote/write" - }, - "_storageId": "https://bluebrain.github.io/nexus/vocabulary/remote-disk-storage", - "_subject": "http://localhost/v1/realms/myrealm/users/username", - "_types": [ - "https://bluebrain.github.io/nexus/vocabulary/Storage", - "https://bluebrain.github.io/nexus/vocabulary/RemoteDiskStorage" - ] -} \ No newline at end of file diff --git a/delta/plugins/storage/src/test/resources/storages/storage-remote-state.json b/delta/plugins/storage/src/test/resources/storages/storage-remote-state.json deleted file mode 100644 index ea8be39913..0000000000 --- a/delta/plugins/storage/src/test/resources/storages/storage-remote-state.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "project": "myorg/myproj", - "id": "https://bluebrain.github.io/nexus/vocabulary/remote-disk-storage", - "types": [ - "https://bluebrain.github.io/nexus/vocabulary/Storage", - "https://bluebrain.github.io/nexus/vocabulary/RemoteDiskStorage" - ], - "rev": 1, - "deprecated": false, - "value": { - "default": true, - "name": "remoteName", - "description": "remoteDescription", - "algorithm": "SHA-256", - "folder": "myfolder", - "readPermission": "remote/read", - "writePermission": "remote/write", - "maxFileSize": 52, - "@type": "RemoteDiskStorageValue" - }, - "source": { - "@type": "RemoteDiskStorage", - "name": "remoteName", - "description": "remoteDescription", - "default": true, - "folder": "myfolder", - "readPermission": "remote/read", - "writePermission": "remote/write", - "maxFileSize": 52 - }, - "createdAt": "1970-01-01T00:00:00Z", - "createdBy": { - "subject": "username", - "realm": "myrealm", - "@type": "User" - }, - "updatedAt": "1970-01-01T00:00:00Z", - "updatedBy": { - "subject": "username", - "realm": "myrealm", - "@type": "User" - } -} \ No newline at end of file diff --git a/delta/plugins/storage/src/test/resources/storages/storages-list-not-deprecated.json b/delta/plugins/storage/src/test/resources/storages/storages-list-not-deprecated.json deleted file mode 100644 index 7c5a96c9fa..0000000000 --- a/delta/plugins/storage/src/test/resources/storages/storages-list-not-deprecated.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "@context": [ - "https://bluebrain.github.io/nexus/contexts/metadata.json", - "https://bluebrain.github.io/nexus/contexts/search.json", - "https://bluebrain.github.io/nexus/contexts/storages.json" - ], - "_total": 1, - "_results": [ - { - "@id": "https://bluebrain.github.io/nexus/vocabulary/remote-disk-storage", - "@type": [ - "Storage", - "RemoteDiskStorage" - ], - "_constrainedBy": "https://bluebrain.github.io/nexus/schemas/storages.json", - "_createdAt": "1970-01-01T00:00:00Z", - "_createdBy": "http://localhost/v1/realms/wonderland/users/alice", - "_deprecated": false, - "_incoming": "http://localhost/v1/storages/myorg/myproject/remote-disk-storage/incoming", - "_outgoing": "http://localhost/v1/storages/myorg/myproject/remote-disk-storage/outgoing", - "_project": "http://localhost/v1/projects/myorg/myproject", - "_rev": 3, - "_self": "http://localhost/v1/storages/myorg/myproject/remote-disk-storage", 
- "_updatedAt": "1970-01-01T00:00:00Z", - "_updatedBy": "http://localhost/v1/anonymous", - "default": false, - "name": "remoteName", - "description": "remoteDescription", - "_algorithm": "SHA-256", - "endpoint": "http://localhost", - "folder": "myfolder", - "maxFileSize": 52, - "readPermission": "remote/read", - "writePermission": "remote/write" - } - ] -} diff --git a/delta/plugins/storage/src/test/resources/storages/storages-list.json b/delta/plugins/storage/src/test/resources/storages/storages-list.json index 70bd8ea5e6..1c528c39c2 100644 --- a/delta/plugins/storage/src/test/resources/storages/storages-list.json +++ b/delta/plugins/storage/src/test/resources/storages/storages-list.json @@ -4,7 +4,7 @@ "https://bluebrain.github.io/nexus/contexts/search.json", "https://bluebrain.github.io/nexus/contexts/storages.json" ], - "_total": 2, + "_total": 1, "_results": [ { "@id": "https://bluebrain.github.io/nexus/vocabulary/s3-storage", @@ -33,33 +33,6 @@ "readPermission": "s3/read", "region" : "eu-west-1", "writePermission": "s3/write" - }, - { - "@id": "https://bluebrain.github.io/nexus/vocabulary/remote-disk-storage", - "@type": [ - "Storage", - "RemoteDiskStorage" - ], - "_constrainedBy": "https://bluebrain.github.io/nexus/schemas/storages.json", - "_createdAt": "1970-01-01T00:00:00Z", - "_createdBy": "http://localhost/v1/realms/wonderland/users/alice", - "_deprecated": false, - "_incoming": "http://localhost/v1/storages/myorg/myproject/remote-disk-storage/incoming", - "_outgoing": "http://localhost/v1/storages/myorg/myproject/remote-disk-storage/outgoing", - "_project": "http://localhost/v1/projects/myorg/myproject", - "_rev": 3, - "_self": "http://localhost/v1/storages/myorg/myproject/remote-disk-storage", - "_updatedAt": "1970-01-01T00:00:00Z", - "_updatedBy": "http://localhost/v1/anonymous", - "_algorithm": "SHA-256", - "default": false, - "name": "remoteName", - "description": "remoteDescription", - "endpoint": "http://localhost", - "folder": "myfolder", - "maxFileSize": 52, - "readPermission": "remote/read", - "writePermission": "remote/write" } ] } diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileAttributesUpdateStreamSuite.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileAttributesUpdateStreamSuite.scala deleted file mode 100644 index db783221c1..0000000000 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileAttributesUpdateStreamSuite.scala +++ /dev/null @@ -1,95 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.files - -import akka.http.scaladsl.model.{ContentTypes, Uri} -import cats.effect.IO -import cats.effect.kernel.Ref -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.generators.FileGen -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.{ComputedDigest, NotComputedDigest} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Client -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection.DigestNotComputed -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes, FileState} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StorageFixtures -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.RemoteDiskStorage -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{DigestAlgorithm, StorageType} -import 
ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv -import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef, ResourceRef} -import ch.epfl.bluebrain.nexus.testkit.mu.NexusSuite -import fs2.Stream - -import java.util.UUID -import scala.concurrent.duration._ - -class FileAttributesUpdateStreamSuite extends NexusSuite with StorageFixtures { - - private val project = ProjectRef.unsafe("org", "projg") - private val id = nxv + "file" - - private val storage = RemoteDiskStorage(nxv + "remote", project, remoteVal, json"""{"disk": "value"}""") - - private val storageRef = ResourceRef.Revision(storage.id, 1) - - private val mediaType = Some(ContentTypes.`text/plain(UTF-8)`) - private val attributes = FileAttributes( - UUID.randomUUID(), - location = "http://localhost/my/file.txt", - path = Uri.Path("my/file.txt"), - filename = "myfile.txt", - mediaType = mediaType, - keywords = Map(Label.unsafe("key") -> "value"), - None, - None, - bytes = 10, - NotComputedDigest, - Client - ) - private val validFile = - FileGen.state(id, project, storageRef, attributes, storageType = StorageType.RemoteDiskStorage) - - private def updateStream(updateAttributes: IO[Unit]) = new FileAttributesUpdateStream.Impl( - _ => Stream.empty, - (_, _) => IO.pure(storage), - (_, _) => updateAttributes, - 50.millis - ) - - private def assertProcess(file: FileState, expectedAttempts: Int) = - for { - ref <- Ref.of[IO, Int](0) - _ <- updateStream(ref.update(_ + 1)).processFile(file) - _ <- ref.get.assertEquals(expectedAttempts) - } yield () - - private def assertSuccess(file: FileState) = assertProcess(file, 1) - - private def assertSkipped(file: FileState) = assertProcess(file, 0) - - test("A valid file is successfully processed.") { - assertSuccess(validFile) - } - - test("A deprecated file is skipped.") { - val deprecated = validFile.copy(deprecated = true) - assertSkipped(deprecated) - } - - test("A local file is skipped.") { - val localFile = validFile.copy(storageType = StorageType.DiskStorage) - assertSkipped(localFile) - } - - test("A file with a computed digest is skipped.") { - val newAttributes = attributes.copy(digest = ComputedDigest(DigestAlgorithm.default, "something")) - val alreadyProcessed = validFile.copy(attributes = newAttributes) - assertSkipped(alreadyProcessed) - } - - test("A file is processed again when the digest is not yet computed.") { - for { - ref <- Ref.of[IO, Int](0) - updateAttributes = ref.updateAndGet(_ + 1).flatMap { attempt => IO.raiseWhen(attempt < 2)(DigestNotComputed(id)) } - _ <- updateStream(updateAttributes).processFile(validFile) - _ <- ref.get.assertEquals(2) - } yield () - } - -} diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileFixtures.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileFixtures.scala index cd5f98202b..bb00659a7b 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileFixtures.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FileFixtures.scala @@ -7,6 +7,7 @@ import cats.effect.{IO, Ref} import ch.epfl.bluebrain.nexus.delta.kernel.utils.{UUIDF, UrlUtils} import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.generators.FileGen import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes, FileCustomMetadata} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.AbsolutePath import 
ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv import ch.epfl.bluebrain.nexus.delta.sdk.generators.ProjectGen import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.ApiMappings @@ -47,6 +48,7 @@ trait FileFixtures extends Generators { } yield t).unsafeRunSync() def attributes( + path: AbsolutePath, filename: String = "file.txt", size: Long = 12, id: UUID = uuid, diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala index f7335a843b..829d07fcf5 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala @@ -1,22 +1,18 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.files import akka.actor.ActorSystem +import akka.http.scaladsl.model.ContentType import akka.http.scaladsl.model.ContentTypes.`text/plain(UTF-8)` -import akka.http.scaladsl.model.{ContentType, Uri} import akka.testkit.TestKit import cats.effect.IO import ch.epfl.bluebrain.nexus.delta.kernel.http.MediaTypeDetectorConfig import ch.epfl.bluebrain.nexus.delta.plugins.storage.RemoteContextResolutionFixture import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.generators.FileGen import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.mocks.FileOperationsMock -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.NotComputedDigest -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Storage import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model._ -import ch.epfl.bluebrain.nexus.delta.plugins.storage.remotestorage.RemoteStorageClientFixtures import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.StorageNotFound import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageType -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageType.{RemoteDiskStorage => RemoteStorageType} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.{AkkaSourceHelpers, FileOperations, LinkFileAction} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{FetchStorage, StorageFixtures, Storages, StoragesConfig} import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri @@ -28,6 +24,7 @@ import ch.epfl.bluebrain.nexus.delta.sdk.directives.FileResponse import ch.epfl.bluebrain.nexus.delta.sdk.error.ServiceError.AuthorizationFailed import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.{Caller, ServiceAccount} import ch.epfl.bluebrain.nexus.delta.sdk.implicits._ +import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegment.IriSegment import ch.epfl.bluebrain.nexus.delta.sdk.model._ import ch.epfl.bluebrain.nexus.delta.sdk.permissions.Permissions import ch.epfl.bluebrain.nexus.delta.sdk.permissions.model.Permission @@ -35,19 +32,17 @@ import ch.epfl.bluebrain.nexus.delta.sdk.projects.FetchContextDummy import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.ProjectRejection.{ProjectIsDeprecated, ProjectNotFound} import ch.epfl.bluebrain.nexus.delta.sdk.resolvers.ResolverContextResolution import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.{Anonymous, Authenticated, Group, User} -import ch.epfl.bluebrain.nexus.delta.sourcing.model.ResourceRef.Latest import 
ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef, ResourceRef} import ch.epfl.bluebrain.nexus.delta.sourcing.postgres.DoobieScalaTestFixture import ch.epfl.bluebrain.nexus.testkit.scalatest.ce.CatsEffectSpec +import org.scalatest.Assertion import org.scalatest.concurrent.Eventually -import org.scalatest.{Assertion, DoNotDiscover} import java.net.URLDecoder import java.util.UUID -@DoNotDiscover -class FilesSpec(fixture: RemoteStorageClientFixtures) +class FilesSpec extends TestKit(ActorSystem("FilesSpec")) with CatsEffectSpec with DoobieScalaTestFixture @@ -82,8 +77,7 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) FileDescription(filename, None, Some(FileCustomMetadata(Some(name), Some(description), Some(keywords)))) "The Files operations bundle" when { - implicit val caller: Caller = Caller(bob, Set(bob, Group("mygroup", realm), Authenticated(realm))) - lazy val remoteDiskStorageClient = fixture.init + implicit val caller: Caller = Caller(bob, Set(bob, Group("mygroup", realm), Authenticated(realm))) val tag = UserTag.unsafe("tag") val otherRead = Permission.unsafe("other/read") @@ -96,13 +90,14 @@ otherWrite ) - val remoteIdIri = nxv + "remote" - val remoteId: IdSegment = remoteIdIri - val remoteRev = ResourceRef.Revision(remoteIdIri, 1) + val defaultStorageIri = nxv + "default" + val defaultStorageId = IriSegment(defaultStorageIri) + val defaultStorageRef = ResourceRef.Revision(defaultStorageIri, 1) + val defaultStorageFolder = FileGen.mkTempDir("default") - val diskIdIri = nxv + "disk" - val diskId: IdSegment = nxv + "disk" - val diskRev = ResourceRef.Revision(diskIdIri, 1) + val customStorageIri = nxv + "custom" + val customStorageId = IriSegment(customStorageIri) + val customStorageFolder = FileGen.mkTempDir("custom") val storageIri = nxv + "other-storage" val storage: IdSegment = nxv + "other-storage" @@ -121,8 +116,10 @@ val maxFileSize = 300L val cfg = config.copy( - disk = config.disk.copy(defaultMaxFileSize = maxFileSize, allowedVolumes = config.disk.allowedVolumes + path), - remoteDisk = Some(config.remoteDisk.value.copy(defaultMaxFileSize = maxFileSize)) + disk = config.disk.copy( + defaultMaxFileSize = maxFileSize, + allowedVolumes = config.disk.allowedVolumes ++ Set(defaultStorageFolder, customStorageFolder) + ) ) lazy val storages: Storages = Storages( @@ -137,7 +134,7 @@ ).accepted lazy val fetchStorage = FetchStorage(storages, aclCheck) - lazy val fileOps: FileOperations = FileOperationsMock.forDiskAndRemoteDisk(remoteDiskStorageClient) + lazy val fileOps: FileOperations = FileOperationsMock.forDisk val mediaTypeDetector = new MediaTypeDetector(MediaTypeDetectorConfig.Empty) val dataExtractor = FormDataExtractor(mediaTypeDetector)(system) @@ -160,30 +157,22 @@ ): FileResource = FileGen.resourceFor(id, project, storage, attributes, storageType, rev, deprecated, tags, bob, bob) - def updateAttributes(file: Iri) = { - val aliceCaller = Caller(alice, Set(alice, Group("mygroup", realm), Authenticated(realm))) - for { - file <- files.fetchState(Latest(file), projectRef) - storage <- fetchStorage.onRead(file.storage, projectRef)(aliceCaller) - _ <- files.updateAttributes(file, storage) - } yield () - } - "creating a file" should { "create storages for files" in { - val payload = 
diskFieldsJson deepMerge json"""{"maxFileSize": 300, "volume": "$path"}""" - storages.create(diskId, projectRef, payload).accepted + val defaultStoragePayload = + diskFieldsJson deepMerge json"""{"maxFileSize": 300, "volume": "$defaultStorageFolder"}""" + storages.create(defaultStorageId, projectRef, defaultStoragePayload).accepted - val payload2 = - json"""{"@type": "RemoteDiskStorage", "endpoint": "${fixture.hostConfig.endpoint}", "folder": "${RemoteStorageClientFixtures.BucketName}", "readPermission": "$otherRead", "writePermission": "$otherWrite", "maxFileSize": 300, "default": false}""" - storages.create(remoteId, projectRef, payload2).accepted + val customStoragePayload = diskFieldsJson deepMerge + json"""{"maxFileSize": 300, "volume": "$customStorageFolder", "readPermission": "$otherRead", "writePermission": "$otherWrite", "default": false}""" + storages.create(customStorageId, projectRef, customStoragePayload).accepted } "succeed with the id passed" in { val request = FileUploadRequest.from(entity("myfile.txt")) - val expected = mkResource(file1, projectRef, diskRev, attributes("myfile.txt")) - val actual = files.create(fileId("file1"), Some(diskId), request, None).accepted + val expected = mkResource(file1, projectRef, defaultStorageRef, attributes(defaultStorageFolder, "myfile.txt")) + val actual = files.create(fileId("file1"), Some(defaultStorageId), request, None).accepted actual shouldEqual expected } @@ -192,7 +181,7 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) val id = fileId(genString()) val request = FileUploadRequest(entity(genString()), Some(metadata), None) - files.create(id, Some(diskId), request, None).accepted + files.create(id, Some(defaultStorageId), request, None).accepted assertCorrectCustomMetadata(id, metadata) } @@ -200,7 +189,7 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) val specialFileName = "-._~:?#[ ]@!$&'()*,;=" val request = FileUploadRequest.from(randomEntity(specialFileName, 1)) - files.create(fileId("specialFile"), Some(diskId), request, None).accepted + files.create(fileId("specialFile"), Some(defaultStorageId), request, None).accepted val fetched = files.fetch(fileId("specialFile")).accepted val decodedFilenameFromLocation = @@ -212,9 +201,9 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) "succeed and tag with the id passed" in { withUUIDF(uuid2) { val request = FileUploadRequest.from(entity("fileTagged.txt")) - val file = files.create(fileId("fileTagged"), Some(diskId), request, Some(tag)).accepted - val attr = attributes("fileTagged.txt", id = uuid2) - val expectedData = mkResource(fileTagged, projectRef, diskRev, attr, tags = Tags(tag -> 1)) + val file = files.create(fileId("fileTagged"), Some(defaultStorageId), request, Some(tag)).accepted + val attr = attributes(defaultStorageFolder, "fileTagged.txt", id = uuid2) + val expectedData = mkResource(fileTagged, projectRef, defaultStorageRef, attr, tags = Tags(tag -> 1)) val fileByTag = files.fetch(FileId("fileTagged", tag, projectRef)).accepted file shouldEqual expectedData @@ -223,7 +212,8 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) } "succeed with randomly generated id" in { - val expected = mkResource(generatedId, projectRef, diskRev, attributes("myfile2.txt")) + val expected = + mkResource(generatedId, projectRef, defaultStorageRef, attributes(defaultStorageFolder, "myfile2.txt")) val request = FileUploadRequest.from(entity("myfile2.txt")) val actual = files.create(None, projectRef, request, None).accepted val fetched = files.fetch(FileId(actual.id, 
projectRef)).accepted @@ -243,8 +233,8 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) "succeed and tag with randomly generated id" in { withUUIDF(uuid2) { - val attr = attributes("fileTagged2.txt", id = uuid2) - val expected = mkResource(generatedId2, projectRef, diskRev, attr, tags = Tags(tag -> 1)) + val attr = attributes(defaultStorageFolder, "fileTagged2.txt", id = uuid2) + val expected = mkResource(generatedId2, projectRef, defaultStorageRef, attr, tags = Tags(tag -> 1)) val request = FileUploadRequest.from(entity("fileTagged2.txt")) val file = files.create(None, projectRef, request, Some(tag)).accepted val fileByTag = files.fetch(FileId(generatedId2, tag, projectRef)).accepted @@ -256,7 +246,7 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) "reject if no write permissions" in { val request = FileUploadRequest.from(entity()) - files.create(fileId("file2"), Some(remoteId), request, None).rejectedWith[AuthorizationFailed] + files.create(fileId("file2"), Some(customStorageId), request, None).rejectedWith[AuthorizationFailed] } "reject if file id already exists" in { @@ -269,7 +259,9 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) "reject if the file exceeds max file size for the storage" in { val id = fileId("file-too-large") val request = FileUploadRequest.from(randomEntity("large_file", (maxFileSize + 1).toInt)) - files.create(id, Some(remoteId), request, None)(aliceCaller).rejected shouldEqual FileTooLarge(maxFileSize) + files.create(id, Some(customStorageId), request, None)(aliceCaller).rejected shouldEqual FileTooLarge( + maxFileSize + ) } "reject if storage does not exist" in { @@ -286,79 +278,7 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) "reject if project is deprecated" in { val request = FileUploadRequest.from(entity()) - files.create(Some(diskId), deprecatedProject.ref, request, None).rejectedWith[ProjectIsDeprecated] - } - } - - "linking a file" should { - - "reject if no write permissions" in { - files - .createLegacyLink(fileId("file2"), Some(remoteId), description("myfile.txt"), Uri.Path.Empty, None) - .rejectedWith[AuthorizationFailed] - } - - "succeed and tag with the id passed" in { - aclCheck.append(AclAddress.Root, bob -> Set(otherWrite)).accepted - val path = Uri.Path("my/file-3.txt") - val tempAttr = attributes("myfile.txt").copy(digest = NotComputedDigest) - val attr = - tempAttr.copy( - location = Uri(s"file:///app/nexustest/nexus/${tempAttr.path}"), - origin = Storage, - mediaType = None - ) - val expected = - mkResource(file2, projectRef, remoteRev, attr, storageType = RemoteStorageType, tags = Tags(tag -> 1)) - - val result = files - .createLegacyLink(fileId("file2"), Some(remoteId), description("myfile.txt"), path, Some(tag)) - .accepted - val fileByTag = files.fetch(FileId("file2", tag, projectRef)).accepted - - result shouldEqual expected - fileByTag.value.tags.tags should contain(tag) - } - - "succeed with custom user provided metadata" in { - val (name, description, keywords) = (genString(), genString(), genKeywords()) - val fileDescription = descriptionWithMetadata("file-5.txt", name, description, keywords) - - val id = fileId(genString()) - val path = Uri.Path(s"my/file-5.txt") - - files.createLegacyLink(id, Some(remoteId), fileDescription, path, None).accepted - val fetchedFile = files.fetch(id).accepted - - fetchedFile.value.attributes.name should contain(name) - fetchedFile.value.attributes.description should contain(description) - fetchedFile.value.attributes.keywords shouldEqual keywords - } - - "reject if 
file id already exists" in { - files - .createLegacyLink(fileId("file2"), Some(remoteId), description("myfile.txt"), Uri.Path.Empty, None) - .rejected shouldEqual - ResourceAlreadyExists(file2, projectRef) - } - - "reject if storage does not exist" in { - files - .createLegacyLink(fileId("file3"), Some(storage), description("myfile.txt"), Uri.Path.Empty, None) - .rejected shouldEqual StorageNotFound(storageIri, projectRef) - } - - "reject if project does not exist" in { - val projectRef = ProjectRef(org, Label.unsafe("other")) - files - .createLegacyLink(None, projectRef, description("myfile.txt"), Uri.Path.Empty, None) - .rejectedWith[ProjectNotFound] - } - - "reject if project is deprecated" in { - files - .createLegacyLink(Some(remoteId), deprecatedProject.ref, description("myfile.txt"), Uri.Path.Empty, None) - .rejectedWith[ProjectIsDeprecated] + files.create(Some(defaultStorageId), deprecatedProject.ref, request, None).rejectedWith[ProjectIsDeprecated] } } @@ -367,7 +287,15 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) "succeed" in { val request = FileUploadRequest.from(entity()) files.update(fileId("file1"), None, 1, request, None).accepted shouldEqual - FileGen.resourceFor(file1, projectRef, diskRev, attributes(), rev = 2, createdBy = bob, updatedBy = bob) + FileGen.resourceFor( + file1, + projectRef, + defaultStorageRef, + attributes(defaultStorageFolder), + rev = 2, + createdBy = bob, + updatedBy = bob + ) } "succeed with custom metadata" in { @@ -376,7 +304,7 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) val createRequest = FileUploadRequest.from(entity(genString())) val updateRequest = FileUploadRequest(randomEntity(genString(), 10), Some(metadata), None) - files.create(id, Some(diskId), createRequest, None).accepted + files.create(id, Some(defaultStorageId), createRequest, None).accepted files.update(id, None, 1, updateRequest, None).accepted assertCorrectCustomMetadata(id, metadata) @@ -416,7 +344,7 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) val metadata = genCustomMetadata() val request = FileUploadRequest.from(entity(genString())) - files.create(id, Some(diskId), request, None).accepted + files.create(id, Some(defaultStorageId), request, None).accepted files.updateMetadata(id, 1, metadata, None).accepted files.fetch(id).accepted.rev shouldEqual 2 @@ -428,7 +356,7 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) val metadata = genCustomMetadata() val request = FileUploadRequest.from(entity(genString())) - files.create(id, Some(diskId), request, None).accepted + files.create(id, Some(defaultStorageId), request, None).accepted files.updateMetadata(id, 1, metadata, Some(tag)).accepted val updatedFile = files.fetch(id).accepted @@ -442,7 +370,7 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) val metadata = genCustomMetadata() val request = FileUploadRequest.from(entity(genString())) - files.create(id, Some(diskId), request, None).accepted + files.create(id, Some(defaultStorageId), request, None).accepted val expectedError = IncorrectRev(expected = 1, provided = 2) files.updateMetadata(id, 2, metadata, None).rejected shouldEqual expectedError } @@ -474,126 +402,17 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) } - "updating remote disk file attributes" should { + "tagging a file" should { "succeed" in { - val tempAttr = attributes("myfile.txt") - val attr = tempAttr.copy(location = Uri(s"file:///app/nexustest/nexus/${tempAttr.path}"), origin = Storage) val expected = mkResource( - file2, + file1, projectRef, - 
remoteRev, - attr, - storageType = RemoteStorageType, - rev = 2, + defaultStorageRef, + attributes(defaultStorageFolder), + rev = 3, tags = Tags(tag -> 1) ) - - (updateAttributes(file2) >> files.fetch(fileIdIri(file2))).accepted shouldEqual expected - } - } - - "reject if digest is already computed" in { - updateAttributes(file2).rejectedWith[DigestAlreadyComputed] - } - - "updating a file linking" should { - - "succeed and tag" in { - val path = Uri.Path("my/file-4.txt") - val tempAttr = attributes("file-4.txt").copy(digest = NotComputedDigest) - val attr = tempAttr.copy(location = Uri(s"file:///app/nexustest/nexus/${tempAttr.path}"), origin = Storage) - val newTag = UserTag.unsafe(genString()) - val expected = - mkResource( - file2, - projectRef, - remoteRev, - attr, - storageType = RemoteStorageType, - rev = 3, - tags = Tags(tag -> 1, newTag -> 3) - ) - val actual = files - .updateLegacyLink( - fileId("file2"), - Some(remoteId), - description("file-4.txt", `text/plain(UTF-8)`), - path, - 2, - Some(newTag) - ) - .accepted - val byTag = files.fetch(FileId("file2", newTag, projectRef)).accepted - - actual shouldEqual expected - byTag shouldEqual expected - } - - "succeed if also updating custom metadata" in { - val id = fileId(genString()) - val path = Uri.Path("my/file-6.txt") - - val (name, desc, keywords) = (genString(), genString(), genKeywords()) - - val originalFileDescription = description("file-6.txt") - - files.createLegacyLink(id, Some(remoteId), originalFileDescription, path, None).accepted - eventually { - files.fetch(id).map { fetched => - fetched.value.attributes.name should contain(name) - fetched.value.attributes.description should contain(desc) - fetched.value.attributes.keywords shouldEqual keywords - } - } - - } - - "reject if file doesn't exists" in { - files - .updateLegacyLink(fileIdIri(nxv + "other"), None, description("myfile.txt"), Uri.Path.Empty, 1, None) - .rejectedWith[FileNotFound] - } - - "reject if digest is not computed" in { - files - .updateLegacyLink(fileId("file2"), None, description("myfile.txt"), Uri.Path.Empty, 3, None) - .rejectedWith[DigestNotComputed] - } - - "reject if storage does not exist" in { - val storage = nxv + "other-storage" - files - .updateLegacyLink(fileId("file1"), Some(storage), description("myfile.txt"), Uri.Path.Empty, 2, None) - .rejected shouldEqual StorageNotFound(storage, projectRef) - } - - "reject if project does not exist" in { - val projectRef = ProjectRef(org, Label.unsafe("other")) - - files - .updateLegacyLink(FileId(file1, projectRef), None, description("myfile.txt"), Uri.Path.Empty, 2, None) - .rejectedWith[ProjectNotFound] - } - - "reject if project is deprecated" in { - files - .updateLegacyLink( - FileId(file1, deprecatedProject.ref), - None, - description("myfile.txt"), - Uri.Path.Empty, - 2, - None - ) - .rejectedWith[ProjectIsDeprecated] - } - } - - "tagging a file" should { - - "succeed" in { - val expected = mkResource(file1, projectRef, diskRev, attributes(), rev = 3, tags = Tags(tag -> 1)) val actual = files.tag(fileIdIri(file1), tag, tagRev = 1, 2).accepted actual shouldEqual expected } @@ -605,17 +424,17 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) "reject if project does not exist" in { val projectRef = ProjectRef(org, Label.unsafe("other")) - files.tag(FileId(rdId, projectRef), tag, tagRev = 2, 4).rejectedWith[ProjectNotFound] + files.tag(FileId(nxv + "file", projectRef), tag, tagRev = 2, 4).rejectedWith[ProjectNotFound] } "reject if project is deprecated" in { - files.tag(FileId(rdId, 
deprecatedProject.ref), tag, tagRev = 2, 4).rejectedWith[ProjectIsDeprecated] + files.tag(FileId(nxv + "file", deprecatedProject.ref), tag, tagRev = 2, 4).rejectedWith[ProjectIsDeprecated] } } "deleting a tag" should { "succeed" in { - val expected = mkResource(file1, projectRef, diskRev, attributes(), rev = 4) + val expected = mkResource(file1, projectRef, defaultStorageRef, attributes(defaultStorageFolder), rev = 4) val actual = files.deleteTag(fileIdIri(file1), tag, 3).accepted actual shouldEqual expected } @@ -633,7 +452,8 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) "deprecating a file" should { "succeed" in { - val expected = mkResource(file1, projectRef, diskRev, attributes(), rev = 5, deprecated = true) + val expected = + mkResource(file1, projectRef, defaultStorageRef, attributes(defaultStorageFolder), rev = 5, deprecated = true) val actual = files.deprecate(fileIdIri(file1), 4).accepted actual shouldEqual expected } @@ -659,7 +479,15 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) "allow tagging after deprecation" in { val expected = - mkResource(file1, projectRef, diskRev, attributes(), rev = 6, tags = Tags(tag -> 4), deprecated = true) + mkResource( + file1, + projectRef, + defaultStorageRef, + attributes(defaultStorageFolder), + rev = 6, + tags = Tags(tag -> 4), + deprecated = true + ) val actual = files.tag(fileIdIri(file1), tag, tagRev = 4, 5).accepted actual shouldEqual expected } @@ -705,10 +533,19 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) } "fetching a file" should { - val resourceRev1 = mkResource(file1, projectRef, diskRev, attributes("myfile.txt")) - val resourceRev4 = mkResource(file1, projectRef, diskRev, attributes(), rev = 4) + val resourceRev1 = + mkResource(file1, projectRef, defaultStorageRef, attributes(defaultStorageFolder, "myfile.txt")) + val resourceRev4 = mkResource(file1, projectRef, defaultStorageRef, attributes(defaultStorageFolder), rev = 4) val resourceRev6 = - mkResource(file1, projectRef, diskRev, attributes(), rev = 6, tags = Tags(tag -> 4), deprecated = true) + mkResource( + file1, + projectRef, + defaultStorageRef, + attributes(defaultStorageFolder), + rev = 6, + tags = Tags(tag -> 4), + deprecated = true + ) "succeed" in { files.fetch(fileIdIri(file1)).accepted shouldEqual resourceRev6 @@ -742,7 +579,7 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) "reject if project does not exist" in { val projectRef = ProjectRef(org, Label.unsafe("other")) - files.fetch(FileId(rdId, projectRef)).rejectedWith[ProjectNotFound] + files.fetch(FileId(nxv + "file", projectRef)).rejectedWith[ProjectNotFound] } } @@ -793,7 +630,7 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) "reject if project does not exist" in { val projectRef = ProjectRef(org, Label.unsafe("other")) - files.fetchContent(FileId(rdId, projectRef)).rejectedWith[ProjectNotFound] + files.fetchContent(FileId(nxv + "file", projectRef)).rejectedWith[ProjectNotFound] } } @@ -802,7 +639,7 @@ class FilesSpec(fixture: RemoteStorageClientFixtures) val filename = genString() val id = fileId(filename) val request = FileUploadRequest.from(randomEntity(filename, 1)) - files.create(id, Some(diskId), request, None).accepted + files.create(id, Some(defaultStorageId), request, None).accepted files.fetch(id).accepted assertion(id) } diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesStmSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesStmSpec.scala 
index 1081e3b2f7..bcecbfef16 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesStmSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesStmSpec.scala @@ -4,11 +4,11 @@ import akka.http.scaladsl.model.{ContentTypes, Uri} import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.Files.{evaluate, next} import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.generators.FileGen import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.{ComputedDigest, NotComputedDigest} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Client import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileCommand._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileEvent._ -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection.{DigestAlreadyComputed, DigestNotComputed, FileIsDeprecated, FileIsNotDeprecated, FileNotFound, IncorrectRev, ResourceAlreadyExists, RevisionNotFound} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{Digest, FileAttributes} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileRejection.{DigestNotComputed, FileIsDeprecated, FileIsNotDeprecated, FileNotFound, IncorrectRev, ResourceAlreadyExists, RevisionNotFound} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StorageFixtures import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.DigestAlgorithm import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageType.{DiskStorage => DiskStorageType, RemoteDiskStorage => RemoteStorageType} @@ -29,13 +29,13 @@ class FilesStmSpec extends CatsEffectSpec with FileFixtures with StorageFixtures private val bob = User("Bob", realm) private val alice = User("Alice", realm) - private val id = nxv + "files" - private val myTag = UserTag.unsafe("myTag") - private val storageRef = ResourceRef.Revision(nxv + "disk?rev=1", nxv + "disk", 1) - private val remoteStorageRef = ResourceRef.Revision(nxv + "remote?rev=1", nxv + "remote", 1) - private val mediaType = Some(ContentTypes.`text/plain(UTF-8)`) - private val dig = ComputedDigest(DigestAlgorithm.default, "something") - private val attributes = FileAttributes( + private val id = nxv + "files" + private val myTag = UserTag.unsafe("myTag") + private val storageRef = ResourceRef.Revision(nxv + "disk", 1) + private val s3StorageRef = ResourceRef.Revision(nxv + "s3", 1) + private val mediaType = Some(ContentTypes.`text/plain(UTF-8)`) + private val dig = ComputedDigest(DigestAlgorithm.default, "something") + private val attributes = FileAttributes( uuid, location = "http://localhost/my/file.txt", path = Uri.Path("my/file.txt"), @@ -62,21 +62,12 @@ class FilesStmSpec extends CatsEffectSpec with FileFixtures with StorageFixtures "create a new event from a UpdateFile command" in { val updateCmd = UpdateFile(id, projectRef, storageRef, DiskStorageType, attributes, 1, alice, None) val current = - FileGen.state(id, projectRef, remoteStorageRef, attributes.copy(bytes = 1), RemoteStorageType) + FileGen.state(id, projectRef, s3StorageRef, attributes.copy(bytes = 1), RemoteStorageType) evaluate(clock)(Some(current), updateCmd).accepted shouldEqual FileUpdated(id, projectRef, storageRef, DiskStorageType, attributes, 2, epoch, alice, None) } - "create a new event from a UpdateFileAttributes command" 
in { - val updateAttrCmd = UpdateFileAttributes(id, projectRef, mediaType, 10, dig, 1, alice) - val current = - FileGen.state(id, projectRef, remoteStorageRef, attributes.copy(bytes = 1, digest = Digest.NotComputedDigest)) - - evaluate(clock)(Some(current), updateAttrCmd).accepted shouldEqual - FileAttributesUpdated(id, projectRef, remoteStorageRef, DiskStorageType, mediaType, 10, dig, 2, epoch, alice) - } - "create a new event from a TagFile command" in { val current = FileGen.state(id, projectRef, storageRef, attributes, rev = 2) evaluate(clock)(Some(current), TagFile(id, projectRef, targetRev = 2, myTag, 2, alice)).accepted shouldEqual @@ -120,7 +111,6 @@ class FilesStmSpec extends CatsEffectSpec with FileFixtures with StorageFixtures val current = FileGen.state(id, projectRef, storageRef, attributes) val commands = List( UpdateFile(id, projectRef, storageRef, DiskStorageType, attributes, 2, alice, None), - UpdateFileAttributes(id, projectRef, mediaType, 10, dig, 2, alice), TagFile(id, projectRef, targetRev = 1, myTag, 2, alice), DeleteFileTag(id, projectRef, myTag, 2, alice), DeprecateFile(id, projectRef, 2, alice), @@ -143,7 +133,6 @@ class FilesStmSpec extends CatsEffectSpec with FileFixtures with StorageFixtures "reject with FileNotFound" in { val commands = List( UpdateFile(id, projectRef, storageRef, DiskStorageType, attributes, 2, alice, None), - UpdateFileAttributes(id, projectRef, mediaType, 10, dig, 2, alice), TagFile(id, projectRef, targetRev = 1, myTag, 2, alice), DeleteFileTag(id, projectRef, myTag, 2, alice), DeprecateFile(id, projectRef, 2, alice), @@ -158,7 +147,6 @@ class FilesStmSpec extends CatsEffectSpec with FileFixtures with StorageFixtures val current = FileGen.state(id, projectRef, storageRef, attributes, rev = 2, deprecated = true) val commands = List( UpdateFile(id, projectRef, storageRef, DiskStorageType, attributes, 2, alice, None), - UpdateFileAttributes(id, projectRef, mediaType, 10, dig, 2, alice), DeprecateFile(id, projectRef, 2, alice) ) forAll(commands) { cmd => @@ -183,20 +171,6 @@ class FilesStmSpec extends CatsEffectSpec with FileFixtures with StorageFixtures evaluate(clock)(Some(current), cmd).rejected shouldEqual DigestNotComputed(id) } - "reject with DigestNotComputed with an update attributes command" in { - val updateAttrCmd = UpdateFileAttributes(id, projectRef, mediaType, 10, NotComputedDigest, 1, alice) - val current = FileGen.state(id, projectRef, remoteStorageRef, attributes.copy(bytes = 1)) - - evaluate(clock)(Some(current), updateAttrCmd).rejected shouldEqual DigestAlreadyComputed(id) - } - - "reject with DigestAlreadyComputed with an update attributes command" in { - val updateAttrCmd = UpdateFileAttributes(id, projectRef, mediaType, 10, dig, 1, alice) - val current = FileGen.state(id, projectRef, remoteStorageRef, attributes.copy(bytes = 1)) - - evaluate(clock)(Some(current), updateAttrCmd).rejected shouldEqual DigestAlreadyComputed(id) - } - } "producing next state" should { @@ -214,7 +188,7 @@ class FilesStmSpec extends CatsEffectSpec with FileFixtures with StorageFixtures next(None, event) shouldEqual None val att = attributes.copy(bytes = 1) - val current = FileGen.state(id, projectRef, remoteStorageRef, att, createdBy = bob, updatedBy = bob) + val current = FileGen.state(id, projectRef, s3StorageRef, att, createdBy = bob, updatedBy = bob) next(Some(current), event).value shouldEqual current.copy(rev = 2, storage = storageRef, attributes = attributes, updatedAt = time2, updatedBy = alice) diff --git 
a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/RemoteStorageFilesSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/RemoteStorageFilesSpec.scala deleted file mode 100644 index 06bf44fab3..0000000000 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/RemoteStorageFilesSpec.scala +++ /dev/null @@ -1,3 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.files - -class RemoteStorageFilesSpec {} diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/FileOperationsMock.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/FileOperationsMock.scala index db5683bf72..daaac66dfd 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/FileOperationsMock.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/mocks/FileOperationsMock.scala @@ -4,31 +4,24 @@ import akka.actor.ActorSystem import akka.http.scaladsl.model.Uri import akka.http.scaladsl.model.Uri.Path import cats.effect.IO +import ch.epfl.bluebrain.nexus.delta.kernel.AkkaSource import ch.epfl.bluebrain.nexus.delta.kernel.utils.UUIDF import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileStorageMetadata import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.FileOperations import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.UploadingFile.{DiskUploadingFile, S3UploadingFile} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.disk.DiskFileOperations -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.RemoteDiskFileOperations -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient.RemoteDiskStorageClientDisabled import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3.client.S3StorageClient import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3.{S3FileOperations, S3LocationGenerator} -import ch.epfl.bluebrain.nexus.delta.kernel.AkkaSource import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef object FileOperationsMock { - def forRemoteDisk(client: RemoteDiskStorageClient)(implicit uuidf: UUIDF): FileOperations = - FileOperations.apply(diskUnimplemented, RemoteDiskFileOperations.mk(client), s3Unimplemented) - - def forDiskAndRemoteDisk(client: RemoteDiskStorageClient)(implicit as: ActorSystem, uuidf: UUIDF): FileOperations = - FileOperations.apply(DiskFileOperations.mk, RemoteDiskFileOperations.mk(client), s3Unimplemented) + def forDisk(implicit as: ActorSystem, uuidf: UUIDF): FileOperations = + FileOperations.apply(DiskFileOperations.mk, s3Unimplemented) def disabled(implicit as: ActorSystem, uuidf: UUIDF): FileOperations = FileOperations.apply( DiskFileOperations.mk, - RemoteDiskFileOperations.mk(RemoteDiskStorageClientDisabled), S3FileOperations.mk(S3StorageClient.disabled, new S3LocationGenerator(Path.Empty)) ) diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala index 8e483df7c0..0695d39e57 
100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala @@ -192,7 +192,7 @@ class FilesRoutesSpec "create a file" in { postFile("/v1/files/org/proj", entity()) ~> asWriter ~> routes ~> check { status shouldEqual StatusCodes.Created - val attr = attributes() + val attr = attributes(path) response.asJson shouldEqual fileMetadata(projectRef, generatedId, attr, diskIdRev) } } @@ -239,7 +239,7 @@ class FilesRoutesSpec withUUIDF(uuid2) { postFile("/v1/files/org/proj?tag=mytag", entity()) ~> asWriter ~> routes ~> check { status shouldEqual StatusCodes.Created - val attr = attributes(id = uuid2) + val attr = attributes(path, id = uuid2) val expected = fileMetadata(projectRef, generatedId2, attr, diskIdRev) val userTag = UserTag.unsafe(tag) val fileByTag = files.fetch(FileId(generatedId2, userTag, projectRef)).accepted @@ -249,30 +249,6 @@ class FilesRoutesSpec } } - "fail to create a file link using a storage that does not allow it" in { - val payload = json"""{"filename": "my.txt", "path": "my/file.txt", "mediaType": "text/plain"}""" - putJson("/v1/files/org/proj/file1", payload) ~> asWriter ~> routes ~> check { - status shouldEqual StatusCodes.BadRequest - response.asJson shouldEqual - jsonContentOf("files/errors/unsupported-operation.json", "id" -> file1, "storageId" -> dId) - } - } - - "fail to create a file link if no filename is specified either explicitly or in the path" in { - val payload = json"""{"path": "my/", "mediaType": "text/plain"}""" - postJson("/v1/files/org/proj", payload) ~> asWriter ~> routes ~> check { - status shouldEqual StatusCodes.BadRequest - response.asJson shouldEqual - json""" - { - "@context" : "https://bluebrain.github.io/nexus/contexts/error.json", - "@type" : "InvalidFilePath", - "reason" : "Linking a file cannot be performed without a 'filename' or a 'path' that does not end with a filename." 
- } - """ - } - } - "fail to create a file without s3/write permission" in { putFile("/v1/files/org/proj/file1?storage=s3-storage", entity()) ~> asWriter ~> routes ~> check { response.shouldBeForbidden @@ -283,7 +259,7 @@ class FilesRoutesSpec val id = genString() putFile(s"/v1/files/org/proj/$id?storage=s3-storage", entity(id)) ~> asS3Writer ~> routes ~> check { status shouldEqual StatusCodes.Created - val attr = attributes(id) + val attr = attributes(path, id) response.asJson shouldEqual fileMetadata(projectRef, nxv + id, attr, s3IdRev, createdBy = s3writer, updatedBy = s3writer) } @@ -296,7 +272,7 @@ class FilesRoutesSpec entity("fileTagged.txt") ) ~> asS3Writer ~> routes ~> check { status shouldEqual StatusCodes.Created - val attr = attributes("fileTagged.txt", id = uuid2) + val attr = attributes(path, "fileTagged.txt", id = uuid2) val expected = fileMetadata(projectRef, fileTagged, attr, s3IdRev, createdBy = s3writer, updatedBy = s3writer) val userTag = UserTag.unsafe(tag) val fileByTag = files.fetch(FileId(generatedId2, userTag, projectRef)).accepted @@ -351,7 +327,7 @@ class FilesRoutesSpec val filename = s"file-idx-$idx.txt" putFile(s"$endpoint?rev=${idx + 1}", entity(filename)) ~> asWriter ~> routes ~> check { status shouldEqual StatusCodes.OK - val attr = attributes(filename) + val attr = attributes(path, filename) response.asJson shouldEqual fileMetadata(projectRef, nxv + id, attr, diskIdRev, rev = idx + 2) } @@ -490,17 +466,6 @@ class FilesRoutesSpec } } - "fail to update a file link using a storage that does not allow it" in { - givenAFile { id => - val payload = json"""{"filename": "my.txt", "path": "my/file.txt", "mediaType": "text/plain"}""" - putJson(s"/v1/files/org/proj/$id?rev=1", payload) ~> asWriter ~> routes ~> check { - status shouldEqual StatusCodes.BadRequest - response.asJson shouldEqual - jsonContentOf("files/errors/unsupported-operation.json", "id" -> (nxv + id), "storageId" -> dId) - } - } - } - "reject the update of a non-existent file" in { val nonExistentFile = genString() putFile(s"/v1/files/org/proj/$nonExistentFile?rev=1", entity("other.txt")) ~> asWriter ~> routes ~> check { @@ -542,7 +507,7 @@ class FilesRoutesSpec givenAFile { id => Delete(s"/v1/files/org/proj/$id?rev=1") ~> asWriter ~> routes ~> check { status shouldEqual StatusCodes.OK - val attr = attributes(id) + val attr = attributes(path, id) response.asJson shouldEqual fileMetadata(projectRef, nxv + id, attr, diskIdRev, rev = 2, deprecated = true) } } @@ -579,7 +544,7 @@ class FilesRoutesSpec Put(s"/v1/files/org/proj/$id/undeprecate?rev=2") ~> asWriter ~> routes ~> check { status shouldEqual StatusCodes.OK response.asJson shouldEqual - fileMetadata(projectRef, nxv + id, attributes(id), diskIdRev, rev = 3, deprecated = false) + fileMetadata(projectRef, nxv + id, attributes(path, id), diskIdRev, rev = 3, deprecated = false) Get(s"/v1/files/org/proj/$id") ~> Accept(`*/*`) ~> asReader ~> routes ~> check { status shouldEqual StatusCodes.OK @@ -611,7 +576,7 @@ class FilesRoutesSpec val payload = json"""{"tag": "mytag", "rev": 1}""" postJson(s"/v1/files/org/proj/$id/tags?rev=1", payload) ~> asWriter ~> routes ~> check { status shouldEqual StatusCodes.Created - val attr = attributes(id) + val attr = attributes(path, id) response.asJson shouldEqual fileMetadata(projectRef, nxv + id, attr, diskIdRev, rev = 2) } } @@ -704,7 +669,7 @@ class FilesRoutesSpec givenAFile { id => Get(s"/v1/files/org/proj/$id") ~> Accept(`application/ld+json`) ~> asReader ~> routes ~> check { status shouldEqual StatusCodes.OK - 
val attr = attributes(id) + val attr = attributes(path, id) response.asJson shouldEqual fileMetadata(projectRef, nxv + id, attr, diskIdRev) response.expectConditionalCacheHeaders response.headers should contain(varyHeader) @@ -714,7 +679,7 @@ class FilesRoutesSpec "fetch a file metadata by rev and tag" in { givenATaggedFile(tag) { id => - val attr = attributes(id) + val attr = attributes(path, id) val endpoints = List( s"/v1/files/org/proj/$id", s"/v1/resources/org/proj/_/$id", @@ -772,7 +737,7 @@ class FilesRoutesSpec def deleteTag(id: String, tag: String, rev: Int) = Delete(s"/v1/files/org/proj/$id/tags/$tag?rev=$rev") ~> asWriter ~> routes ~> check { - val attr = attributes(s"$id") + val attr = attributes(path, s"$id") status shouldEqual StatusCodes.OK response.asJson shouldEqual fileMetadata(projectRef, nxv + id, attr, diskIdRev, rev = rev + 1) } diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/remotestorage/RemoteStorageClientFixtures.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/remotestorage/RemoteStorageClientFixtures.scala deleted file mode 100644 index 8d46940182..0000000000 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/remotestorage/RemoteStorageClientFixtures.scala +++ /dev/null @@ -1,75 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.remotestorage - -import akka.actor.ActorSystem -import ch.epfl.bluebrain.nexus.delta.kernel.http.{HttpClient, HttpClientConfig} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.remotestorage.RemoteStorageClientFixtures.{BucketName, RemoteStorageHostConfig} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteDiskStorageClient.RemoteDiskStorageClientImpl -import ch.epfl.bluebrain.nexus.delta.sdk.ConfigFixtures -import ch.epfl.bluebrain.nexus.delta.sdk.auth.{AuthTokenProvider, Credentials} -import ch.epfl.bluebrain.nexus.delta.sdk.model.BaseUri -import ch.epfl.bluebrain.nexus.testkit.scalatest.BaseSpec -import org.scalatest.BeforeAndAfterAll - -import java.nio.file.attribute.PosixFilePermissions -import java.nio.file.{Files, Path} -import scala.concurrent.duration.DurationInt -import scala.jdk.DurationConverters.ScalaDurationOps - -trait RemoteStorageClientFixtures extends BeforeAndAfterAll with ConfigFixtures { this: BaseSpec => - - private val rwx = PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString("rwxrwxrwx")) - private val tmpFolder: Path = Files.createTempDirectory("root", rwx) - - val storageVersion: String = "1.10.0" - - protected val container: RemoteStorageContainer = - new RemoteStorageContainer(storageVersion, tmpFolder) - .withReuse(false) - .withStartupTimeout(60.seconds.toJava) - - def hostConfig: RemoteStorageHostConfig = - RemoteStorageHostConfig(container.getHost, container.getMappedPort(8080)) - - def init(implicit as: ActorSystem): RemoteDiskStorageClient = { - implicit val httpConfig: HttpClientConfig = httpClientConfig - val httpClient: HttpClient = HttpClient() - val authTokenProvider: AuthTokenProvider = AuthTokenProvider.anonymousForTest - val baseUri = BaseUri(hostConfig.endpoint).rightValue - new RemoteDiskStorageClientImpl(httpClient, authTokenProvider, baseUri, Credentials.Anonymous) - } - - override def beforeAll(): Unit = { - super.beforeAll() - - val bucket = 
Files.createDirectory(tmpFolder.resolve(BucketName), rwx) - val bucketNexus = Files.createDirectory(bucket.resolve("nexus"), rwx) - val my = Files.createDirectory(bucket.resolve("my"), rwx) - - (1 to 6).map(idx => s"file-$idx.txt").foreach { fileName => - val path = Files.createFile(my.resolve(fileName), rwx) - path.toFile.setWritable(true, false) - Files.writeString(path, "file content") - } - List(bucket, bucketNexus, my).foreach { path => - path.toFile.setWritable(true, false) - } - - container.start() - } - - override def afterAll(): Unit = { - container.stop() - super.afterAll() - } -} - -object RemoteStorageClientFixtures { - val BucketName = "nexustest" - val Content = "file content" - val Digest = "e0ac3601005dfa1864f5392aabaf7d898b1b5bab854f1acb4491bcd806b76b0c" - - final case class RemoteStorageHostConfig(host: String, port: Int) { - def endpoint: String = s"http://$host:$port/v1" - } -} diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/remotestorage/RemoteStorageContainer.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/remotestorage/RemoteStorageContainer.scala deleted file mode 100644 index 9523479e3d..0000000000 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/remotestorage/RemoteStorageContainer.scala +++ /dev/null @@ -1,21 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.plugins.storage.remotestorage - -import org.testcontainers.containers.GenericContainer -import org.testcontainers.containers.wait.strategy.Wait -import org.testcontainers.utility.{DockerImageName, MountableFile} - -import java.nio.file.Path - -class RemoteStorageContainer(storageVersion: String, rootVolume: Path) - extends GenericContainer[RemoteStorageContainer]( - DockerImageName.parse(s"bluebrain/nexus-storage:$storageVersion") - ) { - - addEnv("JAVA_OPTS", "-Xmx256m -Dconfig.override_with_env_vars=true") - addEnv("CONFIG_FORCE_app_subject_anonymous", "true") - addEnv("CONFIG_FORCE_app_instance_interface", "0.0.0.0") - addEnv("CONFIG_FORCE_app_storage_root__volume", "/app") - withCopyToContainer(MountableFile.forHostPath(rootVolume.toString, Integer.getInteger("777")), "/app") - addExposedPort(8080) - setWaitStrategy(Wait.forLogMessage(".*Bound\\sto.*", 1)) -} diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageDeletionTaskSuite.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageDeletionTaskSuite.scala index e352e1d84a..fd66702406 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageDeletionTaskSuite.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageDeletionTaskSuite.scala @@ -28,7 +28,7 @@ class StorageDeletionTaskSuite extends NexusSuite with StorageFixtures with Acto val content = "file content" val entity = HttpEntity(content) val uploading = DiskUploadingFile(project, diskVal.volume, DigestAlgorithm.default, "trace", entity) - val storageStream: Stream[IO, StorageValue] = Stream(diskVal, s3Val, remoteVal) + val storageStream: Stream[IO, StorageValue] = Stream(diskVal, s3Val) val storageDir = diskVal.rootDirectory(project) def fileExists(metadata: FileStorageMetadata) = diskOps.fetch(metadata.location.path).redeem(_ => false, _ => true) @@ -38,7 +38,7 @@ class StorageDeletionTaskSuite extends NexusSuite with StorageFixtures with Acto _ <- 
fileExists(metadata).assertEquals(true, s"'${metadata.location}' should have been created.") deletionTask = new StorageDeletionTask(_ => storageStream) result <- deletionTask(project) - _ = assertEquals(result.log.size, 3, s"The three storages should have been processed:\n$result") + _ = assertEquals(result.log.size, 2, s"The two storages should have been processed:\n$result") _ = fileExists(metadata).assertEquals(false, s"'${metadata.location}' should have been deleted.") _ = assert(!storageDir.exists, s"The directory '$storageDir' should have been deleted.") diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageFixtures.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageFixtures.scala index 8798863b42..b910864cfc 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageFixtures.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageFixtures.scala @@ -1,20 +1,16 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages import ch.epfl.bluebrain.nexus.delta.kernel.Secret -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.{DiskStorageConfig, RemoteDiskStorageConfig, S3StorageConfig, StorageTypeConfig} -import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageFields.{DiskStorageFields, RemoteDiskStorageFields, S3StorageFields} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.{DiskStorageConfig, S3StorageConfig, StorageTypeConfig} +import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageFields.{DiskStorageFields, S3StorageFields} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{AbsolutePath, DigestAlgorithm} import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv -import ch.epfl.bluebrain.nexus.delta.sdk.auth.Credentials.Anonymous -import ch.epfl.bluebrain.nexus.delta.sdk.model.BaseUri import ch.epfl.bluebrain.nexus.delta.sdk.permissions.model.Permission import ch.epfl.bluebrain.nexus.delta.sdk.syntax._ -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label import ch.epfl.bluebrain.nexus.testkit.CirceLiteral import ch.epfl.bluebrain.nexus.testkit.scalatest.ClasspathResources import java.nio.file.{Files, Paths} -import scala.concurrent.duration._ @SuppressWarnings(Array("OptionGet")) trait StorageFixtures extends CirceLiteral { @@ -23,7 +19,6 @@ trait StorageFixtures extends CirceLiteral { val dId = nxv + "disk-storage" val s3Id = nxv + "s3-storage" - val rdId = nxv + "remote-disk-storage" private val diskVolume = AbsolutePath(Files.createTempDirectory("disk")).toOption.get val tmpVolume: AbsolutePath = AbsolutePath(Paths.get("/tmp")).toOption.get @@ -32,8 +27,7 @@ trait StorageFixtures extends CirceLiteral { implicit val config: StorageTypeConfig = StorageTypeConfig( disk = DiskStorageConfig(diskVolume, Set(diskVolume,tmpVolume), DigestAlgorithm.default, permissions.read, permissions.write, showLocation = false, 50), amazon = Some(S3StorageConfig("localhost", useDefaultCredentialProvider = false, Secret("my_key"), Secret("my_secret_key"), - permissions.read, permissions.write, showLocation = false, 60, defaultBucket = "potato", prefix = None)), - remoteDisk = Some(RemoteDiskStorageConfig(DigestAlgorithm.default, BaseUri("http://localhost", Label.unsafe("v1")), Anonymous, permissions.read, permissions.write, showLocation = false, 70, 50.millis)), + 
permissions.read, permissions.write, showLocation = false, 60, defaultBucket = "potato", prefix = None))
   )
   implicit val showLocation: StoragesConfig.ShowFileLocation = config.showFileLocation
   val diskFields = DiskStorageFields(Some("diskName"), Some("diskDescription"), default = true, Some(tmpVolume), Some(Permission.unsafe("disk/read")), Some(Permission.unsafe("disk/write")), Some(50))
@@ -42,24 +36,18 @@ trait StorageFixtures extends CirceLiteral {
   val diskValUpdate = diskFieldsUpdate.toValue(config).get
   val s3Fields = S3StorageFields(Some("s3name"), Some("s3description"), default = true, Some("mybucket"), Some(Permission.unsafe("s3/read")), Some(Permission.unsafe("s3/write")), Some(51))
   val s3Val = s3Fields.toValue(config).get
-  val remoteFields = RemoteDiskStorageFields(Some("remoteName"), Some("remoteDescription"), default = true, Label.unsafe("myfolder"), Some(Permission.unsafe("remote/read")), Some(Permission.unsafe("remote/write")), Some(52))
-  val remoteVal = remoteFields.toValue(config).get
   // format: on

   val allowedPerms = Seq(
     diskFields.readPermission.get,
     diskFields.writePermission.get,
     s3Fields.readPermission.get,
-    s3Fields.writePermission.get,
-    remoteFields.readPermission.get,
-    remoteFields.writePermission.get
+    s3Fields.writePermission.get
   )

-  val diskJson   = jsonContentOf("storages/disk-storage.json")
-  val s3Json     = jsonContentOf("storages/s3-storage.json")
-  val remoteJson = jsonContentOf("storages/remote-storage.json")
+  val diskJson = jsonContentOf("storages/disk-storage.json")
+  val s3Json   = jsonContentOf("storages/s3-storage.json")

-  val diskFieldsJson   = diskJson.removeKeys("@id", "@context", "_algorithm")
-  val s3FieldsJson     = s3Json.removeKeys("@id", "@context", "_algorithm")
-  val remoteFieldsJson = remoteJson.removeKeys("@id", "@context", "_algorithm")
+  val diskFieldsJson = diskJson.removeKeys("@id", "@context", "_algorithm")
+  val s3FieldsJson   = s3Json.removeKeys("@id", "@context", "_algorithm")
 }
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesSpec.scala
index 3933158481..147099870e 100644
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesSpec.scala
+++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesSpec.scala
@@ -81,8 +81,9 @@ private class StoragesSpec
     }

     "succeed with the passed id" in {
-      storages.create(rdId, projectRef, remoteFieldsJson).accepted shouldEqual
-        resourceFor(rdId, projectRef, remoteVal, remoteFieldsJson, createdBy = bob, updatedBy = bob)
+      val otherS3Id = nxv + "other-s3-storage"
+      storages.create(otherS3Id, projectRef, s3FieldsJson).accepted shouldEqual
+        resourceFor(otherS3Id, projectRef, s3Val, s3FieldsJson, createdBy = bob, updatedBy = bob)
     }

     "reject with different ids on the payload and passed" in {
@@ -217,37 +218,37 @@ private class StoragesSpec
     }

   "fetching a storage" should {
-    val resourceRev1 = resourceFor(rdId, projectRef, remoteVal, remoteFieldsJson, createdBy = bob, updatedBy = bob)
+    val resourceRev1 = resourceFor(s3Id, projectRef, s3Val, s3FieldsJson, createdBy = bob, updatedBy = bob)
     val resourceRev2 = resourceFor(
-      rdId,
+      s3Id,
       projectRef,
-      remoteVal,
-      remoteFieldsJson,
+      s3Val,
+      s3FieldsJson,
       rev = 2,
       createdBy = bob,
       updatedBy = bob
     )

     "succeed" in {
-      storages.fetch(rdId, projectRef).accepted shouldEqual resourceRev2
+      storages.fetch(s3Id, projectRef).accepted shouldEqual resourceRev2
     }

     "succeed by tag" in {
-      storages.fetch(IdSegmentRef(rdId, tag), projectRef).accepted shouldEqual resourceRev1
+      storages.fetch(IdSegmentRef(s3Id, tag), projectRef).accepted shouldEqual resourceRev1
     }

     "succeed by rev" in {
-      storages.fetch(IdSegmentRef(rdId, 2), projectRef).accepted shouldEqual resourceRev2
-      storages.fetch(IdSegmentRef(rdId, 1), projectRef).accepted shouldEqual resourceRev1
+      storages.fetch(IdSegmentRef(s3Id, 2), projectRef).accepted shouldEqual resourceRev2
+      storages.fetch(IdSegmentRef(s3Id, 1), projectRef).accepted shouldEqual resourceRev1
     }

     "reject fetch by tag" in {
-      val id = ResourceRef.Tag(rdId, UserTag.unsafe("other"))
+      val id = ResourceRef.Tag(s3Id, UserTag.unsafe("other"))
       storages.fetch(id, projectRef).rejected shouldEqual FetchByTagNotSupported(id)
     }

     "reject if revision does not exist" in {
-      storages.fetch(IdSegmentRef(rdId, 5), projectRef).rejected shouldEqual
+      storages.fetch(IdSegmentRef(s3Id, 5), projectRef).rejected shouldEqual
         RevisionNotFound(provided = 5, current = 2)
     }

@@ -260,7 +261,7 @@ private class StoragesSpec

     "reject if project does not exist" in {
       val projectRef = ProjectRef(org, Label.unsafe("other"))
-      storages.fetch(rdId, projectRef).rejectedWith[ProjectNotFound]
+      storages.fetch(s3Id, projectRef).rejectedWith[ProjectNotFound]
     }
   }

diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesStmSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesStmSpec.scala
index 7a3cc589f6..b83e8b9b1c 100644
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesStmSpec.scala
+++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesStmSpec.scala
@@ -9,7 +9,7 @@ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageComma
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageEvent._
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.{DifferentStorageType, IncorrectRev, InvalidMaxFileSize, InvalidStorageType, PermissionsAreNotDefined, ResourceAlreadyExists, StorageIsDeprecated, StorageIsNotDeprecated, StorageNotAccessible, StorageNotFound}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageType.{DiskStorage => DiskStorageType}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, RemoteDiskStorageValue, S3StorageValue}
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, S3StorageValue}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{AbsolutePath, DigestAlgorithm}
 import ch.epfl.bluebrain.nexus.delta.sdk.permissions.model.Permission
 import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.User
@@ -34,14 +34,10 @@ class StoragesStmSpec extends CatsEffectSpec with StorageFixtures {
   private val accessibleDisk = Set(diskFields.volume.value, tmp2)

   private val access: StorageAccess = {
-    case disk: DiskStorageValue         =>
+    case disk: DiskStorageValue =>
       IO.whenA(!accessibleDisk.contains(disk.volume))(IO.raiseError(StorageNotAccessible("wrong volume")))
-    case s3: S3StorageValue             =>
+    case s3: S3StorageValue =>
       IO.whenA(!s3Fields.bucket.contains(s3.bucket))(IO.raiseError(StorageNotAccessible("wrong bucket")))
-    case remote: RemoteDiskStorageValue =>
-      IO.whenA(remote.folder != remoteFields.folder)(
-        IO.raiseError(StorageNotAccessible("Folder does not exist"))
-      )
   }

   private val perms = IO.pure(allowedPerms.toSet)
@@ -53,24 +49,21 @@ class StoragesStmSpec extends CatsEffectSpec with StorageFixtures {

   "evaluating an incoming command" should {

     "create a new event from a CreateStorage command" in {
-      val disk   = CreateStorage(dId, project, diskFields.copy(maxFileSize = Some(1)), diskFieldsJson, bob)
-      val s3     = CreateStorage(s3Id, project, s3Fields.copy(maxFileSize = Some(2)), s3FieldsJson, bob)
-      val remote = CreateStorage(rdId, project, remoteFields.copy(maxFileSize = Some(3)), remoteFieldsJson, bob)
+      val disk = CreateStorage(dId, project, diskFields.copy(maxFileSize = Some(1)), diskFieldsJson, bob)
+      val s3   = CreateStorage(s3Id, project, s3Fields.copy(maxFileSize = Some(2)), s3FieldsJson, bob)

-      forAll(List(disk, s3, remote)) { cmd =>
+      forAll(List(disk, s3)) { cmd =>
         eval(None, cmd).accepted shouldEqual
           StorageCreated(cmd.id, cmd.project, cmd.fields.toValue(config).value, cmd.source, 1, epoch, bob)
       }
     }

     "create a new event from a UpdateStorage command" in {
-      val disk          = UpdateStorage(dId, project, diskFields, diskFieldsJson, 1, alice)
-      val diskCurrent   = storageState(dId, project, diskVal.copy(maxFileSize = 1))
-      val s3            = UpdateStorage(s3Id, project, s3Fields, s3FieldsJson, 1, alice)
-      val s3Current     = storageState(s3Id, project, s3Val.copy(maxFileSize = 2))
-      val remote        = UpdateStorage(rdId, project, remoteFields, remoteFieldsJson, 1, alice)
-      val remoteCurrent = storageState(rdId, project, remoteVal.copy(maxFileSize = 3))
-      val list          = List((diskCurrent, disk), (s3Current, s3), (remoteCurrent, remote))
+      val disk        = UpdateStorage(dId, project, diskFields, diskFieldsJson, 1, alice)
+      val diskCurrent = storageState(dId, project, diskVal.copy(maxFileSize = 1))
+      val s3          = UpdateStorage(s3Id, project, s3Fields, s3FieldsJson, 1, alice)
+      val s3Current   = storageState(s3Id, project, s3Val.copy(maxFileSize = 2))
+      val list        = List((diskCurrent, disk), (s3Current, s3))

       forAll(list) { case (state, cmd) =>
         eval(Some(state), cmd).accepted shouldEqual
@@ -103,21 +96,18 @@ class StoragesStmSpec extends CatsEffectSpec with StorageFixtures {
     }

     "reject with StorageNotAccessible" in {
-      val notAllowedDiskVal     = diskFields.copy(volume = Some(tmp2))
-      val inaccessibleDiskVal   =
+      val notAllowedDiskVal   = diskFields.copy(volume = Some(tmp2))
+      val inaccessibleDiskVal =
         diskFields.copy(volume = Some(AbsolutePath(Files.createTempDirectory("other")).rightValue))
-      val inaccessibleS3Val     = s3Fields.copy(bucket = Some("other"))
-      val inaccessibleRemoteVal = remoteFields.copy(folder = Label.unsafe("xxx"))
-      val diskCurrent           = storageState(dId, project, diskVal)
-      val s3Current             = storageState(s3Id, project, s3Val)
-      val remoteCurrent         = storageState(rdId, project, remoteVal)
+      val inaccessibleS3Val   = s3Fields.copy(bucket = Some("other"))
+      val diskCurrent         = storageState(dId, project, diskVal)
+      val s3Current           = storageState(s3Id, project, s3Val)

       forAll(
         List(
           dId -> notAllowedDiskVal,
           dId -> inaccessibleDiskVal,
-          s3Id -> inaccessibleS3Val,
-          rdId -> inaccessibleRemoteVal
+          s3Id -> inaccessibleS3Val
         )
       ) { case (id, value) =>
         val createCmd = CreateStorage(id, project, value, Json.obj(), bob)
@@ -126,9 +116,8 @@ class StoragesStmSpec extends CatsEffectSpec with StorageFixtures {

       forAll(
         List(
-          diskCurrent   -> inaccessibleDiskVal,
-          s3Current     -> inaccessibleS3Val,
-          remoteCurrent -> inaccessibleRemoteVal
+          diskCurrent -> inaccessibleDiskVal,
+          s3Current   -> inaccessibleS3Val
         )
       ) { case (state, value) =>
         val updateCmd = UpdateStorage(state.id, project, value, Json.obj(), 1, alice)
@@ -137,18 +126,15 @@ class StoragesStmSpec extends CatsEffectSpec with StorageFixtures {
     }

     "reject with InvalidMaxFileSize" in {
-      val exceededSizeDiskVal   = diskFields.copy(maxFileSize = Some(100))
-      val exceededSizeS3Val     = s3Fields.copy(maxFileSize = Some(100))
-      val exceededSizeRemoteVal = remoteFields.copy(maxFileSize = Some(100))
-      val diskCurrent           = storageState(dId, project, diskVal)
-      val s3Current             = storageState(s3Id, project, s3Val)
-      val remoteCurrent         = storageState(rdId, project, remoteVal)
+      val exceededSizeDiskVal = diskFields.copy(maxFileSize = Some(100))
+      val exceededSizeS3Val   = s3Fields.copy(maxFileSize = Some(100))
+      val diskCurrent         = storageState(dId, project, diskVal)
+      val s3Current           = storageState(s3Id, project, s3Val)

       forAll(
         List(
           (dId, exceededSizeDiskVal, config.disk.defaultMaxFileSize),
-          (s3Id, exceededSizeS3Val, config.amazon.value.defaultMaxFileSize),
-          (rdId, exceededSizeRemoteVal, config.remoteDisk.value.defaultMaxFileSize)
+          (s3Id, exceededSizeS3Val, config.amazon.value.defaultMaxFileSize)
         )
       ) { case (id, value, maxFileSize) =>
         val createCmd = CreateStorage(id, project, value, Json.obj(), bob)
@@ -158,8 +144,7 @@ class StoragesStmSpec extends CatsEffectSpec with StorageFixtures {
       forAll(
         List(
           (diskCurrent, exceededSizeDiskVal, config.disk.defaultMaxFileSize),
-          (s3Current, exceededSizeS3Val, config.amazon.get.defaultMaxFileSize),
-          (remoteCurrent, exceededSizeRemoteVal, config.remoteDisk.value.defaultMaxFileSize)
+          (s3Current, exceededSizeS3Val, config.amazon.get.defaultMaxFileSize)
         )
       ) { case (state, value, maxFileSize) =>
         val updateCmd = UpdateStorage(state.id, project, value, Json.obj(), 1, alice)
@@ -205,16 +190,11 @@ class StoragesStmSpec extends CatsEffectSpec with StorageFixtures {
     }

     "reject with DifferentStorageType" in {
-      val diskCurrent   = storageState(dId, project, diskVal)
-      val s3Current     = storageState(s3Id, project, s3Val)
-      val remoteCurrent = storageState(rdId, project, remoteVal)
-      val list          = List(
-        diskCurrent   -> UpdateStorage(dId, project, s3Fields, Json.obj(), 1, alice),
-        diskCurrent   -> UpdateStorage(dId, project, remoteFields, Json.obj(), 1, alice),
-        s3Current     -> UpdateStorage(s3Id, project, diskFields, Json.obj(), 1, alice),
-        s3Current     -> UpdateStorage(s3Id, project, remoteFields, Json.obj(), 1, alice),
-        remoteCurrent -> UpdateStorage(rdId, project, diskFields, Json.obj(), 1, alice),
-        remoteCurrent -> UpdateStorage(rdId, project, s3Fields, Json.obj(), 1, alice)
+      val diskCurrent = storageState(dId, project, diskVal)
+      val s3Current   = storageState(s3Id, project, s3Val)
+      val list        = List(
+        diskCurrent -> UpdateStorage(dId, project, s3Fields, Json.obj(), 1, alice),
+        s3Current   -> UpdateStorage(s3Id, project, diskFields, Json.obj(), 1, alice)
       )
       forAll(list) { case (state, cmd) =>
         eval(Some(state), cmd).rejectedWith[DifferentStorageType]
@@ -238,19 +218,15 @@ class StoragesStmSpec extends CatsEffectSpec with StorageFixtures {

     "reject with InvalidStorageType" in {
       val s3Current = storageState(s3Id, project, s3Val)
-      val remoteCurrent = storageState(rdId, project, remoteVal)
       val list = List(
-        None                -> CreateStorage(s3Id, project, s3Fields, Json.obj(), bob),
-        None                -> CreateStorage(s3Id, project, remoteFields, Json.obj(), bob),
-        Some(s3Current)     -> UpdateStorage(s3Id, project, s3Fields, Json.obj(), 1, alice),
-        Some(remoteCurrent) -> UpdateStorage(rdId, project, remoteFields, Json.obj(), 1, alice)
+        None            -> CreateStorage(s3Id, project, s3Fields, Json.obj(), bob),
+        Some(s3Current) -> UpdateStorage(s3Id, project, s3Fields, Json.obj(), 1, alice)
       )
       val diskVolume = AbsolutePath(Files.createTempDirectory("disk")).rightValue
       // format: off
       val config: StorageTypeConfig = StorageTypeConfig(
         disk = DiskStorageConfig(diskVolume, Set(diskVolume), DigestAlgorithm.default, permissions.read, permissions.write, showLocation = false, 150),
-        amazon = None,
-        remoteDisk = None
+        amazon = None
       )
       // format: on
       val eval = evaluate(access, perms, config, clock)(_, _)
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/access/RemoteDiskStorageAccessSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/access/RemoteDiskStorageAccessSpec.scala
deleted file mode 100644
index b3fc265d41..0000000000
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/access/RemoteDiskStorageAccessSpec.scala
+++ /dev/null
@@ -1,33 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.access
-
-import akka.actor.ActorSystem
-import akka.testkit.TestKit
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.remotestorage.RemoteStorageClientFixtures
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.StorageNotAccessible
-import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label
-import ch.epfl.bluebrain.nexus.testkit.scalatest.ce.CatsEffectSpec
-import org.scalatest.DoNotDiscover
-import org.scalatest.concurrent.Eventually
-
-@DoNotDiscover
-class RemoteDiskStorageAccessSpec(fixture: RemoteStorageClientFixtures)
-    extends TestKit(ActorSystem("RemoteDiskStorageAccessSpec"))
-    with CatsEffectSpec
-    with Eventually
-    with RemoteStorageClientFixtures {
-
-  private lazy val remoteDiskStorageClient = fixture.init
-  private lazy val remoteAccess = RemoteStorageAccess(remoteDiskStorageClient)
-
-  "A RemoteDiskStorage access" should {
-
-    "succeed verifying the folder" in eventually {
-      remoteAccess.checkFolderExists(Label.unsafe(RemoteStorageClientFixtures.BucketName)).accepted
-    }
-
-    "fail when folder does not exist" in {
-      remoteAccess.checkFolderExists(Label.unsafe(genString())).rejectedWith[StorageNotAccessible]
-    }
-  }
-
-}
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFieldsSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFieldsSpec.scala
index a242c8cdf0..5ec40a7b24 100644
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFieldsSpec.scala
+++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFieldsSpec.scala
@@ -9,7 +9,6 @@ import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.decoder.Configuration
 import ch.epfl.bluebrain.nexus.delta.sdk.jsonld.JsonLdSourceProcessor.JsonLdSourceDecoder
 import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.{ApiMappings, ProjectContext}
 import ch.epfl.bluebrain.nexus.delta.sdk.syntax._
-import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label
 import ch.epfl.bluebrain.nexus.testkit.scalatest.ce.CatsEffectSpec

 class StorageFieldsSpec extends CatsEffectSpec with RemoteContextResolutionFixture with StorageFixtures {
@@ -65,29 +64,6 @@ class StorageFieldsSpec extends CatsEffectSpec with RemoteContextResolutionFixtu
           S3StorageFields(None, None, default = true, Some("mybucket"), None, None, None)
       }
     }
-
-    "dealing with remote storages" should {
-      val json = remoteFieldsJson.addContext(contexts.storages)
-
-      "be created from Json-LD" in {
-        sourceDecoder(pc, json).accepted._2 shouldEqual remoteFields
-      }
-
-      "be created from Json-LD without optional values" in {
-        val jsonNoDefaults =
-          json.removeKeys(
-            "name",
-            "description",
-            "readPermission",
-            "writePermission",
-            "maxFileSize",
-            "endpoint",
-            "credentials"
-          )
-        sourceDecoder(pc, jsonNoDefaults).accepted._2 shouldEqual
-          RemoteDiskStorageFields(None, None, default = true, Label.unsafe("myfolder"), None, None, None)
-      }
-    }
   }
 }
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSerializationSuite.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSerializationSuite.scala
index 7608fc68be..c9d0f95f3b 100644
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSerializationSuite.scala
+++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSerializationSuite.scala
@@ -22,15 +22,12 @@ class StorageSerializationSuite extends SerializationSuite with StorageFixtures
   private val tag = UserTag.unsafe("mytag")
   private val projectRef = ProjectRef.unsafe("myorg", "myproj")

-  private val s3ValUpdate     = s3Val.copy(bucket = "mybucket2", maxFileSize = 41)
-  private val remoteValUpdate = remoteVal.copy(folder = Label.unsafe("myfolder2"), maxFileSize = 42)
+  private val s3ValUpdate = s3Val.copy(bucket = "mybucket2", maxFileSize = 41)

   private val diskCreated = StorageCreated(dId, projectRef, diskVal, diskFieldsJson, 1, instant, subject)
   private val s3Created = StorageCreated(s3Id, projectRef, s3Val, s3FieldsJson, 1, instant, subject)
-  private val remoteCreated = StorageCreated(rdId, projectRef, remoteVal, remoteFieldsJson, 1, instant, subject)
   private val diskUpdated = StorageUpdated(dId, projectRef, diskValUpdate, diskFieldsJson, 2, instant, subject)
   private val s3Updated = StorageUpdated(s3Id, projectRef, s3ValUpdate, s3FieldsJson, 2, instant, subject)
-  private val remoteUpdated = StorageUpdated(rdId, projectRef, remoteValUpdate, remoteFieldsJson, 2, instant, subject)
   private val diskTagged = StorageTagAdded(dId, projectRef, DiskStorageType, targetRev = 1, tag, 3, instant, subject)
   private val diskDeprecated = StorageDeprecated(dId, projectRef, DiskStorageType, 4, instant, subject)
   private val diskUndeprecated = StorageUndeprecated(dId, projectRef, DiskStorageType, 5, instant, subject)
@@ -38,10 +35,8 @@ class StorageSerializationSuite extends SerializationSuite with StorageFixtures
   private val storagesMapping = List(
     (diskCreated, loadEvents("storages", "disk-storage-created.json"), Created),
     (s3Created, loadEvents("storages", "s3-storage-created.json"), Created),
-    (remoteCreated, loadEvents("storages", "remote-storage-created.json"), Created),
     (diskUpdated, loadEvents("storages", "disk-storage-updated.json"), Updated),
     (s3Updated, loadEvents("storages", "s3-storage-updated.json"), Updated),
-    (remoteUpdated, loadEvents("storages", "remote-storage-updated.json"), Updated),
     (diskTagged, loadEvents("storages", "storage-tag-added.json"), Tagged),
     (diskDeprecated, loadEvents("storages", "storage-deprecated.json"), Deprecated),
     (diskUndeprecated, loadEvents("storages", "storage-undeprecated.json"), Undeprecated)
@@ -84,9 +79,8 @@ class StorageSerializationSuite extends SerializationSuite with StorageFixtures
   }

   private val statesMapping = VectorMap(
-    (dId, diskVal, diskFieldsJson)      -> jsonContentOf("storages/storage-disk-state.json"),
-    (s3Id, s3Val, s3FieldsJson)         -> jsonContentOf("storages/storage-s3-state.json"),
-    (rdId, remoteVal, remoteFieldsJson) -> jsonContentOf("storages/storage-remote-state.json")
+    (dId, diskVal, diskFieldsJson) -> jsonContentOf("storages/storage-disk-state.json"),
+    (s3Id, s3Val, s3FieldsJson)    -> jsonContentOf("storages/storage-s3-state.json")
   ).map { case ((id, value, source), v) =>
     StorageState(
       id,
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSpec.scala
index 4aae2dc3d2..1549c9bad1 100644
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSpec.scala
+++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSpec.scala
@@ -11,23 +11,18 @@ import ch.epfl.bluebrain.nexus.testkit.scalatest.ce.CatsEffectSpec
 class StorageSpec extends CatsEffectSpec with RemoteContextResolutionFixture with StorageFixtures {

   "A Storage" should {
-    val project       = ProjectRef(Label.unsafe("org"), Label.unsafe("project"))
-    val diskStorage   =
+    val project     = ProjectRef(Label.unsafe("org"), Label.unsafe("project"))
+    val diskStorage =
       DiskStorage(nxv + "disk", project, diskVal, json"""{"disk": "value"}""")
-    val s3Storage     = S3Storage(nxv + "s3", project, s3Val, json"""{"s3": "value"}""")
-    val remoteStorage =
-      RemoteDiskStorage(nxv + "remote", project, remoteVal, json"""{"remote": "value"}""")
+    val s3Storage   = S3Storage(nxv + "s3", project, s3Val, json"""{"s3": "value"}""")

     "be compacted" in {
       forAll(
         List(
-          diskStorage   -> diskJson.deepMerge(json"""{"@type": ["Storage", "DiskStorage"]}"""),
-          s3Storage     -> s3Json
+          diskStorage -> diskJson.deepMerge(json"""{"@type": ["Storage", "DiskStorage"]}"""),
+          s3Storage   -> s3Json
             .deepMerge(json"""{"@type": ["Storage", "S3Storage"]}""")
-            .removeKeys("accessKey", "secretKey"),
-          remoteStorage -> remoteJson
-            .deepMerge(json"""{"@type": ["Storage", "RemoteDiskStorage"]}""")
-            .removeKeys("credentials")
+            .removeKeys("accessKey", "secretKey")
         )
       ) { case (value, compacted) =>
         value.toCompactedJsonLd.accepted.json shouldEqual compacted
@@ -35,12 +30,11 @@ class StorageSpec extends CatsEffectSpec with RemoteContextResolutionFixture wit
     }

     "be expanded" in {
-      val diskJson   = jsonContentOf("storages/disk-storage-expanded.json")
-      val s3Json     = jsonContentOf("storages/s3-storage-expanded.json")
-      val remoteJson = jsonContentOf("storages/remote-storage-expanded.json")
+      val diskJson = jsonContentOf("storages/disk-storage-expanded.json")
+      val s3Json   = jsonContentOf("storages/s3-storage-expanded.json")

-      forAll(List(diskStorage -> diskJson, s3Storage -> s3Json, remoteStorage -> remoteJson)) {
-        case (value, expanded) => value.toExpandedJsonLd.accepted.json shouldEqual expanded
+      forAll(List(diskStorage -> diskJson, s3Storage -> s3Json)) { case (value, expanded) =>
+        value.toExpandedJsonLd.accepted.json shouldEqual expanded
       }
     }
   }
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskStorageSaveFileSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskStorageSaveFileSpec.scala
index f2fe2e6d45..a2807918e4 100644
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskStorageSaveFileSpec.scala
+++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskStorageSaveFileSpec.scala
@@ -6,7 +6,6 @@ import akka.testkit.TestKit
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.ComputedDigest
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Client
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileStorageMetadata
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.remotestorage.RemoteStorageClientFixtures
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.UUIDFFixtures
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{AbsolutePath, DigestAlgorithm}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.AkkaSourceHelpers
@@ -33,6 +32,7 @@ class DiskStorageSaveFileSpec
   "A DiskStorage saving operations" should {
     val project = ProjectRef.unsafe("org", "project")
     val content = "file content"
+    val digest = "e0ac3601005dfa1864f5392aabaf7d898b1b5bab854f1acb4491bcd806b76b0c"
     val entity = HttpEntity(content)

     val uploading = DiskUploadingFile(project, volume, DigestAlgorithm.default, "myfile.txt", entity)
@@ -47,7 +47,7 @@ class DiskStorageSaveFileSpec
           FileStorageMetadata(
             fixedUuid,
             Files.size(file.value),
-            ComputedDigest(DigestAlgorithm.default, RemoteStorageClientFixtures.Digest),
+            ComputedDigest(DigestAlgorithm.default, digest),
             Client,
             s"file://$file",
             Uri.Path("org/project/8/0/4/9/b/a/9/0/myfile.txt")
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageLinkFileSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageLinkFileSpec.scala
deleted file mode 100644
index 8228a76cf0..0000000000
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageLinkFileSpec.scala
+++ /dev/null
@@ -1,75 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote
-
-import akka.actor.ActorSystem
-import akka.http.scaladsl.model.Uri
-import akka.testkit.TestKit
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.NotComputedDigest
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Storage
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileStorageMetadata
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.remotestorage.RemoteStorageClientFixtures
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.DigestAlgorithm
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.RemoteDiskStorage
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.RemoteDiskStorageValue
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.AkkaSourceHelpers
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.MoveFileRejection.FileNotFound
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.permissions.{read, write}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{StorageFixtures, UUIDFFixtures}
-import ch.epfl.bluebrain.nexus.delta.sdk.ConfigFixtures
-import ch.epfl.bluebrain.nexus.delta.sdk.syntax._
-import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef}
-import ch.epfl.bluebrain.nexus.testkit.scalatest.ce.CatsEffectSpec
-import io.circe.Json
-import org.scalatest.{BeforeAndAfterAll, DoNotDiscover}
-
-@DoNotDiscover
-class RemoteStorageLinkFileSpec(fixture: RemoteStorageClientFixtures)
-    extends TestKit(ActorSystem("RemoteStorageMoveFileSpec"))
-    with CatsEffectSpec
-    with AkkaSourceHelpers
-    with StorageFixtures
-    with UUIDFFixtures.Fixed
-    with BeforeAndAfterAll
-    with ConfigFixtures {
-
-  private lazy val remoteDiskStorageClient = fixture.init
-  private lazy val fileOps = RemoteDiskFileOperations.mk(remoteDiskStorageClient)
-
-  private val iri = iri"http://localhost/remote"
-  private val project = ProjectRef.unsafe("org", "project")
-  private val filename = "file-2.txt"
-
-  private var storageValue: RemoteDiskStorageValue = _
-  private var storage: RemoteDiskStorage = _
-
-  override protected def beforeAll(): Unit = {
-    super.beforeAll()
-    storageValue = RemoteDiskStorageValue(
-      default = true,
-      DigestAlgorithm.default,
-      Label.unsafe(RemoteStorageClientFixtures.BucketName),
-      read,
-      write,
-      10
-    )
-    storage = RemoteDiskStorage(iri, project, storageValue, Json.obj())
-  }
-
-  "RemoteDiskStorage linking operations" should {
-
-    "succeed" in {
-      fileOps.legacyLink(storage, Uri.Path("my/file-2.txt"), filename).accepted shouldEqual
-        FileStorageMetadata(
-          fixedUuid,
-          12,
-          NotComputedDigest,
-          Storage,
-          s"file:///app/${RemoteStorageClientFixtures.BucketName}/nexus/org/project/8/0/4/9/b/a/9/0/file-2.txt",
-          Uri.Path("org/project/8/0/4/9/b/a/9/0/file-2.txt")
-        )
-    }
-
-    "fail linking a file that does not exist" in {
-      fileOps.legacyLink(storage, Uri.Path("my/file-40.txt"), filename).rejectedWith[FileNotFound]
-    }
-  }
-}
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageSaveAndFetchFileSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageSaveAndFetchFileSpec.scala
deleted file mode 100644
index 8d33918664..0000000000
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageSaveAndFetchFileSpec.scala
+++ /dev/null
@@ -1,79 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote
-
-import akka.actor.ActorSystem
-import akka.http.scaladsl.model.ContentTypes.`text/plain(UTF-8)`
-import akka.http.scaladsl.model.{HttpEntity, Uri}
-import akka.testkit.TestKit
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.ComputedDigest
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Client
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileStorageMetadata
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.remotestorage.RemoteStorageClientFixtures
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.DigestAlgorithm
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.AkkaSourceHelpers
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.SaveFileRejection.ResourceAlreadyExists
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.UploadingFile.RemoteUploadingFile
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{StorageFixtures, UUIDFFixtures}
-import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef}
-import ch.epfl.bluebrain.nexus.testkit.scalatest.ce.CatsEffectSpec
-import org.scalatest.concurrent.Eventually
-import org.scalatest.{BeforeAndAfterAll, DoNotDiscover}
-
-@DoNotDiscover
-class RemoteStorageSaveAndFetchFileSpec(fixture: RemoteStorageClientFixtures)
-    extends TestKit(ActorSystem("RemoteStorageSaveAndFetchFileSpec"))
-    with CatsEffectSpec
-    with AkkaSourceHelpers
-    with Eventually
-    with BeforeAndAfterAll
-    with StorageFixtures
-    with UUIDFFixtures.Fixed {
-
-  private lazy val remoteDiskStorageClient = fixture.init
-
-  private val project = ProjectRef.unsafe("org", "project")
-  private val filename = "myfile.txt"
-
-  private val folder = Label.unsafe(RemoteStorageClientFixtures.BucketName)
-
-  private lazy val fileOps = RemoteDiskFileOperations.mk(remoteDiskStorageClient)
-
-  "RemoteDiskStorage operations" should {
-    val content = "file content"
-    val entity = HttpEntity(content)
-
-    val uploading = RemoteUploadingFile(project, folder, filename, entity)
-
-    val bytes = 12L
-    val digest = ComputedDigest(DigestAlgorithm.default, RemoteStorageClientFixtures.Digest)
-    val location = s"file:///app/${RemoteStorageClientFixtures.BucketName}/nexus/org/project/8/0/4/9/b/a/9/0/myfile.txt"
-    val path = Uri.Path("org/project/8/0/4/9/b/a/9/0/myfile.txt")
-
-    "save a file to a folder" in {
-      fileOps.save(uploading).accepted shouldEqual FileStorageMetadata(
-        fixedUuid,
-        bytes,
-        digest,
-        Client,
-        location,
-        path
-      )
-    }
-
-    "fetch a file from a folder" in {
-      val sourceFetched = fileOps.fetch(folder, path).accepted
-      consume(sourceFetched) shouldEqual content
-    }
-
-    "fetch a file attributes" in eventually {
-      val computedAttributes = fileOps.fetchAttributes(folder, path).accepted
-      computedAttributes.digest shouldEqual digest
-      computedAttributes.bytes shouldEqual bytes
-      computedAttributes.mediaType shouldEqual `text/plain(UTF-8)`
-    }
-
-    "fail attempting to save the same file again" in {
-      val uploading = RemoteUploadingFile(project, folder, filename, entity)
-      fileOps.save(uploading).rejectedWith[ResourceAlreadyExists]
-    }
-  }
-}
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageSpec.scala
deleted file mode 100644
index 084bc4f8c1..0000000000
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageSpec.scala
+++ /dev/null
@@ -1,18 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote
-
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.FilesSpec
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.remotestorage.RemoteStorageClientFixtures
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.access.RemoteDiskStorageAccessSpec
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.RemoteStorageClientSpec
-import ch.epfl.bluebrain.nexus.testkit.scalatest.BaseSpec
-import org.scalatest.Suite
-
-class RemoteStorageSpec extends BaseSpec with RemoteStorageClientFixtures {
-  override def nestedSuites: IndexedSeq[Suite] = Vector(
-    new RemoteStorageClientSpec(this),
-    new RemoteDiskStorageAccessSpec(this),
-    new RemoteStorageSaveAndFetchFileSpec(this),
-    new RemoteStorageLinkFileSpec(this),
-    new FilesSpec(this)
-  )
-}
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteStorageClientSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteStorageClientSpec.scala
deleted file mode 100644
index 738120cd74..0000000000
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/client/RemoteStorageClientSpec.scala
+++ /dev/null
@@ -1,86 +0,0 @@
-package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client
-
-import akka.actor.ActorSystem
-import akka.http.scaladsl.model.ContentTypes.`text/plain(UTF-8)`
-import akka.http.scaladsl.model.{HttpEntity, StatusCodes, Uri}
-import akka.testkit.TestKit
-import ch.epfl.bluebrain.nexus.delta.kernel.dependency.ComponentDescription.ServiceDescription
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.{ComputedDigest, NotComputedDigest}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.remotestorage.RemoteStorageClientFixtures
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.remotestorage.RemoteStorageClientFixtures.BucketName
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.DigestAlgorithm
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.AkkaSourceHelpers
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.{FetchFileRejection, MoveFileRejection}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.remote.client.model.RemoteDiskStorageFileAttributes
-import ch.epfl.bluebrain.nexus.delta.kernel.http.HttpClientError.HttpClientStatusError
-import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label
-import ch.epfl.bluebrain.nexus.testkit.scalatest.ce.CatsEffectSpec
-import org.scalatest.concurrent.Eventually
-import org.scalatest.{BeforeAndAfterAll, DoNotDiscover}
-
-@DoNotDiscover
-class RemoteStorageClientSpec(fixture: RemoteStorageClientFixtures)
-    extends TestKit(ActorSystem("RemoteStorageClientSpec"))
-    with CatsEffectSpec
-    with AkkaSourceHelpers
-    with Eventually
-    with BeforeAndAfterAll
-    with RemoteStorageClientFixtures {
-
-  private lazy val client: RemoteDiskStorageClient = fixture.init
-  private val bucket: Label = Label.unsafe(BucketName)
-
-  "A RemoteStorage client" should {
-
-    val content = RemoteStorageClientFixtures.Content
-    val entity = HttpEntity(content)
-    val attributes = RemoteDiskStorageFileAttributes(
-      location = s"file:///app/$BucketName/nexus/my/file.txt",
-      bytes = 12,
-      digest = ComputedDigest(DigestAlgorithm.default, RemoteStorageClientFixtures.Digest),
-      mediaType = `text/plain(UTF-8)`
-    )
-
-    "fetch the service description" in eventually {
-      client.serviceDescription.accepted shouldEqual ServiceDescription("remoteStorage", fixture.storageVersion)
-    }
-
-    "check if a bucket exists" in {
-      client.exists(bucket).accepted
-      val error = client.exists(Label.unsafe("other")).rejectedWith[HttpClientStatusError]
-      error.code == StatusCodes.NotFound
-    }
-
-    "create a file" in {
-      client.createFile(bucket, Uri.Path("my/file.txt"), entity).accepted shouldEqual attributes
-    }
-
-    "get a file" in {
-      consume(client.getFile(bucket, Uri.Path("my/file.txt")).accepted) shouldEqual content
-    }
-
-    "fail to get a file that does not exist" in {
-      client.getFile(bucket, Uri.Path("my/file3.txt")).rejectedWith[FetchFileRejection.FileNotFound]
-    }
-
-    "get a file attributes" in eventually {
-      client.getAttributes(bucket, Uri.Path("my/file.txt")).accepted shouldEqual attributes
-    }
-
-    "move a file" in {
-      client
-        .moveFile(bucket, Uri.Path("my/file-1.txt"), Uri.Path("other/file-1.txt"))
-        .accepted shouldEqual
-        attributes.copy(
-          location = s"file:///app/$BucketName/nexus/other/file-1.txt",
-          digest = NotComputedDigest
-        )
-    }
-
-    "fail to move a file that does not exist" in {
-      client
-        .moveFile(bucket, Uri.Path("my/file.txt"), Uri.Path("other/file.txt"))
-        .rejectedWith[MoveFileRejection.FileNotFound]
-    }
-  }
-}
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/routes/StoragesRoutesSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/routes/StoragesRoutesSpec.scala
index 85bdfdbdf5..4e1d2776ad 100644
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/routes/StoragesRoutesSpec.scala
+++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/routes/StoragesRoutesSpec.scala
@@ -67,15 +67,12 @@ class StoragesRoutesSpec extends BaseRouteSpec with StorageFixtures with UUIDFFi
     ProjectGen.project("myorg", "myproject", uuid = randomUuid, orgUuid = randomUuid, base = projBase, mappings = am)

   private val projectRef = project.ref
-  private val remoteIdEncoded = UrlUtils.encode(rdId.toString)
-  private val s3IdEncoded     = UrlUtils.encode(s3Id.toString)
+  private val s3IdEncoded = UrlUtils.encode(s3Id.toString)

-  private val diskRead    = Permission.unsafe("disk/read")
-  private val diskWrite   = Permission.unsafe("disk/write")
-  private val s3Read      = Permission.unsafe("s3/read")
-  private val s3Write     = Permission.unsafe("s3/write")
-  private val remoteRead  = Permission.unsafe("remote/read")
-  private val remoteWrite = Permission.unsafe("remote/write")
+  private val diskRead  = Permission.unsafe("disk/read")
+  private val diskWrite = Permission.unsafe("disk/write")
+  private val s3Read    = Permission.unsafe("s3/read")
+  private val s3Write   = Permission.unsafe("s3/write")

   override val allowedPerms = Seq(
     permissions.read,
@@ -84,9 +81,7 @@ class StoragesRoutesSpec extends BaseRouteSpec with StorageFixtures with UUIDFFi
     diskRead,
     diskWrite,
     s3Read,
-    s3Write,
-    remoteRead,
-    remoteWrite
+    s3Write
   )

   private val perms = allowedPerms.toSet
@@ -95,7 +90,7 @@ class StoragesRoutesSpec extends BaseRouteSpec with StorageFixtures with UUIDFFi

   private val storageStatistics: StoragesStatistics =
     (storage, project) =>
-      if (project.equals(projectRef) && storage.toString.equals("remote-disk-storage"))
+      if (project.equals(projectRef) && storage.toString.equals("s3-storage"))
         IO.pure(StorageStatEntry(50, 5000))
       else
         IO.raiseError(StorageNotFound(iri"https://bluebrain.github.io/nexus/vocabulary/$storage", project))
@@ -120,8 +115,8 @@ class StoragesRoutesSpec extends BaseRouteSpec with StorageFixtures with UUIDFFi

   override def beforeAll(): Unit = {
     super.beforeAll()
-    val readPermissions  = Set(permissions.read, diskRead, s3Read, remoteRead)
-    val writePermissions = Set(permissions.write, diskWrite, s3Write, remoteWrite)
+    val readPermissions  = Set(permissions.read, diskRead, s3Read)
+    val writePermissions = Set(permissions.write, diskWrite, s3Write)
     aclCheck.append(AclAddress.Root, reader -> readPermissions).accepted
     aclCheck.append(AclAddress.Root, writer -> writePermissions).accepted
   }
@@ -144,17 +139,6 @@ class StoragesRoutesSpec extends BaseRouteSpec with StorageFixtures with UUIDFFi
       }
     }

-    "create a storage with an authenticated user and provided id" in {
-      Put(
-        "/v1/storages/myorg/myproject/remote-disk-storage",
-        remoteFieldsJson.toEntity
-      ) ~> asWriter ~> routes ~> check {
-        status shouldEqual StatusCodes.Created
-        response.asJson shouldEqual
-          storageMetadata(projectRef, rdId, StorageType.RemoteDiskStorage)
-      }
-    }
-
     "reject the creation of a storage which already exists" in {
       Put("/v1/storages/myorg/myproject/s3-storage", s3FieldsJson.toEntity) ~> asWriter ~> routes ~> check {
         status shouldEqual StatusCodes.Conflict
@@ -174,10 +158,9 @@ class StoragesRoutesSpec extends BaseRouteSpec with StorageFixtures with UUIDFFi
         s"/v1/storages/myorg/myproject/$s3IdEncoded"
       )
       forAll(endpoints.zipWithIndex) { case (endpoint, idx) =>
-        // the starting revision is 2 because this storage has been updated to default = false
-        Put(s"$endpoint?rev=${idx + 2}", s3FieldsJson.toEntity) ~> asWriter ~> routes ~> check {
+        Put(s"$endpoint?rev=${idx + 1}", s3FieldsJson.toEntity) ~> asWriter ~> routes ~> check {
           status shouldEqual StatusCodes.OK
-          response.asJson shouldEqual storageMetadata(projectRef, s3Id, StorageType.S3Storage, rev = idx + 3)
+          response.asJson shouldEqual storageMetadata(projectRef, s3Id, StorageType.S3Storage, rev = idx + 2)
         }
       }
     }
@@ -194,7 +177,7 @@ class StoragesRoutesSpec extends BaseRouteSpec with StorageFixtures with UUIDFFi
       Put("/v1/storages/myorg/myproject/s3-storage?rev=10", s3FieldsJson.toEntity) ~> asWriter ~> routes ~> check {
         status shouldEqual StatusCodes.Conflict
         response.asJson shouldEqual
-          jsonContentOf("storages/errors/incorrect-rev.json", "provided" -> 10, "expected" -> 4)
+          jsonContentOf("storages/errors/incorrect-rev.json", "provided" -> 10, "expected" -> 3)
       }
     }

@@ -205,14 +188,14 @@ class StoragesRoutesSpec extends BaseRouteSpec with StorageFixtures with UUIDFFi
     }

     "deprecate a storage" in {
-      Delete("/v1/storages/myorg/myproject/s3-storage?rev=4") ~> asWriter ~> routes ~> check {
+      Delete("/v1/storages/myorg/myproject/s3-storage?rev=3") ~> asWriter ~> routes ~> check {
         status shouldEqual StatusCodes.OK
         response.asJson shouldEqual
           storageMetadata(
             projectRef,
             s3Id,
             StorageType.S3Storage,
-            rev = 5,
+            rev = 4,
             deprecated = true,
             updatedBy = writer,
             createdBy = writer
@@ -228,7 +211,7 @@ class StoragesRoutesSpec extends BaseRouteSpec with StorageFixtures with UUIDFFi
     }

     "reject the deprecation of a already deprecated storage" in {
-      Delete(s"/v1/storages/myorg/myproject/s3-storage?rev=5") ~> asWriter ~> routes ~> check {
+      Delete(s"/v1/storages/myorg/myproject/s3-storage?rev=4") ~> asWriter ~> routes ~> check {
         status shouldEqual StatusCodes.BadRequest
         response.asJson shouldEqual jsonContentOf("storages/errors/storage-deprecated.json", "id" -> s3Id)
       }
@@ -282,16 +265,8 @@ class StoragesRoutesSpec extends BaseRouteSpec with StorageFixtures with UUIDFFi
     }

     "fail to fetch a storage and do listings without resources/read permission" in {
-      val endpoints = List(
-        "/v1/storages/myorg/myproject/caches",
-        "/v1/storages/myorg/myproject/remote-disk-storage"
-      )
-      forAll(endpoints) { endpoint =>
-        forAll(List("", "?rev=1")) { suffix =>
-          Get(s"$endpoint$suffix") ~> routes ~> check {
-            response.shouldBeForbidden
-          }
-        }
+      Get(s"/v1/storages/myorg/myproject/s3-storage") ~> routes ~> check {
+        response.shouldBeForbidden
       }
     }

@@ -307,31 +282,31 @@ class StoragesRoutesSpec extends BaseRouteSpec with StorageFixtures with UUIDFFi

     "fetch a storage by rev" in {
       val endpoints = List(
-        "/v1/storages/myorg/myproject/remote-disk-storage",
-        "/v1/resources/myorg/myproject/_/remote-disk-storage",
-        "/v1/resources/myorg/myproject/storage/remote-disk-storage",
-        s"/v1/storages/myorg/myproject/$remoteIdEncoded",
-        s"/v1/resources/myorg/myproject/_/$remoteIdEncoded",
-        s"/v1/resources/myorg/myproject/storage/$remoteIdEncoded"
+        "/v1/storages/myorg/myproject/s3-storage",
+        "/v1/resources/myorg/myproject/_/s3-storage",
+        "/v1/resources/myorg/myproject/storage/s3-storage",
+        s"/v1/storages/myorg/myproject/$s3IdEncoded",
+        s"/v1/resources/myorg/myproject/_/$s3IdEncoded",
+        s"/v1/resources/myorg/myproject/storage/$s3IdEncoded"
       )
       forAll(endpoints) { endpoint =>
-        Get(s"$endpoint?rev=1") ~> asReader ~> routes ~> check {
+        Get(s"$endpoint?rev=4") ~> asReader ~> routes ~> check {
           status shouldEqual StatusCodes.OK
           response.asJson shouldEqual jsonContentOf(
-            "storages/remote-storage-fetched.json",
-            "self" -> self(rdId)
+            "storages/s3-storage-fetched.json",
+            "self" -> self(s3Id)
           )
         }
       }
     }

     "fetch a storage original payload" in {
-      val expectedSource = remoteFieldsJson deepMerge json"""{"default": false}"""
+      val expectedSource = s3FieldsJson
       val endpoints = List(
-        "/v1/storages/myorg/myproject/remote-disk-storage/source",
-        "/v1/resources/myorg/myproject/_/remote-disk-storage/source",
-        s"/v1/storages/myorg/myproject/$remoteIdEncoded/source",
-        s"/v1/resources/myorg/myproject/_/$remoteIdEncoded/source"
+        "/v1/storages/myorg/myproject/s3-storage/source",
+        "/v1/resources/myorg/myproject/_/s3-storage/source",
+        s"/v1/storages/myorg/myproject/$s3IdEncoded/source",
+        s"/v1/resources/myorg/myproject/_/$s3IdEncoded/source"
      )
      forAll(endpoints) { endpoint =>
        Get(endpoint) ~> asReader ~> routes ~> check {
@@ -343,19 +318,19 @@ class StoragesRoutesSpec extends BaseRouteSpec with StorageFixtures with UUIDFFi

     "fetch a storage original payload by rev" in {
       val endpoints = List(
-        "/v1/storages/myorg/myproject/remote-disk-storage/source",
-        s"/v1/storages/myorg/myproject/$remoteIdEncoded/source"
+        "/v1/storages/myorg/myproject/s3-storage/source",
+        s"/v1/storages/myorg/myproject/$s3IdEncoded/source"
       )
       forAll(endpoints) { endpoint =>
-        Get(s"$endpoint?rev=1") ~> asReader ~> routes ~> check {
+        Get(s"$endpoint?rev=4") ~> asReader ~> routes ~> check {
           status shouldEqual StatusCodes.OK
-          response.asJson shouldEqual remoteFieldsJson
+          response.asJson shouldEqual s3FieldsJson
         }
       }
     }

     "get storage statistics for an existing entry" in {
-      Get("/v1/storages/myorg/myproject/remote-disk-storage/statistics") ~> asReader ~> routes ~> check {
+      Get("/v1/storages/myorg/myproject/s3-storage/statistics") ~> asReader ~> routes ~> check {
         status shouldEqual StatusCodes.OK
         response.asJson shouldEqual jsonContentOf("storages/statistics.json")
       }
diff --git a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/auth/AuthTokenProvider.scala b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/auth/AuthTokenProvider.scala
index 769511b411..2a958fb6b6 100644
--- a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/auth/AuthTokenProvider.scala
+++ b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/auth/AuthTokenProvider.scala
@@ -9,7 +9,7 @@ import ch.epfl.bluebrain.nexus.delta.sdk.auth.Credentials.ClientCredentials
 import java.time.{Duration, Instant}

 /**
-  * Provides an auth token for the service account, for use when comunicating with remote storage
+  * Provides an auth token for the service account, for use when communicating with remote Delta instances
   */
 trait AuthTokenProvider {
   def apply(credentials: Credentials): IO[Option[AuthToken]]
diff --git a/docs/src/main/paradox/docs/releases/index.md b/docs/src/main/paradox/docs/releases/index.md
index 2e3d9611b1..bbc6a9cd43 100644
--- a/docs/src/main/paradox/docs/releases/index.md
+++ b/docs/src/main/paradox/docs/releases/index.md
@@ -30,7 +30,7 @@ The latest stable release is **v1.10.0** released on **17.09.2024**.
 The items listed below are changes that have been made in this release that break compatibility with previous releases.

-- The Remote storage server implementation has been removed.
+- The Remote storage implementation has been removed.
 - The automatic provisioning of projects has been removed.
 - The Jira integration plugin has been removed.

 @@@
diff --git a/docs/src/main/paradox/docs/releases/v1.11-release-notes.md b/docs/src/main/paradox/docs/releases/v1.11-release-notes.md
index 35a3c2a51d..89f39cf30a 100644
--- a/docs/src/main/paradox/docs/releases/v1.11-release-notes.md
+++ b/docs/src/main/paradox/docs/releases/v1.11-release-notes.md
@@ -34,11 +34,9 @@ Nexus now allows to provision acl at start up to simplify automated deployments.

 @ref[More here](../running-nexus/configuration/index.md#acl-provisioning)

-### Remote storage server
+### Remote storage

-The remote storage server part has been removed.
-
-The client is deprecated and remains compatible with @link:[the remote storage in 1.10](https://hub.docker.com/r/bluebrain/nexus-storage/tags).
+The remote storage implementation has been removed.

 ## Nexus Fusion

diff --git a/ship/src/main/resources/ship-default.conf b/ship/src/main/resources/ship-default.conf
index 0e028ae570..b5a4d8a7af 100644
--- a/ship/src/main/resources/ship-default.conf
+++ b/ship/src/main/resources/ship-default.conf
@@ -142,29 +142,6 @@ ship {
     default-max-file-size = 10737418240
   }

-  remote-disk {
-    # to enable remote storage
-    enabled = false
-    # the default endpoint
-    default-endpoint = "http://localhost:8084/v1"
-    # the default credentials for the endpoint
-    credentials {
-      type: "anonymous"
-    }
-    # the default digest algorithm
-    digest-algorithm = "SHA-256"
-    # the default permission required in order to download a file from a remote disk storage
-    default-read-permission = "resources/read"
-    # the default permission required in order to upload a file to a remote disk storage
-    default-write-permission = "files/write"
-    # flag to decide whether or not to show the absolute location of the files in the metadata response
-    show-location = true
-    # the default maximum allowed file size (in bytes) for uploaded files. 10 GB
-    default-max-file-size = 10737418240
-    # Retry delay for digest computation
-    digest-computation-retry-delay = 5s
-  }
-
   # the storages event log configuration
   event-log = ${ship.input.event-log}

   pagination {
diff --git a/ship/src/main/scala/ch/epfl/bluebrain/nexus/ship/files/FileWiring.scala b/ship/src/main/scala/ch/epfl/bluebrain/nexus/ship/files/FileWiring.scala
index 9638a698ef..dd4d2e5e3e 100644
--- a/ship/src/main/scala/ch/epfl/bluebrain/nexus/ship/files/FileWiring.scala
+++ b/ship/src/main/scala/ch/epfl/bluebrain/nexus/ship/files/FileWiring.scala
@@ -1,12 +1,12 @@
 package ch.epfl.bluebrain.nexus.ship.files

-import akka.http.scaladsl.model.{HttpEntity, Uri}
+import akka.http.scaladsl.model.HttpEntity
 import cats.effect.IO
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{ComputedFileAttributes, FileAttributes, FileDelegationRequest, FileStorageMetadata}
+import ch.epfl.bluebrain.nexus.delta.kernel.AkkaSource
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes, FileDelegationRequest, FileStorageMetadata}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.{FormDataExtractor, UploadedFileInformation}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.FileOperations
-import ch.epfl.bluebrain.nexus.delta.kernel.AkkaSource

 object FileWiring {

@@ -21,12 +21,6 @@ object FileWiring {
     override def fetch(storage: Storage, attributes: FileAttributes): IO[AkkaSource] =
       noFileOperationError

-    override def legacyLink(storage: Storage, sourcePath: Uri.Path, filename: String): IO[FileStorageMetadata] =
-      noFileOperationError
-
-    override def fetchAttributes(storage: Storage, attributes: FileAttributes): IO[ComputedFileAttributes] =
-      noFileOperationError
-
     override def delegate(storage: Storage, filename: String): IO[FileDelegationRequest.TargetLocation] =
       noFileOperationError
   }
diff --git a/tests/docker/config/delta-postgres.conf b/tests/docker/config/delta-postgres.conf
index 56d8f7436a..b0ab2b44a6 100644
--- a/tests/docker/config/delta-postgres.conf
+++ b/tests/docker/config/delta-postgres.conf
@@ -164,17 +164,6 @@ plugins {
       default-volume= "/default-volume"
     }

-    remote-disk {
-      enabled = true
-      credentials {
-        type: "client-credentials"
-        user: "delta"
-        password: "shhh"
-        realm: "internal"
-      }
-      default-endpoint = "http://storage-service:8080/v1"
-    }
-
     amazon {
       enabled = true
       default-endpoint = "http://s3.localhost.localstack.cloud:4566"
diff --git a/tests/docker/config/storage.conf b/tests/docker/config/storage.conf
deleted file mode 100644
index 67f187162d..0000000000
--- a/tests/docker/config/storage.conf
+++ /dev/null
@@ -1,27 +0,0 @@
-app {
-  http {
-    interface = "0.0.0.0"
-    public-uri = "http://storage.dev.nise.bbp.epfl.ch"
-  }
-
-  instance {
-    interface = "0.0.0.0"
-  }
-
-  storage {
-    root-volume = "/tmp"
-    protected-directory = "protected"
-    fixer-enabled = false
-  }
-
-  authorization {
-    method = "anonymous"
-  }
-
-  media-type-detector {
-    extensions {
-      custom = "application/custom"
-    }
-  }
-
-}
\ No newline at end of file
diff --git a/tests/docker/docker-compose.yml b/tests/docker/docker-compose.yml
index 9ab3540520..b6af12082e 100644
--- a/tests/docker/docker-compose.yml
+++ b/tests/docker/docker-compose.yml
@@ -15,8 +15,6 @@ services:
         condition: service_healthy
       postgres:
         condition: service_started
-      storage-service:
-        condition: service_started
       localstack:
         condition: service_started
     environment:
@@ -149,20 +147,6 @@ services:
      POSTGRES_USER: "postgres"
      POSTGRES_PASSWORD: "postgres"

-  storage-service:
-    container_name: "nexus-storage-service"
-    image: bluebrain/nexus-storage:1.10.0
-    environment:
-      STORAGE_CONFIG_FILE: "/config/storage.conf"
-      KAMON_ENABLED: "false"
-    entrypoint: [ "./bin/storage",
-                  "-Dkamon.modules.prometheus-reporter.enabled=false",
-                  "-Dkamon.modules.jaeger.enabled=false" ]
-    ports:
-      - 8090:8090
-    volumes:
-      - ./config:/config
-
   localstack:
     image: localstack/localstack:4.0
     environment:
diff --git a/tests/src/test/resources/kg/files/linking-metadata.json b/tests/src/test/resources/kg/files/linking-metadata.json
deleted file mode 100644
index f038b39370..0000000000
--- a/tests/src/test/resources/kg/files/linking-metadata.json
+++ /dev/null
@@ -1,31 +0,0 @@
-{
-  "@context" : [
-    "https://bluebrain.github.io/nexus/contexts/files.json",
-    "https://bluebrain.github.io/nexus/contexts/metadata.json"
-  ],
-  "@id": "{{deltaUri}}/resources/{{projId}}/_/logo.png",
-  "@type": "File",
-  "_storage": {
-    "@id": "https://bluebrain.github.io/nexus/vocabulary/mys3storage2",
-    "@type" : "S3Storage",
-    "_rev": 1
-  },
-  "_bytes": 29625,
-  "_digest": {
-    "_value": "05bf442810213b9e5fecd5242eefeff1f3d207913861c96658c75ccf58997e87",
-    "_algorithm": "SHA-256"
-  },
-  "_filename": "logo.png",
-  "_location": "{{endpointBucket}}/{{key}}",
-  "_mediaType": "image/png",
-  "_origin" : "Storage",
-  "_incoming": "{{self}}/incoming",
-  "_outgoing": "{{self}}/outgoing",
-  "_self": "{{self}}",
-  "_constrainedBy": "https://bluebrain.github.io/nexus/schemas/files.json",
-  "_project": "{{deltaUri}}/projects/{{projId}}",
-  "_rev": 1,
-  "_deprecated": false,
-  "_createdBy": "{{deltaUri}}/realms/{{realm}}/users/{{user}}",
-  "_updatedBy": "{{deltaUri}}/realms/{{realm}}/users/{{user}}"
-}
\ No newline at end of file
diff --git a/tests/src/test/resources/kg/files/linking-notfound.json b/tests/src/test/resources/kg/files/linking-notfound.json
deleted file mode 100644
index 53d537e74e..0000000000
--- a/tests/src/test/resources/kg/files/linking-notfound.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "@context" : "https://bluebrain.github.io/nexus/contexts/error.json",
-  "@type" : "FileNotFound",
-  "reason" : "File 'http://delta:8080/v1/resources/{{org}}/{{proj}}/_/nonexistent.png' could not be linked using storage 'https://bluebrain.github.io/nexus/vocabulary/mys3storage2'",
-  "details" : "File could not be retrieved from expected path '{{endpointBucket}}/non/existent.png'."
-} \ No newline at end of file diff --git a/tests/src/test/resources/kg/files/linking-notsupported.json b/tests/src/test/resources/kg/files/linking-notsupported.json deleted file mode 100644 index 39e2405de9..0000000000 --- a/tests/src/test/resources/kg/files/linking-notsupported.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "@context" : "https://bluebrain.github.io/nexus/contexts/error.json", - "@type" : "UnsupportedOperation", - "reason" : "File 'http://delta:8080/v1/resources/{{org}}/{{proj}}/_/linking.png' could not be linked using storage 'https://bluebrain.github.io/nexus/vocabulary/diskStorageDefault'", - "details" : "Moving a file is not supported for storages of type 'https://bluebrain.github.io/nexus/vocabulary/DiskStorage'" -} \ No newline at end of file diff --git a/tests/src/test/resources/kg/files/remote-linked.json b/tests/src/test/resources/kg/files/remote-linked.json deleted file mode 100644 index a9a1c64a25..0000000000 --- a/tests/src/test/resources/kg/files/remote-linked.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "@context": [ - "https://bluebrain.github.io/nexus/contexts/files.json", - "https://bluebrain.github.io/nexus/contexts/metadata.json" - ], - "@id": "{{id}}", - "@type": "File", - "_bytes": 13, - "_constrainedBy": "https://bluebrain.github.io/nexus/schemas/files.json", - "_createdBy": "{{deltaUri}}/realms/{{realm}}/users/{{user}}", - "_deprecated": false, - "_digest": { - "_value": "" - }, - "_filename": "{{filename}}", - {{#if mediaType}} - "_mediaType": "{{mediaType}}", - {{/if}} - "_origin": "Storage", - "_incoming": "{{self}}/incoming", - "_outgoing": "{{self}}/outgoing", - "_project": "{{project}}", - "_rev": 1, - "_self": "{{self}}", - "_storage": { - "@id": "https://bluebrain.github.io/nexus/vocabulary/{{storageId}}", - "@type": "{{storageType}}", - "_rev": 1 - }, - "_updatedBy": "{{deltaUri}}/realms/{{realm}}/users/{{user}}" -} \ No newline at end of file diff --git a/tests/src/test/resources/kg/files/remote-updated-linked.json b/tests/src/test/resources/kg/files/remote-updated-linked.json deleted file mode 100644 index 2c3e428a31..0000000000 --- a/tests/src/test/resources/kg/files/remote-updated-linked.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "@context": [ - "https://bluebrain.github.io/nexus/contexts/files.json", - "https://bluebrain.github.io/nexus/contexts/metadata.json" - ], - "@id": "{{id}}", - "@type": "File", - "_bytes": 13, - "_constrainedBy": "https://bluebrain.github.io/nexus/schemas/files.json", - "_createdBy": "{{deltaUri}}/realms/{{realm}}/users/{{user}}", - "_deprecated": false, - "_digest": { - "_algorithm": "SHA-256", - "_value": "694b27f021c4861b3373cd5ddbc42695c056d0a4297d2d85e2dae040a84e61df" - }, - "_filename": "{{filename}}", - "_incoming": "{{self}}/incoming", - "_mediaType": "{{mediaType}}", - "_origin": "Storage", - "_outgoing": "{{self}}/outgoing", - "_project": "{{project}}", - "_rev": 2, - "_self": "{{self}}", - "_storage": { - "@id": "https://bluebrain.github.io/nexus/vocabulary/{{storageId}}", - "@type": "{{storageType}}", - "_rev": 1 - }, - "_updatedBy": "{{deltaUri}}/realms/{{realm}}/users/{{user}}" -} \ No newline at end of file diff --git a/tests/src/test/resources/kg/storages/remote-disk-response.json b/tests/src/test/resources/kg/storages/remote-disk-response.json deleted file mode 100644 index 29cbfe42f5..0000000000 --- a/tests/src/test/resources/kg/storages/remote-disk-response.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "@context": [ - "https://bluebrain.github.io/nexus/contexts/storages.json", - 
"https://bluebrain.github.io/nexus/contexts/metadata.json" - ], - "@id": "https://bluebrain.github.io/nexus/vocabulary/{{id}}", - "@type": [ - "Storage", - "RemoteDiskStorage" - ], - "default": false, - "maxFileSize" : {{maxFileSize}}, - "folder": "{{folder}}", - "readPermission": "{{read}}", - "writePermission": "{{write}}", - "_algorithm": "SHA-256", - "_incoming": "{{self}}/incoming", - "_outgoing": "{{self}}/outgoing", - "_self": "{{self}}", - "_constrainedBy": "https://bluebrain.github.io/nexus/schemas/storages.json", - "_project": "{{deltaUri}}/projects/{{project}}", - "_rev": 1, - "_deprecated": false, - "_createdBy": "{{deltaUri}}/realms/{{realm}}/users/{{user}}", - "_updatedBy": "{{deltaUri}}/realms/{{realm}}/users/{{user}}" -} \ No newline at end of file diff --git a/tests/src/test/resources/kg/storages/remote-disk.json b/tests/src/test/resources/kg/storages/remote-disk.json deleted file mode 100644 index 7620daa2ba..0000000000 --- a/tests/src/test/resources/kg/storages/remote-disk.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "@id": "https://bluebrain.github.io/nexus/vocabulary/{{id}}", - "@type": "RemoteDiskStorage", - "endpoint": "{{endpoint}}", - "folder": "{{folder}}", - "default": false, - "readPermission": "{{read}}", - "writePermission": "{{write}}" -} \ No newline at end of file diff --git a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/VersionSpec.scala b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/VersionSpec.scala index 8ed95e2dbc..0f25650725 100644 --- a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/VersionSpec.scala +++ b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/VersionSpec.scala @@ -27,8 +27,7 @@ object VersionSpec { final case class DependenciesBundle( blazegraph: String, postgres: Option[String], - elasticsearch: String, - remoteStorage: String + elasticsearch: String ) object DependenciesBundle { implicit val dependenciesBundleDecoder: Decoder[DependenciesBundle] = deriveDecoder[DependenciesBundle] diff --git a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/DiskStorageSpec.scala b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/DiskStorageSpec.scala index b9a57eebb5..7ed5fa63b8 100644 --- a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/DiskStorageSpec.scala +++ b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/DiskStorageSpec.scala @@ -63,19 +63,4 @@ class DiskStorageSpec extends StorageSpec { } } } - - s"Linking against the default storage" should { - "reject linking operations" in { - val payload = Json.obj( - "filename" -> Json.fromString("logo.png"), - "path" -> Json.fromString("does/not/matter"), - "mediaType" -> Json.fromString("image/png") - ) - - deltaClient.put[Json](s"/files/$projectRef/linking.png", payload, Coyote) { (json, response) => - response.status shouldEqual StatusCodes.BadRequest - json shouldEqual jsonContentOf("kg/files/linking-notsupported.json", "org" -> orgId, "proj" -> projId) - } - } - } } diff --git a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/RemoteStorageSpec.scala b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/RemoteStorageSpec.scala deleted file mode 100644 index 72a16de397..0000000000 --- a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/RemoteStorageSpec.scala +++ /dev/null @@ -1,440 +0,0 @@ -package ch.epfl.bluebrain.nexus.tests.kg.files - -import akka.http.scaladsl.model.{ContentTypes, MediaTypes, StatusCodes} -import akka.util.ByteString -import cats.effect.IO -import 
ch.epfl.bluebrain.nexus.tests.HttpClient._ -import ch.epfl.bluebrain.nexus.tests.Identity.storages.Coyote -import ch.epfl.bluebrain.nexus.tests.Optics.{filterKey, filterMetadataKeys, projections} -import ch.epfl.bluebrain.nexus.tests.iam.types.Permission -import ch.epfl.bluebrain.nexus.tests.iam.types.Permission.Supervision -import ch.epfl.bluebrain.nexus.tests.kg.files.model.FileInput -import io.circe.generic.semiauto.deriveDecoder -import io.circe.syntax.KeyOps -import io.circe.{Decoder, Json} -import org.scalatest.Assertion - -class RemoteStorageSpec extends StorageSpec { - - override def storageName: String = "external" - - override def storageType: String = "RemoteDiskStorage" - - override def storageId: String = "myexternalstorage" - - override def locationPrefix: Option[String] = Some(s"file:///tmp/$remoteFolder") - - private val remoteFolder = genId() - - override def beforeAll(): Unit = { - super.beforeAll() - storagesDsl.mkProtectedFolderInStorageService(remoteFolder).accepted - } - - override def afterAll(): Unit = { - super.afterAll() - storagesDsl.deleteFolderInStorageService(remoteFolder).accepted - } - - private def storageResponse(project: String, id: String, readPermission: String, writePermission: String) = - jsonContentOf( - "kg/storages/remote-disk-response.json", - replacements( - Coyote, - "folder" -> remoteFolder, - "id" -> id, - "project" -> project, - "self" -> storageSelf(project, s"https://bluebrain.github.io/nexus/vocabulary/$id"), - "maxFileSize" -> storageConfig.maxFileSize.toString, - "read" -> readPermission, - "write" -> writePermission - ): _* - ) - - override def createStorages(projectRef: String, storId: String, storName: String): IO[Assertion] = { - val storageId2 = s"${storId}2" - val storage2Read = s"$storName/read" - val storage2Write = s"$storName/write" - - val expectedStorage = storageResponse(projectRef, storId, "resources/read", "files/write") - val expectedStorageSource = - jsonContentOf( - "kg/storages/storage-source.json", - "folder" -> remoteFolder, - "storageBase" -> StoragesDsl.StorageServiceBaseUrl, - "id" -> storId - ) - val expectedStorageWithPerms = storageResponse(projectRef, storageId2, storage2Read, storage2Write) - - for { - _ <- storagesDsl.createRemoteStorageWithDefaultPerms(storId, projectRef, remoteFolder) - _ <- storagesDsl.checkStorageMetadata(projectRef, storId, expectedStorage) - _ <- storagesDsl.checkStorageSource(projectRef, storId, expectedStorageSource) - _ <- permissionDsl.addPermissions(Permission(storName, "read"), Permission(storName, "write")) - _ <- storagesDsl.createRemoteStorageWithCustomPerms( - storageId2, - projectRef, - remoteFolder, - storage2Read, - storage2Write - ) - _ <- storagesDsl.checkStorageMetadata(projectRef, storageId2, expectedStorageWithPerms) - } yield succeed - } - - def putFile(name: String, content: String, storageId: String) = { - val file = FileInput(s"test-resource:$name", name, ContentTypes.`text/plain(UTF-8)`, content) - deltaClient.uploadFile(projectRef, storageId, file, None) { expectCreated } - } - - def randomString(length: Int) = { - val r = new scala.util.Random - val sb = new StringBuilder - for (_ <- 1 to length) { - sb.append(r.nextPrintableChar()) - } - sb.toString - } - - case class CustomMetadata(name: String, description: String, keywords: Map[String, String]) - private val customMetadata = - CustomMetadata("cool name", "good description", Map("key1" -> "value1", "key2" -> "value2")) - - "succeed many large files are in the archive, going over the time limit" ignore { - 
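-    // Editorial note (hedged): the statements below build roughly 130 MB of random printable text and
-    // upload it as fifteen separate files (about 2 GB in total) before zipping them into one archive,
-    // so the download is exercised well past the usual time limit, presumably why this test is ignored.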
val content = randomString(130000000) - val payload = jsonContentOf("kg/archives/archive-many-large-files.json") - var before = 0L - for { - _ <- putFile("largefile1.txt", content, s"${storageId}2") - _ <- putFile("largefile2.txt", content, s"${storageId}2") - _ <- putFile("largefile3.txt", content, s"${storageId}2") - _ <- putFile("largefile4.txt", content, s"${storageId}2") - _ <- putFile("largefile5.txt", content, s"${storageId}2") - _ <- putFile("largefile6.txt", content, s"${storageId}2") - _ <- putFile("largefile7.txt", content, s"${storageId}2") - _ <- putFile("largefile8.txt", content, s"${storageId}2") - _ <- putFile("largefile9.txt", content, s"${storageId}2") - _ <- putFile("largefile10.txt", content, s"${storageId}2") - _ <- putFile("largefile11.txt", content, s"${storageId}2") - _ <- putFile("largefile12.txt", content, s"${storageId}2") - _ <- putFile("largefile13.txt", content, s"${storageId}2") - _ <- putFile("largefile14.txt", content, s"${storageId}2") - _ <- putFile("largefile15.txt", content, s"${storageId}2") - _ <- - deltaClient.put[ByteString](s"/archives/$projectRef/nxv:very-large-archive", payload, Coyote) { (_, response) => - before = System.currentTimeMillis() - response.status shouldEqual StatusCodes.Created - } - _ <- - deltaClient.get[ByteString](s"/archives/$projectRef/nxv:very-large-archive", Coyote, acceptAll) { - (_, response) => - println(s"time taken to download archive: ${System.currentTimeMillis() - before}ms") - response.status shouldEqual StatusCodes.OK - contentType(response) shouldEqual MediaTypes.`application/zip`.toContentType - } - } yield { - succeed - } - } - - "Creating a remote storage" should { - "fail creating a RemoteDiskStorage without folder" in { - val payload = storagesDsl.remoteDiskPayloadDefaultPerms(storageId, "nexustest").accepted - - deltaClient.post[Json](s"/storages/$projectRef", filterKey("folder")(payload), Coyote) { (_, response) => - response.status shouldEqual StatusCodes.BadRequest - } - } - } - - def createFileInStorageService(filename: String): IO[Unit] = - storagesDsl.runCommandInStorageService(s"echo 'file content' > /tmp/$remoteFolder/$filename") - - def linkPayload(filename: String, path: String, mediaType: Option[String]) = - Json.obj( - "filename" := filename, - "path" := path, - "mediaType" := mediaType - ) - - private def linkPayloadWithMetadata( - filename: String, - path: String, - md: CustomMetadata - ) = - Json.obj( - "filename" := filename, - "path" := path, - "mediaType" := None, - "metadata" := Json.obj( - "name" := md.name, - "description" := md.description, - "keywords" := md.keywords - ) - ) - - def linkFile(payload: Json)(fileId: String, filename: String, mediaType: Option[String]) = { - val expected = jsonContentOf( - "kg/files/remote-linked.json", - replacements( - Coyote, - "id" -> fileId, - "self" -> fileSelf(projectRef, fileId), - "filename" -> filename, - "mediaType" -> mediaType.orNull, - "storageId" -> s"${storageId}2", - "storageType" -> storageType, - "projId" -> s"$projectRef", - "project" -> s"${config.deltaUri}/projects/$projectRef" - ): _* - ) - deltaClient.put[Json](s"/files/$projectRef/$filename?storage=nxv:${storageId}2", payload, Coyote) { - (json, response) => - filterMetadataKeys.andThen(filterKey("_location"))(json) shouldEqual expected - response.status shouldEqual StatusCodes.Created - } - } - - def fetchUpdatedLinkedFile(fileId: String, filename: String, mediaType: String) = { - val expected = jsonContentOf( - "kg/files/remote-updated-linked.json", - replacements( - Coyote, - 
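-        // Editorial note: the bindings below fill the Handlebars placeholders of
-        // kg/files/remote-updated-linked.json, a template deleted earlier in this patch.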
"id" -> fileId, - "self" -> fileSelf(projectRef, fileId), - "filename" -> filename, - "mediaType" -> mediaType, - "storageId" -> s"${storageId}2", - "storageType" -> storageType, - "projId" -> s"$projectRef", - "project" -> s"${config.deltaUri}/projects/$projectRef" - ): _* - ) - - eventually { - deltaClient.get[Json](s"/files/$projectRef/$filename", Coyote) { (json, response) => - response.status shouldEqual StatusCodes.OK - filterMetadataKeys.andThen(filterKey("_location"))(json) shouldEqual expected - } - } - } - - "Linking a custom file providing a media type for a .custom file" should { - - val filename = "link_file.custom" - val fileId = s"${config.deltaUri}/resources/$projectRef/_/$filename" - - "succeed" in { - - val mediaType = "application/json" - val payload = linkPayload(filename, filename, Some(mediaType)) - - for { - _ <- createFileInStorageService(filename) - // Get a first response without the digest - _ <- linkFile(payload)(fileId, filename, Some(mediaType)) - // Eventually - } yield succeed - } - - "fetch eventually a linked file with updated attributes" in { - val mediaType = "application/json" - fetchUpdatedLinkedFile(fileId, filename, mediaType) - } - } - - "Linking a file without a media type for a .custom file" should { - - val filename = "link_file_no_media_type.custom" - val fileId = s"${config.deltaUri}/resources/$projectRef/_/$filename" - - "succeed" in { - val payload = linkPayload(filename, filename, None) - - for { - _ <- createFileInStorageService(filename) - // Get a first response without the digest - _ <- linkFile(payload)(fileId, filename, None) - // Eventually - } yield succeed - } - - "fetch eventually a linked file with updated attributes detecting application/custom from config" in { - val mediaType = "application/custom" - fetchUpdatedLinkedFile(fileId, filename, mediaType) - } - } - - "Linking a file without a media type for a .txt file" should { - - val filename = "link_file.txt" - val fileId = s"${config.deltaUri}/resources/$projectRef/_/$filename" - - "succeed" in { - val payload = linkPayload(filename, filename, None) - - for { - _ <- createFileInStorageService(filename) - // Get a first response without the digest - _ <- linkFile(payload)(fileId, filename, None) - // Eventually - } yield succeed - } - - "fetch eventually a linked file with updated attributes detecting text/plain from akka" in { - val mediaType = "text/plain; charset=UTF-8" - fetchUpdatedLinkedFile(fileId, filename, mediaType) - } - } - - "Linking a file without a media type for a file without extension" should { - - val filename = "link_file_no_extension" - val fileId = s"${config.deltaUri}/resources/$projectRef/_/$filename" - - "succeed" in { - val payload = linkPayload(filename, filename, None) - - for { - _ <- createFileInStorageService(filename) - // Get a first response without the digest - _ <- linkFile(payload)(fileId, filename, None) - // Eventually - } yield succeed - } - - "fetch eventually a linked file with updated attributes falling back to default mediaType" in { - val mediaType = "application/octet-stream" - fetchUpdatedLinkedFile(fileId, filename, mediaType) - } - } - - "Linking providing a nonexistent file" should { - - "fail" in { - val payload = linkPayload("logo.png", "non/existent.png", Some("image/png")) - - deltaClient.put[Json](s"/files/$projectRef/nonexistent.png?storage=nxv:${storageId}2", payload, Coyote) { - (_, response) => - response.status shouldEqual StatusCodes.BadRequest - } - } - - } - - "Linking a file with custom metadata should" should { - 
- "succeed" in { - val filename = s"${genString()}.txt" - val md = customMetadata - val payload = linkPayloadWithMetadata(filename, filename, md) - - for { - _ <- createFileInStorageService(filename) - _ <- linkFile(filename, payload) - _ <- assertCorrectCustomMetadata(filename, md) - } yield succeed - } - - "succeed when updating with metadata" in { - val filename = s"${genString()}.txt" - val filename2 = s"${genString()}.txt" - val md = customMetadata - - val simplePayload = linkPayload(filename, filename, None) - val payloadWithMetadata = linkPayloadWithMetadata(filename2, filename2, md) - - val setup = for { - _ <- createFileInStorageService(filename) - _ <- createFileInStorageService(filename2) - _ <- linkFile(filename, simplePayload) - } yield succeed - - setup.accepted - eventually { assertFileRevision(filename, 2) } - updateFileLink(filename, payloadWithMetadata).accepted - - eventually { assertCorrectCustomMetadata(filename, md) } - } - - } - - "The file-attributes-updated projection description" should { - "exist" in { - aclDsl.addPermission("/", Coyote, Supervision.Read).accepted - deltaClient.get[Json]("/supervision/projections", Coyote) { (json, _) => - val expected = json"""{ "module": "system", "name": "file-attributes-update" }""" - assert(projections.metadata.json.exist(_ == expected)(json)) - } - } - - "have updated progress when a file is updated" in { - case class SupervisedDescription(metadata: Metadata, progress: ProjectionProgress) - case class Metadata(module: String, name: String) - case class ProjectionProgress(processed: Int) - - implicit val metadataDecoder: Decoder[Metadata] = deriveDecoder - - implicit val progressDecoder: Decoder[ProjectionProgress] = deriveDecoder - implicit val descriptionDecoder: Decoder[SupervisedDescription] = deriveDecoder - - /** - * Given a list of supervised descriptions (json), get the number of processed elements for the - * `file-attribute-update` projection - */ - def getProcessed(json: Json): Option[Int] = { - val Right(projections) = json.hcursor.downField("projections").as[List[SupervisedDescription]] - val fileAttributeProjection = - projections.find(p => p.metadata.name == "file-attribute-update" && p.metadata.module == "system") - fileAttributeProjection.map(_.progress.processed) - } - - // get progress prior to updating the file - deltaClient.get[Json]("/supervision/projections", Coyote) { (json1, _) => - val file = FileInput("file.txt", "file.txt", ContentTypes.`application/json`, s"""{ "json": "content"}""") - eventually { - // update the file - deltaClient.uploadFile(projectRef, s"${storageId}2", file, Some(2)) { (_, _) => - eventually { - // get progress after the file update and compare - deltaClient.get[Json]("/supervision/projections", Coyote) { (json2, _) => - assert(getProcessed(json2) == getProcessed(json1).map(_ + 1)) - } - } - } - } - } - } - - } - - private def linkFile(filename: String, payload: Json) = - deltaClient.put[Json](s"/files/$projectRef/$filename?storage=nxv:${storageId}2", payload, Coyote) { (_, response) => - response.status shouldEqual StatusCodes.Created - } - - private def assertFileRevision(filename: String, expectedRev: Int) = - deltaClient - .get[Json](s"/files/$projectRef/$filename", Coyote) { (json, _) => - json.hcursor.get[Int]("_rev").toOption should contain(expectedRev) - } - - private def updateFileLink(filename: String, payload: Json) = - deltaClient - .put[Json](s"/files/$projectRef/$filename?rev=2&storage=nxv:${storageId}2", payload, Coyote) { (_, response) => - response.status 
shouldEqual StatusCodes.OK - } - - private def assertCorrectCustomMetadata( - filename: String, - customMetadata: CustomMetadata - ) = - deltaClient - .get[Json](s"/files/$projectRef/$filename", Coyote) { (json, response) => - response.status shouldEqual StatusCodes.OK - json.hcursor.get[String]("name").toOption should contain(customMetadata.name) - json.hcursor.get[String]("description").toOption should contain(customMetadata.description) - json.hcursor.get[Map[String, String]]("_keywords").toOption should contain(customMetadata.keywords) - } - -} diff --git a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/S3StorageSpec.scala b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/S3StorageSpec.scala index 5650193c31..237a5149e9 100644 --- a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/S3StorageSpec.scala +++ b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/S3StorageSpec.scala @@ -135,20 +135,6 @@ class S3StorageSpec extends StorageSpec with S3ClientFixtures { } } - s"Linking in S3" should { - "be rejected" in { - val payload = Json.obj( - "filename" := "logo.png", - "path" := logoKey, - "mediaType" := "image/png" - ) - deltaClient.put[Json](s"/files/$projectRef/logo.png?storage=nxv:${storageId}2", payload, Coyote) { - (_, response) => - response.status shouldEqual StatusCodes.BadRequest - } - } - } - "Filenames with url-encodable characters" should { "have an appropriate filename in S3" in { val id = genId() diff --git a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/StoragesDsl.scala b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/StoragesDsl.scala index 6655fc6321..d2c9a4e922 100644 --- a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/StoragesDsl.scala +++ b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/files/StoragesDsl.scala @@ -12,8 +12,6 @@ import io.circe.Json import org.scalatest._ import org.scalatest.matchers.should.Matchers -import scala.sys.process._ - class StoragesDsl(deltaClient: HttpClient) extends CirceUnmarshalling with Matchers with CirceEq { private val loader = ClasspathResourceLoader() @@ -29,18 +27,6 @@ class StoragesDsl(deltaClient: HttpClient) extends CirceUnmarshalling with Match def createDiskStorageWithCustomPerms(id: String, projectRef: String, read: String, write: String): IO[Assertion] = diskPayload(id, read, write).flatMap(createStorage(_, projectRef)) - def createRemoteStorageWithDefaultPerms(id: String, projectRef: String, folder: String): IO[Assertion] = - remoteDiskPayloadDefaultPerms(id, folder).flatMap(createStorage(_, projectRef)) - - def createRemoteStorageWithCustomPerms( - id: String, - projectRef: String, - folder: String, - read: String, - write: String - ): IO[Assertion] = - remoteDiskPayload(id, folder, read, write).flatMap(createStorage(_, projectRef)) - def checkStorageMetadata(projectRef: String, storageId: String, expected: Json): IO[Assertion] = deltaClient.get[Json](s"/storages/$projectRef/nxv:$storageId", Coyote) { (json, response) => response.status shouldEqual StatusCodes.OK @@ -53,38 +39,11 @@ class StoragesDsl(deltaClient: HttpClient) extends CirceUnmarshalling with Match filterKey("credentials")(json) should equalIgnoreArrayOrder(expected) } - def remoteDiskPayloadDefaultPerms(id: String, folder: String): IO[Json] = - remoteDiskPayload(id, folder, "resources/read", "files/write") - - def remoteDiskPayload(id: String, folder: String, readPerm: String, writePerm: String): IO[Json] = - loader.jsonContentOf( - "kg/storages/remote-disk.json", 
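-      // Editorial note: the keys below match the placeholders of kg/storages/remote-disk.json
-      // ({{endpoint}}, {{folder}}, {{read}}, {{write}}, {{id}}), a template also removed by this patch.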
- "endpoint" -> StoragesDsl.StorageServiceBaseUrl, - "read" -> readPerm, - "write" -> writePerm, - "folder" -> folder, - "id" -> id - ) - def diskPayloadDefaultPerms(id: String): IO[Json] = loader.jsonContentOf("kg/storages/disk.json", "id" -> id) def diskPayload(id: String, read: String, write: String): IO[Json] = loader.jsonContentOf("kg/storages/disk-perms.json", "id" -> id, "read" -> read, "write" -> write) - - def mkProtectedFolderInStorageService(folder: String): IO[Unit] = - runCommandInStorageService(s"mkdir -p /tmp/$folder/protected") - - def deleteFolderInStorageService(folder: String): IO[Unit] = - runCommandInStorageService(s"rm -rf /tmp/$folder") - - def runCommandInStorageService(cmd: String): IO[Unit] = - runBlockingProcess(s"docker exec nexus-storage-service bash -c \"$cmd\"") - - private def runBlockingProcess(cmd: String): IO[Unit] = IO.blocking(cmd.!).flatMap { - case 0 => IO.unit - case other => IO.raiseError(new Exception(s"Command $cmd failed with code $other")) - } } object StoragesDsl {