diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFields.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFields.scala
index d9640a1a08..bfd3985728 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFields.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFields.scala
@@ -1,11 +1,9 @@
 package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model
 
-import akka.http.scaladsl.model.Uri
 import ch.epfl.bluebrain.nexus.delta.kernel.Secret
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.StorageTypeConfig
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, RemoteDiskStorageValue, S3StorageValue}
 import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri
-import ch.epfl.bluebrain.nexus.delta.sdk.implicits._
 import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords
 import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.decoder.configuration.semiauto.deriveConfigJsonLdDecoder
 import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.decoder.{Configuration => JsonLdConfiguration, JsonLdDecoder}
@@ -151,8 +149,6 @@ object StorageFields {
       description: Option[String],
       default: Boolean,
       bucket: String,
-      endpoint: Option[Uri],
-      region: Option[Region],
       readPermission: Option[Permission],
       writePermission: Option[Permission],
       maxFileSize: Option[Long]
@@ -169,8 +165,6 @@ object StorageFields {
         default,
         cfg.digestAlgorithm,
         bucket,
-        endpoint.orElse(Some(cfg.defaultEndpoint)),
-        region,
         readPermission.getOrElse(cfg.defaultReadPermission),
         writePermission.getOrElse(cfg.defaultWritePermission),
         computeMaxFileSize(maxFileSize, cfg.defaultMaxFileSize)
diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageValue.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageValue.scala
index e33007da4e..665cdd217b 100644
--- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageValue.scala
+++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageValue.scala
@@ -6,22 +6,19 @@
 import akka.stream.alpakka.s3.{ApiVersion, MemoryBufferType}
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.StorageTypeConfig
 import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords
-import ch.epfl.bluebrain.nexus.delta.sdk.implicits._
 import ch.epfl.bluebrain.nexus.delta.sdk.permissions.model.Permission
 import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef}
 import io.circe.generic.extras.Configuration
 import io.circe.generic.extras.semiauto.{deriveConfiguredCodec, deriveConfiguredEncoder}
 import io.circe.syntax._
-import io.circe.{Codec, Decoder, Encoder}
+import io.circe.{Codec, Encoder}
 import software.amazon.awssdk.auth.credentials.{AnonymousCredentialsProvider, AwsBasicCredentials, StaticCredentialsProvider}
 import software.amazon.awssdk.regions.Region
 import software.amazon.awssdk.regions.providers.AwsRegionProvider
 
 import java.io.File
-import java.nio.file.Path
 import scala.annotation.nowarn
 import scala.reflect.io.Directory
-import scala.util.Try
 
 sealed trait StorageValue extends Product with Serializable {
@@ -137,8 +134,6 @@ object StorageValue {
       default: Boolean,
       algorithm: DigestAlgorithm,
       bucket: String,
-      endpoint: Option[Uri],
-      region: Option[Region],
       readPermission: Permission,
       writePermission: Permission,
       maxFileSize: Long
@@ -147,12 +142,7 @@ object StorageValue {
     override val tpe: StorageType = StorageType.S3Storage
     override val capacity: Option[Long] = None
 
-    def address(bucket: String): Uri =
-      endpoint match {
-        case Some(host) if host.scheme.trim.isEmpty => Uri(s"https://$bucket.$host")
-        case Some(e)                                => e.withHost(s"$bucket.${e.authority.host}")
-        case None                                   => region.fold(s"https://$bucket.s3.amazonaws.com")(r => s"https://$bucket.s3.$r.amazonaws.com")
-      }
+    def address(bucket: String): Uri = s"https://$bucket.s3.${Region.US_EAST_1}.amazonaws.com"
 
     /**
       * @return
@@ -172,7 +162,7 @@ object StorageValue {
       }
 
       val regionProvider: AwsRegionProvider = new AwsRegionProvider {
-        val getRegion: Region = region.getOrElse(Region.US_EAST_1)
+        val getRegion: Region = Region.US_EAST_1
       }
 
       s3.S3Settings(MemoryBufferType, credsProvider, regionProvider, ApiVersion.ListBucketVersion2)
@@ -190,8 +180,6 @@ object StorageValue {
        default: Boolean,
        algorithm: DigestAlgorithm,
        bucket: String,
-       endpoint: Option[Uri],
-       region: Option[Region],
        readPermission: Permission,
        writePermission: Permission,
        maxFileSize: Long
@@ -202,8 +190,6 @@ object StorageValue {
        default,
        algorithm,
        bucket,
-       endpoint,
-       region,
        readPermission,
        writePermission,
        maxFileSize
@@ -270,11 +256,6 @@ object StorageValue {
  @SuppressWarnings(Array("TryGet"))
  @nowarn("cat=unused")
  def databaseCodec(implicit configuration: Configuration): Codec.AsObject[StorageValue] = {
-    implicit val pathEncoder: Encoder[Path] = Encoder.encodeString.contramap(_.toString)
-    implicit val pathDecoder: Decoder[Path] = Decoder.decodeString.emapTry(str => Try(Path.of(str)))
-    implicit val regionEncoder: Encoder[Region] = Encoder.encodeString.contramap(_.toString)
-    implicit val regionDecoder: Decoder[Region] = Decoder.decodeString.map(Region.of)
-
    implicit val digestCodec: Codec.AsObject[Digest] = deriveConfiguredCodec[Digest]
 
    deriveConfiguredCodec[StorageValue]
diff --git a/delta/plugins/storage/src/test/resources/storages/s3-storage.json b/delta/plugins/storage/src/test/resources/storages/s3-storage.json
index c873bfbb6c..020650b969 100644
--- a/delta/plugins/storage/src/test/resources/storages/s3-storage.json
+++ b/delta/plugins/storage/src/test/resources/storages/s3-storage.json
@@ -7,9 +7,7 @@
   "name": "s3name",
   "description": "s3description",
   "bucket": "mybucket",
-  "endpoint": "http://localhost",
   "readPermission": "s3/read",
-  "region": "eu-west-1",
   "writePermission": "s3/write",
   "maxFileSize": 51
 }
\ No newline at end of file
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageFixtures.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageFixtures.scala
index 9a3a94e40c..ba4718230a 100644
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageFixtures.scala
+++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageFixtures.scala
@@ -13,7 +13,6 @@
 import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label
 import ch.epfl.bluebrain.nexus.testkit.CirceLiteral
 import ch.epfl.bluebrain.nexus.testkit.minio.MinioDocker
 import ch.epfl.bluebrain.nexus.testkit.scalatest.ClasspathResources
-import software.amazon.awssdk.regions.Region
 import java.nio.file.{Files, Paths}
 import scala.concurrent.duration._
@@ -42,7 +41,7 @@ trait StorageFixtures extends CirceLiteral {
   val diskVal = diskFields.toValue(config).get
   val diskFieldsUpdate = DiskStorageFields(Some("diskName"), Some("diskDescription"), default = false, Some(tmpVolume), Some(Permission.unsafe("disk/read")), Some(Permission.unsafe("disk/write")), Some(2000), Some(40))
   val diskValUpdate = diskFieldsUpdate.toValue(config).get
-  val s3Fields = S3StorageFields(Some("s3name"), Some("s3description"), default = true, "mybucket", Some("http://localhost"), Some(Region.EU_WEST_1), Some(Permission.unsafe("s3/read")), Some(Permission.unsafe("s3/write")), Some(51))
+  val s3Fields = S3StorageFields(Some("s3name"), Some("s3description"), default = true, "mybucket", Some(Permission.unsafe("s3/read")), Some(Permission.unsafe("s3/write")), Some(51))
   val s3Val = s3Fields.toValue(config).get
   val remoteFields = RemoteDiskStorageFields(Some("remoteName"), Some("remoteDescription"), default = true, Label.unsafe("myfolder"), Some(Permission.unsafe("remote/read")), Some(Permission.unsafe("remote/write")), Some(52))
   val remoteVal = remoteFields.toValue(config).get
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFieldsSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFieldsSpec.scala
index 21af6f81d3..d7fc6c8fa0 100644
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFieldsSpec.scala
+++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFieldsSpec.scala
@@ -63,7 +63,7 @@ class StorageFieldsSpec extends CatsEffectSpec with RemoteContextResolutionFixtu
         "region"
       )
       sourceDecoder(pc, jsonNoDefaults).accepted._2 shouldEqual
-        S3StorageFields(None, None, default = true, "mybucket", None, None, None, None, None)
+        S3StorageFields(None, None, default = true, "mybucket", None, None, None)
     }
   }
 
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala
index a63f002a6d..bdbc0282f2 100644
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala
+++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala
@@ -23,7 +23,6 @@
 import ch.epfl.bluebrain.nexus.testkit.minio.MinioDocker._
 import ch.epfl.bluebrain.nexus.testkit.scalatest.ce.CatsEffectSpec
 import io.circe.Json
 import org.scalatest.{BeforeAndAfterAll, DoNotDiscover}
-import software.amazon.awssdk.regions.Region
 
 import java.util.UUID
@@ -53,8 +52,6 @@ class S3StorageSaveAndFetchFileSpec(docker: MinioDocker)
     default = false,
     algorithm = DigestAlgorithm.default,
     bucket = "bucket2",
-    endpoint = Some(docker.hostConfig.endpoint),
-    region = Some(Region.EU_CENTRAL_1),
     readPermission = read,
     writePermission = write,
     maxFileSize = 20
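A minimal, hypothetical sketch (not part of the diff above) of what the simplified address derivation in S3StorageValue now produces, assuming Region.US_EAST_1 renders as its region id ("us-east-1") under string interpolation; the object name and the use of a plain String instead of akka's Uri are illustration-only choices:

import software.amazon.awssdk.regions.Region

object S3AddressSketch {
  // Mirrors the hard-coded virtual-hosted-style bucket address in S3StorageValue.address,
  // now that the per-storage endpoint/region overrides have been removed.
  def address(bucket: String): String =
    s"https://$bucket.s3.${Region.US_EAST_1}.amazonaws.com"

  def main(args: Array[String]): Unit =
    println(address("mybucket")) // expected: https://mybucket.s3.us-east-1.amazonaws.com
}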