Commit f5935de
WIP
shinyhappydan committed Apr 4, 2024
1 parent 54214d8 commit f5935de
Showing 6 changed files with 5 additions and 36 deletions.
StorageFields.scala
@@ -1,11 +1,9 @@
package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model

import akka.http.scaladsl.model.Uri
import ch.epfl.bluebrain.nexus.delta.kernel.Secret
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.StorageTypeConfig
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, RemoteDiskStorageValue, S3StorageValue}
import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri
import ch.epfl.bluebrain.nexus.delta.sdk.implicits._
import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords
import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.decoder.configuration.semiauto.deriveConfigJsonLdDecoder
import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.decoder.{Configuration => JsonLdConfiguration, JsonLdDecoder}
@@ -151,8 +149,6 @@ object StorageFields {
description: Option[String],
default: Boolean,
bucket: String,
endpoint: Option[Uri],
region: Option[Region],
readPermission: Option[Permission],
writePermission: Option[Permission],
maxFileSize: Option[Long]
@@ -169,8 +165,6 @@ object StorageFields {
default,
cfg.digestAlgorithm,
bucket,
endpoint.orElse(Some(cfg.defaultEndpoint)),
region,
readPermission.getOrElse(cfg.defaultReadPermission),
writePermission.getOrElse(cfg.defaultWritePermission),
computeMaxFileSize(maxFileSize, cfg.defaultMaxFileSize)
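Taken together, the two hunks above remove the per-storage endpoint and region from S3StorageFields, and toValue stops consulting cfg.defaultEndpoint. A sketch of the resulting shape, assuming the leading name/description parameters hidden above the first hunk are unchanged:

import ch.epfl.bluebrain.nexus.delta.sdk.permissions.model.Permission

// Post-commit shape of S3StorageFields (a sketch, not the verbatim source)
final case class S3StorageFields(
    name: Option[String],
    description: Option[String],
    default: Boolean,
    bucket: String,
    readPermission: Option[Permission],
    writePermission: Option[Permission],
    maxFileSize: Option[Long]
)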
StorageValue.scala
@@ -6,22 +6,19 @@ import akka.stream.alpakka.s3.{ApiVersion, MemoryBufferType}
import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest
import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.StorageTypeConfig
import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords
import ch.epfl.bluebrain.nexus.delta.sdk.implicits._
import ch.epfl.bluebrain.nexus.delta.sdk.permissions.model.Permission
import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef}
import io.circe.generic.extras.Configuration
import io.circe.generic.extras.semiauto.{deriveConfiguredCodec, deriveConfiguredEncoder}
import io.circe.syntax._
import io.circe.{Codec, Decoder, Encoder}
import io.circe.{Codec, Encoder}
import software.amazon.awssdk.auth.credentials.{AnonymousCredentialsProvider, AwsBasicCredentials, StaticCredentialsProvider}
import software.amazon.awssdk.regions.Region
import software.amazon.awssdk.regions.providers.AwsRegionProvider

import java.io.File
import java.nio.file.Path
import scala.annotation.nowarn
import scala.reflect.io.Directory
import scala.util.Try

sealed trait StorageValue extends Product with Serializable {

@@ -137,8 +134,6 @@ object StorageValue {
default: Boolean,
algorithm: DigestAlgorithm,
bucket: String,
endpoint: Option[Uri],
region: Option[Region],
readPermission: Permission,
writePermission: Permission,
maxFileSize: Long
@@ -147,12 +142,7 @@
override val tpe: StorageType = StorageType.S3Storage
override val capacity: Option[Long] = None

def address(bucket: String): Uri =
endpoint match {
case Some(host) if host.scheme.trim.isEmpty => Uri(s"https://$bucket.$host")
case Some(e) => e.withHost(s"$bucket.${e.authority.host}")
case None => region.fold(s"https://$bucket.s3.amazonaws.com")(r => s"https://$bucket.s3.$r.amazonaws.com")
}
def address(bucket: String): Uri = s"https://$bucket.s3.${Region.US_EAST_1}.amazonaws.com"

/**
* @return
@@ -172,7 +162,7 @@ object StorageValue {
}

val regionProvider: AwsRegionProvider = new AwsRegionProvider {
val getRegion: Region = region.getOrElse(Region.US_EAST_1)
val getRegion: Region = Region.US_EAST_1
}

s3.S3Settings(MemoryBufferType, credsProvider, regionProvider, ApiVersion.ListBucketVersion2)
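
The behavioural change is easiest to see with the fixture values used later in this commit. Previously a storage with region eu-west-1 and no endpoint produced a region-specific virtual-hosted URL; now every S3 storage is pinned to us-east-1, both in the address and in the alpakka region provider. A small sketch (Region renders as its id when interpolated):

import akka.http.scaladsl.model.Uri
import software.amazon.awssdk.regions.Region

// Before: region = Some(Region.EU_WEST_1), no endpoint
//   => https://mybucket.s3.eu-west-1.amazonaws.com
// After this commit, for any bucket:
val address: Uri = Uri(s"https://mybucket.s3.${Region.US_EAST_1}.amazonaws.com")
// => https://mybucket.s3.us-east-1.amazonaws.com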
@@ -190,8 +180,6 @@ object StorageValue {
default: Boolean,
algorithm: DigestAlgorithm,
bucket: String,
endpoint: Option[Uri],
region: Option[Region],
readPermission: Permission,
writePermission: Permission,
maxFileSize: Long
@@ -202,8 +190,6 @@ object StorageValue {
default,
algorithm,
bucket,
endpoint,
region,
readPermission,
writePermission,
maxFileSize
@@ -270,11 +256,6 @@ object StorageValue {
@SuppressWarnings(Array("TryGet"))
@nowarn("cat=unused")
def databaseCodec(implicit configuration: Configuration): Codec.AsObject[StorageValue] = {
implicit val pathEncoder: Encoder[Path] = Encoder.encodeString.contramap(_.toString)
implicit val pathDecoder: Decoder[Path] = Decoder.decodeString.emapTry(str => Try(Path.of(str)))
implicit val regionEncoder: Encoder[Region] = Encoder.encodeString.contramap(_.toString)
implicit val regionDecoder: Decoder[Region] = Decoder.decodeString.map(Region.of)

implicit val digestCodec: Codec.AsObject[Digest] = deriveConfiguredCodec[Digest]

deriveConfiguredCodec[StorageValue]
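With the Region field gone from StorageValue, databaseCodec can also drop the hand-written circe instances for Region and Path; deriveConfiguredCodec only needs instances for fields that still exist. Assuming the derived codec keeps circe's default lenient decoding, rows persisted before this change should still decode, with the now-unknown endpoint/region keys ignored. A self-contained illustration of that assumption (Slim is a hypothetical stand-in, not a type from this codebase):

import io.circe.Decoder
import io.circe.generic.semiauto.deriveDecoder
import io.circe.parser.decode

// Unknown keys are dropped by default-configured circe decoders.
final case class Slim(bucket: String)
implicit val slimDecoder: Decoder[Slim] = deriveDecoder

decode[Slim]("""{ "bucket": "mybucket", "region": "eu-west-1" }""")
// => Right(Slim("mybucket"))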
S3 storage fields fixture (JSON)
@@ -7,9 +7,7 @@
"name": "s3name",
"description": "s3description",
"bucket": "mybucket",
"endpoint": "http://localhost",
"readPermission": "s3/read",
"region": "eu-west-1",
"writePermission": "s3/write",
"maxFileSize": 51
}
StorageFixtures.scala
@@ -13,7 +13,6 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label
import ch.epfl.bluebrain.nexus.testkit.CirceLiteral
import ch.epfl.bluebrain.nexus.testkit.minio.MinioDocker
import ch.epfl.bluebrain.nexus.testkit.scalatest.ClasspathResources
import software.amazon.awssdk.regions.Region

import java.nio.file.{Files, Paths}
import scala.concurrent.duration._
@@ -42,7 +41,7 @@ trait StorageFixtures extends CirceLiteral {
val diskVal = diskFields.toValue(config).get
val diskFieldsUpdate = DiskStorageFields(Some("diskName"), Some("diskDescription"), default = false, Some(tmpVolume), Some(Permission.unsafe("disk/read")), Some(Permission.unsafe("disk/write")), Some(2000), Some(40))
val diskValUpdate = diskFieldsUpdate.toValue(config).get
val s3Fields = S3StorageFields(Some("s3name"), Some("s3description"), default = true, "mybucket", Some("http://localhost"), Some(Region.EU_WEST_1), Some(Permission.unsafe("s3/read")), Some(Permission.unsafe("s3/write")), Some(51))
val s3Fields = S3StorageFields(Some("s3name"), Some("s3description"), default = true, "mybucket", Some(Permission.unsafe("s3/read")), Some(Permission.unsafe("s3/write")), Some(51))
val s3Val = s3Fields.toValue(config).get
val remoteFields = RemoteDiskStorageFields(Some("remoteName"), Some("remoteDescription"), default = true, Label.unsafe("myfolder"), Some(Permission.unsafe("remote/read")), Some(Permission.unsafe("remote/write")), Some(52))
val remoteVal = remoteFields.toValue(config).get
StorageFieldsSpec.scala
@@ -63,7 +63,7 @@ class StorageFieldsSpec extends CatsEffectSpec with RemoteContextResolutionFixtu
"region"
)
sourceDecoder(pc, jsonNoDefaults).accepted._2 shouldEqual
S3StorageFields(None, None, default = true, "mybucket", None, None, None, None, None)
S3StorageFields(None, None, default = true, "mybucket", None, None, None)
}
}

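The seven positional Nones in the updated expectation follow the new field order from StorageFields; rewritten with named arguments for readability (a sketch, the test itself stays positional):

S3StorageFields(
  name = None,
  description = None,
  default = true,
  bucket = "mybucket",
  readPermission = None,
  writePermission = None,
  maxFileSize = None
)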
S3StorageSaveAndFetchFileSpec.scala
@@ -23,7 +23,6 @@ import ch.epfl.bluebrain.nexus.testkit.minio.MinioDocker._
import ch.epfl.bluebrain.nexus.testkit.scalatest.ce.CatsEffectSpec
import io.circe.Json
import org.scalatest.{BeforeAndAfterAll, DoNotDiscover}
import software.amazon.awssdk.regions.Region

import java.util.UUID

@@ -53,8 +52,6 @@ class S3StorageSaveAndFetchFileSpec(docker: MinioDocker)
default = false,
algorithm = DigestAlgorithm.default,
bucket = "bucket2",
endpoint = Some(docker.hostConfig.endpoint),
region = Some(Region.EU_CENTRAL_1),
readPermission = read,
writePermission = write,
maxFileSize = 20
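With endpoint and region gone from the storage value, this MinIO-backed spec can no longer point the client at the container through S3StorageValue itself, which fits the WIP state of the commit. If the test still needs to reach MinIO, the override would have to happen where the alpakka settings are built, for instance via S3Settings#withEndpointUrl. A hypothetical sketch, not part of this diff (alpakkaSettings is assumed to be the settings builder shown in StorageValue above):

// Hypothetical test wiring; names follow the fixtures in this commit.
val testSettings =
  storageValue
    .alpakkaSettings(config)
    .withEndpointUrl(docker.hostConfig.endpoint.toString)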
