From 00a0eb7ff47a599bdf6470755b464c53752e5655 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Fri, 14 Jun 2024 13:46:58 -0400 Subject: [PATCH 01/22] make the env checkers have an init function --- env-checker/env_checker.go | 7 +++++++ main.go | 4 ---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/env-checker/env_checker.go b/env-checker/env_checker.go index e76af80..35c6da6 100644 --- a/env-checker/env_checker.go +++ b/env-checker/env_checker.go @@ -2,11 +2,18 @@ package envcheck import ( "fmt" + "log" "os" ) var REQUIRED_ENV_VAR = []string{"S3API_SERVICE_PORT", "KEYCLOAK_PUBLIC_KEYS_URL"} +func init() { + if err := CheckEnvVariablesExist(REQUIRED_ENV_VAR); err != nil { + log.Fatal(err) + } +} + func CheckEnvVariablesExist(envVars []string) error { var missingVars []string diff --git a/main.go b/main.go index b2b3c94..53a6a98 100644 --- a/main.go +++ b/main.go @@ -11,7 +11,6 @@ import ( "github.com/Dewberry/s3api/auth" "github.com/Dewberry/s3api/blobstore" - envcheck "github.com/Dewberry/s3api/env-checker" "github.com/labstack/echo/v4" "github.com/labstack/echo/v4/middleware" @@ -19,9 +18,6 @@ import ( ) func main() { - if err := envcheck.CheckEnvVariablesExist(envcheck.REQUIRED_ENV_VAR); err != nil { - log.Fatal(err) - } log.SetFormatter(&log.JSONFormatter{}) logLevel := os.Getenv("LOG_LEVEL") if logLevel == "" { From 9372b12190f34a4a2ce3f738160e3023d4066c00 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Mon, 17 Jun 2024 11:27:28 -0400 Subject: [PATCH 02/22] add error handleing package --- errorshandling/errors_handling.go | 215 ++++++++++++++++++++++++++++++ 1 file changed, 215 insertions(+) create mode 100644 errorshandling/errors_handling.go diff --git a/errorshandling/errors_handling.go b/errorshandling/errors_handling.go new file mode 100644 index 0000000..7092945 --- /dev/null +++ b/errorshandling/errors_handling.go @@ -0,0 +1,215 @@ +package errorshandling + +import ( + "database/sql" + "fmt" + "net/http" + "reflect" + "strings" + + 
"github.com/aws/aws-sdk-go/aws/awserr" + "github.com/go-playground/validator" + "github.com/jackc/pgx" + "github.com/labstack/echo/v4" + log "github.com/sirupsen/logrus" +) + +// ErrorType is a struct that holds both the numeric value and the name of the error type. +type ErrorType struct { + Value uint + Name string +} + +// Define constants for different types of errors as structs. +var ( + DatabaseError = ErrorType{Value: 1, Name: "Database Error"} + ValidationError = ErrorType{Value: 2, Name: "Validation Error"} + NotFoundError = ErrorType{Value: 3, Name: "Not Found Error"} + UnauthorizedError = ErrorType{Value: 4, Name: "Unauthorized Error"} + InternalServerError = ErrorType{Value: 5, Name: "Internal Server Error"} + AWSError = ErrorType{Value: 6, Name: "AWS Error"} +) + +// AppError includes the error type, message, and the original error. +type AppError struct { + Type ErrorType + Message string + Err error +} + +// NewAppError creates a new AppError. +func NewAppError(errorType ErrorType, message string, err error) *AppError { + return &AppError{ + Type: errorType, + Message: message, + Err: err, + } +} + +// ErrorFormatter formats the error message. +func LogErrorFormatter(err *AppError, withOrgError bool) string { + if withOrgError && err.Err != nil { + return fmt.Sprintf("Type: %s, Error: %s, Original error: %v", err.Type.Name, err.Message, err.Err) + } + return fmt.Sprintf("Type: %s, Error: %s", err.Type.Name, err.Message) +} + +func ErrorFormatter(err *AppError, withOrgError bool) string { + if withOrgError && err.Err != nil { + return fmt.Sprintf("%s, Original error: %v", err.Message, err.Err) + } + return err.Message +} + +// HandleErrorResponse sends a JSON response with the error message and status code. 
+func HandleErrorResponse(c echo.Context, err *AppError) error { + if err == nil { + log.Error("Attempted to handle a nil *AppError") + return c.JSON(http.StatusInternalServerError, map[string]string{"error": "An unexpected error occurred"}) + } + + responseMessage := ErrorFormatter(err, err.Type.Value != DatabaseError.Value) + statusCode := http.StatusInternalServerError // Default status + + switch err.Type.Value { + case ValidationError.Value: + statusCode = http.StatusBadRequest + case NotFoundError.Value: + statusCode = http.StatusNotFound + case UnauthorizedError.Value: + statusCode = http.StatusUnauthorized + } + return c.JSON(statusCode, map[string]string{"Type": err.Type.Name, "Error": responseMessage}) +} + +// HandleStructValidationErrors checks if the error is a validation error, formats it, and returns an AppError. +func HandleStructValidationErrors(err error) *AppError { + // Attempt to type-assert the error to a validator.ValidationErrors type + if ve, ok := err.(validator.ValidationErrors); ok { + // If the assertion is successful, process the validation errors + errMsgs := make([]string, 0) + for _, err := range ve { + errMsgs = append(errMsgs, fmt.Sprintf("Field '%s' failed validation for rule '%s'", err.Field(), err.Tag())) + } + errMsg := strings.Join(errMsgs, ", ") + return NewAppError(ValidationError, "Validation failed: "+errMsg, nil) + } + return NewAppError(InternalServerError, "error handeling validation error", err) +} + +// HandleVarValidationErrors is similar to HandleStructValidationErrors but for standalone variables +func HandleVarValidationErrors(err error, variableName string) *AppError { + // Attempt to type-assert the error to a validator.ValidationErrors type + if ve, ok := err.(validator.ValidationErrors); ok { + // If the assertion is successful, process the validation errors + errMsgs := make([]string, 0) + for _, err := range ve { + // Use the provided variableName since ve.Field() won't be useful for single variable 
validation + errMsgs = append(errMsgs, fmt.Sprintf("Variable '%s' failed validation for rule '%s'", variableName, err.Tag())) + } + errMsg := strings.Join(errMsgs, ", ") + return NewAppError(ValidationError, "Validation failed: "+errMsg, nil) + } + return NewAppError(InternalServerError, "Error handling validation error", err) +} + +func HandleSQLError(err error, errMsg string) *AppError { + if err == sql.ErrNoRows { + return NewAppError(NotFoundError, fmt.Sprintf("%s: %s", errMsg, pgx.ErrNoRows), nil) // Set the message as needed + } + + if pqErr, ok := err.(pgx.PgError); ok { + switch pqErr.Code { + case "23503": + return NewAppError(NotFoundError, fmt.Sprintf("%s: %s, SQLSTATE code is %s", errMsg, pqErr.Message, pqErr.Code), nil) + case "22P02": + return NewAppError(ValidationError, fmt.Sprintf("%s: %s, SQLSTATE code is %s", errMsg, pqErr.Message, pqErr.Code), nil) + } + } + return NewAppError(DatabaseError, errMsg, err) +} + +// HandleAWSError processes AWS-specific errors and returns an appropriate AppError. 
+// referencing https://github.com/aws/aws-sdk-go/blob/70ea45043fd9021c223e79de5755bc1b4b3af0aa/models/apis/cloudformation/2010-05-15/api-2.json +func HandleAWSError(err error, errMsg string) *AppError { + if aerr, ok := err.(awserr.Error); ok { + formattedMessage := fmt.Sprintf("%s: %s (AWS Error Code: %s)", errMsg, aerr.Message(), aerr.Code()) + switch aerr.Code() { + case "AccessDenied", "InvalidCredentials": + return NewAppError(UnauthorizedError, formattedMessage, err) + case "NotFound": + return NewAppError(NotFoundError, formattedMessage, err) + case "NotUpdatable", "InvalidRequest", "AlreadyExists", "ResourceConflict", "Throttling", "ServiceLimitExceeded", "NotStabilized", "GeneralServiceException", "NetworkFailure", "InvalidTypeConfiguration", "NonCompliant", "Unknown", "UnsupportedTarget": + return NewAppError(AWSError, formattedMessage, err) + case "ServiceInternalError", "InternalFailure", "HandlerInternalFailure": + return NewAppError(InternalServerError, formattedMessage, err) + default: + return NewAppError(AWSError, formattedMessage, err) + } + } + return NewAppError(AWSError, errMsg, err) +} + +// CheckRequiredParams checks if the required parameters are present and returns an error if any are missing. +func CheckRequiredParams(params map[string]string) *AppError { + var missingParams []string + for paramName, paramValue := range params { + if paramValue == "" { + missingParams = append(missingParams, paramName) + } + } + if len(missingParams) > 0 { + errMsg := fmt.Sprintf("The following required parameters are missing: %s", strings.Join(missingParams, ", ")) + return NewAppError(ValidationError, errMsg, nil) + } + return nil +} + +// isEmpty checks if the provided data is considered empty. +func isEmpty(data interface{}) bool { + if data == nil { + return true + } + + v := reflect.ValueOf(data) + switch v.Kind() { + case reflect.Slice, reflect.Map, reflect.Array: + // Collections are empty if they have no elements. 
+ return v.Len() == 0 + case reflect.Ptr: + // Pointers are empty if they are nil. + return v.IsNil() + case reflect.Struct: + return false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.Bool, reflect.String: + // For basic types, consider them non-empty even if they are zero values. + return false + default: + // For all other data types, check if they are their zero value. + return v.IsZero() + } +} + +// HandleSuccessfulResponse sends an appropriate JSON response with the given data or status code. +func HandleSuccessfulResponse(c echo.Context, data interface{}, statusCode ...int) error { + // Determine the status code. + code := http.StatusOK // Default status code is 200 OK. + if len(statusCode) > 0 { + code = statusCode[0] // Use provided status code if any. + } + // For all other data types, marshal as JSON. + // Check if the data is struct or empty, then return an empty struct. + //if data is an array it will automatically return an empty array with how we're defining array + if isEmpty(data) && reflect.ValueOf(data).Kind() != reflect.Slice && reflect.ValueOf(data).Kind() != reflect.Array { + return c.JSON(code, struct{}{}) + } + switch v := data.(type) { + case []byte: + // If data is of type []byte, send it as a JSON blob. 
+ return c.JSONBlob(code, v) + default: + return c.JSON(code, data) + } +} From 7ceeefcea460ca7da77d10191d95539f84992435 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Mon, 17 Jun 2024 11:27:43 -0400 Subject: [PATCH 03/22] add definition documentation --- documentation/definition.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 documentation/definition.md diff --git a/documentation/definition.md b/documentation/definition.md new file mode 100644 index 0000000..f1f0486 --- /dev/null +++ b/documentation/definition.md @@ -0,0 +1,12 @@ +I nned you to add region and how buckets are region specific +| Term | Definition | Example/Analogy | +| -------------------- | ----------------------------------------------------------------------- | -------------------------------------------------------------------- | +| **Bucket** | Container for storing objects in S3. | Directory or root-level folder | +| **Object** | Fundamental entity stored in a bucket, consisting of data and metadata. | File or item | +| **Key** | Unique identifier for an object within a bucket. | Full path to the object | +| **Prefix** | Part of the key used to group objects, similar to directories. | Directory-like grouping | +| **Folder** | Conceptual representation of common prefixes. | Pseudo-directory | +| **S3 URI** | URI format for accessing S3 objects. | `s3://my-bucket/photos/vacation/2023/photo1.jpg` | +| **URL** | Web address format for accessing S3 objects. | `https://my-bucket.s3.amazonaws.com/photos/vacation/2023/photo1.jpg` | +| **Multipart Upload** | Method to upload large objects in parts. | Efficient large file upload | +| **Presigned URL** | URL granting temporary access to an S3 object. 
| Secure file sharing | From ab2d0c4b034dce6f61cd3176d584ebbd2234b116 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Mon, 17 Jun 2024 11:29:03 -0400 Subject: [PATCH 04/22] add necessary packages --- go.mod | 6 ++++++ go.sum | 12 ++++++++++++ 2 files changed, 18 insertions(+) diff --git a/go.mod b/go.mod index 736fa77..ad052db 100644 --- a/go.mod +++ b/go.mod @@ -12,10 +12,16 @@ require ( ) require ( + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator v9.31.0+incompatible // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect + github.com/jackc/pgx v3.6.2+incompatible // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/leodido/go-urn v1.4.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect golang.org/x/crypto v0.17.0 // indirect diff --git a/go.sum b/go.sum index 4f2a961..43f9fcc 100644 --- a/go.sum +++ b/go.sum @@ -3,10 +3,18 @@ github.com/aws/aws-sdk-go v1.44.315/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator v9.31.0+incompatible 
h1:UA72EPEogEnq76ehGdEDp4Mit+3FDh548oRqwVgNsHA= +github.com/go-playground/validator v9.31.0+incompatible/go.mod h1:yrEkQXlcI+PugkyDjY2bRrL/UBU4f3rvrgkN3V8JEig= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o= +github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -15,6 +23,8 @@ github.com/labstack/echo/v4 v4.11.1 h1:dEpLU2FLg4UVmvCGPuk/APjlH6GDpbEPti61srUUU github.com/labstack/echo/v4 v4.11.1/go.mod h1:YuYRTSM3CHs2ybfrL8Px48bO6BAnYIN4l8wSTMP6BDQ= github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8= github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= @@ -24,6 +34,7 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty 
v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -32,6 +43,7 @@ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVs github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= From 7e31990ea433e53697f6a6a901e2a91d4636d5f8 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Mon, 17 Jun 2024 15:25:29 -0400 Subject: [PATCH 05/22] organize into package --- blobstore/blobhandler.go | 4 ++-- {env-checker => configberry}/env_checker.go | 11 +---------- {errorshandling => configberry}/errors_handling.go | 3 ++- main.go | 6 ++++++ utils/utils.go | 2 ++ 5 files changed, 13 insertions(+), 13 deletions(-) rename {env-checker => configberry}/env_checker.go (66%) rename {errorshandling => configberry}/errors_handling.go (98%) diff --git a/blobstore/blobhandler.go b/blobstore/blobhandler.go index d071546..1e132b0 100644 --- a/blobstore/blobhandler.go +++ b/blobstore/blobhandler.go @@ -8,7 +8,7 @@ import ( "sync" "github.com/Dewberry/s3api/auth" - envcheck 
"github.com/Dewberry/s3api/env-checker" + "github.com/Dewberry/s3api/configberry" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" @@ -55,7 +55,7 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { } if authLvl > 0 { - if err := envcheck.CheckEnvVariablesExist([]string{"AUTH_LIMITED_WRITER_ROLE"}); err != nil { + if err := configberry.CheckEnvVariablesExist([]string{"AUTH_LIMITED_WRITER_ROLE"}); err != nil { log.Fatal(err) } config.Config.AuthLevel = authLvl diff --git a/env-checker/env_checker.go b/configberry/env_checker.go similarity index 66% rename from env-checker/env_checker.go rename to configberry/env_checker.go index 35c6da6..27b54e2 100644 --- a/env-checker/env_checker.go +++ b/configberry/env_checker.go @@ -1,19 +1,10 @@ -package envcheck +package configberry import ( "fmt" - "log" "os" ) -var REQUIRED_ENV_VAR = []string{"S3API_SERVICE_PORT", "KEYCLOAK_PUBLIC_KEYS_URL"} - -func init() { - if err := CheckEnvVariablesExist(REQUIRED_ENV_VAR); err != nil { - log.Fatal(err) - } -} - func CheckEnvVariablesExist(envVars []string) error { var missingVars []string diff --git a/errorshandling/errors_handling.go b/configberry/errors_handling.go similarity index 98% rename from errorshandling/errors_handling.go rename to configberry/errors_handling.go index 7092945..ef8e66d 100644 --- a/errorshandling/errors_handling.go +++ b/configberry/errors_handling.go @@ -1,4 +1,4 @@ -package errorshandling +package configberry import ( "database/sql" @@ -28,6 +28,7 @@ var ( UnauthorizedError = ErrorType{Value: 4, Name: "Unauthorized Error"} InternalServerError = ErrorType{Value: 5, Name: "Internal Server Error"} AWSError = ErrorType{Value: 6, Name: "AWS Error"} + Fatal = ErrorType{Value: 7, Name: "Fatal Error"} ) // AppError includes the error type, message, and the original error. 
diff --git a/main.go b/main.go index 53a6a98..c222d53 100644 --- a/main.go +++ b/main.go @@ -11,6 +11,8 @@ import ( "github.com/Dewberry/s3api/auth" "github.com/Dewberry/s3api/blobstore" + "github.com/Dewberry/s3api/configberry" + "github.com/Dewberry/s3api/utils" "github.com/labstack/echo/v4" "github.com/labstack/echo/v4/middleware" @@ -18,6 +20,10 @@ import ( ) func main() { + err := configberry.CheckEnvVariablesExist(utils.REQUIRED_ENV_VAR) + if err != nil { + log.Fatal(err) + } log.SetFormatter(&log.JSONFormatter{}) logLevel := os.Getenv("LOG_LEVEL") if logLevel == "" { diff --git a/utils/utils.go b/utils/utils.go index 946770f..d16d661 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -1,5 +1,7 @@ package utils +var REQUIRED_ENV_VAR = []string{"S3API_SERVICE_PORT", "KEYCLOAK_PUBLIC_KEYS_URL"} + // Check if a string is in string slice func StringInSlice(a string, list []string) bool { for _, b := range list { From ef03b0acc78f11d9d93c6c4d0c629c906b669e8e Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Tue, 18 Jun 2024 10:16:47 -0400 Subject: [PATCH 06/22] add env checkers in the beggining of main --- configberry/env_checker.go | 4 ++-- configberry/errors_handling.go | 2 +- documentation/definition.md | 21 ++++++++++----------- main.go | 11 +++++++---- 4 files changed, 20 insertions(+), 18 deletions(-) diff --git a/configberry/env_checker.go b/configberry/env_checker.go index 27b54e2..afca721 100644 --- a/configberry/env_checker.go +++ b/configberry/env_checker.go @@ -16,8 +16,8 @@ func CheckEnvVariablesExist(envVars []string) error { } if len(missingVars) > 0 { - errMsg := fmt.Sprintf("The following environment variables are missing: %v", missingVars) - return fmt.Errorf(errMsg) + errMsg := fmt.Errorf("the following environment variables are missing: %v", missingVars) + return errMsg } return nil diff --git a/configberry/errors_handling.go b/configberry/errors_handling.go index ef8e66d..a26c7ab 100644 --- a/configberry/errors_handling.go +++ 
b/configberry/errors_handling.go @@ -28,7 +28,7 @@ var ( UnauthorizedError = ErrorType{Value: 4, Name: "Unauthorized Error"} InternalServerError = ErrorType{Value: 5, Name: "Internal Server Error"} AWSError = ErrorType{Value: 6, Name: "AWS Error"} - Fatal = ErrorType{Value: 7, Name: "Fatal Error"} + FatalError = ErrorType{Value: 7, Name: "Fatal Error"} ) // AppError includes the error type, message, and the original error. diff --git a/documentation/definition.md b/documentation/definition.md index f1f0486..f5946cd 100644 --- a/documentation/definition.md +++ b/documentation/definition.md @@ -1,12 +1,11 @@ -I nned you to add region and how buckets are region specific -| Term | Definition | Example/Analogy | +| Term | Definition | Example/Analogy | | -------------------- | ----------------------------------------------------------------------- | -------------------------------------------------------------------- | -| **Bucket** | Container for storing objects in S3. | Directory or root-level folder | -| **Object** | Fundamental entity stored in a bucket, consisting of data and metadata. | File or item | -| **Key** | Unique identifier for an object within a bucket. | Full path to the object | -| **Prefix** | Part of the key used to group objects, similar to directories. | Directory-like grouping | -| **Folder** | Conceptual representation of common prefixes. | Pseudo-directory | -| **S3 URI** | URI format for accessing S3 objects. | `s3://my-bucket/photos/vacation/2023/photo1.jpg` | -| **URL** | Web address format for accessing S3 objects. | `https://my-bucket.s3.amazonaws.com/photos/vacation/2023/photo1.jpg` | -| **Multipart Upload** | Method to upload large objects in parts. | Efficient large file upload | -| **Presigned URL** | URL granting temporary access to an S3 object. | Secure file sharing | +| **Bucket** | Container for storing objects in S3. 
| Directory or root-level folder | +| **Object** | Fundamental entity stored in a bucket, consisting of data and metadata. | File or item | +| **Key** | Unique identifier for an object within a bucket. | Full path to the object | +| **Prefix** | Part of the key used to group objects, similar to directories. | Directory-like grouping | +| **Folder** | Conceptual representation of common prefixes. | Pseudo-directory | +| **S3 URI** | URI format for accessing S3 objects. | `s3://my-bucket/photos/vacation/2023/photo1.jpg` | +| **URL** | Web address format for accessing S3 objects. | `https://my-bucket.s3.amazonaws.com/photos/vacation/2023/photo1.jpg` | +| **Multipart Upload** | Method to upload large objects in parts. | Efficient large file upload | +| **Presigned URL** | URL granting temporary access to an S3 object. | Secure file sharing | diff --git a/main.go b/main.go index c222d53..7f3abf9 100644 --- a/main.go +++ b/main.go @@ -20,20 +20,23 @@ import ( ) func main() { - err := configberry.CheckEnvVariablesExist(utils.REQUIRED_ENV_VAR) - if err != nil { - log.Fatal(err) - } log.SetFormatter(&log.JSONFormatter{}) logLevel := os.Getenv("LOG_LEVEL") if logLevel == "" { logLevel = "info" } + err := configberry.CheckEnvVariablesExist(utils.REQUIRED_ENV_VAR) + if err != nil { + appErr := configberry.NewAppError(configberry.FatalError, "Critical configuration error: ", err) + log.Fatal(configberry.LogErrorFormatter(appErr, true)) + } + level, err := log.ParseLevel(logLevel) if err != nil { log.WithError(err).Error("Invalid log level") level = log.InfoLevel } + log.SetLevel(level) log.SetReportCaller(true) log.Infof("level level set to: %s", level) From 72b4213d0fba65ec25f54dcd4c4e1eb2dd5816a1 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Tue, 18 Jun 2024 10:16:59 -0400 Subject: [PATCH 07/22] move ping handlers into its file --- blobstore/blobhandler.go | 91 ++++++++-------------------------------- blobstore/ping.go | 73 ++++++++++++++++++++++++++++++++ 2 files changed, 
90 insertions(+), 74 deletions(-) create mode 100644 blobstore/ping.go diff --git a/blobstore/blobhandler.go b/blobstore/blobhandler.go index c092f98..4ae6cab 100644 --- a/blobstore/blobhandler.go +++ b/blobstore/blobhandler.go @@ -57,12 +57,12 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { if authLvl > 0 { if err := configberry.CheckEnvVariablesExist([]string{"AUTH_LIMITED_WRITER_ROLE"}); err != nil { - log.Fatal(err) + return &config, err } config.Config.AuthLevel = authLvl db, err := auth.NewPostgresDB() if err != nil { - log.Fatal(err) + return &config, err } config.DB = db } @@ -73,7 +73,8 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { } s3Mock, err := strconv.Atoi(s3MockStr) if err != nil { - log.Fatalf("could not convert S3_MOCK env variable to integer: %v", err) + errMsg := fmt.Errorf("could not convert S3_MOCK env variable to integer: %v", err) + return &config, errMsg } // Check if the S3_MOCK environment variable is set to "true" if s3Mock == 1 { @@ -85,13 +86,13 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { // Validate MinIO credentials, check if they are missing or incomplete // if not then the s3api won't start if err := creds.validateMinioConfig(); err != nil { - log.Fatalf("MINIO credentials are either not provided or contain missing variables: %s", err.Error()) + return &config, err } // Create a MinIO session and S3 client s3SVC, sess, err := minIOSessionManager(creds) if err != nil { - log.Fatalf("failed to create MinIO session: %s", err.Error()) + return &config, err } // Configure the BlobHandler with MinIO session and bucket information @@ -107,7 +108,8 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { // Check if loading AWS credentials from .env.json failed if err != nil { - return nil, fmt.Errorf("env.json credentials extraction failed, please check `.env.json.example` for reference on formatting, %s", err.Error()) + errMsg := 
fmt.Errorf("env.json credentials extraction failed, please check `.env.json.example` for reference on formatting, %s", err.Error()) + return &config, errMsg } //does it contain "*" @@ -126,7 +128,7 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { if err != nil { errMsg := fmt.Errorf("failed to create AWS session: %s", err.Error()) log.Error(errMsg.Error()) - return nil, errMsg + return &config, errMsg } S3Ctrl := S3Controller{Sess: sess, S3Svc: s3SVC} @@ -134,7 +136,7 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { result, err := S3Ctrl.ListBuckets() if err != nil { errMsg := fmt.Errorf("failed to retrieve list of buckets with access key: %s, error: %s", creds.AWS_ACCESS_KEY_ID, err.Error()) - return nil, errMsg + return &config, errMsg } var bucketNames []string @@ -164,7 +166,8 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { for bucket := range allowedBucketsMap { missingBuckets = append(missingBuckets, bucket) } - return nil, fmt.Errorf("some buckets in the allow list were not found: %v", missingBuckets) + errMsg := fmt.Errorf("some buckets in the allow list were not found: %v", missingBuckets) + return &config, errMsg } // Return the configured BlobHandler @@ -178,7 +181,7 @@ func aWSSessionManager(creds AWSCreds) (*s3.S3, *session.Session, error) { Credentials: credentials.NewStaticCredentials(creds.AWS_ACCESS_KEY_ID, creds.AWS_SECRET_ACCESS_KEY, ""), }) if err != nil { - return nil, nil, fmt.Errorf("error creating s3 session: %s", err.Error()) + return nil, nil, err } return s3.New(sess), sess, nil } @@ -191,7 +194,7 @@ func minIOSessionManager(mc MinioConfig) (*s3.S3, *session.Session, error) { S3ForcePathStyle: aws.Bool(true), }) if err != nil { - return nil, nil, fmt.Errorf("error connecting to minio session: %s", err.Error()) + return nil, nil, err } log.Info("Using minio to mock s3") @@ -219,8 +222,7 @@ func minIOSessionManager(mc MinioConfig) (*s3.S3, *session.Session, 
error) { func (bh *BlobHandler) GetController(bucket string) (*S3Controller, error) { if bucket == "" { - err := fmt.Errorf("parameter 'bucket' is required") - log.Error(err.Error()) + err := fmt.Errorf("parameter `bucket` is required") return nil, err } var s3Ctrl S3Controller @@ -259,7 +261,8 @@ func (bh *BlobHandler) GetController(bucket string) (*S3Controller, error) { } } } - return &s3Ctrl, fmt.Errorf("bucket '%s' not found", bucket) + errMsg := fmt.Errorf("bucket '%s' not found", bucket) + return &s3Ctrl, errMsg } func getBucketRegion(S3Svc *s3.S3, bucketName string) (string, error) { @@ -279,35 +282,6 @@ func getBucketRegion(S3Svc *s3.S3, bucketName string) (string, error) { return *output.LocationConstraint, nil } -func (bh *BlobHandler) Ping(c echo.Context) error { - return c.JSON(http.StatusOK, "connection without Auth is healthy") -} - -func (bh *BlobHandler) PingWithAuth(c echo.Context) error { - // Perform a HeadBucket operation to check the health of the S3 connection - bucketHealth := make(map[string]string) - var valid string - - for _, s3Ctrl := range bh.S3Controllers { - for _, b := range s3Ctrl.Buckets { - _, err := s3Ctrl.S3Svc.HeadBucket(&s3.HeadBucketInput{ - Bucket: aws.String(b), - }) - if err != nil { - valid = "unhealthy" - } else { - valid = "healthy" - } - log.Debugf("Ping operation preformed succesfully, connection to `%s` is %s", b, valid) - - bucketHealth[b] = valid - print(b, valid) - } - } - - return c.JSON(http.StatusOK, bucketHealth) -} - func (bh *BlobHandler) GetS3ReadPermissions(c echo.Context, bucket string) ([]string, bool, int, error) { permissions, fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) if err != nil { @@ -323,34 +297,3 @@ func (bh *BlobHandler) GetS3ReadPermissions(c echo.Context, bucket string) ([]st } return permissions, fullAccess, http.StatusOK, nil } - -func (bh *BlobHandler) HandleCheckS3UserPermission(c echo.Context) error { - if bh.Config.AuthLevel == 0 { - log.Info("Checked user permissions 
successfully") - return c.JSON(http.StatusOK, true) - } - initAuth := os.Getenv("INIT_AUTH") - if initAuth == "0" { - errMsg := fmt.Errorf("this endpoint requires authentication information that is unavailable when authorization is disabled. Please enable authorization to use this functionality") - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) - } - prefix := c.QueryParam("prefix") - bucket := c.QueryParam("bucket") - operation := c.QueryParam("operation") - claims, ok := c.Get("claims").(*auth.Claims) - if !ok { - errMsg := fmt.Errorf("could not get claims from request context") - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - userEmail := claims.Email - if operation == "" || prefix == "" || bucket == "" { - errMsg := fmt.Errorf("`prefix`, `operation` and 'bucket are required params") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } - isAllowed := bh.DB.CheckUserPermission(userEmail, bucket, prefix, []string{operation}) - log.Info("Checked user permissions successfully") - return c.JSON(http.StatusOK, isAllowed) -} diff --git a/blobstore/ping.go b/blobstore/ping.go new file mode 100644 index 0000000..e91cb4b --- /dev/null +++ b/blobstore/ping.go @@ -0,0 +1,73 @@ +package blobstore + +import ( + "fmt" + "net/http" + "os" + + "github.com/Dewberry/s3api/auth" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/labstack/echo/v4" + log "github.com/sirupsen/logrus" +) + +func (bh *BlobHandler) Ping(c echo.Context) error { + return c.JSON(http.StatusOK, "connection without Auth is healthy") +} + +func (bh *BlobHandler) PingWithAuth(c echo.Context) error { + // Perform a HeadBucket operation to check the health of the S3 connection + bucketHealth := make(map[string]string) + var valid string + + for _, s3Ctrl := range bh.S3Controllers { + for _, b := range s3Ctrl.Buckets { + _, err := 
s3Ctrl.S3Svc.HeadBucket(&s3.HeadBucketInput{ + Bucket: aws.String(b), + }) + if err != nil { + valid = "unhealthy" + } else { + valid = "healthy" + } + log.Debugf("Ping operation preformed succesfully, connection to `%s` is %s", b, valid) + + bucketHealth[b] = valid + print(b, valid) + } + } + + return c.JSON(http.StatusOK, bucketHealth) +} + +func (bh *BlobHandler) HandleCheckS3UserPermission(c echo.Context) error { + if bh.Config.AuthLevel == 0 { + log.Info("Checked user permissions successfully") + return c.JSON(http.StatusOK, true) + } + initAuth := os.Getenv("INIT_AUTH") + if initAuth == "0" { + errMsg := fmt.Errorf("this endpoint requires authentication information that is unavailable when authorization is disabled. Please enable authorization to use this functionality") + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + } + prefix := c.QueryParam("prefix") + bucket := c.QueryParam("bucket") + operation := c.QueryParam("operation") + claims, ok := c.Get("claims").(*auth.Claims) + if !ok { + errMsg := fmt.Errorf("could not get claims from request context") + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + userEmail := claims.Email + if operation == "" || prefix == "" || bucket == "" { + errMsg := fmt.Errorf("`prefix`, `operation` and 'bucket are required params") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + } + isAllowed := bh.DB.CheckUserPermission(userEmail, bucket, prefix, []string{operation}) + log.Info("Checked user permissions successfully") + return c.JSON(http.StatusOK, isAllowed) +} From eadbf7bc006cd22d4ea63464d245d0e59874aebe Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Tue, 18 Jun 2024 13:16:05 -0400 Subject: [PATCH 08/22] clean up code based on best practices defined in documentation --- blobstore/blobhandler.go | 98 ++--------- blobstore/blobstore.go | 205 ++++++++++++---------- blobstore/buckets.go | 138 
+-------------- blobstore/creds.go | 111 +++++------- blobstore/fgac.go | 130 ++++++++++++++ blobstore/list.go | 77 +-------- blobstore/metadata.go | 36 +++- blobstore/object_content.go | 4 +- blobstore/ping.go | 41 +---- blobstore/presigned_url.go | 193 +-------------------- blobstore/upload.go | 10 +- documentation/general.md | 23 +++ main.go | 15 +- utils/deprecated.txt | 332 ++++++++++++++++++++++++++++++++++++ 14 files changed, 728 insertions(+), 685 deletions(-) create mode 100644 blobstore/fgac.go create mode 100644 documentation/general.md create mode 100644 utils/deprecated.txt diff --git a/blobstore/blobhandler.go b/blobstore/blobhandler.go index 4ae6cab..3d5cd47 100644 --- a/blobstore/blobhandler.go +++ b/blobstore/blobhandler.go @@ -2,19 +2,15 @@ package blobstore import ( "fmt" - "net/http" "os" "strconv" - "strings" "sync" "github.com/Dewberry/s3api/auth" "github.com/Dewberry/s3api/configberry" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - "github.com/labstack/echo/v4" log "github.com/sirupsen/logrus" ) @@ -81,22 +77,22 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { log.Info("Using MinIO") // Load MinIO credentials from environment - creds := newMinioConfig() + mc := newMinioConfig() // Validate MinIO credentials, check if they are missing or incomplete // if not then the s3api won't start - if err := creds.validateMinioConfig(); err != nil { + if err := mc.validateMinioConfig(); err != nil { return &config, err } // Create a MinIO session and S3 client - s3SVC, sess, err := minIOSessionManager(creds) + s3SVC, sess, err := mc.minIOSessionManager() if err != nil { return &config, err } // Configure the BlobHandler with MinIO session and bucket information - config.S3Controllers = []S3Controller{{Sess: sess, S3Svc: s3SVC, Buckets: []string{creds.Bucket}, S3Mock: true}} + config.S3Controllers = 
[]S3Controller{{Sess: sess, S3Svc: s3SVC, Buckets: []string{mc.Bucket}, S3Mock: true}} // Return the configured BlobHandler return &config, nil } @@ -122,9 +118,9 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { } // Load AWS credentials for multiple accounts from .env.json - for _, creds := range awsConfig.Accounts { + for _, ac := range awsConfig.Accounts { // Create an AWS session and S3 client for each account - s3SVC, sess, err := aWSSessionManager(creds) + s3SVC, sess, err := ac.aWSSessionManager() if err != nil { errMsg := fmt.Errorf("failed to create AWS session: %s", err.Error()) log.Error(errMsg.Error()) @@ -135,7 +131,7 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { // Retrieve the list of buckets for each account result, err := S3Ctrl.ListBuckets() if err != nil { - errMsg := fmt.Errorf("failed to retrieve list of buckets with access key: %s, error: %s", creds.AWS_ACCESS_KEY_ID, err.Error()) + errMsg := fmt.Errorf("failed to retrieve list of buckets with access key: %s, error: %s", ac.AWS_ACCESS_KEY_ID, err.Error()) return &config, errMsg } @@ -174,50 +170,21 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { return &config, nil } -func aWSSessionManager(creds AWSCreds) (*s3.S3, *session.Session, error) { - log.Info("Using AWS S3") - sess, err := session.NewSession(&aws.Config{ - Region: aws.String("us-east-1"), - Credentials: credentials.NewStaticCredentials(creds.AWS_ACCESS_KEY_ID, creds.AWS_SECRET_ACCESS_KEY, ""), +func (s3Ctrl *S3Controller) getBucketRegion(bucketName string) (string, error) { + req, output := s3Ctrl.S3Svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ + Bucket: aws.String(bucketName), }) - if err != nil { - return nil, nil, err - } - return s3.New(sess), sess, nil -} -func minIOSessionManager(mc MinioConfig) (*s3.S3, *session.Session, error) { - sess, err := session.NewSession(&aws.Config{ - Endpoint: aws.String(mc.S3Endpoint), - Region: 
aws.String("us-east-1"), - Credentials: credentials.NewStaticCredentials(mc.AccessKeyID, mc.SecretAccessKey, ""), - S3ForcePathStyle: aws.Bool(true), - }) + err := req.Send() if err != nil { - return nil, nil, err + return "", err } - log.Info("Using minio to mock s3") - // Check if the bucket exists - s3SVC := s3.New(sess) - _, err = s3SVC.HeadBucket(&s3.HeadBucketInput{ - Bucket: aws.String(mc.Bucket), - }) - if err != nil { - // Bucket does not exist, create it - _, err := s3SVC.CreateBucket(&s3.CreateBucketInput{ - Bucket: aws.String(mc.Bucket), - }) - if err != nil { - log.Errorf("Error creating bucket: %s", err.Error()) - return nil, nil, nil - } - log.Info("Bucket created successfully") - } else { - log.Info("Bucket already exists") + if output.LocationConstraint == nil { + return "us-east-1", nil } - return s3SVC, sess, nil + return *output.LocationConstraint, nil } func (bh *BlobHandler) GetController(bucket string) (*S3Controller, error) { @@ -232,7 +199,7 @@ func (bh *BlobHandler) GetController(bucket string) (*S3Controller, error) { s3Ctrl = bh.S3Controllers[i] // Detect the bucket's region - region, err := getBucketRegion(bh.S3Controllers[i].S3Svc, b) + region, err := s3Ctrl.getBucketRegion(b) if err != nil { log.Errorf("Failed to get region for bucket '%s': %s", b, err.Error()) continue @@ -264,36 +231,3 @@ func (bh *BlobHandler) GetController(bucket string) (*S3Controller, error) { errMsg := fmt.Errorf("bucket '%s' not found", bucket) return &s3Ctrl, errMsg } - -func getBucketRegion(S3Svc *s3.S3, bucketName string) (string, error) { - req, output := S3Svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ - Bucket: aws.String(bucketName), - }) - - err := req.Send() - if err != nil { - return "", err - } - - if output.LocationConstraint == nil { - return "us-east-1", nil - } - - return *output.LocationConstraint, nil -} - -func (bh *BlobHandler) GetS3ReadPermissions(c echo.Context, bucket string) ([]string, bool, int, error) { - permissions, 
fullAccess, err := bh.GetUserS3ReadListPermission(c, bucket) - if err != nil { - //TEMP solution before error library is implimented and string check ups become redundant - httpStatus := http.StatusInternalServerError - if strings.Contains(err.Error(), "this endpoint requires authentication information that is unavailable when authorization is disabled.") { - httpStatus = http.StatusForbidden - } - return nil, false, httpStatus, fmt.Errorf("error fetching user permissions: %s", err.Error()) - } - if !fullAccess && len(permissions) == 0 { - return nil, false, http.StatusForbidden, fmt.Errorf("user does not have permission to read the %s bucket", bucket) - } - return permissions, fullAccess, http.StatusOK, nil -} diff --git a/blobstore/blobstore.go b/blobstore/blobstore.go index c9db565..f675692 100644 --- a/blobstore/blobstore.go +++ b/blobstore/blobstore.go @@ -1,58 +1,15 @@ package blobstore import ( + "encoding/json" "fmt" "net/http" "os" "strings" - "github.com/Dewberry/s3api/auth" - "github.com/Dewberry/s3api/utils" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/labstack/echo/v4" + log "github.com/sirupsen/logrus" ) -func (s3Ctrl *S3Controller) KeyExists(bucket string, key string) (bool, error) { - - _, err := s3Ctrl.S3Svc.HeadObject(&s3.HeadObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - }) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - switch aerr.Code() { - case "NotFound": // s3.ErrCodeNoSuchKey does not work, aws is missing this error code so we hardwire a string - return false, nil - default: - return false, fmt.Errorf("KeyExists: %s", err) - } - } - return false, fmt.Errorf("KeyExists: %s", err) - } - return true, nil -} - -// function that will get the most recently uploaded file in a prefix -// func (s3Ctrl *S3Controller) getMostRecentModTime(bucket, prefix string, permissions []string, fullAccess bool) (time.Time, error) { -// // 
Initialize a time variable to store the most recent modification time -// var mostRecent time.Time - -// // Call GetList to retrieve the list of objects with the specified prefix -// response, err := s3Ctrl.GetList(bucket, prefix, false) -// if err != nil { -// return time.Time{}, err -// } -// // Iterate over the returned objects to find the most recent modification time -// for _, item := range response.Contents { -// if item.LastModified != nil && item.LastModified.After(mostRecent) { -// mostRecent = *item.LastModified -// } -// } -// return mostRecent, nil -// } - func arrayContains(a string, arr []string) bool { for _, b := range arr { if b == a { @@ -82,70 +39,130 @@ func isIdenticalArray(array1, array2 []string) bool { return true } -func (bh *BlobHandler) CheckUserS3Permission(c echo.Context, bucket, prefix string, permissions []string) (int, error) { - if bh.Config.AuthLevel > 0 { - initAuth := os.Getenv("INIT_AUTH") - if initAuth == "0" { - errMsg := fmt.Errorf("this requires authentication information that is unavailable when authorization is disabled. Please enable authorization to use this functionality") - return http.StatusForbidden, errMsg +// isPermittedPrefix checks if the prefix is within the user's permissions. 
+func isPermittedPrefix(bucket, prefix string, permissions []string) bool { + prefixForChecking := fmt.Sprintf("/%s/%s", bucket, prefix) + + // Check if any of the permissions indicate the prefixForChecking is a parent directory + for _, perm := range permissions { + // Add a trailing slash to permission if it represents a directory + if !strings.HasSuffix(perm, "/") { + perm += "/" } - claims, ok := c.Get("claims").(*auth.Claims) - if !ok { - return http.StatusInternalServerError, fmt.Errorf("could not get claims from request context") + // Split the paths into components + prefixComponents := strings.Split(prefixForChecking, "/") + permComponents := strings.Split(perm, "/") + + // Compare each component + match := true + for i := 1; i < len(prefixComponents) && i < len(permComponents); i++ { + if permComponents[i] == "" || prefixComponents[i] == "" { + break + } + if prefixComponents[i] != permComponents[i] { + match = false + break + } } - roles := claims.RealmAccess["roles"] - ue := claims.Email - - // Check for required roles - isLimitedWriter := utils.StringInSlice(bh.Config.LimitedWriterRoleName, roles) - // Ensure the prefix ends with a slash - if !strings.HasSuffix(prefix, "/") { - prefix += "/" + + // If all components match up to the length of the permission path, + // and the permission path has no additional components, return true + if match { + return true } + } + return false +} - // We assume if someone is limited_writer, they should never be admin or super_writer - if isLimitedWriter { - if !bh.DB.CheckUserPermission(ue, bucket, prefix, permissions) { - return http.StatusForbidden, fmt.Errorf("forbidden") +// checkAndAdjustPrefix checks if the prefix is an object and adjusts the prefix accordingly. +// Returns the adjusted prefix, an error message (if any), and the HTTP status code. 
+func checkAndAdjustPrefix(s3Ctrl *S3Controller, bucket, prefix string) (string, string, int) { + // As of 6/12/24, unsure why ./ is included here, may be needed for an edge case, but could also cause problems + if prefix != "" && prefix != "./" && prefix != "/" { + isObject, err := s3Ctrl.KeyExists(bucket, prefix) + if err != nil { + return "", fmt.Sprintf("error checking if object exists: %s", err.Error()), http.StatusInternalServerError + } + if isObject { + objMeta, err := s3Ctrl.GetMetaData(bucket, prefix) + if err != nil { + return "", fmt.Sprintf("error checking for object's metadata: %s", err.Error()), http.StatusInternalServerError + } + // This is because AWS considers empty prefixes with a .keep as an object, so we ignore and log + if *objMeta.ContentLength == 0 { + log.Infof("detected a zero byte directory marker within prefix: %s", prefix) + } else { + return "", fmt.Sprintf("`%s` is an object, not a prefix. Please see options for keys or pass a prefix", prefix), http.StatusTeapot } } + prefix = strings.Trim(prefix, "/") + "/" } - return 0, nil + return prefix, "", http.StatusOK } -func (bh *BlobHandler) GetUserS3ReadListPermission(c echo.Context, bucket string) ([]string, bool, error) { - permissions := make([]string, 0) +func validateEnvJSON(filePath string) error { + // Read the contents of the .env.json file + jsonData, err := os.ReadFile(filePath) + if err != nil { + return fmt.Errorf("error reading .env.json: %s", err.Error()) + } - if bh.Config.AuthLevel > 0 { - initAuth := os.Getenv("INIT_AUTH") - if initAuth == "0" { - errMsg := fmt.Errorf("this endpoint requires authentication information that is unavailable when authorization is disabled. 
Please enable authorization to use this functionality") - return permissions, false, errMsg - } - fullAccess := false - claims, ok := c.Get("claims").(*auth.Claims) - if !ok { - return permissions, fullAccess, fmt.Errorf("could not get claims from request context") - } - roles := claims.RealmAccess["roles"] + // Parse the JSON data into the AWSConfig struct + var awsConfig AWSConfig + if err := json.Unmarshal(jsonData, &awsConfig); err != nil { + return fmt.Errorf("error parsing .env.json: %s", err.Error()) + } - // Check if user has the limited reader role - isLimitedReader := utils.StringInSlice(bh.Config.LimitedReaderRoleName, roles) + // Check if there is at least one account defined + if len(awsConfig.Accounts) == 0 { + return fmt.Errorf("no AWS accounts defined in .env.json") + } - // If user is not a limited reader, assume they have full read access - if !isLimitedReader { - fullAccess = true // Indicating full access - return permissions, fullAccess, nil + // Check if each account has the required fields + for i, account := range awsConfig.Accounts { + missingFields := []string{} + if account.AWS_ACCESS_KEY_ID == "" { + missingFields = append(missingFields, "AWS_ACCESS_KEY_ID") + } + if account.AWS_SECRET_ACCESS_KEY == "" { + missingFields = append(missingFields, "AWS_SECRET_ACCESS_KEY") } - // If user is a limited reader, fetch specific permissions - ue := claims.Email - permissions, err := bh.DB.GetUserAccessiblePrefixes(ue, bucket, []string{"read", "write"}) - if err != nil { - return permissions, fullAccess, err + if len(missingFields) > 0 { + return fmt.Errorf("missing fields (%s) for AWS account %d in envJson file", strings.Join(missingFields, ", "), i+1) } - return permissions, fullAccess, nil } + if len(awsConfig.BucketAllowList) == 0 { + return fmt.Errorf("no buckets in the `bucket_allow_list`, please provide required buckets, or `*` for access to all buckets") + } + // If all checks pass, return nil (no error) + return nil +} + +func 
newAWSConfig(envJson string) (AWSConfig, error) { + var awsConfig AWSConfig + err := validateEnvJSON(envJson) + if err != nil { + return awsConfig, fmt.Errorf(err.Error()) + } + jsonData, err := os.ReadFile(envJson) + if err != nil { + return awsConfig, err + } + + if err := json.Unmarshal(jsonData, &awsConfig); err != nil { + return awsConfig, err + } + return awsConfig, nil +} - return permissions, true, nil +func newMinioConfig() MinioConfig { + var mc MinioConfig + mc.S3Endpoint = os.Getenv("MINIO_S3_ENDPOINT") + mc.DisableSSL = os.Getenv("MINIO_S3_DISABLE_SSL") + mc.ForcePathStyle = os.Getenv("MINIO_S3_FORCE_PATH_STYLE") + mc.AccessKeyID = os.Getenv("MINIO_ACCESS_KEY_ID") + mc.Bucket = os.Getenv("AWS_S3_BUCKET") + mc.SecretAccessKey = os.Getenv("MINIO_SECRET_ACCESS_KEY") + return mc } diff --git a/blobstore/buckets.go b/blobstore/buckets.go index e813656..17745b3 100644 --- a/blobstore/buckets.go +++ b/blobstore/buckets.go @@ -12,6 +12,12 @@ import ( log "github.com/sirupsen/logrus" ) +type BucketInfo struct { + ID int `json:"id"` + Name string `json:"name"` + CanRead bool `json:"can_read"` +} + // listBuckets returns the list of all S3 buckets. func (s3Ctrl *S3Controller) ListBuckets() (*s3.ListBucketsOutput, error) { // Set up input parameters for the ListBuckets API @@ -28,59 +34,6 @@ func (s3Ctrl *S3Controller) ListBuckets() (*s3.ListBucketsOutput, error) { return result, nil } -// func (bh *BlobHandler) createBucket(bucketName string) error { -// // Set up input parameters for the CreateBucket API -// input := &s3.CreateBucketInput{ -// Bucket: aws.String(bucketName), -// } - -// // Create the bucket -// _, err := bh.S3Svc.CreateBucket(input) -// if err != nil { -// return err -// } - -// return nil -// } - -// // deleteBucket deletes the specified S3 bucket. 
-// func (bh *BlobHandler) deleteBucket(bucketName string) error { -// // Set up input parameters for the DeleteBucket API -// input := &s3.DeleteBucketInput{ -// Bucket: aws.String(bucketName), -// } - -// // Delete the bucket -// _, err := bh.S3Svc.DeleteBucket(input) -// if err != nil { -// return err -// } - -// return nil -// } - -// // getBucketACL retrieves the ACL (Access Control List) for the specified bucket. -// func (bh *BlobHandler) getBucketACL(bucketName string) (*s3.GetBucketAclOutput, error) { -// // Set up input parameters for the GetBucketAcl API -// input := &s3.GetBucketAclInput{ -// Bucket: aws.String(bucketName), -// } - -// // Get the bucket ACL -// result, err := bh.S3Svc.GetBucketAcl(input) -// if err != nil { -// return nil, err -// } - -// return result, nil -// } - -type BucketInfo struct { - ID int `json:"id"` - Name string `json:"name"` - CanRead bool `json:"can_read"` -} - func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { var allBuckets []BucketInfo @@ -88,7 +41,7 @@ func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { defer bh.Mu.Unlock() // Check user's overall read access level - _, fullAccess, err := bh.GetUserS3ReadListPermission(c, "") + _, fullAccess, err := bh.getUserS3ReadListPermission(c, "") if err != nil { return c.JSON(http.StatusInternalServerError, fmt.Errorf("error fetching user permissions: %s", err.Error())) } @@ -114,7 +67,7 @@ func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { for i, bucket := range controller.Buckets { canRead := fullAccess if !fullAccess { - permissions, _, err := bh.GetUserS3ReadListPermission(c, bucket) + permissions, _, err := bh.getUserS3ReadListPermission(c, bucket) if err != nil { return c.JSON(http.StatusInternalServerError, fmt.Errorf("error fetching user permissions: %s", err.Error())) } @@ -140,78 +93,3 @@ func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { return c.JSON(http.StatusOK, allBuckets) } - -// func (bh *BlobHandler) 
HandleCreateBucket(c echo.Context) error { -// bucketName := c.QueryParam("name") - -// if bucketName == "" { -// err := fmt.Errorf("request must include a `name` parameter") -// log.Info("HandleCreateBucket: " + err.Error()) -// return c.JSON(http.StatusBadRequest, err.Error()) -// } - -// // Check if the bucket already exists -// buckets, err := bh.listBuckets() -// if err != nil { -// log.Info("HandleCreateBucket: Error listing buckets:", err.Error()) -// return c.JSON(http.StatusInternalServerError, err.Error()) -// } - -// for _, b := range buckets.Buckets { -// if aws.StringValue(b.Name) == bucketName { -// err := fmt.Errorf("bucket with the name `%s` already exists", bucketName) -// log.Info("HandleCreateBucket: " + err.Error()) -// return c.JSON(http.StatusConflict, err.Error()) -// } -// } - -// // Create the S3 bucket -// err = bh.createBucket(bucketName) -// if err != nil { -// log.Info("HandleCreateBucket: Error creating bucket:", err.Error()) -// return c.JSON(http.StatusInternalServerError, err.Error()) -// } - -// log.Info("HandleCreateBucket: Successfully created bucket:", bucketName) -// return c.JSON(http.StatusOK, "Bucket created successfully") -// } - -// func (bh *BlobHandler) HandleDeleteBucket(c echo.Context) error { -// bucketName := c.QueryParam("name") - -// if bucketName == "" { -// err := fmt.Errorf("request must include a `name` parameter") -// log.Info("HandleDeleteBucket: " + err.Error()) -// return c.JSON(http.StatusBadRequest, err.Error()) -// } - -// // Delete the S3 bucket -// err := bh.deleteBucket(bucketName) -// if err != nil { -// log.Info("HandleDeleteBucket: Error deleting bucket:", err.Error()) -// return c.JSON(http.StatusInternalServerError, err.Error()) -// } - -// log.Info("HandleDeleteBucket: Successfully deleted bucket:", bucketName) -// return c.JSON(http.StatusOK, "Bucket deleted successfully") -// } - -// func (bh *BlobHandler) HandleGetBucketACL(c echo.Context) error { -// bucketName := c.QueryParam("name") - -// 
if bucketName == "" { -// err := fmt.Errorf("request must include a `name` parameter") -// log.Info("HandleGetBucketACL: " + err.Error()) -// return c.JSON(http.StatusBadRequest, err.Error()) -// } - -// // Get the bucket ACL -// acl, err := bh.getBucketACL(bucketName) -// if err != nil { -// log.Info("HandleGetBucketACL: Error getting bucket ACL:", err.Error()) -// return c.JSON(http.StatusInternalServerError, err.Error()) -// } - -// log.Info("HandleGetBucketACL: Successfully retrieved ACL for bucket:", bucketName) -// return c.JSON(http.StatusOK, acl) -// } diff --git a/blobstore/creds.go b/blobstore/creds.go index de1d49e..f0fd50c 100644 --- a/blobstore/creds.go +++ b/blobstore/creds.go @@ -1,10 +1,14 @@ package blobstore import ( - "encoding/json" "fmt" - "os" "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + log "github.com/sirupsen/logrus" ) type AWSCreds struct { @@ -28,21 +32,21 @@ type MinioConfig struct { S3Mock string `json:"S3_MOCK"` } -func (creds MinioConfig) validateMinioConfig() error { +func (mc MinioConfig) validateMinioConfig() error { missingFields := []string{} - if creds.S3Endpoint == "" { + if mc.S3Endpoint == "" { missingFields = append(missingFields, "S3Endpoint") } - if creds.DisableSSL == "" { + if mc.DisableSSL == "" { missingFields = append(missingFields, "DisableSSL") } - if creds.ForcePathStyle == "" { + if mc.ForcePathStyle == "" { missingFields = append(missingFields, "ForcePathStyle") } - if creds.AccessKeyID == "" { + if mc.AccessKeyID == "" { missingFields = append(missingFields, "AccessKeyID") } - if creds.SecretAccessKey == "" { + if mc.SecretAccessKey == "" { missingFields = append(missingFields, "SecretAccessKey") } @@ -52,69 +56,48 @@ func (creds MinioConfig) validateMinioConfig() error { return nil } -func validateEnvJSON(filePath string) error { - // Read the contents of the .env.json file - 
jsonData, err := os.ReadFile(filePath) +func (mc MinioConfig) minIOSessionManager() (*s3.S3, *session.Session, error) { + sess, err := session.NewSession(&aws.Config{ + Endpoint: aws.String(mc.S3Endpoint), + Region: aws.String("us-east-1"), + Credentials: credentials.NewStaticCredentials(mc.AccessKeyID, mc.SecretAccessKey, ""), + S3ForcePathStyle: aws.Bool(true), + }) if err != nil { - return fmt.Errorf("error reading .env.json: %s", err.Error()) - } - - // Parse the JSON data into the AWSConfig struct - var awsConfig AWSConfig - if err := json.Unmarshal(jsonData, &awsConfig); err != nil { - return fmt.Errorf("error parsing .env.json: %s", err.Error()) - } - - // Check if there is at least one account defined - if len(awsConfig.Accounts) == 0 { - return fmt.Errorf("no AWS accounts defined in .env.json") + return nil, nil, err } + log.Info("Using minio to mock s3") - // Check if each account has the required fields - for i, account := range awsConfig.Accounts { - missingFields := []string{} - if account.AWS_ACCESS_KEY_ID == "" { - missingFields = append(missingFields, "AWS_ACCESS_KEY_ID") - } - if account.AWS_SECRET_ACCESS_KEY == "" { - missingFields = append(missingFields, "AWS_SECRET_ACCESS_KEY") - } - - if len(missingFields) > 0 { - return fmt.Errorf("missing fields (%s) for AWS account %d in envJson file", strings.Join(missingFields, ", "), i+1) + // Check if the bucket exists + s3SVC := s3.New(sess) + _, err = s3SVC.HeadBucket(&s3.HeadBucketInput{ + Bucket: aws.String(mc.Bucket), + }) + if err != nil { + // Bucket does not exist, create it + _, err := s3SVC.CreateBucket(&s3.CreateBucketInput{ + Bucket: aws.String(mc.Bucket), + }) + if err != nil { + log.Errorf("Error creating bucket: %s", err.Error()) + return nil, nil, nil } + log.Info("Bucket created successfully") + } else { + log.Info("Bucket already exists") } - if len(awsConfig.BucketAllowList) == 0 { - return fmt.Errorf("no buckets in the `bucket_allow_list`, please provide required buckets, or `*` for 
access to all buckets") - } - // If all checks pass, return nil (no error) - return nil + + return s3SVC, sess, nil } -func newAWSConfig(envJson string) (AWSConfig, error) { - var awsConfig AWSConfig - err := validateEnvJSON(envJson) - if err != nil { - return awsConfig, fmt.Errorf(err.Error()) - } - jsonData, err := os.ReadFile(envJson) +func (ac AWSCreds) aWSSessionManager() (*s3.S3, *session.Session, error) { + log.Info("Using AWS S3") + sess, err := session.NewSession(&aws.Config{ + Region: aws.String("us-east-1"), + Credentials: credentials.NewStaticCredentials(ac.AWS_ACCESS_KEY_ID, ac.AWS_SECRET_ACCESS_KEY, ""), + }) if err != nil { - return awsConfig, err + return nil, nil, err } - - if err := json.Unmarshal(jsonData, &awsConfig); err != nil { - return awsConfig, err - } - return awsConfig, nil -} - -func newMinioConfig() MinioConfig { - var mc MinioConfig - mc.S3Endpoint = os.Getenv("MINIO_S3_ENDPOINT") - mc.DisableSSL = os.Getenv("MINIO_S3_DISABLE_SSL") - mc.ForcePathStyle = os.Getenv("MINIO_S3_FORCE_PATH_STYLE") - mc.AccessKeyID = os.Getenv("MINIO_ACCESS_KEY_ID") - mc.Bucket = os.Getenv("AWS_S3_BUCKET") - mc.SecretAccessKey = os.Getenv("MINIO_SECRET_ACCESS_KEY") - return mc + return s3.New(sess), sess, nil } diff --git a/blobstore/fgac.go b/blobstore/fgac.go new file mode 100644 index 0000000..0b4db7d --- /dev/null +++ b/blobstore/fgac.go @@ -0,0 +1,130 @@ +package blobstore + +import ( + "fmt" + "net/http" + "os" + "strings" + + "github.com/Dewberry/s3api/auth" + "github.com/Dewberry/s3api/utils" + "github.com/labstack/echo/v4" + log "github.com/sirupsen/logrus" +) + +//Utility Methods for Endpoint Handlers + +func (bh *BlobHandler) getS3ReadPermissions(c echo.Context, bucket string) ([]string, bool, int, error) { + permissions, fullAccess, err := bh.getUserS3ReadListPermission(c, bucket) + if err != nil { + //TEMP solution before error library is implimented and string check ups become redundant + httpStatus := http.StatusInternalServerError + if 
strings.Contains(err.Error(), "this endpoint requires authentication information that is unavailable when authorization is disabled.") { + httpStatus = http.StatusForbidden + } + return nil, false, httpStatus, fmt.Errorf("error fetching user permissions: %s", err.Error()) + } + if !fullAccess && len(permissions) == 0 { + return nil, false, http.StatusForbidden, fmt.Errorf("user does not have permission to read the %s bucket", bucket) + } + return permissions, fullAccess, http.StatusOK, nil +} + +func (bh *BlobHandler) getUserS3ReadListPermission(c echo.Context, bucket string) ([]string, bool, error) { + permissions := make([]string, 0) + + if bh.Config.AuthLevel > 0 { + initAuth := os.Getenv("INIT_AUTH") + if initAuth == "0" { + errMsg := fmt.Errorf("this endpoint requires authentication information that is unavailable when authorization is disabled. Please enable authorization to use this functionality") + return permissions, false, errMsg + } + fullAccess := false + claims, ok := c.Get("claims").(*auth.Claims) + if !ok { + return permissions, fullAccess, fmt.Errorf("could not get claims from request context") + } + roles := claims.RealmAccess["roles"] + + // Check if user has the limited reader role + isLimitedReader := utils.StringInSlice(bh.Config.LimitedReaderRoleName, roles) + + // If user is not a limited reader, assume they have full read access + if !isLimitedReader { + fullAccess = true // Indicating full access + return permissions, fullAccess, nil + } + + // If user is a limited reader, fetch specific permissions + ue := claims.Email + permissions, err := bh.DB.GetUserAccessiblePrefixes(ue, bucket, []string{"read", "write"}) + if err != nil { + return permissions, fullAccess, err + } + return permissions, fullAccess, nil + } + + return permissions, true, nil +} + +func (bh *BlobHandler) validateUserAccessToPrefix(c echo.Context, bucket, prefix string, permissions []string) (int, error) { + if bh.Config.AuthLevel > 0 { + initAuth := 
os.Getenv("INIT_AUTH") + if initAuth == "0" { + errMsg := fmt.Errorf("this requires authentication information that is unavailable when authorization is disabled. Please enable authorization to use this functionality") + return http.StatusForbidden, errMsg + } + claims, ok := c.Get("claims").(*auth.Claims) + if !ok { + return http.StatusInternalServerError, fmt.Errorf("could not get claims from request context") + } + roles := claims.RealmAccess["roles"] + ue := claims.Email + + // Check for required roles + isLimitedWriter := utils.StringInSlice(bh.Config.LimitedWriterRoleName, roles) + // Ensure the prefix ends with a slash + if !strings.HasSuffix(prefix, "/") { + prefix += "/" + } + + // We assume if someone is limited_writer, they should never be admin or super_writer + if isLimitedWriter { + if !bh.DB.CheckUserPermission(ue, bucket, prefix, permissions) { + return http.StatusForbidden, fmt.Errorf("forbidden") + } + } + } + return 0, nil +} + +func (bh *BlobHandler) HandleCheckS3UserPermission(c echo.Context) error { + if bh.Config.AuthLevel == 0 { + log.Info("Checked user permissions successfully") + return c.JSON(http.StatusOK, true) + } + initAuth := os.Getenv("INIT_AUTH") + if initAuth == "0" { + errMsg := fmt.Errorf("this endpoint requires authentication information that is unavailable when authorization is disabled. 
Please enable authorization to use this functionality") + log.Error(errMsg.Error()) + return c.JSON(http.StatusForbidden, errMsg.Error()) + } + prefix := c.QueryParam("prefix") + bucket := c.QueryParam("bucket") + operation := c.QueryParam("operation") + claims, ok := c.Get("claims").(*auth.Claims) + if !ok { + errMsg := fmt.Errorf("could not get claims from request context") + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + userEmail := claims.Email + if operation == "" || prefix == "" || bucket == "" { + errMsg := fmt.Errorf("`prefix`, `operation` and 'bucket are required params") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + } + isAllowed := bh.DB.CheckUserPermission(userEmail, bucket, prefix, []string{operation}) + log.Info("Checked user permissions successfully") + return c.JSON(http.StatusOK, isAllowed) +} diff --git a/blobstore/list.go b/blobstore/list.go index 5f6906b..40e3a22 100644 --- a/blobstore/list.go +++ b/blobstore/list.go @@ -27,32 +27,6 @@ type ListResult struct { ModifiedBy string `json:"modified_by"` } -// CheckAndAdjustPrefix checks if the prefix is an object and adjusts the prefix accordingly. -// Returns the adjusted prefix, an error message (if any), and the HTTP status code. 
-func CheckAndAdjustPrefix(s3Ctrl *S3Controller, bucket, prefix string) (string, string, int) { - // As of 6/12/24, unsure why ./ is included here, may be needed for an edge case, but could also cause problems - if prefix != "" && prefix != "./" && prefix != "/" { - isObject, err := s3Ctrl.KeyExists(bucket, prefix) - if err != nil { - return "", fmt.Sprintf("error checking if object exists: %s", err.Error()), http.StatusInternalServerError - } - if isObject { - objMeta, err := s3Ctrl.GetMetaData(bucket, prefix) - if err != nil { - return "", fmt.Sprintf("error checking for object's metadata: %s", err.Error()), http.StatusInternalServerError - } - // This is because AWS considers empty prefixes with a .keep as an object, so we ignore and log - if *objMeta.ContentLength == 0 { - log.Infof("detected a zero byte directory marker within prefix: %s", prefix) - } else { - return "", fmt.Sprintf("`%s` is an object, not a prefix. Please see options for keys or pass a prefix", prefix), http.StatusTeapot - } - } - prefix = strings.Trim(prefix, "/") + "/" - } - return prefix, "", http.StatusOK -} - // HandleListByPrefix handles the API endpoint for listing objects by prefix in an S3 bucket. 
func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { prefix := c.QueryParam("prefix") @@ -65,7 +39,7 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - adjustedPrefix, errMsg, statusCode := CheckAndAdjustPrefix(s3Ctrl, bucket, prefix) + adjustedPrefix, errMsg, statusCode := checkAndAdjustPrefix(s3Ctrl, bucket, prefix) if errMsg != "" { log.Error(errMsg) return c.JSON(statusCode, errMsg) @@ -89,7 +63,7 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { } var result []string - permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) + permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) if err != nil { log.Error(err.Error()) return c.JSON(statusCode, err.Error()) @@ -97,14 +71,14 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { processPage := func(page *s3.ListObjectsV2Output) error { for _, cp := range page.CommonPrefixes { // Handle directories (common prefixes) - if fullAccess || IsPermittedPrefix(bucket, *cp.Prefix, permissions) { + if fullAccess || isPermittedPrefix(bucket, *cp.Prefix, permissions) { result = append(result, aws.StringValue(cp.Prefix)) } } for _, object := range page.Contents { // Handle files - if fullAccess || IsPermittedPrefix(bucket, *object.Key, permissions) { + if fullAccess || isPermittedPrefix(bucket, *object.Key, permissions) { result = append(result, aws.StringValue(object.Key)) } @@ -134,7 +108,7 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - adjustedPrefix, errMsg, statusCode := CheckAndAdjustPrefix(s3Ctrl, bucket, prefix) + adjustedPrefix, errMsg, statusCode := checkAndAdjustPrefix(s3Ctrl, bucket, prefix) if errMsg != "" { log.Error(errMsg) return c.JSON(statusCode, errMsg) @@ -159,7 +133,7 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c 
echo.Context) error { var results []ListResult var count int - permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) + permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) if err != nil { log.Error(err.Error()) return c.JSON(statusCode, err.Error()) @@ -167,7 +141,7 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { processPage := func(page *s3.ListObjectsV2Output) error { for _, cp := range page.CommonPrefixes { // Handle directories (common prefixes) - if fullAccess || IsPermittedPrefix(bucket, *cp.Prefix, permissions) { + if fullAccess || isPermittedPrefix(bucket, *cp.Prefix, permissions) { dir := ListResult{ ID: count, Name: filepath.Base(*cp.Prefix), @@ -185,7 +159,7 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { for _, object := range page.Contents { // Handle files - if fullAccess || IsPermittedPrefix(bucket, *object.Key, permissions) { + if fullAccess || isPermittedPrefix(bucket, *object.Key, permissions) { file := ListResult{ ID: count, Name: filepath.Base(*object.Key), @@ -279,38 +253,3 @@ func (s3Ctrl *S3Controller) GetListWithCallBack(bucket, prefix string, delimiter } return err // Return any errors encountered in the pagination process } - -// IsPermittedPrefix checks if the prefix is within the user's permissions. 
-func IsPermittedPrefix(bucket, prefix string, permissions []string) bool { - prefixForChecking := fmt.Sprintf("/%s/%s", bucket, prefix) - - // Check if any of the permissions indicate the prefixForChecking is a parent directory - for _, perm := range permissions { - // Add a trailing slash to permission if it represents a directory - if !strings.HasSuffix(perm, "/") { - perm += "/" - } - // Split the paths into components - prefixComponents := strings.Split(prefixForChecking, "/") - permComponents := strings.Split(perm, "/") - - // Compare each component - match := true - for i := 1; i < len(prefixComponents) && i < len(permComponents); i++ { - if permComponents[i] == "" || prefixComponents[i] == "" { - break - } - if prefixComponents[i] != permComponents[i] { - match = false - break - } - } - - // If all components match up to the length of the permission path, - // and the permission path has no additional components, return true - if match { - return true - } - } - return false -} diff --git a/blobstore/metadata.go b/blobstore/metadata.go index ef58a4e..d2b647c 100644 --- a/blobstore/metadata.go +++ b/blobstore/metadata.go @@ -11,7 +11,7 @@ import ( log "github.com/sirupsen/logrus" ) -func (bh *BlobHandler) GetSize(page *s3.ListObjectsV2Output, totalSize *uint64, fileCount *uint64) error { +func GetListSize(page *s3.ListObjectsV2Output, totalSize *uint64, fileCount *uint64) error { if page == nil { return fmt.Errorf("input page is nil") } @@ -43,12 +43,12 @@ func (bh *BlobHandler) HandleGetSize(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) + permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) if err != nil { log.Error(err.Error()) return c.JSON(statusCode, err.Error()) } - if !fullAccess && !IsPermittedPrefix(bucket, prefix, permissions) { + if !fullAccess && !isPermittedPrefix(bucket, 
prefix, permissions) { errMsg := fmt.Errorf("user does not have permission to read the %s prefix", prefix) log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) @@ -70,7 +70,7 @@ func (bh *BlobHandler) HandleGetSize(c echo.Context) error { var totalSize uint64 var fileCount uint64 err = s3Ctrl.GetListWithCallBack(bucket, prefix, false, func(page *s3.ListObjectsV2Output) error { - return bh.GetSize(page, &totalSize, &fileCount) + return GetListSize(page, &totalSize, &fileCount) }) if err != nil { @@ -112,13 +112,13 @@ func (bh *BlobHandler) HandleGetMetaData(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) + permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) if err != nil { log.Error(err.Error()) return c.JSON(statusCode, err.Error()) } - if !fullAccess && !IsPermittedPrefix(bucket, key, permissions) { + if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { errMsg := fmt.Errorf("user does not have permission to read the %s key", key) log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) @@ -155,13 +155,13 @@ func (bh *BlobHandler) HandleGetObjExist(c echo.Context) error { return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) + permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) if err != nil { log.Error(err.Error()) return c.JSON(statusCode, err.Error()) } - if !fullAccess && !IsPermittedPrefix(bucket, key, permissions) { + if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { errMsg := fmt.Errorf("user does not have permission to read the %s key", key) log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) @@ -191,3 +191,23 @@ func (s3Ctrl *S3Controller) GetMetaData(bucket, key string) 
(*s3.HeadObjectOutpu return result, nil } + +func (s3Ctrl *S3Controller) KeyExists(bucket string, key string) (bool, error) { + + _, err := s3Ctrl.S3Svc.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case "NotFound": // s3.ErrCodeNoSuchKey does not work, aws is missing this error code so we hardwire a string + return false, nil + default: + return false, fmt.Errorf("KeyExists: %s", err) + } + } + return false, fmt.Errorf("KeyExists: %s", err) + } + return true, nil +} diff --git a/blobstore/object_content.go b/blobstore/object_content.go index f74e4ae..28d5250 100644 --- a/blobstore/object_content.go +++ b/blobstore/object_content.go @@ -47,13 +47,13 @@ func (bh *BlobHandler) HandleObjectContents(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) + permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) if err != nil { log.Error(err.Error()) return c.JSON(statusCode, err.Error()) } - if !fullAccess && !IsPermittedPrefix(bucket, key, permissions) { + if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { errMsg := fmt.Errorf("user does not have permission to read the %s key", key) log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) diff --git a/blobstore/ping.go b/blobstore/ping.go index e91cb4b..3fae927 100644 --- a/blobstore/ping.go +++ b/blobstore/ping.go @@ -5,19 +5,23 @@ import ( "net/http" "os" - "github.com/Dewberry/s3api/auth" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" "github.com/labstack/echo/v4" log "github.com/sirupsen/logrus" ) -func (bh *BlobHandler) Ping(c echo.Context) error { +func (bh *BlobHandler) HandlePing(c echo.Context) error { return c.JSON(http.StatusOK, "connection without Auth is 
healthy") } -func (bh *BlobHandler) PingWithAuth(c echo.Context) error { +func (bh *BlobHandler) HandlePingWithAuth(c echo.Context) error { // Perform a HeadBucket operation to check the health of the S3 connection + initAuth := os.Getenv("INIT_AUTH") + if initAuth == "0" { + errMsg := fmt.Errorf("this requires authentication information that is unavailable when authorization is disabled. Please enable authorization to use this functionality") + return c.JSON(http.StatusForbidden, errMsg.Error()) + } bucketHealth := make(map[string]string) var valid string @@ -40,34 +44,3 @@ func (bh *BlobHandler) PingWithAuth(c echo.Context) error { return c.JSON(http.StatusOK, bucketHealth) } - -func (bh *BlobHandler) HandleCheckS3UserPermission(c echo.Context) error { - if bh.Config.AuthLevel == 0 { - log.Info("Checked user permissions successfully") - return c.JSON(http.StatusOK, true) - } - initAuth := os.Getenv("INIT_AUTH") - if initAuth == "0" { - errMsg := fmt.Errorf("this endpoint requires authentication information that is unavailable when authorization is disabled. 
Please enable authorization to use this functionality") - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) - } - prefix := c.QueryParam("prefix") - bucket := c.QueryParam("bucket") - operation := c.QueryParam("operation") - claims, ok := c.Get("claims").(*auth.Claims) - if !ok { - errMsg := fmt.Errorf("could not get claims from request context") - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - userEmail := claims.Email - if operation == "" || prefix == "" || bucket == "" { - errMsg := fmt.Errorf("`prefix`, `operation` and 'bucket are required params") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } - isAllowed := bh.DB.CheckUserPermission(userEmail, bucket, prefix, []string{operation}) - log.Info("Checked user permissions successfully") - return c.JSON(http.StatusOK, isAllowed) -} diff --git a/blobstore/presigned_url.go b/blobstore/presigned_url.go index 74a4305..b767897 100644 --- a/blobstore/presigned_url.go +++ b/blobstore/presigned_url.go @@ -25,90 +25,6 @@ func (s3Ctrl *S3Controller) GetDownloadPresignedURL(bucket, key string, expDays return req.Presign(duration) } -// func (s3Ctrl *S3Controller) tarS3Files(r *s3.ListObjectsV2Output, bucket string, outputFile string, prefix string) (err error) { -// uploader := s3manager.NewUploader(s3Ctrl.Sess) -// pr, pw := io.Pipe() - -// gzipWriter := gzip.NewWriter(pw) -// tarWriter := tar.NewWriter(gzipWriter) - -// var wg sync.WaitGroup -// wg.Add(1) - -// go func() { -// defer wg.Done() -// log.Debug("start writing files to:", outputFile) -// _, err := uploader.Upload(&s3manager.UploadInput{ -// Bucket: aws.String(bucket), -// Key: aws.String(outputFile), -// Body: pr, -// }) -// if err != nil { -// log.Errorf("failed to upload tar.gz file to S3: %s", err) -// return -// } -// log.Debug("completed writing files to:", outputFile) -// }() - -// for _, item := range r.Contents { -// filePath := 
filepath.Join(strings.TrimPrefix(aws.StringValue(item.Key), prefix)) -// copyObj := aws.StringValue(item.Key) -// log.Debugf("copying %s to %s", copyObj, outputFile) - -// getResp, err := s3Ctrl.S3Svc.GetObject(&s3.GetObjectInput{ -// Bucket: aws.String(bucket), -// Key: aws.String(copyObj), -// }) -// if err != nil { -// log.Errorf("failed to download file: %s, error: %s", copyObj, err) -// return err -// } -// defer getResp.Body.Close() - -// header := &tar.Header{ -// Name: filePath, -// Size: *getResp.ContentLength, -// Mode: int64(0644), -// } - -// err = tarWriter.WriteHeader(header) -// if err != nil { -// log.Errorf("failed to write tar header for file: %s, error: %s", copyObj, err) -// return err -// } - -// _, err = io.Copy(tarWriter, getResp.Body) -// if err != nil { -// log.Errorf("failed to write file content to tar for file: %s, error: %s", copyObj, err) -// return err -// } -// log.Debugf("completed copying: %s", copyObj) -// } - -// err = tarWriter.Close() -// if err != nil { -// log.Error("tar close failure:", err) -// return err -// } - -// err = gzipWriter.Close() -// if err != nil { -// log.Error("gzip close failure:", err) -// return err -// } - -// err = pw.Close() -// if err != nil { -// log.Error("pipe writer close failure:", err) -// return err -// } - -// wg.Wait() - -// log.Debug("completed tar of file successfully") -// return nil -// } - func (bh *BlobHandler) HandleGetPresignedDownloadURL(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) @@ -124,13 +40,13 @@ func (bh *BlobHandler) HandleGetPresignedDownloadURL(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) + permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) if err != nil { log.Error(err.Error()) return c.JSON(statusCode, err.Error()) } - if !fullAccess && 
!IsPermittedPrefix(bucket, key, permissions) { + if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { errMsg := fmt.Errorf("user does not have permission to read the %s key", key) log.Error(errMsg.Error()) return c.JSON(http.StatusForbidden, errMsg.Error()) @@ -159,107 +75,6 @@ func (bh *BlobHandler) HandleGetPresignedDownloadURL(c echo.Context) error { return c.JSON(http.StatusOK, url) } -// func (bh *BlobHandler) HandleGetPresignedURLMultiObj(c echo.Context) error { -// prefix := c.QueryParam("prefix") -// if prefix == "" { -// errMsg := fmt.Errorf("request must include a `prefix` parameter") -// log.Error(errMsg.Error()) -// return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) -// } - -// bucket := c.QueryParam("bucket") -// s3Ctrl, err := bh.GetController(bucket) -// if err != nil { -// errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) -// log.Error(errMsg.Error()) -// return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) -// } - -// if !strings.HasSuffix(prefix, "/") { -// prefix = prefix + "/" -// } - -// response, err := s3Ctrl.GetList(bucket, prefix, false) -// if err != nil { -// errMsg := fmt.Errorf("error getting list: %s", err.Error()) -// log.Error(errMsg.Error()) -// return c.JSON(http.StatusInternalServerError, errMsg.Error()) -// } -// if *response.KeyCount == 0 { -// errMsg := fmt.Errorf("the specified prefix %s does not exist in S3", prefix) -// log.Error(errMsg.Error()) -// return c.JSON(http.StatusNotFound, errMsg.Error()) -// } -// //check if size is below 5GB -// var size, fileCount uint64 -// err = bh.GetSize(response, &size, &fileCount) -// if err != nil { -// errMsg := fmt.Errorf("error getting size: %s", err.Error()) -// log.Error(errMsg.Error()) -// return c.JSON(http.StatusInternalServerError, errMsg.Error()) -// } - -// limit := uint64(1024 * 1024 * 1024 * bh.Config.DefaultZipDownloadSizeLimit) -// if size >= limit { -// errMsg := fmt.Errorf("request entity is larger than %v GB, 
current prefix size is: %v GB", bh.Config.DefaultZipDownloadSizeLimit, float64(size)/(1024*1024*1024)) -// log.Error(errMsg.Error()) -// return c.JSON(http.StatusRequestEntityTooLarge, errMsg.Error()) -// } - -// filename := fmt.Sprintf("%s.%s", strings.TrimSuffix(prefix, "/"), "tar.gz") -// outputFile := filepath.Join(bh.Config.DefaultTempPrefix, filename) - -// // Check if the tar.gz file already exists in S3 -// tarFileResponse, err := s3Ctrl.GetList(bucket, outputFile, false) -// if err != nil { -// errMsg := fmt.Errorf("error checking if tar.gz file exists in S3: %s", err) -// log.Error(errMsg.Error()) -// return c.JSON(http.StatusInternalServerError, errMsg.Error()) -// } - -// if len(tarFileResponse.Contents) > 0 { -// log.Debug("the prefix was once downloaded, checking if it is outdated") -// // Tar.gz file exists, now compare modification dates -// mostRecentModTime, err := s3Ctrl.getMostRecentModTime(bucket, prefix) -// if err != nil { -// errMsg := fmt.Errorf("error getting most recent modification time: %s", err) -// log.Error(errMsg.Error()) -// return c.JSON(http.StatusInternalServerError, errMsg.Error()) -// } - -// if tarFileResponse.Contents[0].LastModified.After(mostRecentModTime) { -// log.Debug("folder already downloaded and is current") - -// // Existing tar.gz file is up-to-date, return pre-signed URL -// href, err := s3Ctrl.GetDownloadPresignedURL(bucket, outputFile, bh.Config.DefaultDownloadPresignedUrlExpiration) -// if err != nil { -// errMsg := fmt.Errorf("error getting presigned: %s", err) -// log.Error(errMsg.Error()) -// return c.JSON(http.StatusInternalServerError, errMsg.Error()) -// } -// return c.JSON(http.StatusOK, string(href)) -// } -// log.Debug("folder already downloaded but is outdated starting the zip process") -// } - -// err = s3Ctrl.tarS3Files(response, bucket, outputFile, prefix) -// if err != nil { -// errMsg := fmt.Errorf("error tarring S3 files: %s", err) -// log.Error(errMsg.Error()) -// return 
c.JSON(http.StatusInternalServerError, errMsg.Error()) -// } - -// href, err := s3Ctrl.GetDownloadPresignedURL(bucket, outputFile, bh.Config.DefaultDownloadPresignedUrlExpiration) -// if err != nil { -// errMsg := fmt.Errorf("error getting presigned URL: %s", err) -// log.Error(errMsg.Error()) -// return c.JSON(http.StatusInternalServerError, errMsg.Error()) -// } - -// log.Info("successfully generated presigned URL for prefix:", prefix) -// return c.JSON(http.StatusOK, string(href)) -// } - func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { prefix := c.QueryParam("prefix") if prefix == "" { @@ -288,7 +103,7 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { scriptBuilder.WriteString("REM 5. Windows Defender SmartScreen (Optional): If you see a message like \"Windows Defender SmartScreen prevented an unrecognized app from starting,\" click \"More info\" and then click \"Run anyway\" to proceed with the download.\n\n") scriptBuilder.WriteString(fmt.Sprintf("mkdir \"%s\"\n", basePrefix)) - permissions, fullAccess, statusCode, err := bh.GetS3ReadPermissions(c, bucket) + permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) if err != nil { log.Error(err.Error()) return c.JSON(statusCode, err.Error()) @@ -296,7 +111,7 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { // Define the processPage function processPage := func(page *s3.ListObjectsV2Output) error { for _, item := range page.Contents { - if fullAccess || IsPermittedPrefix(bucket, *item.Key, permissions) { + if fullAccess || isPermittedPrefix(bucket, *item.Key, permissions) { // Size checking if item.Size != nil { diff --git a/blobstore/upload.go b/blobstore/upload.go index 77dc47f..de13df8 100644 --- a/blobstore/upload.go +++ b/blobstore/upload.go @@ -130,7 +130,7 @@ func (bh *BlobHandler) HandleMultipartUpload(c echo.Context) error { return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - 
httpCode, err := bh.CheckUserS3Permission(c, bucket, key, []string{"write"}) + httpCode, err := bh.validateUserAccessToPrefix(c, bucket, key, []string{"write"}) if err != nil { errMsg := fmt.Errorf("error while checking for user permission: %s", err) log.Error(errMsg.Error()) @@ -273,7 +273,7 @@ func (bh *BlobHandler) HandleGetPresignedUploadURL(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - httpCode, err := bh.CheckUserS3Permission(c, bucket, key, []string{"write"}) + httpCode, err := bh.validateUserAccessToPrefix(c, bucket, key, []string{"write"}) if err != nil { errMsg := fmt.Errorf("error while checking for user permission: %s", err) log.Error(errMsg.Error()) @@ -343,7 +343,7 @@ func (bh *BlobHandler) HandleGetMultipartUploadID(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - httpCode, err := bh.CheckUserS3Permission(c, bucket, key, []string{"write"}) + httpCode, err := bh.validateUserAccessToPrefix(c, bucket, key, []string{"write"}) if err != nil { errMsg := fmt.Errorf("error while checking for user permission: %s", err) log.Error(errMsg.Error()) @@ -392,7 +392,7 @@ func (bh *BlobHandler) HandleCompleteMultipartUpload(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - httpCode, err := bh.CheckUserS3Permission(c, bucket, key, []string{"write"}) + httpCode, err := bh.validateUserAccessToPrefix(c, bucket, key, []string{"write"}) if err != nil { errMsg := fmt.Errorf("error while checking for user permission: %s", err.Error()) log.Error(errMsg.Error()) @@ -460,7 +460,7 @@ func (bh *BlobHandler) HandleAbortMultipartUpload(c echo.Context) error { log.Error(errMsg.Error()) return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) } - httpCode, err := bh.CheckUserS3Permission(c, bucket, key, []string{"write"}) + httpCode, err := bh.validateUserAccessToPrefix(c, 
bucket, key, []string{"write"}) if err != nil { errMsg := fmt.Errorf("error while checking for user permission: %s", err) log.Error(errMsg.Error()) diff --git a/documentation/general.md b/documentation/general.md new file mode 100644 index 0000000..5b869d1 --- /dev/null +++ b/documentation/general.md @@ -0,0 +1,23 @@ +# Kinds of Functions and Methods + +### Normal Functions + +Functions that return native Go errors and are used for utility logic. + +### Methods Defined on `S3Ctrl` + +Methods defined on `S3Ctrl` that return native Go errors. These methods encapsulate native AWS SDK Go functions to interact with S3. They can be used by both endpoint handlers and for package/library purposes. These methods can also encapsulate other `S3Ctrl` methods that perform specific utilities. For example, `RecursivelyDeleteObjects` uses `GetListWithCallBack` and `DeleteList`, which both utilize native AWS SDK Go functions to perform a common prefix (or folder) deletion. + +### Methods Defined on `BlobHandler` + +Methods defined on `BlobHandler` have three primary uses: + +- **Endpoint Handlers:** Methods that communicate with HTTP requests and always return a ConfigBerry `AppError`. These methods' names should always start with `Handle`. + +- **Utility Methods for Endpoint Handlers:** Private methods that encapsulate reusable logic for endpoints, such as `checkAndAdjustPrefix`. These methods also always return a ConfigBerry `AppError`. + +- **Utility Methods for Both Endpoint and External Use:** Methods that can be used by both endpoint handlers and external packages/libraries, returning native Go errors. (Note: Only one method currently falls under this category, `GetController`.) + +# deprecated functions/methods + +deprecated functions will be in /utils/deprecated.txt, the file should consist of file_name where the function/methods were deprecated from. 
diff --git a/main.go b/main.go index 7f3abf9..9c696a7 100644 --- a/main.go +++ b/main.go @@ -84,8 +84,8 @@ func main() { AllowOrigins: []string{"*"}, })) - e.GET("/ping_with_auth", auth.Authorize(bh.PingWithAuth, allUsers...)) - e.GET("/ping", bh.Ping) + e.GET("/ping_with_auth", auth.Authorize(bh.HandlePingWithAuth, allUsers...)) + e.GET("/ping", bh.HandlePing) // object content e.GET("/object/metadata", auth.Authorize(bh.HandleGetMetaData, allUsers...)) @@ -102,24 +102,23 @@ func main() { // prefix e.GET("/prefix/list", auth.Authorize(bh.HandleListByPrefix, allUsers...)) e.GET("/prefix/list_with_details", auth.Authorize(bh.HandleListByPrefixWithDetail, allUsers...)) - // e.GET("/prefix/download", auth.Authorize(bh.HandleGetPresignedURLMultiObj, allUsers...)) e.GET("/prefix/download/script", auth.Authorize(bh.HandleGenerateDownloadScript, allUsers...)) e.PUT("/prefix/move", auth.Authorize(bh.HandleMovePrefix, admin...)) e.DELETE("/prefix/delete", auth.Authorize(bh.HandleDeletePrefix, admin...)) e.GET("/prefix/size", auth.Authorize(bh.HandleGetSize, allUsers...)) - // universal e.DELETE("/delete_keys", auth.Authorize(bh.HandleDeleteObjectsByList, admin...)) - // multi-bucket e.GET("/list_buckets", auth.Authorize(bh.HandleListBuckets, allUsers...)) + //auth + e.GET("/check_user_permission", auth.Authorize(bh.HandleCheckS3UserPermission, writers...)) + + //deprecated endpoints (code can be found in /utils/deprecated.txt) + // e.GET("/prefix/download", auth.Authorize(bh.HandleGetPresignedURLMultiObj, allUsers...)) // multi-bucket -- not implemented // e.PUT("/object/cross-bucket/copy", auth.Authorize(bh., writers...)) // e.PUT("/prefix/cross-bucket/copy", auth.Authorize(bh., writers...)) - //auth - e.GET("/check_user_permission", auth.Authorize(bh.HandleCheckS3UserPermission, writers...)) - // Start server go func() { log.Info("server starting on port: ", os.Getenv("S3API_SERVICE_PORT")) diff --git a/utils/deprecated.txt b/utils/deprecated.txt new file mode 100644 
index 0000000..7a26978 --- /dev/null +++ b/utils/deprecated.txt @@ -0,0 +1,332 @@ +deprecated files that might be used in the future: + + +//function that will get the most recently uploaded file in a prefix +func (s3Ctrl *S3Controller) getMostRecentModTime(bucket, prefix string, permissions []string, fullAccess bool) (time.Time, error) { + // Initialize a time variable to store the most recent modification time + var mostRecent time.Time + + // Call GetList to retrieve the list of objects with the specified prefix + response, err := s3Ctrl.GetList(bucket, prefix, false) + if err != nil { + return time.Time{}, err + } + // Iterate over the returned objects to find the most recent modification time + for _, item := range response.Contents { + if item.LastModified != nil && item.LastModified.After(mostRecent) { + mostRecent = *item.LastModified + } + } + return mostRecent, nil +} + + +//buckets.go: +func (bh *BlobHandler) HandleCreateBucket(c echo.Context) error { + bucketName := c.QueryParam("name") + + if bucketName == "" { + err := fmt.Errorf("request must include a `name` parameter") + log.Info("HandleCreateBucket: " + err.Error()) + return c.JSON(http.StatusBadRequest, err.Error()) + } + + // Check if the bucket already exists + buckets, err := bh.listBuckets() + if err != nil { + log.Info("HandleCreateBucket: Error listing buckets:", err.Error()) + return c.JSON(http.StatusInternalServerError, err.Error()) + } + + for _, b := range buckets.Buckets { + if aws.StringValue(b.Name) == bucketName { + err := fmt.Errorf("bucket with the name `%s` already exists", bucketName) + log.Info("HandleCreateBucket: " + err.Error()) + return c.JSON(http.StatusConflict, err.Error()) + } + } + + // Create the S3 bucket + err = bh.createBucket(bucketName) + if err != nil { + log.Info("HandleCreateBucket: Error creating bucket:", err.Error()) + return c.JSON(http.StatusInternalServerError, err.Error()) + } + + log.Info("HandleCreateBucket: Successfully created bucket:", bucketName) 
+ return c.JSON(http.StatusOK, "Bucket created successfully") +} + +func (bh *BlobHandler) HandleDeleteBucket(c echo.Context) error { + bucketName := c.QueryParam("name") + + if bucketName == "" { + err := fmt.Errorf("request must include a `name` parameter") + log.Info("HandleDeleteBucket: " + err.Error()) + return c.JSON(http.StatusBadRequest, err.Error()) + } + + // Delete the S3 bucket + err := bh.deleteBucket(bucketName) + if err != nil { + log.Info("HandleDeleteBucket: Error deleting bucket:", err.Error()) + return c.JSON(http.StatusInternalServerError, err.Error()) + } + + log.Info("HandleDeleteBucket: Successfully deleted bucket:", bucketName) + return c.JSON(http.StatusOK, "Bucket deleted successfully") +} + +func (bh *BlobHandler) HandleGetBucketACL(c echo.Context) error { + bucketName := c.QueryParam("name") + + if bucketName == "" { + err := fmt.Errorf("request must include a `name` parameter") + log.Info("HandleGetBucketACL: " + err.Error()) + return c.JSON(http.StatusBadRequest, err.Error()) + } + + // Get the bucket ACL + acl, err := bh.getBucketACL(bucketName) + if err != nil { + log.Info("HandleGetBucketACL: Error getting bucket ACL:", err.Error()) + return c.JSON(http.StatusInternalServerError, err.Error()) + } + + log.Info("HandleGetBucketACL: Successfully retrieved ACL for bucket:", bucketName) + return c.JSON(http.StatusOK, acl) +} + +func (bh *BlobHandler) createBucket(bucketName string) error { + // Set up input parameters for the CreateBucket API + input := &s3.CreateBucketInput{ + Bucket: aws.String(bucketName), + } + + // Create the bucket + _, err := bh.S3Svc.CreateBucket(input) + if err != nil { + return err + } + + return nil +} + +// deleteBucket deletes the specified S3 bucket. 
+func (bh *BlobHandler) deleteBucket(bucketName string) error { + // Set up input parameters for the DeleteBucket API + input := &s3.DeleteBucketInput{ + Bucket: aws.String(bucketName), + } + + // Delete the bucket + _, err := bh.S3Svc.DeleteBucket(input) + if err != nil { + return err + } + + return nil +} + +// getBucketACL retrieves the ACL (Access Control List) for the specified bucket. +func (bh *BlobHandler) getBucketACL(bucketName string) (*s3.GetBucketAclOutput, error) { + // Set up input parameters for the GetBucketAcl API + input := &s3.GetBucketAclInput{ + Bucket: aws.String(bucketName), + } + + // Get the bucket ACL + result, err := bh.S3Svc.GetBucketAcl(input) + if err != nil { + return nil, err + } + + return result, nil + } + + +/presigned_url.go +func (bh *BlobHandler) HandleGetPresignedURLMultiObj(c echo.Context) error { + prefix := c.QueryParam("prefix") + if prefix == "" { + errMsg := fmt.Errorf("request must include a `prefix` parameter") + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + } + + bucket := c.QueryParam("bucket") + s3Ctrl, err := bh.GetController(bucket) + if err != nil { + errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + } + + if !strings.HasSuffix(prefix, "/") { + prefix = prefix + "/" + } + + response, err := s3Ctrl.GetList(bucket, prefix, false) + if err != nil { + errMsg := fmt.Errorf("error getting list: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + if *response.KeyCount == 0 { + errMsg := fmt.Errorf("the specified prefix %s does not exist in S3", prefix) + log.Error(errMsg.Error()) + return c.JSON(http.StatusNotFound, errMsg.Error()) + } + //check if size is below 5GB + var size, fileCount uint64 + err = GetListSize(response, &size, &fileCount) + if err != nil { + errMsg := fmt.Errorf("error 
getting size: %s", err.Error()) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + + limit := uint64(1024 * 1024 * 1024 * bh.Config.DefaultZipDownloadSizeLimit) + if size >= limit { + errMsg := fmt.Errorf("request entity is larger than %v GB, current prefix size is: %v GB", bh.Config.DefaultZipDownloadSizeLimit, float64(size)/(1024*1024*1024)) + log.Error(errMsg.Error()) + return c.JSON(http.StatusRequestEntityTooLarge, errMsg.Error()) + } + + filename := fmt.Sprintf("%s.%s", strings.TrimSuffix(prefix, "/"), "tar.gz") + outputFile := filepath.Join(bh.Config.DefaultTempPrefix, filename) + + // Check if the tar.gz file already exists in S3 + tarFileResponse, err := s3Ctrl.GetList(bucket, outputFile, false) + if err != nil { + errMsg := fmt.Errorf("error checking if tar.gz file exists in S3: %s", err) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + + if len(tarFileResponse.Contents) > 0 { + log.Debug("the prefix was once downloaded, checking if it is outdated") + // Tar.gz file exists, now compare modification dates + mostRecentModTime, err := s3Ctrl.getMostRecentModTime(bucket, prefix) + if err != nil { + errMsg := fmt.Errorf("error getting most recent modification time: %s", err) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + + if tarFileResponse.Contents[0].LastModified.After(mostRecentModTime) { + log.Debug("folder already downloaded and is current") + + // Existing tar.gz file is up-to-date, return pre-signed URL + href, err := s3Ctrl.GetDownloadPresignedURL(bucket, outputFile, bh.Config.DefaultDownloadPresignedUrlExpiration) + if err != nil { + errMsg := fmt.Errorf("error getting presigned: %s", err) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + return c.JSON(http.StatusOK, string(href)) + } + log.Debug("folder already downloaded but is outdated starting the zip 
process") + } + + err = s3Ctrl.tarS3Files(response, bucket, outputFile, prefix) + if err != nil { + errMsg := fmt.Errorf("error tarring S3 files: %s", err) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + + href, err := s3Ctrl.GetDownloadPresignedURL(bucket, outputFile, bh.Config.DefaultDownloadPresignedUrlExpiration) + if err != nil { + errMsg := fmt.Errorf("error getting presigned URL: %s", err) + log.Error(errMsg.Error()) + return c.JSON(http.StatusInternalServerError, errMsg.Error()) + } + + log.Info("successfully generated presigned URL for prefix:", prefix) + return c.JSON(http.StatusOK, string(href)) +} + +func (s3Ctrl *S3Controller) tarS3Files(r *s3.ListObjectsV2Output, bucket string, outputFile string, prefix string) (err error) { + uploader := s3manager.NewUploader(s3Ctrl.Sess) + pr, pw := io.Pipe() + + gzipWriter := gzip.NewWriter(pw) + tarWriter := tar.NewWriter(gzipWriter) + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + defer wg.Done() + log.Debug("start writing files to:", outputFile) + _, err := uploader.Upload(&s3manager.UploadInput{ + Bucket: aws.String(bucket), + Key: aws.String(outputFile), + Body: pr, + }) + if err != nil { + log.Errorf("failed to upload tar.gz file to S3: %s", err) + return + } + log.Debug("completed writing files to:", outputFile) + }() + + for _, item := range r.Contents { + filePath := filepath.Join(strings.TrimPrefix(aws.StringValue(item.Key), prefix)) + copyObj := aws.StringValue(item.Key) + log.Debugf("copying %s to %s", copyObj, outputFile) + + getResp, err := s3Ctrl.S3Svc.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(copyObj), + }) + if err != nil { + log.Errorf("failed to download file: %s, error: %s", copyObj, err) + return err + } + defer getResp.Body.Close() + + header := &tar.Header{ + Name: filePath, + Size: *getResp.ContentLength, + Mode: int64(0644), + } + + err = tarWriter.WriteHeader(header) + if err != nil { + 
log.Errorf("failed to write tar header for file: %s, error: %s", copyObj, err) + return err + } + + _, err = io.Copy(tarWriter, getResp.Body) + if err != nil { + log.Errorf("failed to write file content to tar for file: %s, error: %s", copyObj, err) + return err + } + log.Debugf("completed copying: %s", copyObj) + } + + err = tarWriter.Close() + if err != nil { + log.Error("tar close failure:", err) + return err + } + + err = gzipWriter.Close() + if err != nil { + log.Error("gzip close failure:", err) + return err + } + + err = pw.Close() + if err != nil { + log.Error("pipe writer close failure:", err) + return err + } + + wg.Wait() + + log.Debug("completed tar of file successfully") + return nil +} \ No newline at end of file From 29d0e8a7c464f204381701e371fd1920aa83219c Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Tue, 18 Jun 2024 23:52:27 -0400 Subject: [PATCH 09/22] use the error handeling package --- blobstore/blobhandler.go | 18 +- blobstore/blobstore.go | 38 ++- blobstore/buckets.go | 29 ++- blobstore/config.go | 2 +- blobstore/creds.go | 5 +- blobstore/delete.go | 202 ++++++++------- blobstore/fgac.go | 76 +++--- blobstore/list.go | 220 ++++++++--------- blobstore/metadata.go | 209 ++++++++-------- blobstore/move.go | 155 ++++++------ blobstore/object_content.go | 49 ++-- blobstore/ping.go | 12 +- blobstore/presigned_url.go | 98 ++++---- blobstore/upload.go | 436 +++++++++++++++++---------------- configberry/errors_handling.go | 18 +- documentation/general.md | 33 ++- 16 files changed, 830 insertions(+), 770 deletions(-) diff --git a/blobstore/blobhandler.go b/blobstore/blobhandler.go index 3d5cd47..f23fef5 100644 --- a/blobstore/blobhandler.go +++ b/blobstore/blobhandler.go @@ -1,6 +1,7 @@ package blobstore import ( + "errors" "fmt" "os" "strconv" @@ -69,12 +70,12 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { } s3Mock, err := strconv.Atoi(s3MockStr) if err != nil { - errMsg := fmt.Errorf("could not convert S3_MOCK env 
variable to integer: %v", err) + errMsg := fmt.Errorf("could not convert `S3_MOCK` env variable to integer: %v", err) return &config, errMsg } // Check if the S3_MOCK environment variable is set to "true" if s3Mock == 1 { - log.Info("Using MinIO") + log.Info("using MinIO") // Load MinIO credentials from environment mc := newMinioConfig() @@ -99,12 +100,12 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { // Using AWS S3 // Load AWS credentials from the provided .env.json file - log.Debug("looking for .env.json") + log.Debug("looking for `.env.json`") awsConfig, err := newAWSConfig(envJson) // Check if loading AWS credentials from .env.json failed if err != nil { - errMsg := fmt.Errorf("env.json credentials extraction failed, please check `.env.json.example` for reference on formatting, %s", err.Error()) + errMsg := fmt.Errorf("`env.json` credentials extraction failed, please check `.env.json.example` for reference on formatting, %s", err.Error()) return &config, errMsg } @@ -123,7 +124,6 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { s3SVC, sess, err := ac.aWSSessionManager() if err != nil { errMsg := fmt.Errorf("failed to create AWS session: %s", err.Error()) - log.Error(errMsg.Error()) return &config, errMsg } @@ -162,7 +162,7 @@ func NewBlobHandler(envJson string, authLvl int) (*BlobHandler, error) { for bucket := range allowedBucketsMap { missingBuckets = append(missingBuckets, bucket) } - errMsg := fmt.Errorf("some buckets in the allow list were not found: %v", missingBuckets) + errMsg := fmt.Errorf("some buckets in the `bucket_allow_list` were not found: %v", missingBuckets) return &config, errMsg } @@ -189,7 +189,7 @@ func (s3Ctrl *S3Controller) getBucketRegion(bucketName string) (string, error) { func (bh *BlobHandler) GetController(bucket string) (*S3Controller, error) { if bucket == "" { - err := fmt.Errorf("parameter `bucket` is required") + err := errors.New("parameter `bucket` is required") return 
nil, err } var s3Ctrl S3Controller @@ -207,7 +207,7 @@ func (bh *BlobHandler) GetController(bucket string) (*S3Controller, error) { // Check if the region is the same. If not, update the session and client currentRegion := *s3Ctrl.Sess.Config.Region if currentRegion != region { - log.Debugf("current region: %s region of bucket: %s, attempting to create a new controller", currentRegion, region) + log.Debugf("current region: %s, region of bucket: %s, attempting to create a new controller", currentRegion, region) newSession, err := session.NewSession(&aws.Config{ Region: aws.String(region), @@ -228,6 +228,6 @@ func (bh *BlobHandler) GetController(bucket string) (*S3Controller, error) { } } } - errMsg := fmt.Errorf("bucket '%s' not found", bucket) + errMsg := fmt.Errorf("`bucket` '%s' not found", bucket) return &s3Ctrl, errMsg } diff --git a/blobstore/blobstore.go b/blobstore/blobstore.go index f675692..2aee3b3 100644 --- a/blobstore/blobstore.go +++ b/blobstore/blobstore.go @@ -2,11 +2,13 @@ package blobstore import ( "encoding/json" + "errors" "fmt" - "net/http" "os" "strings" + "github.com/Dewberry/s3api/configberry" + "github.com/aws/aws-sdk-go/service/s3" log "github.com/sirupsen/logrus" ) @@ -76,46 +78,48 @@ func isPermittedPrefix(bucket, prefix string, permissions []string) bool { // checkAndAdjustPrefix checks if the prefix is an object and adjusts the prefix accordingly. // Returns the adjusted prefix, an error message (if any), and the HTTP status code. 
-func checkAndAdjustPrefix(s3Ctrl *S3Controller, bucket, prefix string) (string, string, int) { +// Methods defined on `S3Ctrl` that return return a ConfigBerry `AppError` +func (s3Ctrl *S3Controller) checkAndAdjustPrefix(bucket, prefix string) (string, *configberry.AppError) { // As of 6/12/24, unsure why ./ is included here, may be needed for an edge case, but could also cause problems if prefix != "" && prefix != "./" && prefix != "/" { isObject, err := s3Ctrl.KeyExists(bucket, prefix) if err != nil { - return "", fmt.Sprintf("error checking if object exists: %s", err.Error()), http.StatusInternalServerError + fmt.Println(err) + return "", configberry.HandleAWSError(err, "error checking if object exists") } if isObject { objMeta, err := s3Ctrl.GetMetaData(bucket, prefix) if err != nil { - return "", fmt.Sprintf("error checking for object's metadata: %s", err.Error()), http.StatusInternalServerError + return "", configberry.HandleAWSError(err, "error checking for object's metadata") } // This is because AWS considers empty prefixes with a .keep as an object, so we ignore and log if *objMeta.ContentLength == 0 { log.Infof("detected a zero byte directory marker within prefix: %s", prefix) } else { - return "", fmt.Sprintf("`%s` is an object, not a prefix. 
Please see options for keys or pass a prefix", prefix), http.StatusTeapot + return "", configberry.NewAppError(configberry.TeapotError, fmt.Sprintf("`%s` is an object, not a prefix", prefix), nil) } } prefix = strings.Trim(prefix, "/") + "/" } - return prefix, "", http.StatusOK + return prefix, nil } func validateEnvJSON(filePath string) error { // Read the contents of the .env.json file jsonData, err := os.ReadFile(filePath) if err != nil { - return fmt.Errorf("error reading .env.json: %s", err.Error()) + return fmt.Errorf("error reading `.env.json`: %s", err.Error()) } // Parse the JSON data into the AWSConfig struct var awsConfig AWSConfig if err := json.Unmarshal(jsonData, &awsConfig); err != nil { - return fmt.Errorf("error parsing .env.json: %s", err.Error()) + return fmt.Errorf("error parsing `.env.json`: %s", err.Error()) } // Check if there is at least one account defined if len(awsConfig.Accounts) == 0 { - return fmt.Errorf("no AWS accounts defined in .env.json") + return errors.New("no AWS accounts defined in `.env.json`") } // Check if each account has the required fields @@ -166,3 +170,19 @@ func newMinioConfig() MinioConfig { mc.SecretAccessKey = os.Getenv("MINIO_SECRET_ACCESS_KEY") return mc } + +func GetListSize(page *s3.ListObjectsV2Output, totalSize *uint64, fileCount *uint64) error { + if page == nil { + return errors.New("input page is nil") + } + + for _, file := range page.Contents { + if file.Size == nil { + return errors.New("file size is nil") + } + *totalSize += uint64(*file.Size) + *fileCount++ + } + + return nil +} diff --git a/blobstore/buckets.go b/blobstore/buckets.go index 17745b3..3082aa4 100644 --- a/blobstore/buckets.go +++ b/blobstore/buckets.go @@ -3,10 +3,9 @@ package blobstore // Not implemented import ( - "fmt" - "net/http" "sort" + "github.com/Dewberry/s3api/configberry" "github.com/aws/aws-sdk-go/service/s3" "github.com/labstack/echo/v4" log "github.com/sirupsen/logrus" @@ -28,8 +27,7 @@ func (s3Ctrl *S3Controller) 
ListBuckets() (*s3.ListBucketsOutput, error) { // Retrieve the list of buckets result, err = s3Ctrl.S3Svc.ListBuckets(input) if err != nil { - errMsg := fmt.Errorf("failed to call ListBuckets: %s", err.Error()) - return nil, errMsg + return nil, err } return result, nil } @@ -41,18 +39,19 @@ func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { defer bh.Mu.Unlock() // Check user's overall read access level - _, fullAccess, err := bh.getUserS3ReadListPermission(c, "") - if err != nil { - return c.JSON(http.StatusInternalServerError, fmt.Errorf("error fetching user permissions: %s", err.Error())) + _, fullAccess, appErr := bh.getUserS3ReadListPermission(c, "") + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } for _, controller := range bh.S3Controllers { if bh.AllowAllBuckets { result, err := controller.ListBuckets() if err != nil { - errMsg := fmt.Errorf("error returning list of buckets, error: %s", err) - log.Error(errMsg) - return c.JSON(http.StatusInternalServerError, errMsg) + appErr := configberry.HandleAWSError(err, "error retunring list of buckets") + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } var mostRecentBucketList []string for _, b := range result.Buckets { @@ -67,9 +66,10 @@ func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { for i, bucket := range controller.Buckets { canRead := fullAccess if !fullAccess { - permissions, _, err := bh.getUserS3ReadListPermission(c, bucket) - if err != nil { - return c.JSON(http.StatusInternalServerError, fmt.Errorf("error fetching user permissions: %s", err.Error())) + permissions, _, appErr := bh.getUserS3ReadListPermission(c, bucket) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } canRead = len(permissions) > 0 } @@ -90,6 +90,5 @@ func (bh *BlobHandler) 
HandleListBuckets(c echo.Context) error { }) log.Info("Successfully retrieved list of buckets") - - return c.JSON(http.StatusOK, allBuckets) + return configberry.HandleSuccessfulResponse(c, allBuckets) } diff --git a/blobstore/config.go b/blobstore/config.go index a97cb51..9574eb1 100644 --- a/blobstore/config.go +++ b/blobstore/config.go @@ -46,7 +46,7 @@ func getIntEnvOrDefault(envKey string, defaultValue int) int { } value, err := strconv.Atoi(valueStr) if err != nil { - log.Debugf("Error parsing %s, defaulting to %v: %v", envKey, defaultValue, err) + log.Errorf("error parsing %s, defaulting to %v: %v", envKey, defaultValue, err) return defaultValue } return value diff --git a/blobstore/creds.go b/blobstore/creds.go index f0fd50c..0ecade9 100644 --- a/blobstore/creds.go +++ b/blobstore/creds.go @@ -2,7 +2,6 @@ package blobstore import ( "fmt" - "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" @@ -51,7 +50,7 @@ func (mc MinioConfig) validateMinioConfig() error { } if len(missingFields) > 0 { - return fmt.Errorf("missing fields: %s", strings.Join(missingFields, ", ")) + return fmt.Errorf("missing fields: %+q", missingFields) } return nil } @@ -79,7 +78,7 @@ func (mc MinioConfig) minIOSessionManager() (*s3.S3, *session.Session, error) { Bucket: aws.String(mc.Bucket), }) if err != nil { - log.Errorf("Error creating bucket: %s", err.Error()) + log.Errorf("error creating bucket: %s", err.Error()) return nil, nil, nil } log.Info("Bucket created successfully") diff --git a/blobstore/delete.go b/blobstore/delete.go index 47fafe1..4373070 100644 --- a/blobstore/delete.go +++ b/blobstore/delete.go @@ -2,11 +2,12 @@ package blobstore import ( "fmt" - "net/http" "strings" + "github.com/Dewberry/s3api/configberry" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" + "github.com/go-playground/validator" "github.com/labstack/echo/v4" log "github.com/sirupsen/logrus" ) @@ -30,26 +31,33 @@ func (s3Ctrl *S3Controller) 
DeleteList(page *s3.ListObjectsV2Output, bucket stri }, }) if err != nil { - return fmt.Errorf("error deleting objects: %v", err) + return err } return nil } -func (s3Ctrl *S3Controller) RecursivelyDeleteObjects(bucket, prefix string) error { - var objectsFound bool - err := s3Ctrl.GetListWithCallBack(bucket, prefix, false, func(page *s3.ListObjectsV2Output) error { - if len(page.Contents) > 0 { - objectsFound = true +func (s3Ctrl *S3Controller) DeleteKeys(bucket string, key []string) error { + objects := make([]*s3.ObjectIdentifier, 0, len(key)) + for _, p := range key { + s3Path := strings.TrimPrefix(p, "/") + object := &s3.ObjectIdentifier{ + Key: aws.String(s3Path), } - return s3Ctrl.DeleteList(page, bucket) - }) - if err != nil { - return fmt.Errorf("error processing objects for deletion: %v", err) + objects = append(objects, object) } - if !objectsFound { - return fmt.Errorf("prefix not found") + input := &s3.DeleteObjectsInput{ + Bucket: aws.String(bucket), + Delete: &s3.Delete{ + Objects: objects, + Quiet: aws.Bool(false), + }, + } + + _, err := s3Ctrl.S3Svc.DeleteObjects(input) + if err != nil { + return err } return nil } @@ -61,29 +69,30 @@ func (bh *BlobHandler) HandleDeleteObject(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("parameter `bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } key := c.QueryParam("key") if key == "" { - errMsg := fmt.Errorf("parameter `key` is required") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is 
required", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } // If the key is not a folder, proceed with deleting a single object keyExist, err := s3Ctrl.KeyExists(bucket, key) if err != nil { - errMsg := fmt.Errorf("error checking if object exists: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.HandleAWSError(err, "error checking if object exists") + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + if !keyExist { - errMsg := fmt.Errorf("object %s not found", key) - log.Error(errMsg.Error()) - return c.JSON(http.StatusNotFound, errMsg.Error()) + appErr := configberry.NewAppError(configberry.NotFoundError, fmt.Sprintf("object %s not found", key), nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } deleteInput := &s3.DeleteObjectInput{ @@ -93,97 +102,112 @@ func (bh *BlobHandler) HandleDeleteObject(c echo.Context) error { _, err = s3Ctrl.S3Svc.DeleteObject(deleteInput) if err != nil { - errMsg := fmt.Errorf("error deleting object. %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, fmt.Sprintf("error deleting object. 
%s", err.Error()), nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } log.Infof("successfully deleted file with key: %s", key) - return c.JSON(http.StatusOK, fmt.Sprintf("Successfully deleted object: %s", key)) + return configberry.HandleSuccessfulResponse(c, fmt.Sprintf("Successfully deleted object: %s", key)) } func (bh *BlobHandler) HandleDeletePrefix(c echo.Context) error { + const maxRetries = 3 + bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("parameter `bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + prefix := c.QueryParam("prefix") if prefix == "" { - errMsg := fmt.Errorf("parameter `prefix` is required") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "parameter `prefix` is required", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + if !strings.HasSuffix(prefix, "/") { prefix = prefix + "/" } - err = s3Ctrl.RecursivelyDeleteObjects(bucket, prefix) - if err != nil { - if strings.Contains(err.Error(), "prefix not found") { - errMsg := fmt.Errorf("no objects found with prefix: %s", prefix) - log.Error(errMsg.Error()) - return c.JSON(http.StatusNotFound, errMsg.Error()) + + var objectsFound bool + + err = s3Ctrl.GetListWithCallBack(bucket, prefix, false, func(page *s3.ListObjectsV2Output) error { + if len(page.Contents) > 0 { + objectsFound = true } - errMsg := fmt.Errorf("error deleting objects: %s", err.Error()) - log.Error(errMsg.Error()) - 
return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - log.Info("Successfully deleted prefix and its contents for prefix:", prefix) - return c.JSON(http.StatusOK, "Successfully deleted prefix and its contents") -} -func (s3Ctrl *S3Controller) DeleteKeys(bucket string, key []string) error { - objects := make([]*s3.ObjectIdentifier, 0, len(key)) - for _, p := range key { - s3Path := strings.TrimPrefix(p, "/") - object := &s3.ObjectIdentifier{ - Key: aws.String(s3Path), + if len(page.Contents) == 0 { + return nil // No objects to delete in this page } - objects = append(objects, object) - } - input := &s3.DeleteObjectsInput{ - Bucket: aws.String(bucket), - Delete: &s3.Delete{ - Objects: objects, - Quiet: aws.Bool(false), - }, - } + // Perform the delete operation for the current page + for retries := 0; retries < maxRetries; retries++ { + deleteErr := s3Ctrl.DeleteList(page, bucket) + if deleteErr == nil { + // Successfully deleted, break out of the retry loop + break + } + if retries == maxRetries-1 { + // Log the error and return if we've reached the max retries + log.Errorf("failed to delete objects in page after %d retries: %v", maxRetries, deleteErr) + return deleteErr + } + // Log retry attempt + log.Warnf("retrying delete for page, attempt %d/%d", retries+1, maxRetries) + } + + return nil + }) - _, err := s3Ctrl.S3Svc.DeleteObjects(input) if err != nil { - return fmt.Errorf("error deleting objects: %s", err.Error()) + appErr := configberry.HandleAWSError(err, fmt.Sprintf("failed to delete objects with prefix %s", prefix)) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - return nil + + if !objectsFound { + appErr := configberry.NewAppError(configberry.NotFoundError, fmt.Sprintf("prefix %s not found", prefix), nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + + log.Infof("Successfully deleted prefix and its contents 
for prefix: %s", prefix) + return configberry.HandleSuccessfulResponse(c, fmt.Sprintf("Successfully deleted prefix and its contents for prefix: %s", prefix)) } func (bh *BlobHandler) HandleDeleteObjectsByList(c echo.Context) error { + // Define the validator + validate := validator.New() + // Parse the list of objects from the request body type DeleteRequest struct { - Keys []string `json:"keys"` + Keys []string `json:"keys" validate:"required,min=1,dive,required"` } var deleteRequest DeleteRequest if err := c.Bind(&deleteRequest); err != nil { - errMsg := fmt.Errorf("error parsing request body: %s" + err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusBadRequest, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "error parsing request body", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - // Ensure there are keys to delete - if len(deleteRequest.Keys) == 0 { - errMsg := fmt.Errorf("no keys to delete. 
Please provide 'keys' in the request body") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + // Validate the request + if err := validate.Struct(deleteRequest); err != nil { + appErr := configberry.HandleStructValidationErrors(err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } // Prepare the keys for deletion @@ -195,14 +219,14 @@ func (bh *BlobHandler) HandleDeleteObjectsByList(c echo.Context) error { // Check if the key exists before appending it to the keys list keyExists, err := s3Ctrl.KeyExists(bucket, s3Path) if err != nil { - errMsg := fmt.Errorf("error checking if object exists. 
%s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg) + appErr := configberry.HandleAWSError(err, "error checking if object exists") + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } if !keyExists { - errMsg := fmt.Errorf("object %s not found", s3Path) - log.Error(errMsg.Error()) - return c.JSON(http.StatusNotFound, errMsg.Error()) + appErr := configberry.NewAppError(configberry.NotFoundError, fmt.Sprintf("object %s not found", s3Path), nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } keys = append(keys, *key) @@ -211,11 +235,11 @@ func (bh *BlobHandler) HandleDeleteObjectsByList(c echo.Context) error { // Delete the objects using the deleteKeys function err = s3Ctrl.DeleteKeys(bucket, keys) if err != nil { - errMsg := fmt.Errorf("error deleting objects. %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg) + appErr := configberry.NewAppError(configberry.InternalServerError, "error deleting objects", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - log.Info("Successfully deleted objects:", deleteRequest.Keys) - return c.JSON(http.StatusOK, "Successfully deleted objects") + log.Infof("Successfully deleted objects: %v", deleteRequest.Keys) + return configberry.HandleSuccessfulResponse(c, "Successfully deleted objects") } diff --git a/blobstore/fgac.go b/blobstore/fgac.go index 0b4db7d..7fc74fc 100644 --- a/blobstore/fgac.go +++ b/blobstore/fgac.go @@ -2,11 +2,11 @@ package blobstore import ( "fmt" - "net/http" "os" "strings" "github.com/Dewberry/s3api/auth" + "github.com/Dewberry/s3api/configberry" "github.com/Dewberry/s3api/utils" "github.com/labstack/echo/v4" log "github.com/sirupsen/logrus" @@ -14,35 +14,29 @@ import ( //Utility Methods for Endpoint Handlers -func (bh 
*BlobHandler) getS3ReadPermissions(c echo.Context, bucket string) ([]string, bool, int, error) { - permissions, fullAccess, err := bh.getUserS3ReadListPermission(c, bucket) - if err != nil { - //TEMP solution before error library is implimented and string check ups become redundant - httpStatus := http.StatusInternalServerError - if strings.Contains(err.Error(), "this endpoint requires authentication information that is unavailable when authorization is disabled.") { - httpStatus = http.StatusForbidden - } - return nil, false, httpStatus, fmt.Errorf("error fetching user permissions: %s", err.Error()) +func (bh *BlobHandler) getS3ReadPermissions(c echo.Context, bucket string) ([]string, bool, *configberry.AppError) { + permissions, fullAccess, appError := bh.getUserS3ReadListPermission(c, bucket) + if appError != nil { + return nil, false, appError } if !fullAccess && len(permissions) == 0 { - return nil, false, http.StatusForbidden, fmt.Errorf("user does not have permission to read the %s bucket", bucket) + return nil, false, configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the %s bucket", bucket), nil) } - return permissions, fullAccess, http.StatusOK, nil + return permissions, fullAccess, nil } -func (bh *BlobHandler) getUserS3ReadListPermission(c echo.Context, bucket string) ([]string, bool, error) { +func (bh *BlobHandler) getUserS3ReadListPermission(c echo.Context, bucket string) ([]string, bool, *configberry.AppError) { permissions := make([]string, 0) if bh.Config.AuthLevel > 0 { initAuth := os.Getenv("INIT_AUTH") if initAuth == "0" { - errMsg := fmt.Errorf("this endpoint requires authentication information that is unavailable when authorization is disabled. 
Please enable authorization to use this functionality") - return permissions, false, errMsg + return permissions, false, configberry.NewAppError(configberry.ForbiddenError, "this endpoint requires authentication information that is unavailable when authorization is disabled. Please enable authorization to use this functionality", nil) } fullAccess := false claims, ok := c.Get("claims").(*auth.Claims) if !ok { - return permissions, fullAccess, fmt.Errorf("could not get claims from request context") + return permissions, fullAccess, configberry.NewAppError(configberry.InternalServerError, "could not get claims from request context", nil) } roles := claims.RealmAccess["roles"] @@ -59,7 +53,7 @@ func (bh *BlobHandler) getUserS3ReadListPermission(c echo.Context, bucket string ue := claims.Email permissions, err := bh.DB.GetUserAccessiblePrefixes(ue, bucket, []string{"read", "write"}) if err != nil { - return permissions, fullAccess, err + return permissions, fullAccess, configberry.HandleSQLError(err, "error getting common prefix that the user can read and write to") } return permissions, fullAccess, nil } @@ -67,16 +61,15 @@ func (bh *BlobHandler) getUserS3ReadListPermission(c echo.Context, bucket string return permissions, true, nil } -func (bh *BlobHandler) validateUserAccessToPrefix(c echo.Context, bucket, prefix string, permissions []string) (int, error) { +func (bh *BlobHandler) validateUserAccessToPrefix(c echo.Context, bucket, prefix string, permissions []string) *configberry.AppError { if bh.Config.AuthLevel > 0 { initAuth := os.Getenv("INIT_AUTH") if initAuth == "0" { - errMsg := fmt.Errorf("this requires authentication information that is unavailable when authorization is disabled. Please enable authorization to use this functionality") - return http.StatusForbidden, errMsg + return configberry.NewAppError(configberry.ForbiddenError, "this endpoint requires authentication information that is unavailable when authorization is disabled. 
Please enable authorization to use this functionality", nil) } claims, ok := c.Get("claims").(*auth.Claims) if !ok { - return http.StatusInternalServerError, fmt.Errorf("could not get claims from request context") + return configberry.NewAppError(configberry.InternalServerError, "could not get claims from request context", nil) } roles := claims.RealmAccess["roles"] ue := claims.Email @@ -91,40 +84,43 @@ func (bh *BlobHandler) validateUserAccessToPrefix(c echo.Context, bucket, prefix // We assume if someone is limited_writer, they should never be admin or super_writer if isLimitedWriter { if !bh.DB.CheckUserPermission(ue, bucket, prefix, permissions) { - return http.StatusForbidden, fmt.Errorf("forbidden") + return configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have %+q access to %s", permissions, prefix), nil) } } } - return 0, nil + return nil } func (bh *BlobHandler) HandleCheckS3UserPermission(c echo.Context) error { if bh.Config.AuthLevel == 0 { log.Info("Checked user permissions successfully") - return c.JSON(http.StatusOK, true) + return configberry.HandleSuccessfulResponse(c, true) } initAuth := os.Getenv("INIT_AUTH") if initAuth == "0" { - errMsg := fmt.Errorf("this endpoint requires authentication information that is unavailable when authorization is disabled. Please enable authorization to use this functionality") - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ForbiddenError, "this endpoint requires authentication information that is unavailable when authorization is disabled. 
Please enable authorization to use this functionality", nil) + log.Error(configberry.LogErrorFormatter(appErr, false)) + return configberry.HandleErrorResponse(c, appErr) + } + + params := map[string]string{ + "prefix": c.QueryParam("prefix"), + "bucket": c.QueryParam("bucket"), + "operation": c.QueryParam("operation"), + } + if appErr := configberry.CheckRequiredParams(params); appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, false)) + return configberry.HandleErrorResponse(c, appErr) } - prefix := c.QueryParam("prefix") - bucket := c.QueryParam("bucket") - operation := c.QueryParam("operation") claims, ok := c.Get("claims").(*auth.Claims) if !ok { - errMsg := fmt.Errorf("could not get claims from request context") - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "could not get claims from request context", nil) + log.Error(configberry.LogErrorFormatter(appErr, false)) + return configberry.HandleErrorResponse(c, appErr) } userEmail := claims.Email - if operation == "" || prefix == "" || bucket == "" { - errMsg := fmt.Errorf("`prefix`, `operation` and 'bucket are required params") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } - isAllowed := bh.DB.CheckUserPermission(userEmail, bucket, prefix, []string{operation}) + + isAllowed := bh.DB.CheckUserPermission(userEmail, params["bucket"], params["prefix"], []string{params["operation"]}) log.Info("Checked user permissions successfully") - return c.JSON(http.StatusOK, isAllowed) + return configberry.HandleSuccessfulResponse(c, isAllowed) } diff --git a/blobstore/list.go b/blobstore/list.go index 40e3a22..3e3d2b1 100644 --- a/blobstore/list.go +++ b/blobstore/list.go @@ -1,13 +1,12 @@ package blobstore import ( - "fmt" - "net/http" "path/filepath" "strconv" "strings" "time" + "github.com/Dewberry/s3api/configberry" "github.com/aws/aws-sdk-go/aws" 
"github.com/aws/aws-sdk-go/service/s3" "github.com/labstack/echo/v4" @@ -27,22 +26,87 @@ type ListResult struct { ModifiedBy string `json:"modified_by"` } +// GetList retrieves a list of objects in the specified S3 bucket with the given prefix. +// If delimiter is set to true, it will search for any objects within the prefix provided. +// If no objects are found, it will return null even if there were prefixes within the user-provided prefix. +// If delimiter is set to false, it will look for all prefixes that start with the user-provided prefix. +func (s3Ctrl *S3Controller) GetList(bucket, prefix string, delimiter bool) (*s3.ListObjectsV2Output, error) { + // Set up input parameters for the ListObjectsV2 API + input := &s3.ListObjectsV2Input{ + Bucket: aws.String(bucket), + Prefix: aws.String(prefix), + MaxKeys: aws.Int64(1000), // Set the desired maximum keys per request + } + if delimiter { + input.SetDelimiter("/") + } + // Retrieve the list of objects in the bucket with the specified prefix + var response *s3.ListObjectsV2Output + err := s3Ctrl.S3Svc.ListObjectsV2Pages(input, func(page *s3.ListObjectsV2Output, _ bool) bool { + if response == nil { + response = page + } else { + response.Contents = append(response.Contents, page.Contents...) + } + + // Check if there are more pages to retrieve + if *page.IsTruncated { + // Set the continuation token for the next request + input.ContinuationToken = page.NextContinuationToken + return true // Continue to the next page + } + + return false // Stop pagination + }) + if err != nil { + return nil, err + } + + return response, nil +} + +// GetListWithCallBack is the same as GetList, except instead of returning the entire list at once, it allows processing page by page. +// This method is safer than GetList as it avoids memory overload for large datasets by processing data on the go. 
+func (s3Ctrl *S3Controller) GetListWithCallBack(bucket, prefix string, delimiter bool, processPage func(*s3.ListObjectsV2Output) error) error { + input := &s3.ListObjectsV2Input{ + Bucket: aws.String(bucket), + Prefix: aws.String(prefix), + MaxKeys: aws.Int64(1000), // Adjust the MaxKeys as needed + } + + if delimiter { + input.SetDelimiter("/") + } + + var lastError error // Variable to capture the last error + + // Iterate over the pages of results + err := s3Ctrl.S3Svc.ListObjectsV2Pages(input, func(page *s3.ListObjectsV2Output, _ bool) bool { + lastError = processPage(page) + return lastError == nil && *page.IsTruncated // Continue if no error and more pages are available + }) + + if lastError != nil { + return lastError // Return the last error encountered in the processPage function + } + return err // Return any errors encountered in the pagination process +} + // HandleListByPrefix handles the API endpoint for listing objects by prefix in an S3 bucket. func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { prefix := c.QueryParam("prefix") - bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - adjustedPrefix, errMsg, statusCode := checkAndAdjustPrefix(s3Ctrl, bucket, prefix) - if errMsg != "" { - log.Error(errMsg) - return c.JSON(statusCode, errMsg) + adjustedPrefix, appErr := s3Ctrl.checkAndAdjustPrefix(bucket, prefix) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } prefix = adjustedPrefix @@ -51,9 +115,9 @@ func (bh *BlobHandler) 
HandleListByPrefix(c echo.Context) error { if delimiterParam != "" { delimiter, err = strconv.ParseBool(delimiterParam) if err != nil { - errMsg := fmt.Errorf("error parsing `delimiter` param: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "error parsing `delimiter` param", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } } @@ -62,56 +126,57 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { prefix = prefix + "/" } - var result []string - permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) - if err != nil { - log.Error(err.Error()) - return c.JSON(statusCode, err.Error()) + var results []string + permissions, fullAccess, appErr := bh.getS3ReadPermissions(c, bucket) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + processPage := func(page *s3.ListObjectsV2Output) error { for _, cp := range page.CommonPrefixes { // Handle directories (common prefixes) if fullAccess || isPermittedPrefix(bucket, *cp.Prefix, permissions) { - result = append(result, aws.StringValue(cp.Prefix)) + results = append(results, aws.StringValue(cp.Prefix)) } } for _, object := range page.Contents { // Handle files if fullAccess || isPermittedPrefix(bucket, *object.Key, permissions) { - result = append(result, aws.StringValue(object.Key)) + results = append(results, aws.StringValue(object.Key)) } } return nil } + err = s3Ctrl.GetListWithCallBack(bucket, prefix, delimiter, processPage) if err != nil { - errMsg := fmt.Errorf("error processing objects: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "error processing objects", err) + 
log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } log.Info("Successfully retrieved list by prefix:", prefix) - return c.JSON(http.StatusOK, result) + return configberry.HandleSuccessfulResponse(c, results) } // HandleListByPrefixWithDetail retrieves a detailed list of objects in the specified S3 bucket with the given prefix. func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { prefix := c.QueryParam("prefix") - bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - adjustedPrefix, errMsg, statusCode := checkAndAdjustPrefix(s3Ctrl, bucket, prefix) - if errMsg != "" { - log.Error(errMsg) - return c.JSON(statusCode, errMsg) + adjustedPrefix, appErr := s3Ctrl.checkAndAdjustPrefix(bucket, prefix) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } prefix = adjustedPrefix @@ -120,9 +185,9 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { if delimiterParam != "" { delimiter, err = strconv.ParseBool(delimiterParam) if err != nil { - errMsg := fmt.Errorf("error parsing `delimiter` param: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "error parsing `delimiter` param", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } } @@ -133,11 +198,12 @@ func (bh *BlobHandler) 
HandleListByPrefixWithDetail(c echo.Context) error { var results []ListResult var count int - permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) - if err != nil { - log.Error(err.Error()) - return c.JSON(statusCode, err.Error()) + permissions, fullAccess, appErr := bh.getS3ReadPermissions(c, bucket) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + processPage := func(page *s3.ListObjectsV2Output) error { for _, cp := range page.CommonPrefixes { // Handle directories (common prefixes) @@ -179,77 +245,11 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { } err = s3Ctrl.GetListWithCallBack(bucket, prefix, delimiter, processPage) if err != nil { - errMsg := fmt.Errorf("error processing objects: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - - log.Info("Successfully retrieved detailed list by prefix:", prefix) - return c.JSON(http.StatusOK, results) -} - -// GetList retrieves a list of objects in the specified S3 bucket with the given prefix. -// If delimiter is set to true, it will search for any objects within the prefix provided. -// If no objects are found, it will return null even if there were prefixes within the user-provided prefix. -// If delimiter is set to false, it will look for all prefixes that start with the user-provided prefix. 
-func (s3Ctrl *S3Controller) GetList(bucket, prefix string, delimiter bool) (*s3.ListObjectsV2Output, error) { - // Set up input parameters for the ListObjectsV2 API - input := &s3.ListObjectsV2Input{ - Bucket: aws.String(bucket), - Prefix: aws.String(prefix), - MaxKeys: aws.Int64(1000), // Set the desired maximum keys per request - } - if delimiter { - input.SetDelimiter("/") - } - // Retrieve the list of objects in the bucket with the specified prefix - var response *s3.ListObjectsV2Output - err := s3Ctrl.S3Svc.ListObjectsV2Pages(input, func(page *s3.ListObjectsV2Output, _ bool) bool { - if response == nil { - response = page - } else { - response.Contents = append(response.Contents, page.Contents...) - } - - // Check if there are more pages to retrieve - if *page.IsTruncated { - // Set the continuation token for the next request - input.ContinuationToken = page.NextContinuationToken - return true // Continue to the next page - } - - return false // Stop pagination - }) - if err != nil { - return nil, err - } - - return response, nil -} - -// GetListWithCallBack is the same as GetList, except instead of returning the entire list at once, it allows processing page by page. -// This method is safer than GetList as it avoids memory overload for large datasets by processing data on the go. 
-func (s3Ctrl *S3Controller) GetListWithCallBack(bucket, prefix string, delimiter bool, processPage func(*s3.ListObjectsV2Output) error) error { - input := &s3.ListObjectsV2Input{ - Bucket: aws.String(bucket), - Prefix: aws.String(prefix), - MaxKeys: aws.Int64(1000), // Adjust the MaxKeys as needed + appErr := configberry.NewAppError(configberry.InternalServerError, "error processing objects", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - if delimiter { - input.SetDelimiter("/") - } - - var lastError error // Variable to capture the last error - - // Iterate over the pages of results - err := s3Ctrl.S3Svc.ListObjectsV2Pages(input, func(page *s3.ListObjectsV2Output, _ bool) bool { - lastError = processPage(page) - return lastError == nil && *page.IsTruncated // Continue if no error and more pages are available - }) - - if lastError != nil { - return lastError // Return the last error encountered in the processPage function - } - return err // Return any errors encountered in the pagination process + log.Info("Successfully retrieved list by prefix:", prefix) + return configberry.HandleSuccessfulResponse(c, results) } diff --git a/blobstore/metadata.go b/blobstore/metadata.go index d2b647c..c6543f9 100644 --- a/blobstore/metadata.go +++ b/blobstore/metadata.go @@ -2,8 +2,8 @@ package blobstore import ( "fmt" - "net/http" + "github.com/Dewberry/s3api/configberry" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/s3" @@ -11,60 +11,81 @@ import ( log "github.com/sirupsen/logrus" ) -func GetListSize(page *s3.ListObjectsV2Output, totalSize *uint64, fileCount *uint64) error { - if page == nil { - return fmt.Errorf("input page is nil") +func (s3Ctrl *S3Controller) GetMetaData(bucket, key string) (*s3.HeadObjectOutput, error) { + // Set up the input parameters for the list objects operation + input := &s3.HeadObjectInput{ + Bucket: 
aws.String(bucket), + Key: aws.String(key), } - for _, file := range page.Contents { - if file.Size == nil { - return fmt.Errorf("file size is nil") - } - *totalSize += uint64(*file.Size) - *fileCount++ + result, err := s3Ctrl.S3Svc.HeadObject(input) + if err != nil { + return nil, err } - return nil + return result, nil +} + +func (s3Ctrl *S3Controller) KeyExists(bucket string, key string) (bool, error) { + + _, err := s3Ctrl.S3Svc.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case "NotFound": // s3.ErrCodeNoSuchKey does not work, aws is missing this error code so we hardwire a string + return false, nil + default: + return false, err + } + } + return false, err + } + return true, nil } // HandleGetSize retrieves the total size and the number of files in the specified S3 bucket with the given prefix. func (bh *BlobHandler) HandleGetSize(c echo.Context) error { prefix := c.QueryParam("prefix") if prefix == "" { - errMsg := fmt.Errorf("request must include a `prefix` parameter") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "parameter `prefix` is required", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) - if err 
!= nil { - log.Error(err.Error()) - return c.JSON(statusCode, err.Error()) + + permissions, fullAccess, appErr := bh.getS3ReadPermissions(c, bucket) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + if !fullAccess && !isPermittedPrefix(bucket, prefix, permissions) { - errMsg := fmt.Errorf("user does not have permission to read the %s prefix", prefix) - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the %s prefix", prefix), err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + // Check if the prefix points directly to an object isObject, err := s3Ctrl.KeyExists(bucket, prefix) if err != nil { - errMsg := fmt.Errorf("error checking if prefix is an object: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "error checking if prefix is an object", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } if isObject { - errMsg := fmt.Errorf("the provided prefix %s points to a single object rather than a collection", prefix) - log.Error(errMsg.Error()) - return c.JSON(http.StatusTeapot, errMsg.Error()) + appErr := configberry.NewAppError(configberry.TeapotError, fmt.Sprintf("the provided prefix %s points to a single object rather than a collection", prefix), err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } var totalSize uint64 @@ -74,14 +95,15 @@ func (bh *BlobHandler) HandleGetSize(c echo.Context) error { }) if err != nil { - errMsg := fmt.Errorf("error processing objects: %s", err.Error()) - log.Error(errMsg.Error()) - return 
c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "error processing objects", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } if fileCount == 0 { - errMsg := fmt.Errorf("prefix %s not found", prefix) - log.Error(errMsg.Error()) - return c.JSON(http.StatusNotFound, errMsg.Error()) + appErr := configberry.NewAppError(configberry.NotFoundError, fmt.Sprintf("prefix %s not found", prefix), err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } response := struct { Size uint64 `json:"size"` @@ -94,120 +116,83 @@ func (bh *BlobHandler) HandleGetSize(c echo.Context) error { } log.Info("Successfully retrieved size for prefix:", prefix) - return c.JSON(http.StatusOK, response) + return configberry.HandleSuccessfulResponse(c, response) } func (bh *BlobHandler) HandleGetMetaData(c echo.Context) error { key := c.QueryParam("key") if key == "" { - errMsg := fmt.Errorf("request must include a `key` parameter") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) - if err != nil { - 
log.Error(err.Error()) - return c.JSON(statusCode, err.Error()) + + permissions, fullAccess, appErr := bh.getS3ReadPermissions(c, bucket) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { - errMsg := fmt.Errorf("user does not have permission to read the %s key", key) - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the %s key", key), err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + result, err := s3Ctrl.GetMetaData(bucket, key) if err != nil { - if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { - errMsg := fmt.Errorf("object %s not found", key) - log.Error(errMsg.Error()) - return c.JSON(http.StatusNotFound, errMsg.Error()) - } - errMsg := fmt.Errorf("error getting metadata: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.HandleAWSError(err, "error getting metadata") + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } log.Info("successfully retrieved metadata for key:", key) - return c.JSON(http.StatusOK, result) + return configberry.HandleSuccessfulResponse(c, result) } func (bh *BlobHandler) HandleGetObjExist(c echo.Context) error { key := c.QueryParam("key") if key == "" { - errMsg := fmt.Errorf("request must include a `key` parameter") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } bucket := 
c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) - if err != nil { - log.Error(err.Error()) - return c.JSON(statusCode, err.Error()) + permissions, fullAccess, appErr := bh.getS3ReadPermissions(c, bucket) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { - errMsg := fmt.Errorf("user does not have permission to read the %s key", key) - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the %s key", key), err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } result, err := s3Ctrl.KeyExists(bucket, key) if err != nil { - errMsg := fmt.Errorf("error checking if object exists: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - log.Info("successfully retrieved metadata for key:", key) - return c.JSON(http.StatusOK, result) -} - -func (s3Ctrl *S3Controller) GetMetaData(bucket, key string) (*s3.HeadObjectOutput, error) { - // Set up the input parameters for the list objects operation - input := &s3.HeadObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - } - - result, err := s3Ctrl.S3Svc.HeadObject(input) - if err != nil { - return nil, err + appErr := 
configberry.HandleAWSError(err, "error checking if object exists") + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - return result, nil -} - -func (s3Ctrl *S3Controller) KeyExists(bucket string, key string) (bool, error) { - - _, err := s3Ctrl.S3Svc.HeadObject(&s3.HeadObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - }) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - switch aerr.Code() { - case "NotFound": // s3.ErrCodeNoSuchKey does not work, aws is missing this error code so we hardwire a string - return false, nil - default: - return false, fmt.Errorf("KeyExists: %s", err) - } - } - return false, fmt.Errorf("KeyExists: %s", err) - } - return true, nil + log.Info("successfully retrieved metadata for key:", key) + return configberry.HandleSuccessfulResponse(c, result) } diff --git a/blobstore/move.go b/blobstore/move.go index 07b0b49..2b6262b 100644 --- a/blobstore/move.go +++ b/blobstore/move.go @@ -1,51 +1,17 @@ package blobstore import ( + "errors" "fmt" - "net/http" "strings" + "github.com/Dewberry/s3api/configberry" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" "github.com/labstack/echo/v4" log "github.com/sirupsen/logrus" ) -func (bh *BlobHandler) HandleMovePrefix(c echo.Context) error { - srcPrefix := c.QueryParam("src_prefix") - destPrefix := c.QueryParam("dest_prefix") - if srcPrefix == "" || destPrefix == "" { - errMsg := fmt.Errorf("parameters `src_key` and `dest_key` are required") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } - if !strings.HasSuffix(srcPrefix, "/") { - srcPrefix = srcPrefix + "/" - } - if !strings.HasSuffix(destPrefix, "/") { - destPrefix = destPrefix + "/" - } - - bucket := c.QueryParam("bucket") - s3Ctrl, err := bh.GetController(bucket) - if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - 
return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } - err = s3Ctrl.MovePrefix(bucket, srcPrefix, destPrefix) - if err != nil { - if strings.Contains(err.Error(), "source prefix not found") { - errMsg := fmt.Errorf("no objects found with source prefix: %s", srcPrefix) - log.Error(errMsg.Error()) - return c.JSON(http.StatusNotFound, errMsg.Error()) - } - return c.JSON(http.StatusInternalServerError, err.Error()) - } - - return c.JSON(http.StatusOK, fmt.Sprintf("Successfully moved prefix from %s to %s", srcPrefix, destPrefix)) -} - func (s3Ctrl *S3Controller) MovePrefix(bucket, srcPrefix, destPrefix string) error { var objectsFound bool @@ -67,7 +33,7 @@ func (s3Ctrl *S3Controller) MovePrefix(bucket, srcPrefix, destPrefix string) err } _, err := s3Ctrl.S3Svc.CopyObject(copyInput) if err != nil { - return fmt.Errorf("error copying object %s to %s: %v", srcObjectKey, destObjectKey, err) + return err } } @@ -75,61 +41,24 @@ func (s3Ctrl *S3Controller) MovePrefix(bucket, srcPrefix, destPrefix string) err // Ensure that your application logic requires this before proceeding err := s3Ctrl.DeleteList(page, bucket) if err != nil { - return fmt.Errorf("error deleting from source prefix %s: %v", srcPrefix, err) + return err } return nil } err := s3Ctrl.GetListWithCallBack(bucket, srcPrefix, false, processPage) if err != nil { - return fmt.Errorf("error processing objects for move: %v", err) + return err } // Check if objects were found after processing all pages if !objectsFound { - return fmt.Errorf("source prefix not found") + return errors.New("source prefix not found") } return nil } -func (bh *BlobHandler) HandleMoveObject(c echo.Context) error { - srcObjectKey := c.QueryParam("src_key") - destObjectKey := c.QueryParam("dest_key") - if srcObjectKey == "" || destObjectKey == "" { - errMsg := fmt.Errorf("paramters `src_key` and `dest_key` are required") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } - - bucket := 
c.QueryParam("bucket") - s3Ctrl, err := bh.GetController(bucket) - if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } - - err = s3Ctrl.CopyObject(bucket, srcObjectKey, destObjectKey) - if err != nil { - if strings.Contains(err.Error(), "keys are identical; no action taken") { - log.Error(err.Error()) - return c.JSON(http.StatusBadRequest, err.Error()) // 400 Bad Request - } else if strings.Contains(err.Error(), "already exists in the bucket; duplication will cause an overwrite") { - log.Error(err.Error()) - return c.JSON(http.StatusConflict, err.Error()) // 409 Conflict - } else if strings.Contains(err.Error(), "does not exist") { - log.Error(err.Error()) - return c.JSON(http.StatusNotFound, err.Error()) - } - errMsg := fmt.Errorf("error when copying object: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - - return c.JSON(http.StatusOK, fmt.Sprintf("Succesfully moved object from %s to %s", srcObjectKey, destObjectKey)) -} - func (s3Ctrl *S3Controller) CopyObject(bucket, srcObjectKey, destObjectKey string) error { // Check if the source and destination keys are the same if srcObjectKey == destObjectKey { @@ -138,15 +67,16 @@ func (s3Ctrl *S3Controller) CopyObject(bucket, srcObjectKey, destObjectKey strin // Check if the old key exists in the bucket oldKeyExists, err := s3Ctrl.KeyExists(bucket, srcObjectKey) if err != nil { - return fmt.Errorf("error checking if object %s exists: %s", destObjectKey, err.Error()) + return err } + if !oldKeyExists { return fmt.Errorf("`srcObjectKey` " + srcObjectKey + " does not exist") } // Check if the new key already exists in the bucket newKeyExists, err := s3Ctrl.KeyExists(bucket, destObjectKey) if err != nil { - return fmt.Errorf("error checking if object %s exists: %s", destObjectKey, err.Error()) + return err } if 
newKeyExists { return fmt.Errorf(destObjectKey + " already exists in the bucket; duplication will cause an overwrite. Please rename dest_key to a different name") @@ -161,7 +91,7 @@ func (s3Ctrl *S3Controller) CopyObject(bucket, srcObjectKey, destObjectKey strin // Copy the object to the new key (effectively renaming) _, err = s3Ctrl.S3Svc.CopyObject(copyInput) if err != nil { - return fmt.Errorf("error copying object" + srcObjectKey + "with the new key" + destObjectKey + ", " + err.Error()) + return err } // Delete the source object @@ -170,8 +100,71 @@ func (s3Ctrl *S3Controller) CopyObject(bucket, srcObjectKey, destObjectKey strin Key: aws.String(srcObjectKey), }) if err != nil { - return fmt.Errorf("error deleting old object " + srcObjectKey + " in bucket " + bucket + ", " + err.Error()) + return err } return nil } + +func (bh *BlobHandler) HandleMovePrefix(c echo.Context) error { + params := map[string]string{ + "srcPrefix": c.QueryParam("src_prefix"), + "destPrefix": c.QueryParam("dest_prefix"), + } + if appErr := configberry.CheckRequiredParams(params); appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, false)) + return configberry.HandleErrorResponse(c, appErr) + } + + if !strings.HasSuffix(params["srcPrefix"], "/") { + params["srcPrefix"] = params["srcPrefix"] + "/" + } + if !strings.HasSuffix(params["destPrefix"], "/") { + params["destPrefix"] = params["destPrefix"] + "/" + } + + bucket := c.QueryParam("bucket") + s3Ctrl, err := bh.GetController(bucket) + if err != nil { + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + + err = s3Ctrl.MovePrefix(bucket, params["srcPrefix"], params["destPrefix"]) + if err != nil { + appErr := configberry.HandleAWSError(err, "error moving prefix") + log.Error(configberry.LogErrorFormatter(appErr, true)) + return 
configberry.HandleErrorResponse(c, appErr) + } + + return configberry.HandleSuccessfulResponse(c, fmt.Sprintf("Successfully moved prefix from %s to %s", params["srcPrefix"], params["destPrefix"])) +} + +func (bh *BlobHandler) HandleMoveObject(c echo.Context) error { + params := map[string]string{ + "srcObjectKey": c.QueryParam("src_key"), + "destObjectKey": c.QueryParam("dest_key"), + } + if appErr := configberry.CheckRequiredParams(params); appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, false)) + return configberry.HandleErrorResponse(c, appErr) + } + + bucket := c.QueryParam("bucket") + s3Ctrl, err := bh.GetController(bucket) + if err != nil { + appErr := configberry.NewAppError(configberry.ValidationError, fmt.Sprintf("`bucket` %s is not available", bucket), err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + + err = s3Ctrl.CopyObject(bucket, params["srcObjectKey"], params["destObjectKey"]) + if err != nil { + appErr := configberry.HandleAWSError(err, "error copying prefix") + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + + return configberry.HandleSuccessfulResponse(c, fmt.Sprintf("Succesfully moved object from %s to %s", params["srcObjectKey"], params["destObjectKey"])) +} diff --git a/blobstore/object_content.go b/blobstore/object_content.go index 28d5250..4866f00 100644 --- a/blobstore/object_content.go +++ b/blobstore/object_content.go @@ -3,9 +3,8 @@ package blobstore import ( "fmt" "io" - "net/http" - "strings" + "github.com/Dewberry/s3api/configberry" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" "github.com/labstack/echo/v4" @@ -35,44 +34,44 @@ func (s3Ctrl *S3Controller) FetchObjectContent(bucket string, key string) (io.Re func (bh *BlobHandler) HandleObjectContents(c echo.Context) error { key := c.QueryParam("key") if key == "" { - errMsg := fmt.Errorf("parameter 'key' is 
required") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) - if err != nil { - log.Error(err.Error()) - return c.JSON(statusCode, err.Error()) + + permissions, fullAccess, appErr := bh.getS3ReadPermissions(c, bucket) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { - errMsg := fmt.Errorf("user does not have permission to read the %s key", key) - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the %s key", key), err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + outPutBody, err := s3Ctrl.FetchObjectContent(bucket, key) if err != nil { - errMsg := fmt.Errorf("error fetching object's content: %s", err.Error()) - log.Error(errMsg.Error()) - if strings.Contains(err.Error(), "not found") { - return c.JSON(http.StatusNotFound, errMsg.Error()) - } else { - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - 
} + appErr := configberry.HandleAWSError(err, "error fetching object's content") + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } body, err := io.ReadAll(outPutBody) if err != nil { - return c.JSON(http.StatusInternalServerError, err.Error()) + appErr := configberry.NewAppError(configberry.ForbiddenError, "error reading objects body", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - log.Info("HandleObjectContents: Successfully fetched object data for key:", key) + log.Info("Successfully fetched object data for key:", key) //TODO: add contentType - return c.Blob(http.StatusOK, "", body) + return configberry.HandleSuccessfulResponse(c, body) } diff --git a/blobstore/ping.go b/blobstore/ping.go index 3fae927..92dd74a 100644 --- a/blobstore/ping.go +++ b/blobstore/ping.go @@ -1,10 +1,9 @@ package blobstore import ( - "fmt" - "net/http" "os" + "github.com/Dewberry/s3api/configberry" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" "github.com/labstack/echo/v4" @@ -12,15 +11,16 @@ import ( ) func (bh *BlobHandler) HandlePing(c echo.Context) error { - return c.JSON(http.StatusOK, "connection without Auth is healthy") + return configberry.HandleSuccessfulResponse(c, "connection without Auth is healthy") } func (bh *BlobHandler) HandlePingWithAuth(c echo.Context) error { // Perform a HeadBucket operation to check the health of the S3 connection initAuth := os.Getenv("INIT_AUTH") if initAuth == "0" { - errMsg := fmt.Errorf("this requires authentication information that is unavailable when authorization is disabled. Please enable authorization to use this functionality") - return c.JSON(http.StatusForbidden, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ForbiddenError, "this endpoint requires authentication information that is unavailable when authorization is disabled. 
Please enable authorization to use this functionality", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } bucketHealth := make(map[string]string) var valid string @@ -42,5 +42,5 @@ func (bh *BlobHandler) HandlePingWithAuth(c echo.Context) error { } } - return c.JSON(http.StatusOK, bucketHealth) + return configberry.HandleSuccessfulResponse(c, bucketHealth) } diff --git a/blobstore/presigned_url.go b/blobstore/presigned_url.go index b767897..745c5d0 100644 --- a/blobstore/presigned_url.go +++ b/blobstore/presigned_url.go @@ -3,12 +3,12 @@ package blobstore import ( "bytes" "fmt" - "net/http" "net/url" "path/filepath" "strings" "time" + "github.com/Dewberry/s3api/configberry" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3manager" @@ -29,65 +29,67 @@ func (bh *BlobHandler) HandleGetPresignedDownloadURL(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } key := c.QueryParam("key") if key == "" { - errMsg := fmt.Errorf("parameter `key` is required") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) - if err != nil { - log.Error(err.Error()) - return c.JSON(statusCode, 
err.Error()) + + permissions, fullAccess, appErr := bh.getS3ReadPermissions(c, bucket) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { - errMsg := fmt.Errorf("user does not have permission to read the %s key", key) - log.Error(errMsg.Error()) - return c.JSON(http.StatusForbidden, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the %s key", key), err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + keyExist, err := s3Ctrl.KeyExists(bucket, key) if err != nil { - errMsg := fmt.Errorf("checking if object exists: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "error checking if object exists", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } if !keyExist { - errMsg := fmt.Errorf("object %s not found", key) - log.Error(errMsg.Error()) - return c.JSON(http.StatusNotFound, errMsg.Error()) + appErr := configberry.NewAppError(configberry.NotFoundError, fmt.Sprintf("object %s not found", key), err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - // Set the expiration time for the pre-signed URL url, err := s3Ctrl.GetDownloadPresignedURL(bucket, key, bh.Config.DefaultDownloadPresignedUrlExpiration) if err != nil { - errMsg := fmt.Errorf("error getting presigned URL: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.HandleAWSError(err, "error getting presigned URL") + log.Error(configberry.LogErrorFormatter(appErr, true)) + return 
configberry.HandleErrorResponse(c, appErr) } log.Info("successfully generated presigned URL for key:", key) - return c.JSON(http.StatusOK, url) + return configberry.HandleSuccessfulResponse(c, url) } func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { - prefix := c.QueryParam("prefix") - if prefix == "" { - errMsg := fmt.Errorf("`prefix` query params are required") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("error getting controller for bucket %s: %s", bucket, err) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + + prefix := c.QueryParam("prefix") + if prefix == "" { + appErr := configberry.NewAppError(configberry.ValidationError, "parameter `prefix` is required", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } var totalSize uint64 @@ -103,10 +105,10 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { scriptBuilder.WriteString("REM 5. 
Windows Defender SmartScreen (Optional): If you see a message like \"Windows Defender SmartScreen prevented an unrecognized app from starting,\" click \"More info\" and then click \"Run anyway\" to proceed with the download.\n\n") scriptBuilder.WriteString(fmt.Sprintf("mkdir \"%s\"\n", basePrefix)) - permissions, fullAccess, statusCode, err := bh.getS3ReadPermissions(c, bucket) - if err != nil { - log.Error(err.Error()) - return c.JSON(statusCode, err.Error()) + permissions, fullAccess, appErr := bh.getS3ReadPermissions(c, bucket) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } // Define the processPage function processPage := func(page *s3.ListObjectsV2Output) error { @@ -149,9 +151,9 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { // Call GetList with the processPage function err = s3Ctrl.GetListWithCallBack(bucket, prefix, false, processPage) if err != nil { - errMsg := fmt.Errorf("error processing objects: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "error processing objects", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } txtBatFileName := fmt.Sprintf("%s_download_script.txt", strings.TrimSuffix(prefix, "/")) @@ -166,18 +168,18 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { ContentType: aws.String("binary/octet-stream"), }) if err != nil { - errMsg := fmt.Errorf("error uploading %s to S3: %s", txtBatFileName, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, fmt.Sprintf("error uploading %s to S3", txtBatFileName), err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return 
configberry.HandleErrorResponse(c, appErr) } href, err := s3Ctrl.GetDownloadPresignedURL(bucket, outputFile, 1) if err != nil { - errMsg := fmt.Errorf("error generating presigned URL for %s: %s", txtBatFileName, err) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, fmt.Sprintf("error generating presigned URL for %s", txtBatFileName), err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } log.Infof("successfully generated download script for prefix %s in bucket %s", prefix, bucket) - return c.JSON(http.StatusOK, href) + return configberry.HandleSuccessfulResponse(c, href) } diff --git a/blobstore/upload.go b/blobstore/upload.go index de13df8..3136e51 100644 --- a/blobstore/upload.go +++ b/blobstore/upload.go @@ -4,10 +4,10 @@ import ( "bytes" "fmt" "io" - "net/http" "strconv" "time" + "github.com/Dewberry/s3api/configberry" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" @@ -16,6 +16,15 @@ import ( log "github.com/sirupsen/logrus" ) +type part struct { + PartNumber int `json:"partNumber"` + ETag string `json:"eTag"` +} +type completeUploadRequest struct { + UploadID string `json:"uploadId"` + Parts []part `json:"parts"` +} + func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadCloser) error { // Initialize the multipart upload to S3 params := &s3.CreateMultipartUploadInput{ @@ -25,7 +34,7 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC resp, err := s3Ctrl.S3Svc.CreateMultipartUpload(params) if err != nil { - return fmt.Errorf("error initializing multipart upload. 
%s", err.Error()) + return err } // Create the variables that will track upload progress @@ -42,7 +51,7 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC // This would be a true error while reading if err != nil && err != io.EOF { - return fmt.Errorf("error copying POST body to S3. %s", err.Error()) + return err } // Add the buffer data to the buffer @@ -60,7 +69,7 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC result, err := s3Ctrl.S3Svc.UploadPart(params) if err != nil { - return fmt.Errorf("error streaming POST body to S3. %s, %+v", err.Error(), result) + return err } totalBytes += int64(buffer.Len()) @@ -89,7 +98,7 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC result, err := s3Ctrl.S3Svc.UploadPart(params2) if err != nil { - return fmt.Errorf("error streaming POST body to S3. %s, %+v", err.Error(), result) + return err } totalBytes += int64(buffer.Len()) @@ -107,93 +116,12 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC } _, err = s3Ctrl.S3Svc.CompleteMultipartUpload(completeParams) if err != nil { - return fmt.Errorf("error completing multipart upload. 
%s", err.Error()) + return err } return nil } -func (bh *BlobHandler) HandleMultipartUpload(c echo.Context) error { - // Add overwrite check and parameter - key := c.QueryParam("key") - if key == "" { - errMsg := fmt.Errorf("parameter 'key' is required") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } - - bucket := c.QueryParam("bucket") - s3Ctrl, err := bh.GetController(bucket) - if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } - - httpCode, err := bh.validateUserAccessToPrefix(c, bucket, key, []string{"write"}) - if err != nil { - errMsg := fmt.Errorf("error while checking for user permission: %s", err) - log.Error(errMsg.Error()) - return c.JSON(httpCode, errMsg.Error()) - } - - overrideParam := c.QueryParam("override") - - var override bool - - if overrideParam == "true" || overrideParam == "false" { - var err error - override, err = strconv.ParseBool(c.QueryParam("override")) - if err != nil { - errMsg := fmt.Errorf("error parsing 'override' parameter: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } - - } else { - errMsg := fmt.Errorf("request must include a `override`, options are `true` or `false`") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) - } - - // Check if the request body is empty - buf := make([]byte, 1) - _, err = c.Request().Body.Read(buf) - if err == io.EOF { - errMsg := fmt.Errorf("no file provided in the request body") - log.Error(errMsg.Error()) - return c.JSON(http.StatusBadRequest, errMsg.Error()) // Return 400 Bad Request - } - - // Reset the request body to its original state - c.Request().Body = io.NopCloser(io.MultiReader(bytes.NewReader(buf), c.Request().Body)) - - keyExist, err := s3Ctrl.KeyExists(bucket, key) - if err != nil { - errMsg 
:= fmt.Errorf("error checking if object exists: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - if keyExist && !override { - errMsg := fmt.Errorf("object %s already exists and override is set to %t", key, override) - log.Errorf(errMsg.Error()) - return c.JSON(http.StatusConflict, errMsg.Error()) - } - - body := c.Request().Body - defer body.Close() - - err = s3Ctrl.UploadS3Obj(bucket, key, body) - if err != nil { - errMsg := fmt.Errorf("error uploading S3 object: %s", err.Error()) - log.Errorf(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) - } - - log.Infof("Successfully uploaded file with key: %s", key) - return c.JSON(http.StatusOK, "Successfully uploaded file") -} - // function to retrieve presigned url for a normal one time upload. You can only upload 5GB files at a time. func (s3Ctrl *S3Controller) GetUploadPresignedURL(bucket string, key string, expMin int) (string, error) { duration := time.Duration(expMin) * time.Minute @@ -257,28 +185,151 @@ func (s3Ctrl *S3Controller) GetUploadPartPresignedURL(bucket string, key string, return urlStr, nil } +// function that will return a multipart upload ID +func (s3Ctrl *S3Controller) GetMultiPartUploadID(bucket string, key string) (string, error) { + input := &s3.CreateMultipartUploadInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + result, err := s3Ctrl.S3Svc.CreateMultipartUpload(input) + if err != nil { + return "", err + } + return *result.UploadId, nil +} + +// function that will complete a multipart upload ID when all parts are completely uploaded +func (s3Ctrl *S3Controller) CompleteMultipartUpload(bucket string, key string, uploadID string, parts []*s3.CompletedPart) (*s3.CompleteMultipartUploadOutput, error) { + input := &s3.CompleteMultipartUploadInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + UploadId: aws.String(uploadID), + MultipartUpload: &s3.CompletedMultipartUpload{ + 
Parts: parts, + }, + } + result, err := s3Ctrl.S3Svc.CompleteMultipartUpload(input) + if err != nil { + return nil, err + } + return result, nil +} + +// function that will abort a multipart upload in progress +func (s3Ctrl *S3Controller) AbortMultipartUpload(bucket string, key string, uploadID string) error { + input := &s3.AbortMultipartUploadInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + UploadId: aws.String(uploadID), + } + _, err := s3Ctrl.S3Svc.AbortMultipartUpload(input) + if err != nil { + return err + } + return nil +} + +func (bh *BlobHandler) HandleMultipartUpload(c echo.Context) error { + // Add overwrite check and parameter + key := c.QueryParam("key") + if key == "" { + appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + + bucket := c.QueryParam("bucket") + s3Ctrl, err := bh.GetController(bucket) + if err != nil { + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + + appErr := bh.validateUserAccessToPrefix(c, bucket, key, []string{"write"}) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + + overrideParam := c.QueryParam("override") + var override bool + if overrideParam == "true" || overrideParam == "false" { + var err error + override, err = strconv.ParseBool(c.QueryParam("override")) + if err != nil { + appErr := configberry.NewAppError(configberry.InternalServerError, "error parsing `override` parameter", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + } else { + appErr := configberry.NewAppError(configberry.ValidationError, "request must include a 
`override`, options are `true` or `false`", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + + // Check if the request body is empty + buf := make([]byte, 1) + _, err = c.Request().Body.Read(buf) + if err == io.EOF { + appErr := configberry.NewAppError(configberry.ValidationError, "no file provided in the request body`", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + + // Reset the request body to its original state + c.Request().Body = io.NopCloser(io.MultiReader(bytes.NewReader(buf), c.Request().Body)) + + keyExist, err := s3Ctrl.KeyExists(bucket, key) + if err != nil { + appErr := configberry.HandleAWSError(err, "error checking if object exists`") + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + if keyExist && !override { + appErr := configberry.NewAppError(configberry.ConflictError, fmt.Sprintf("object %s already exists and override is set to %t", key, override), err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + + body := c.Request().Body + defer body.Close() + + err = s3Ctrl.UploadS3Obj(bucket, key, body) + if err != nil { + appErr := configberry.NewAppError(configberry.InternalServerError, "error uploading S3 object", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + + } + + log.Infof("Successfully uploaded file with key: %s", key) + return configberry.HandleSuccessfulResponse(c, "Successfully uploaded file") +} + // enpoint handler that will either return a one time presigned upload URL or multipart upload url func (bh *BlobHandler) HandleGetPresignedUploadURL(c echo.Context) error { key := c.QueryParam("key") if key == "" { - errMsg := fmt.Errorf("`key` parameters are required") - log.Error(errMsg.Error()) - return 
c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + bucket := c.QueryParam("bucket") - //get controller for bucket s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - httpCode, err := bh.validateUserAccessToPrefix(c, bucket, key, []string{"write"}) - if err != nil { - errMsg := fmt.Errorf("error while checking for user permission: %s", err) - log.Error(errMsg.Error()) - return c.JSON(httpCode, errMsg.Error()) + + appErr := bh.validateUserAccessToPrefix(c, bucket, key, []string{"write"}) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + uploadID := c.QueryParam("upload_id") partNumberStr := c.QueryParam("part_number") @@ -286,131 +337,97 @@ func (bh *BlobHandler) HandleGetPresignedUploadURL(c echo.Context) error { //if the user provided both upload_id and part_number then we return a part presigned URL partNumber, err := strconv.Atoi(partNumberStr) if err != nil { - errMsg := fmt.Errorf("error parsing int from `part_number`: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "error parsing int from `part_number`", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } presignedURL, err := 
s3Ctrl.GetUploadPartPresignedURL(bucket, key, uploadID, int64(partNumber), bh.Config.DefaultUploadPresignedUrlExpiration) if err != nil { - errMsg := fmt.Errorf("error generating presigned part URL: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "error generating presigned part URL", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } log.Infof("successfully generated presigned part URL for key: %s", key) - return c.JSON(http.StatusOK, presignedURL) + return configberry.HandleSuccessfulResponse(c, presignedURL) } else if (uploadID == "" && partNumberStr != "") || (uploadID != "" && partNumberStr == "") { - errMsg := fmt.Errorf("both 'uploadID' and 'partNumber' must be provided together for a multipart upload, or neither for a standard upload") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "both `uploadID` and `partNumber` must be provided together for a multipart upload, or neither for a standard upload", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } //if the user did not provided both upload_id and part_number then we returned normal presigned URL presignedURL, err := s3Ctrl.GetUploadPresignedURL(bucket, key, bh.Config.DefaultUploadPresignedUrlExpiration) if err != nil { - log.Errorf("error generating presigned URL: %s", err.Error()) - return c.JSON(http.StatusInternalServerError, err.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "error generating presigned URL", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } log.Infof("successfully generated presigned URL for key: %s", key) - return 
c.JSON(http.StatusOK, presignedURL) -} - -// function that will return a multipart upload ID -func (s3Ctrl *S3Controller) GetMultiPartUploadID(bucket string, key string) (string, error) { - input := &s3.CreateMultipartUploadInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - } - result, err := s3Ctrl.S3Svc.CreateMultipartUpload(input) - if err != nil { - return "", err - } - return *result.UploadId, nil + return configberry.HandleSuccessfulResponse(c, presignedURL) } // endpoint handler that will return a multipart upload ID func (bh *BlobHandler) HandleGetMultipartUploadID(c echo.Context) error { key := c.QueryParam("key") if key == "" { - errMsg := fmt.Errorf("`key` parameters are required") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + bucket := c.QueryParam("bucket") - //get controller for bucket s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - httpCode, err := bh.validateUserAccessToPrefix(c, bucket, key, []string{"write"}) - if err != nil { - errMsg := fmt.Errorf("error while checking for user permission: %s", err) - log.Error(errMsg.Error()) - return c.JSON(httpCode, errMsg.Error()) + + appErr := bh.validateUserAccessToPrefix(c, bucket, key, []string{"write"}) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } uploadID, err 
:= s3Ctrl.GetMultiPartUploadID(bucket, key) if err != nil { - errMsg := fmt.Errorf("error retrieving multipart Upload ID: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "error retrieving multipart Upload ID", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } log.Infof("successfully generated multipart Upload ID for key: %s", key) - return c.JSON(http.StatusOK, uploadID) -} - -// function that will complete a multipart upload ID when all parts are completely uploaded -func (s3Ctrl *S3Controller) CompleteMultipartUpload(bucket string, key string, uploadID string, parts []*s3.CompletedPart) (*s3.CompleteMultipartUploadOutput, error) { - input := &s3.CompleteMultipartUploadInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - UploadId: aws.String(uploadID), - MultipartUpload: &s3.CompletedMultipartUpload{ - Parts: parts, - }, - } - result, err := s3Ctrl.S3Svc.CompleteMultipartUpload(input) - if err != nil { - return nil, err - } - return result, nil + return configberry.HandleSuccessfulResponse(c, uploadID) } // endpoint handler that will complete a multipart upload func (bh *BlobHandler) HandleCompleteMultipartUpload(c echo.Context) error { key := c.QueryParam("key") if key == "" { - errMsg := fmt.Errorf("`key` parameters are required") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return 
c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - httpCode, err := bh.validateUserAccessToPrefix(c, bucket, key, []string{"write"}) - if err != nil { - errMsg := fmt.Errorf("error while checking for user permission: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(httpCode, errMsg.Error()) - } - type part struct { - PartNumber int `json:"partNumber"` - ETag string `json:"eTag"` - } - type completeUploadRequest struct { - UploadID string `json:"uploadId"` - Parts []part `json:"parts"` + + appErr := bh.validateUserAccessToPrefix(c, bucket, key, []string{"write"}) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + var req completeUploadRequest + if err := c.Bind(&req); err != nil { - errMsg := fmt.Errorf("error parsing request body: %s", err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusBadRequest, errMsg.Error()) + appErr := configberry.NewAppError(configberry.BadRequestError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } s3Parts := make([]*s3.CompletedPart, len(req.Parts)) @@ -423,63 +440,50 @@ func (bh *BlobHandler) HandleCompleteMultipartUpload(c echo.Context) error { _, err = s3Ctrl.CompleteMultipartUpload(bucket, key, req.UploadID, s3Parts) if err != nil { - errMsg := fmt.Errorf("error completing the multipart Upload for key %s, %s", key, err) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, fmt.Sprintf("error completing the multipart Upload for key %s", key), err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + 
return configberry.HandleErrorResponse(c, appErr) } log.Infof("succesfully completed multipart upload for key %s", key) - return c.JSON(http.StatusOK, "succesfully completed multipart upload") -} - -// function that will abort a multipart upload in progress -func (s3Ctrl *S3Controller) AbortMultipartUpload(bucket string, key string, uploadID string) error { - input := &s3.AbortMultipartUploadInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - UploadId: aws.String(uploadID), - } - _, err := s3Ctrl.S3Svc.AbortMultipartUpload(input) - if err != nil { - return err - } - return nil + return configberry.HandleSuccessfulResponse(c, "succesfully completed multipart upload") } // endpoint handler that will abort a multipart upload in progress func (bh *BlobHandler) HandleAbortMultipartUpload(c echo.Context) error { key := c.QueryParam("key") if key == "" { - errMsg := fmt.Errorf("`key` parameter is required") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - errMsg := fmt.Errorf("`bucket` %s is not available, %s", bucket, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } - httpCode, err := bh.validateUserAccessToPrefix(c, bucket, key, []string{"write"}) - if err != nil { - errMsg := fmt.Errorf("error while checking for user permission: %s", err) - log.Error(errMsg.Error()) - return c.JSON(httpCode, errMsg.Error()) + + appErr := 
bh.validateUserAccessToPrefix(c, bucket, key, []string{"write"}) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } uploadID := c.QueryParam("upload_id") if uploadID == "" { - errMsg := fmt.Errorf("`upload_id` param is requires") - log.Error(errMsg.Error()) - return c.JSON(http.StatusUnprocessableEntity, errMsg.Error()) + appErr := configberry.NewAppError(configberry.ValidationError, "`upload_id` param is requires", nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } err = s3Ctrl.AbortMultipartUpload(bucket, key, uploadID) if err != nil { - errMsg := fmt.Errorf("error aborting the multipart Upload for key %s, %s", key, err.Error()) - log.Error(errMsg.Error()) - return c.JSON(http.StatusInternalServerError, errMsg.Error()) + appErr := configberry.NewAppError(configberry.InternalServerError, fmt.Sprintf("error aborting the multipart Upload for key %s", key), err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } log.Infof("succesfully aborted multipart upload for key %s", key) - return c.JSON(http.StatusOK, "succesfully aborted multipart upload") + return configberry.HandleSuccessfulResponse(c, "succesfully aborted multipart upload") } diff --git a/configberry/errors_handling.go b/configberry/errors_handling.go index a26c7ab..5e15f8d 100644 --- a/configberry/errors_handling.go +++ b/configberry/errors_handling.go @@ -26,9 +26,13 @@ var ( ValidationError = ErrorType{Value: 2, Name: "Validation Error"} NotFoundError = ErrorType{Value: 3, Name: "Not Found Error"} UnauthorizedError = ErrorType{Value: 4, Name: "Unauthorized Error"} - InternalServerError = ErrorType{Value: 5, Name: "Internal Server Error"} - AWSError = ErrorType{Value: 6, Name: "AWS Error"} - FatalError = ErrorType{Value: 7, Name: "Fatal Error"} + ForbiddenError = ErrorType{Value: 5, Name: "Forbidden 
Error"} + InternalServerError = ErrorType{Value: 6, Name: "Internal Server Error"} + AWSError = ErrorType{Value: 7, Name: "AWS Error"} + FatalError = ErrorType{Value: 8, Name: "Fatal Error"} + TeapotError = ErrorType{Value: 9, Name: "Teapot Error"} + ConflictError = ErrorType{Value: 10, Name: "Conflict Error"} + BadRequestError = ErrorType{Value: 11, Name: "Bad Request Error"} ) // AppError includes the error type, message, and the original error. @@ -73,12 +77,18 @@ func HandleErrorResponse(c echo.Context, err *AppError) error { statusCode := http.StatusInternalServerError // Default status switch err.Type.Value { - case ValidationError.Value: + case ValidationError.Value, BadRequestError.Value: statusCode = http.StatusBadRequest case NotFoundError.Value: statusCode = http.StatusNotFound case UnauthorizedError.Value: statusCode = http.StatusUnauthorized + case ForbiddenError.Value: + statusCode = http.StatusForbidden + case TeapotError.Value: + statusCode = http.StatusTeapot + case ConflictError.Value: + statusCode = http.StatusConflict } return c.JSON(statusCode, map[string]string{"Type": err.Type.Name, "Error": responseMessage}) } diff --git a/documentation/general.md b/documentation/general.md index 5b869d1..aa23d2e 100644 --- a/documentation/general.md +++ b/documentation/general.md @@ -6,7 +6,11 @@ Functions that return native Go errors and are used for utility logic. ### Methods Defined on `S3Ctrl` -Methods defined on `S3Ctrl` that return native Go errors. These methods encapsulate native AWS SDK Go functions to interact with S3. They can be used by both endpoint handlers and for package/library purposes. These methods can also encapsulate other `S3Ctrl` methods that perform specific utilities. For example, `RecursivelyDeleteObjects` uses `GetListWithCallBack` and `DeleteList`, which both utilize native AWS SDK Go functions to perform a common prefix (or folder) deletion. 
+Methods defined on `S3Ctrl` have two primary uses:
+
+- **Methods defined on `S3Ctrl` that return native Go errors:** These methods encapsulate native AWS SDK Go functions to interact with S3. They can be used by both endpoint handlers and for package/library purposes. These methods can also encapsulate other `S3Ctrl` methods that perform specific utilities.
+
+- **Methods defined on `S3Ctrl` that return a ConfigBerry `AppError`:** These methods usually encapsulate other `S3Ctrl` methods that perform specific utilities. These methods are usually private.
 
 ### Methods Defined on `BlobHandler`
 
@@ -14,10 +18,35 @@ Methods defined on `BlobHandler` have three primary uses:
 
 - **Endpoint Handlers:** Methods that communicate with HTTP requests and always return a ConfigBerry `AppError`. These methods' names should always start with `Handle`.
 
-- **Utility Methods for Endpoint Handlers:** Private methods that encapsulate reusable logic for endpoints, such as `checkAndAdjustPrefix`. These methods also always return a ConfigBerry `AppError`.
+- **Utility Methods for Endpoint Handlers:** Private methods that encapsulate reusable logic for endpoints. These methods also always return a ConfigBerry `AppError`.
 
 - **Utility Methods for Both Endpoint and External Use:** Methods that can be used by both endpoint handlers and external packages/libraries, returning native Go errors. (Note: Only one method currently falls under this category, `GetController`.)
 
 # deprecated functions/methods
 
 deprecated functions will be in /utils/deprecated.txt, the file should consist of file_name where the function/methods were deprectaed from.
+
+# errors
+
+Rules:
+
+- **Use Backticks `` ` `` to reference parameters:** parameters referenced in errors should be encapsulated inside backticks
+- **Avoid Capital Letters:** Custom errors should not start with a capital letter (unless they begin with an acronym).
+- **Error Messages Should Be Descriptive:** Ensure that error messages are clear and provide enough context to understand the issue.
+- **Include Relevant Information:** Include information about the operation that failed, such as key parameters, to aid in debugging.
+- **Avoid Punctuation:** Do not end error messages with punctuation marks like periods or exclamation points.
+- **Use `errors.New` for Static Errors:** Use `errors.New` when the error message is static and doesn't need any additional context.
+  ```go
+  var ErrInvalidInput = errors.New("invalid input")
+  ```
+- **Use `fmt.Errorf` for Dynamic Errors:** Use `fmt.Errorf` when you need to include dynamic information or wrap an existing error with additional context.
+  ```go
+  err := fmt.Errorf("failed to process user ID %d: %w", userID, ErrInvalidInput)
+  ```
+- Methods that will be used for packages should return native Go errors
+
+# Logging
+
+Rules:
+
+- **Only log errors in main methods/functions**: Do not log errors in the util function and the util caller; restrict error logging to the function that is external and communicates with the client (with the exception of non-breaking errors).
From f17ec0579ca647195b5af53f28dcc0566266f9d5 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Sun, 23 Jun 2024 17:36:55 -0400 Subject: [PATCH 10/22] update e2e tests --- .github/workflows/e2e-test.yml | 18 +- blobstore/blobstore.go | 3 +- blobstore/creds.go | 33 + blobstore/delete.go | 42 +- blobstore/list.go | 2 +- blobstore/metadata.go | 24 +- blobstore/move.go | 25 +- blobstore/object_content.go | 9 +- blobstore/presigned_url.go | 17 +- blobstore/upload.go | 50 +- configberry/errors_handling.go | 38 +- e2e-test/e2eCollection.json | 3857 ++++++++++---------------------- e2e-test/e2eEnv.json | 74 +- 13 files changed, 1414 insertions(+), 2778 deletions(-) diff --git a/.github/workflows/e2e-test.yml b/.github/workflows/e2e-test.yml index c928046..bdfde83 100644 --- a/.github/workflows/e2e-test.yml +++ b/.github/workflows/e2e-test.yml @@ -30,15 +30,15 @@ jobs: echo AWS_S3_BUCKET='test-bucket' >> .env echo S3API_SERVICE_PORT='5005' >> .env echo AUTH_LEVEL=0 >> .env - - - name: Substitute secret variables in JSON - env: - auth_password: ${{ secrets.AUTH_PASSWORD }} - KEYCLOAK_SECRET: ${{ secrets.KEYCLOAK_SECRET }} - run: | - echo "Is auth_password set: $(if [ -z "$auth_password" ]; then echo "No"; else echo "Yes"; fi)" - echo "Is KEYCLOAK_SECRET set: $(if [ -z "$KEYCLOAK_SECRET" ]; then echo "No"; else echo "Yes"; fi)" - envsubst < e2e-test/e2eEnv.template.json > e2e-test/e2eEnv.json + echo INIT_AUTH=0 >> .env + # - name: Substitute secret variables in JSON + # env: + # auth_password: ${{ secrets.AUTH_PASSWORD }} + # KEYCLOAK_SECRET: ${{ secrets.KEYCLOAK_SECRET }} + # run: | + # echo "Is auth_password set: $(if [ -z "$auth_password" ]; then echo "No"; else echo "Yes"; fi)" + # echo "Is KEYCLOAK_SECRET set: $(if [ -z "$KEYCLOAK_SECRET" ]; then echo "No"; else echo "Yes"; fi)" + # envsubst < e2e-test/e2eEnv.template.json > e2e-test/e2eEnv.json - name: Build the docker-compose stack run: docker-compose build -d diff --git a/blobstore/blobstore.go b/blobstore/blobstore.go 
index 2aee3b3..9e747b3 100644 --- a/blobstore/blobstore.go +++ b/blobstore/blobstore.go @@ -84,8 +84,7 @@ func (s3Ctrl *S3Controller) checkAndAdjustPrefix(bucket, prefix string) (string, if prefix != "" && prefix != "./" && prefix != "/" { isObject, err := s3Ctrl.KeyExists(bucket, prefix) if err != nil { - fmt.Println(err) - return "", configberry.HandleAWSError(err, "error checking if object exists") + return "", configberry.HandleAWSError(err, "error checking if prefix is an object") } if isObject { objMeta, err := s3Ctrl.GetMetaData(bucket, prefix) diff --git a/blobstore/creds.go b/blobstore/creds.go index 0ecade9..7c57336 100644 --- a/blobstore/creds.go +++ b/blobstore/creds.go @@ -86,6 +86,39 @@ func (mc MinioConfig) minIOSessionManager() (*s3.S3, *session.Session, error) { log.Info("Bucket already exists") } + // Create the policy as a byte array + policyBytes := []byte(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": [ + "*" + ] + }, + "Action": [ + "s3:GetObject", + "s3:PutObject" + ], + "Resource": [ + "arn:aws:s3:::test-bucket/*" + ] + } + ]}`) + + // Set the policy on the bucket + _, err = s3SVC.PutBucketPolicy(&s3.PutBucketPolicyInput{ + Bucket: aws.String(mc.Bucket), + Policy: aws.String(string(policyBytes)), + }) + if err != nil { + log.Errorf("error setting bucket policy: %s", err.Error()) + // Handle error appropriately + } else { + log.Info("Bucket policy set successfully") + } + return s3SVC, sess, nil } diff --git a/blobstore/delete.go b/blobstore/delete.go index 4373070..ea362b8 100644 --- a/blobstore/delete.go +++ b/blobstore/delete.go @@ -12,6 +12,25 @@ import ( log "github.com/sirupsen/logrus" ) +func (s3Ctrl *S3Controller) DeleteObjectIfExists(bucket, key string) error { + // Check if the object exists + if _, err := s3Ctrl.GetMetaData(bucket, key); err != nil { + return err + } + + // Delete the object + deleteInput := &s3.DeleteObjectInput{ + Bucket: aws.String(bucket), + Key: 
aws.String(key), + } + _, err := s3Ctrl.S3Svc.DeleteObject(deleteInput) + if err != nil { + return err + } + + return nil +} + func (s3Ctrl *S3Controller) DeleteList(page *s3.ListObjectsV2Output, bucket string) error { if len(page.Contents) == 0 { return nil // No objects to delete in this page @@ -81,28 +100,9 @@ func (bh *BlobHandler) HandleDeleteObject(c echo.Context) error { return configberry.HandleErrorResponse(c, appErr) } - // If the key is not a folder, proceed with deleting a single object - keyExist, err := s3Ctrl.KeyExists(bucket, key) - if err != nil { - appErr := configberry.HandleAWSError(err, "error checking if object exists") - log.Error(configberry.LogErrorFormatter(appErr, true)) - return configberry.HandleErrorResponse(c, appErr) - } - - if !keyExist { - appErr := configberry.NewAppError(configberry.NotFoundError, fmt.Sprintf("object %s not found", key), nil) - log.Error(configberry.LogErrorFormatter(appErr, true)) - return configberry.HandleErrorResponse(c, appErr) - } - - deleteInput := &s3.DeleteObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - } - - _, err = s3Ctrl.S3Svc.DeleteObject(deleteInput) + err = s3Ctrl.DeleteObjectIfExists(bucket, key) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, fmt.Sprintf("error deleting object. 
%s", err.Error()), nil) + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error deleting object %s", key)) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } diff --git a/blobstore/list.go b/blobstore/list.go index 3e3d2b1..b12a813 100644 --- a/blobstore/list.go +++ b/blobstore/list.go @@ -153,7 +153,7 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { err = s3Ctrl.GetListWithCallBack(bucket, prefix, delimiter, processPage) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "error processing objects", err) + appErr := configberry.HandleAWSError(err, "error processing objects") log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } diff --git a/blobstore/metadata.go b/blobstore/metadata.go index c6543f9..2d699fd 100644 --- a/blobstore/metadata.go +++ b/blobstore/metadata.go @@ -22,7 +22,6 @@ func (s3Ctrl *S3Controller) GetMetaData(bucket, key string) (*s3.HeadObjectOutpu if err != nil { return nil, err } - return result, nil } @@ -61,29 +60,20 @@ func (bh *BlobHandler) HandleGetSize(c echo.Context) error { log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } - - permissions, fullAccess, appErr := bh.getS3ReadPermissions(c, bucket) + adjustedPrefix, appErr := s3Ctrl.checkAndAdjustPrefix(bucket, prefix) if appErr != nil { log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } - - if !fullAccess && !isPermittedPrefix(bucket, prefix, permissions) { - appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the %s prefix", prefix), err) - log.Error(configberry.LogErrorFormatter(appErr, true)) - return configberry.HandleErrorResponse(c, appErr) - } - - // Check if the prefix points directly to an object - isObject, err := s3Ctrl.KeyExists(bucket, 
prefix) - if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "error checking if prefix is an object", err) + prefix = adjustedPrefix + permissions, fullAccess, appErr := bh.getS3ReadPermissions(c, bucket) + if appErr != nil { log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } - if isObject { - appErr := configberry.NewAppError(configberry.TeapotError, fmt.Sprintf("the provided prefix %s points to a single object rather than a collection", prefix), err) + if !fullAccess && !isPermittedPrefix(bucket, prefix, permissions) { + appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the %s prefix", prefix), err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -149,7 +139,7 @@ func (bh *BlobHandler) HandleGetMetaData(c echo.Context) error { result, err := s3Ctrl.GetMetaData(bucket, key) if err != nil { - appErr := configberry.HandleAWSError(err, "error getting metadata") + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error getting metadata for %s", key)) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } diff --git a/blobstore/move.go b/blobstore/move.go index 2b6262b..916b423 100644 --- a/blobstore/move.go +++ b/blobstore/move.go @@ -64,15 +64,7 @@ func (s3Ctrl *S3Controller) CopyObject(bucket, srcObjectKey, destObjectKey strin if srcObjectKey == destObjectKey { return fmt.Errorf("source `%s` and destination `%s` keys are identical; no action taken", srcObjectKey, destObjectKey) } - // Check if the old key exists in the bucket - oldKeyExists, err := s3Ctrl.KeyExists(bucket, srcObjectKey) - if err != nil { - return err - } - if !oldKeyExists { - return fmt.Errorf("`srcObjectKey` " + srcObjectKey + " does not exist") - } // Check if the new key already exists in the bucket newKeyExists, err := 
s3Ctrl.KeyExists(bucket, destObjectKey) if err != nil { @@ -150,6 +142,11 @@ func (bh *BlobHandler) HandleMoveObject(c echo.Context) error { log.Error(configberry.LogErrorFormatter(appErr, false)) return configberry.HandleErrorResponse(c, appErr) } + if params["srcObjectKey"] == params["destObjectKey"] { + appErr := configberry.NewAppError(configberry.ValidationError, fmt.Sprintf("source `%s` and destination `%s` keys are identical; no action taken", params["srcObjectKey"], params["destObjectKey"]), nil) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) @@ -159,6 +156,18 @@ func (bh *BlobHandler) HandleMoveObject(c echo.Context) error { return configberry.HandleErrorResponse(c, appErr) } + newKeyExists, err := s3Ctrl.KeyExists(bucket, params["destObjectKey"]) + if err != nil { + return err + } + + if newKeyExists { + appErr := configberry.NewAppError(configberry.ConflictError, fmt.Sprintf("%s already exists in the bucket; duplication will cause an overwrite. 
Please rename dest_key to a different name", params["destObjectKey"]), err) + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + + } + err = s3Ctrl.CopyObject(bucket, params["srcObjectKey"], params["destObjectKey"]) if err != nil { appErr := configberry.HandleAWSError(err, "error copying prefix") diff --git a/blobstore/object_content.go b/blobstore/object_content.go index 4866f00..f6b0144 100644 --- a/blobstore/object_content.go +++ b/blobstore/object_content.go @@ -12,13 +12,6 @@ import ( ) func (s3Ctrl *S3Controller) FetchObjectContent(bucket string, key string) (io.ReadCloser, error) { - keyExist, err := s3Ctrl.KeyExists(bucket, key) - if err != nil { - return nil, err - } - if !keyExist { - return nil, fmt.Errorf("object %s not found", key) - } input := &s3.GetObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), @@ -67,7 +60,7 @@ func (bh *BlobHandler) HandleObjectContents(c echo.Context) error { } body, err := io.ReadAll(outPutBody) if err != nil { - appErr := configberry.NewAppError(configberry.ForbiddenError, "error reading objects body", err) + appErr := configberry.NewAppError(configberry.InternalServerError, "error reading objects body", err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } diff --git a/blobstore/presigned_url.go b/blobstore/presigned_url.go index 745c5d0..9978b91 100644 --- a/blobstore/presigned_url.go +++ b/blobstore/presigned_url.go @@ -18,6 +18,9 @@ import ( func (s3Ctrl *S3Controller) GetDownloadPresignedURL(bucket, key string, expDays int) (string, error) { duration := time.Duration(expDays) * 24 * time.Hour + if _, err := s3Ctrl.GetMetaData(bucket, key); err != nil { //this is to check if the object exists or not, it will return an AWS error + return "", err + } req, _ := s3Ctrl.S3Svc.GetObjectRequest(&s3.GetObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), @@ -53,21 +56,9 @@ func (bh 
*BlobHandler) HandleGetPresignedDownloadURL(c echo.Context) error { return configberry.HandleErrorResponse(c, appErr) } - keyExist, err := s3Ctrl.KeyExists(bucket, key) - if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "error checking if object exists", err) - log.Error(configberry.LogErrorFormatter(appErr, true)) - return configberry.HandleErrorResponse(c, appErr) - } - if !keyExist { - appErr := configberry.NewAppError(configberry.NotFoundError, fmt.Sprintf("object %s not found", key), err) - log.Error(configberry.LogErrorFormatter(appErr, true)) - return configberry.HandleErrorResponse(c, appErr) - } - url, err := s3Ctrl.GetDownloadPresignedURL(bucket, key, bh.Config.DefaultDownloadPresignedUrlExpiration) if err != nil { - appErr := configberry.HandleAWSError(err, "error getting presigned URL") + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error getting presigned download URL for object %s", key)) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } diff --git a/blobstore/upload.go b/blobstore/upload.go index 3136e51..4bd9839 100644 --- a/blobstore/upload.go +++ b/blobstore/upload.go @@ -125,14 +125,41 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC // function to retrieve presigned url for a normal one time upload. You can only upload 5GB files at a time. 
func (s3Ctrl *S3Controller) GetUploadPresignedURL(bucket string, key string, expMin int) (string, error) { duration := time.Duration(expMin) * time.Minute - req, _ := s3Ctrl.S3Svc.PutObjectRequest(&s3.PutObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - }) + var urlStr string + var err error + if s3Ctrl.S3Mock { + // Create a temporary S3 client with the modified endpoint + //this is done so that the presigned url starts with localhost:9000 instead of + //minio:9000 which would cause an error due to cors origin policy + tempS3Svc, err := session.NewSession(&aws.Config{ + Endpoint: aws.String("http://localhost:9000"), + Region: s3Ctrl.S3Svc.Config.Region, + Credentials: s3Ctrl.S3Svc.Config.Credentials, + S3ForcePathStyle: aws.Bool(true), + }) + if err != nil { + return "", fmt.Errorf("error creating temporary s3 session: %s", err.Error()) + } - urlStr, err := req.Presign(duration) - if err != nil { - return "", err + // Generate the request using the temporary client + req, _ := s3.New(tempS3Svc).PutObjectRequest(&s3.PutObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + urlStr, err = req.Presign(duration) + if err != nil { + return "", err + } + } else { + // Generate the request using the original client + req, _ := s3Ctrl.S3Svc.PutObjectRequest(&s3.PutObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + }) + urlStr, err = req.Presign(duration) + if err != nil { + return "", err + } } return urlStr, nil @@ -297,7 +324,7 @@ func (bh *BlobHandler) HandleMultipartUpload(c echo.Context) error { err = s3Ctrl.UploadS3Obj(bucket, key, body) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "error uploading S3 object", err) + appErr := configberry.HandleAWSError(err, "error uploading S3 object") log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) @@ -354,10 +381,11 @@ func (bh *BlobHandler) HandleGetPresignedUploadURL(c 
echo.Context) error { log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } + //if the user did not provided both upload_id and part_number then we returned normal presigned URL presignedURL, err := s3Ctrl.GetUploadPresignedURL(bucket, key, bh.Config.DefaultUploadPresignedUrlExpiration) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "error generating presigned URL", err) + appErr := configberry.HandleAWSError(err, "error generating presigned URL") log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -391,7 +419,7 @@ func (bh *BlobHandler) HandleGetMultipartUploadID(c echo.Context) error { uploadID, err := s3Ctrl.GetMultiPartUploadID(bucket, key) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "error retrieving multipart Upload ID", err) + appErr := configberry.HandleAWSError(err, "error retrieving multipart Upload ID") log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -440,7 +468,7 @@ func (bh *BlobHandler) HandleCompleteMultipartUpload(c echo.Context) error { _, err = s3Ctrl.CompleteMultipartUpload(bucket, key, req.UploadID, s3Parts) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, fmt.Sprintf("error completing the multipart Upload for key %s", key), err) + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error completing the multipart Upload for key %s", key)) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } diff --git a/configberry/errors_handling.go b/configberry/errors_handling.go index 5e15f8d..9e39243 100644 --- a/configberry/errors_handling.go +++ b/configberry/errors_handling.go @@ -2,6 +2,7 @@ package configberry import ( "database/sql" + "errors" "fmt" "net/http" "reflect" @@ -143,22 +144,37 @@ func 
HandleSQLError(err error, errMsg string) *AppError { // HandleAWSError processes AWS-specific errors and returns an appropriate AppError. // referencing https://github.com/aws/aws-sdk-go/blob/70ea45043fd9021c223e79de5755bc1b4b3af0aa/models/apis/cloudformation/2010-05-15/api-2.json func HandleAWSError(err error, errMsg string) *AppError { - if aerr, ok := err.(awserr.Error); ok { - formattedMessage := fmt.Sprintf("%s: %s (AWS Error Code: %s)", errMsg, aerr.Message(), aerr.Code()) - switch aerr.Code() { + originalErr := err + var awsErr awserr.Error + + // Attempt to find the AWS error in the error chain + if errors.As(err, &awsErr) { + formattedMessage := fmt.Sprintf("%s: %s (AWS Error Code: %s)", errMsg, awsErr.Message(), awsErr.Code()) + + // Check if the original error already contains context and avoid repetition + if !strings.Contains(errMsg, awsErr.Message()) { + formattedMessage = fmt.Sprintf("%s: %s (AWS Error Code: %s)", errMsg, awsErr.Message(), awsErr.Code()) + } + switch awsErr.Code() { case "AccessDenied", "InvalidCredentials": - return NewAppError(UnauthorizedError, formattedMessage, err) - case "NotFound": - return NewAppError(NotFoundError, formattedMessage, err) - case "NotUpdatable", "InvalidRequest", "AlreadyExists", "ResourceConflict", "Throttling", "ServiceLimitExceeded", "NotStabilized", "GeneralServiceException", "NetworkFailure", "InvalidTypeConfiguration", "NonCompliant", "Unknown", "UnsupportedTarget": - return NewAppError(AWSError, formattedMessage, err) + return NewAppError(UnauthorizedError, formattedMessage, originalErr) + case "NotFound", "NoSuchKey": + return NewAppError(NotFoundError, formattedMessage, originalErr) + case "AlreadyExists", "ResourceConflict": + return NewAppError(ConflictError, formattedMessage, originalErr) + case "NotUpdatable", "InvalidRequest", "Throttling", "ServiceLimitExceeded", "NotStabilized", "GeneralServiceException", "NetworkFailure", "InvalidTypeConfiguration", "NonCompliant", "Unknown", 
"UnsupportedTarget": + return NewAppError(AWSError, formattedMessage, originalErr) case "ServiceInternalError", "InternalFailure", "HandlerInternalFailure": - return NewAppError(InternalServerError, formattedMessage, err) + return NewAppError(InternalServerError, formattedMessage, originalErr) + case "InvalidPart": + return NewAppError(BadRequestError, formattedMessage, originalErr) default: - return NewAppError(AWSError, formattedMessage, err) + return NewAppError(AWSError, formattedMessage, originalErr) } } - return NewAppError(AWSError, errMsg, err) + + // If no AWS error is found, return a generic AppError + return NewAppError(AWSError, errMsg, originalErr) } // CheckRequiredParams checks if the required parameters are present and returns an error if any are missing. diff --git a/e2e-test/e2eCollection.json b/e2e-test/e2eCollection.json index 756799c..30164a5 100644 --- a/e2e-test/e2eCollection.json +++ b/e2e-test/e2eCollection.json @@ -1,1639 +1,311 @@ { "info": { - "_postman_id": "0a073b9d-5085-4565-9fc1-afe9a1aa4ee6", - "name": "E2E Testing Collection", + "_postman_id": "db98bd13-6aa3-4624-80fa-66e7d7801aff", + "name": "S3 API Endpoints", + "description": "Collection to test all S3 API endpoints.", "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", "_exporter_id": "18773467", - "_collection_link": "https://universal-comet-524706.postman.co/workspace/Dewberry~fe115dcb-2f48-4ca3-a618-e462c6ac4255/collection/18773467-0a073b9d-5085-4565-9fc1-afe9a1aa4ee6?action=share&source=collection_link&creator=18773467" + "_collection_link": "https://universal-comet-524706.postman.co/workspace/Dewberry~fe115dcb-2f48-4ca3-a618-e462c6ac4255/collection/18773467-db98bd13-6aa3-4624-80fa-66e7d7801aff?action=share&source=collection_link&creator=18773467" }, "item": [ { - "name": "Ping Endpoints", + "name": "Object", "item": [ { - "name": "GET /ping", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "pm.test('ping without auth success', 
function () {\r", - " pm.response.to.have.status(200);\r", - "});" - ], - "type": "text/javascript" - } - } - ], - "request": { - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/ping", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "ping" - ] - } - }, - "response": [] - }, - { - "name": "GET /ping_with_auth", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "pm.test('ping with auth did not work as expected', function () {\r", - " pm.response.to.have.status(401);\r", - "});" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/ping_with_auth", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "ping_with_auth" - ] - } - }, - "response": [] - }, - { - "name": "Login", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "var jsonData = JSON.parse(responseBody);\r", - "postman.setEnvironmentVariable(\"bearer_token\", jsonData.access_token);\r", - "\r", - "pm.test(\"Logging in\", function () {\r", - " if(pm.response.code !== 200){\r", - " console.log(\"Error body: \", pm.response.text());\r", - " }\r", - " pm.response.to.have.status(200);\r", - "});\r", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], - "body": { - "mode": "urlencoded", - "urlencoded": [ - { - "key": "username", - "value": "{{auth_username}}", - "type": "text" - }, - { - "key": "password", - "value": "{{auth_password}}", - "type": "text" - }, - { - "key": "client_id", - "value": "{{auth_client_id}}", - "type": "text" - }, - { - "key": "grant_type", - "value": "{{auth_grant_type}}", - "type": "text" - }, - { - "key": "client_secret", - "value": 
"{{auth_client_secret}}", - "type": "text" - } - ] - }, - "url": { - "raw": "{{auth_url}}/testing/protocol/openid-connect/token", - "host": [ - "{{auth_url}}" - ], - "path": [ - "testing", - "protocol", - "openid-connect", - "token" - ] - } - }, - "response": [] - }, - { - "name": "GET /ping_with_auth 2", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "pm.test(\"Logging in\", function () {\r", - " if(pm.response.code !== 200){\r", - " console.log(\"Error body: \", pm.response.text());\r", - " console.log('Bearer Token:', pm.environment.get('bearer_token'));\r", - " }\r", - " pm.response.to.have.status(200);\r", - "});\r", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/ping_with_auth", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "ping_with_auth" - ] - } - }, - "response": [] - }, - { - "name": "list_buckets", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "pm.test(\"Listing buckets\", function () {\r", - " if(pm.response.code !== 200){\r", - " console.log(\"Error body: \", pm.response.text());\r", - " console.log('Bearer Token:', pm.environment.get('bearer_token'));\r", - " }\r", - " pm.response.to.have.status(200);\r", - "});\r", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/list_buckets", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "list_buckets" - ] - } - }, - "response": [] - } - ] - }, - { - "name": "successful run", - "item": [ - { - "name": "1/object/upload", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" 
- } - }, - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], - "body": { - "mode": "formdata", - "formdata": [ - { - "key": "", - "type": "file", - "src": "missing_huc8s.xlsx" - } - ] - }, - "url": { - "raw": "{{s3_api_root_url}}/object/upload?key={{e2ePathToObj}}{{e2eObjName}}&override={{e2eoverride}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "object", - "upload" - ], - "query": [ - { - "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}" - }, - { - "key": "override", - "value": "{{e2eoverride}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "2/object/exists", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/object/exists?key={{e2ePathToObj}}{{e2eObjName}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "object", - "exists" - ], - "query": [ - { - "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "3/object/metadata", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/object/metadata?key={{e2ePathToObj}}{{e2eObjName}}&bucket={{bucket}}", - 
"host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "object", - "metadata" - ], - "query": [ - { - "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "4/object/content", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/object/content?key={{e2ePathToObj}}{{e2eObjName}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "object", - "content" - ], - "query": [ - { - "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "5/object/download", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/object/download?key={{e2ePathToObj}}{{e2eObjName}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "object", - "download" - ], - "query": [ - { - "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "6/prefix/size", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": 
"{{s3_api_root_url}}/prefix/size?prefix={{e2ePathToObj}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "prefix", - "size" - ], - "query": [ - { - "key": "prefix", - "value": "{{e2ePathToObj}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "7/object/move", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "PUT", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/object/move?src_key={{e2ePathToObj}}{{e2eObjName}}&dest_key={{e2ePathToObj}}{{e2ePathToObj}}{{e2eObjName}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "object", - "move" - ], - "query": [ - { - "key": "src_key", - "value": "{{e2ePathToObj}}{{e2eObjName}}" - }, - { - "key": "dest_key", - "value": "{{e2ePathToObj}}{{e2ePathToObj}}{{e2eObjName}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "8/prefix/list", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/prefix/list?prefix={{e2ePathToObj}}&delimiter={{e2eoverride}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "prefix", - "list" - ], - "query": [ - { - "key": "prefix", - "value": "{{e2ePathToObj}}" - }, - { - "key": "delimiter", - "value": "{{e2eoverride}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "9/prefix/list_with_details", - "event": [ - { - "listen": "test", - "script": { - "exec": [ 
- "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/prefix/list_with_details?prefix={{e2ePathToObj}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "prefix", - "list_with_details" - ], - "query": [ - { - "key": "prefix", - "value": "{{e2ePathToObj}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "10.2/prefix/download_script", - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/prefix/download/script?prefix={{e2ePathToObj}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "prefix", - "download", - "script" - ], - "query": [ - { - "key": "prefix", - "value": "{{e2ePathToObj}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "11/object/upload 2", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - }, - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], - "body": { - "mode": "formdata", - "formdata": [ - { - "key": "", - "type": "file", - "src": "missing_huc8s.xlsx" - } - ] - }, - "url": { - "raw": "{{s3_api_root_url}}/object/upload?key={{e2ePathToObj}}{{e2eObjName}}&override={{e2eoverride}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "object", - "upload" - ], - "query": [ - { - "key": "key", - "value": 
"{{e2ePathToObj}}{{e2eObjName}}" - }, - { - "key": "override", - "value": "{{e2eoverride}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "12/prefix/move", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "PUT", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/prefix/move?src_prefix={{e2ePathToObj}}{{e2ePathToObj}}&dest_prefix={{e2ePathToObj}}{{e2ePathToObj}}{{e2ePathToObj}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "prefix", - "move" - ], - "query": [ - { - "key": "src_prefix", - "value": "{{e2ePathToObj}}{{e2ePathToObj}}" - }, - { - "key": "dest_prefix", - "value": "{{e2ePathToObj}}{{e2ePathToObj}}{{e2ePathToObj}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "13/object/delete", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "DELETE", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/object/delete?key={{e2ePathToObj}}{{e2eObjName}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "object", - "delete" - ], - "query": [ - { - "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "14/prefix/delete", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - 
"type": "string" - } - ] - }, - "method": "DELETE", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/prefix/delete?prefix={{e2ePathToObj}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "prefix", - "delete" - ], - "query": [ - { - "key": "prefix", - "value": "{{e2ePathToObj}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "15/object/upload 3", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - }, - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], - "body": { - "mode": "formdata", - "formdata": [ - { - "key": "", - "type": "file", - "src": "missing_huc8s.xlsx" - } - ] - }, - "url": { - "raw": "{{s3_api_root_url}}/object/upload?key={{e2ePathToObj}}{{e2eObjName}}1&override={{e2eoverride}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "object", - "upload" - ], - "query": [ - { - "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}1" - }, - { - "key": "override", - "value": "{{e2eoverride}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "16/object/upload 4", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - }, - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], - "body": { - "mode": "formdata", - "formdata": [ - { - "key": "", - "type": "file", - "src": "missing_huc8s.xlsx" - } - ] - }, - "url": { - "raw": 
"{{s3_api_root_url}}/object/upload?key={{e2ePathToObj}}{{e2eObjName}}2&override={{e2eoverride}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "object", - "upload" - ], - "query": [ - { - "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}2" - }, - { - "key": "override", - "value": "{{e2eoverride}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "17/delete_keys", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "DELETE", - "header": [], - "body": { - "mode": "raw", - "raw": "{\r\n \"keys\": [\"{{e2ePathToObj}}{{e2eObjName}}1\", \"{{e2ePathToObj}}{{e2eObjName}}2\"]\r\n}", - "options": { - "raw": { - "language": "json" - } - } - }, - "url": { - "raw": "{{s3_api_root_url}}/delete_keys?bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "delete_keys" - ], - "query": [ - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "18/object/presigned_upload", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/object/presigned_upload?key={{e2eObjName}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "object", - "presigned_upload" - ], - "query": [ - { - "key": "key", - "value": "{{e2eObjName}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "19/object/multipart_upload_id", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - 
"var jsonData = pm.response.json();\r", - "pm.environment.set(\"uploadId\", jsonData);\r", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/object/multipart_upload_id?key={{e2eObjName}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "object", - "multipart_upload_id" - ], - "query": [ - { - "key": "key", - "value": "{{e2eObjName}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "19/object/presigned_upload_multipart", - "event": [ + "name": "upload object", + "item": [ { - "listen": "test", - "script": { - "exec": [ - "var jsonData = pm.response.json()//.replace('minio', 'localhost');\r", - "pm.environment.set(\"presignedUploadUrl\", jsonData);\r", - "" - ], - "type": "text/javascript" - } - } - ], - "request": { - "auth": { - "type": "bearer", - "bearer": [ + "name": "Upload Object - 200,400", + "event": [ { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if key is missing\", function () {", + " if(pm.request.url.query.indexOf('key') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "pm.test(\"Status code is 400 if override is missing\", function () {", + " if(pm.request.url.query.indexOf('override') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "pm.test(\"Status code is 400 if file is missing\", function () {", + " if(pm.request.body.file === -1) {", + " pm.response.to.have.status(400);", + " }", + "});" + ], + "type": "text/javascript", + "packages": {} + } } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": 
"{{s3_api_root_url}}/object/presigned_upload?key={{e2eObjName}}&bucket={{bucket}}&upload_id={{uploadId}}&part_number=1", - "host": [ - "{{s3_api_root_url}}" ], - "path": [ - "object", - "presigned_upload" - ], - "query": [ - { - "key": "key", - "value": "{{e2eObjName}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - }, - { - "key": "upload_id", - "value": "{{uploadId}}" + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "file", + "file": { + "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" + } }, - { - "key": "part_number", - "value": "1" + "url": { + "raw": "{{baseUrl}}/object/upload?key={{key}}&bucket={{bucket}}&override=false", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "upload" + ], + "query": [ + { + "key": "key", + "value": "{{key}}" + }, + { + "key": "bucket", + "value": "{{bucket}}" + }, + { + "key": "override", + "value": "false" + } + ] } - ] - } - }, - "response": [] - }, - { - "name": "presigned_url_test", - "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } + }, + "response": [] }, { - "listen": "test", - "script": { - "exec": [ - "// Extract ETag from the response header\r", - "var etag = pm.response.headers.get(\"ETag\");\r", - "\r", - "\r", - "// Remove extra quotation marks if present\r", - "etag = etag.replace(/\"/g, '');\r", - "\r", - "\r", - "// Set the ETag as an environment variable\r", - "pm.environment.set(\"etag\", etag);\r", - "" - ], - "type": "text/javascript" - } - } - ], - "protocolProfileBehavior": { - "disabledSystemHeaders": {} - }, - "request": { - "method": "PUT", - "header": [], - "body": { - "mode": "formdata", - "formdata": [ - { - "key": "", - "type": "file", - "src": "missing_huc8s.xlsx" - } - ] - }, - "url": { - "raw": "{{presignedUploadUrl}}", - "host": [ - "{{presignedUploadUrl}}" - ] - } - }, - "response": [] - }, - { - "name": 
"20/object/complete_multipart_upload", - "request": { - "auth": { - "type": "bearer", - "bearer": [ + "name": "Upload Object - 409", + "event": [ { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], - "body": { - "mode": "raw", - "raw": "{\r\n \"uploadId\": \"{{uploadId}}\",\r\n \"parts\": [{\r\n \"partNumber\":1,\r\n \"eTag\":\"{{etag}}\"\r\n }]\r\n}\r\n", - "options": { - "raw": { - "language": "json" + "listen": "test", + "script": { + "exec": [ + "", + "pm.test(\"Status code is 409 if key is a conflict\", function () {", + " if(pm.request.url.query.indexOf('key') === -1) {", + " pm.response.to.have.status(409);", + " }", + "});" + ], + "type": "text/javascript", + "packages": {} + } } - } - }, - "url": { - "raw": "{{s3_api_root_url}}/object/complete_multipart_upload?key={{e2eObjName}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "object", - "complete_multipart_upload" ], - "query": [ - { - "key": "key", - "value": "{{e2eObjName}}" + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "file", + "file": { + "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" + } }, - { - "key": "bucket", - "value": "{{bucket}}" + "url": { + "raw": "{{baseUrl}}/object/upload?key={{key}}&bucket={{bucket}}&override=false", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "upload" + ], + "query": [ + { + "key": "key", + "value": "{{key}}" + }, + { + "key": "bucket", + "value": "{{bucket}}" + }, + { + "key": "override", + "value": "false" + } + ] } - ] + }, + "response": [] } - }, - "response": [] + ] }, { - "name": "20/object/multipart_upload_id", - "event": [ + "name": "Get Metadata", + "item": [ { - "listen": "test", - "script": { - "exec": [ - "var jsonData = pm.response.json();\r", - "pm.environment.set(\"uploadId\", jsonData);\r", - "" - ], - "type": "text/javascript" - } - } - ], 
- "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/object/multipart_upload_id?key={{e2eObjName}}&bucket={{bucket}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "object", - "multipart_upload_id" - ], - "query": [ + "name": "Get Object Metadata - 200, 400", + "event": [ { - "key": "key", - "value": "{{e2eObjName}}" + "listen": "prerequest", + "script": { + "exec": [ + "" + ], + "type": "text/javascript", + "packages": {} + } }, { - "key": "bucket", - "value": "{{bucket}}" - } - ] - } - }, - "response": [] - }, - { - "name": "21/object/abort_multipart_upload", - "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "", + "pm.test(\"Status code is 400 if key is missing\", function () {", + " if(pm.request.url.query.indexOf('key') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});" + ], + "type": "text/javascript", + "packages": {} + } } - ] - }, - "method": "POST", - "header": [], - "url": { - "raw": "{{s3_api_root_url}}/object/abort_multipart_upload?key={{e2eObjName}}&bucket={{bucket}}&upload_id={{uploadId}}", - "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "object", - "abort_multipart_upload" ], - "query": [ - { - "key": "key", - "value": "{{e2eObjName}}" + "request": { + "method": "GET", + "header": [], + "url": { + "raw": "{{baseUrl}}/object/metadata?bucket={{bucket}}&key={{key}}", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "metadata" + ], + "query": [ + { + "key": "bucket", + "value": "{{bucket}}" + }, + { + "key": "key", + "value": "{{key}}" + } + ] }, + "description": "Get metadata for a valid object key." 
+ }, + "response": [] + }, + { + "name": "Get Object Metadata - 404", + "event": [ { - "key": "bucket", - "value": "{{bucket}}" + "listen": "prerequest", + "script": { + "exec": [ + "" + ], + "type": "text/javascript", + "packages": {} + } }, { - "key": "upload_id", - "value": "{{uploadId}}" + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 404\", function () {", + " pm.response.to.have.status(404);", + "});", + "", + "" + ], + "type": "text/javascript", + "packages": {} + } } - ] + ], + "request": { + "method": "GET", + "header": [], + "url": { + "raw": "{{baseUrl}}/object/metadata?bucket={{bucket}}&key={{non_existent_key}}", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "metadata" + ], + "query": [ + { + "key": "bucket", + "value": "{{bucket}}" + }, + { + "key": "key", + "value": "{{non_existent_key}}" + } + ] + }, + "description": "Get metadata for a non-existent key." + }, + "response": [] } - }, - "response": [] - } - ], - "event": [ - { - "listen": "prerequest", - "script": { - "type": "text/javascript", - "exec": [ - "" - ] - } + ] }, { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "pm.test('successfully executed endpoint', function () {", - " pm.response.to.have.status(200);", - "});" - ] - } - } - ] - }, - { - "name": "unsuccesful runs", - "item": [ - { - "name": "SuccesfulTestingInput", + "name": "Get Content", "item": [ { - "name": "1/object/upload", + "name": "Get Content - 200, 400", "event": [ { "listen": "test", "script": { "exec": [ - "" - ], - "type": "text/javascript" - } - }, - { - "listen": "prerequest", - "script": { - "exec": [ - "" + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if key is missing\", function () {", + " if(pm.request.url.query.indexOf('key') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});" ], "type": "text/javascript" } } ], "request": { - "auth": { - "type": 
"bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "POST", + "method": "GET", "header": [], - "body": { - "mode": "formdata", - "formdata": [ - { - "key": "", - "type": "file", - "src": "missing_huc8s.xlsx" - } - ] - }, "url": { - "raw": "{{s3_api_root_url}}/object/upload?key={{e2ePathToObj}}{{e2eObjName}}&override={{e2eoverride}}&bucket={{bucket}}", + "raw": "{{baseUrl}}/object/content?key={{key}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ "object", - "upload" + "content" ], "query": [ { "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}" - }, - { - "key": "override", - "value": "{{e2eoverride}}" + "value": "{{key}}" }, { "key": "bucket", @@ -1645,15 +317,19 @@ "response": [] }, { - "name": "1/object/upload Copy", + "name": "Get Content - 404", "event": [ { "listen": "test", "script": { "exec": [ + "pm.test(\"Status code is 404\", function () {", + " pm.response.to.have.status(404);", + "});", "" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } }, { @@ -1662,50 +338,27 @@ "exec": [ "" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "POST", + "method": "GET", "header": [], - "body": { - "mode": "formdata", - "formdata": [ - { - "key": "", - "type": "file", - "src": "missing_huc8s.xlsx" - } - ] - }, "url": { - "raw": "{{s3_api_root_url}}/object/upload?key={{e2ePathToObj}}{{e2eObjName}}2&override={{e2eoverride}}&bucket={{bucket}}", + "raw": "{{baseUrl}}/object/content?key={{non_existent_key}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ "object", - "upload" + "content" ], "query": [ { "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}2" - }, - { - "key": "override", - "value": 
"{{e2eoverride}}" + "value": "{{non_existent_key}}" }, { "key": "bucket", @@ -1716,100 +369,60 @@ }, "response": [] } - ], - "event": [ - { - "listen": "prerequest", - "script": { - "type": "text/javascript", - "exec": [ - "" - ] - } - }, - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "pm.test('Succesfully uploaded test data', function () {", - " pm.response.to.have.status(200);", - "});" - ] - } - } ] }, { - "name": "UnprocessableEntity", + "name": "Move Object", "item": [ { - "name": "1/object/upload", + "name": "Move Object - 404", "event": [ { "listen": "test", "script": { "exec": [ - "" - ], - "type": "text/javascript" - } - }, - { - "listen": "prerequest", - "script": { - "exec": [ - "" + "pm.test(\"Status code is 404 for object not found\", function () {", + " pm.response.to.have.status(404);", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], + "method": "PUT", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], "body": { - "mode": "formdata", - "formdata": [ - { - "key": "", - "type": "file", - "src": "missing_huc8s.xlsx" - } - ] + "mode": "raw", + "raw": "{\"src_key\": \"{{src_key}}\", \"dest_key\": \"{{dest_key}}\"}" }, "url": { - "raw": "{{s3_api_root_url}}/object/upload?override={{e2eoverride}}&bucket={{bucket}}", + "raw": "{{baseUrl}}/object/move?bucket={{bucket}}&src_key={{non_existent_key}}&dest_key={{key}}/{{key}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ "object", - "upload" + "move" ], "query": [ { - "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}", - "disabled": true + "key": "bucket", + "value": "{{bucket}}" }, { - "key": "override", - "value": "{{e2eoverride}}" + "key": "src_key", + "value": "{{non_existent_key}}" }, { - "key": 
"bucket", - "value": "{{bucket}}" + "key": "dest_key", + "value": "{{key}}/{{key}}" } ] } @@ -1817,75 +430,54 @@ "response": [] }, { - "name": "11/object/upload 2", + "name": "Move Object - 400", "event": [ { "listen": "test", "script": { "exec": [ - "" - ], - "type": "text/javascript" - } - }, - { - "listen": "prerequest", - "script": { - "exec": [ - "let keyValue = pm.environment.get(\"e2ekey\"); \r", - "if (keyValue === \"invalid\") {\r", - " pm.request.url.query.remove(\"override\");\r", - "}" + "pm.test(\"Status code is 400 for identical keys\", function () {", + " pm.response.to.have.status(400);", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], + "method": "PUT", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], "body": { - "mode": "formdata", - "formdata": [ - { - "key": "", - "type": "file", - "src": "missing_huc8s.xlsx" - } - ] + "mode": "raw", + "raw": "{\"src_key\": \"{{src_key}}\", \"dest_key\": \"{{dest_key}}\"}" }, "url": { - "raw": "{{s3_api_root_url}}/object/upload?key={{e2ePathToObj}}{{e2eObjName}}&bucket={{bucket}}", + "raw": "{{baseUrl}}/object/move?bucket={{bucket}}&src_key={{key}}/{{key}}&dest_key={{key}}/{{key}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ "object", - "upload" + "move" ], "query": [ { - "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}" + "key": "bucket", + "value": "{{bucket}}" }, { - "key": "override", - "value": "{{e2eoverride}}", - "disabled": true + "key": "src_key", + "value": "{{key}}/{{key}}" }, { - "key": "bucket", - "value": "{{bucket}}" + "key": "dest_key", + "value": "{{key}}/{{key}}" } ] } @@ -1893,73 +485,125 @@ "response": [] }, { - "name": "15/object/upload 3", + "name": "Move Object - 200,400", "event": [ { "listen": "test", 
"script": { "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if src_key is missing\", function () {", + " if(pm.request.url.query.indexOf('src_key') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "pm.test(\"Status code is 400 if dest_key is missing\", function () {", + " if(pm.request.url.query.indexOf('dest_key') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "pm.test(\"Status code is 400 if bucket is missing\", function () {", + " if(pm.request.url.query.indexOf('bucket') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", "" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} + } + } + ], + "request": { + "method": "PUT", + "header": [ + { + "key": "Content-Type", + "value": "application/json" } + ], + "body": { + "mode": "raw", + "raw": "{\"src_key\": \"{{src_key}}\", \"dest_key\": \"{{dest_key}}\"}" }, + "url": { + "raw": "{{baseUrl}}/object/move?bucket={{bucket}}&src_key={{key}}&dest_key={{prefix}}{{key}}", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "move" + ], + "query": [ + { + "key": "bucket", + "value": "{{bucket}}" + }, + { + "key": "src_key", + "value": "{{key}}" + }, + { + "key": "dest_key", + "value": "{{prefix}}{{key}}" + } + ] + } + }, + "response": [] + }, + { + "name": "Move Object - 409", + "event": [ { - "listen": "prerequest", + "listen": "test", "script": { "exec": [ - "" + "pm.test(\"Status code is 400 for conflict with the dest_key\", function () {", + " pm.response.to.have.status(409);", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "POST", - "header": [], + "method": "PUT", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } 
+ ], "body": { - "mode": "formdata", - "formdata": [ - { - "key": "", - "type": "file", - "src": "missing_huc8s.xlsx" - } - ] + "mode": "raw", + "raw": "{\"src_key\": \"{{src_key}}\", \"dest_key\": \"{{dest_key}}\"}" }, "url": { - "raw": "{{s3_api_root_url}}/object/upload?bucket={{bucket}}", + "raw": "{{baseUrl}}/object/move?bucket={{bucket}}&src_key={{key}}&dest_key={{prefix}}{{key}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ "object", - "upload" + "move" ], "query": [ { - "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}1", - "disabled": true + "key": "bucket", + "value": "{{bucket}}" }, { - "key": "override", - "value": "{{e2eoverride}}", - "disabled": true + "key": "src_key", + "value": "{{key}}" }, { - "key": "bucket", - "value": "{{bucket}}" + "key": "dest_key", + "value": "{{prefix}}{{key}}" } ] } @@ -1967,100 +611,112 @@ "response": [] }, { - "name": "2/object/exists", + "name": "Move Object - proc", "event": [ { "listen": "test", "script": { "exec": [ - "" + "//no script test this is a procedural call for further endpoints", + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] + "method": "PUT", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\"src_key\": \"{{src_key}}\", \"dest_key\": \"{{dest_key}}\"}" }, - "method": "GET", - "header": [], "url": { - "raw": "{{s3_api_root_url}}/object/exists?bucket={{bucket}}", + "raw": "{{baseUrl}}/object/move?bucket={{bucket}}&src_key={{prefix}}{{key}}&dest_key={{key}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ "object", - "exists" + "move" ], "query": [ - { - "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}", - "disabled": 
true - }, { "key": "bucket", "value": "{{bucket}}" + }, + { + "key": "src_key", + "value": "{{prefix}}{{key}}" + }, + { + "key": "dest_key", + "value": "{{key}}" } ] } }, "response": [] - }, + } + ] + }, + { + "name": "Download Object", + "item": [ { - "name": "8/prefix/list", + "name": "Download Object - 200,400", "event": [ { "listen": "test", "script": { "exec": [ - "" + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if key is missing\", function () {", + " if(pm.request.url.query.indexOf('key') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "pm.test(\"Status code is 400 if bucket is missing\", function () {", + " if(pm.request.url.query.indexOf('bucket') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, "method": "GET", "header": [], "url": { - "raw": "{{s3_api_root_url}}/prefix/list?bucket=", + "raw": "{{baseUrl}}/object/download?key={{key}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "prefix", - "list" + "object", + "download" ], "query": [ { - "key": "prefix", - "value": "{{e2ePathToObj}}", - "disabled": true + "key": "key", + "value": "{{key}}" }, { - "key": "delimiter", - "value": "{{e2eoverride}}", - "disabled": true + "key": "bucket", + "value": "{{bucket}}" } ] } @@ -2068,45 +724,38 @@ "response": [] }, { - "name": "6/prefix/size", + "name": "Download Object - 404", "event": [ { "listen": "test", "script": { "exec": [ + "pm.test(\"Status code is 404 for object not found\", function () {", + " pm.response.to.have.status(404);", + "});", "" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - 
"bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, "method": "GET", "header": [], "url": { - "raw": "{{s3_api_root_url}}/prefix/size?bucket={{bucket}}", + "raw": "{{baseUrl}}/object/download?key={{non_existent_key}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "prefix", - "size" + "object", + "download" ], "query": [ { - "key": "prefix", - "value": "{{e2ePathToObj}}", - "disabled": true + "key": "key", + "value": "{{non_existent_key}}" }, { "key": "bucket", @@ -2116,50 +765,50 @@ } }, "response": [] - }, + } + ] + }, + { + "name": "Delete Object", + "item": [ { - "name": "3/object/metadata", + "name": "Delete Object - 404", "event": [ { "listen": "test", "script": { "exec": [ - "" + "", + "pm.test(\"Status code is 404 if key is missing\", function () {", + " if(pm.request.url.query.indexOf('key') === -1) {", + " pm.response.to.have.status(404);", + " }", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", + "method": "DELETE", "header": [], "url": { - "raw": "{{s3_api_root_url}}/object/metadata?bukcet={{bucket}}", + "raw": "{{baseUrl}}/object/delete?key={{non_existent_key}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ "object", - "metadata" + "delete" ], "query": [ { "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}", - "disabled": true + "value": "{{non_existent_key}}" }, { - "key": "bukcet", + "key": "bucket", "value": "{{bucket}}" } ] @@ -2168,45 +817,42 @@ "response": [] }, { - "name": "4/object/content", + "name": "Delete Object - 200,400", "event": [ { "listen": "test", "script": { "exec": [ - "" + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if key 
is missing\", function () {", + " if(pm.request.url.query.indexOf('key') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "GET", + "method": "DELETE", "header": [], "url": { - "raw": "{{s3_api_root_url}}/object/content?bucket={{bucket}}", + "raw": "{{baseUrl}}/object/delete?key={{key}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ "object", - "content" + "delete" ], "query": [ { "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}", - "disabled": true + "value": "{{key}}" }, { "key": "bucket", @@ -2216,47 +862,54 @@ } }, "response": [] - }, + } + ] + }, + { + "name": "Object Exists", + "item": [ { - "name": "5/object/download", + "name": "Check Object Exists - 200, 400", "event": [ { "listen": "test", "script": { "exec": [ - "" + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if key is missing\", function () {", + " if(pm.request.url.query.indexOf('key') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "pm.test(\"Status code is 400 if bucket is missing\", function () {", + " if(pm.request.url.query.indexOf('bucket') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, "method": "GET", "header": [], "url": { - "raw": "{{s3_api_root_url}}/object/download?bucket={{bucket}}", + "raw": "{{baseUrl}}/object/exists?key={{key}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ "object", - "download" + "exists" 
], "query": [ { "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}", - "disabled": true + "value": "{{key}}" }, { "key": "bucket", @@ -2266,51 +919,59 @@ } }, "response": [] - }, + } + ] + }, + { + "name": "Presigned upload", + "item": [ { - "name": "17/delete_keys", + "name": "Get Presigned Upload URL - 200,400", "event": [ { "listen": "test", "script": { "exec": [ - "" + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if key is missing\", function () {", + " if(pm.request.url.query.indexOf('key') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "pm.test(\"Status code is 400 if bucket is missing\", function () {", + " if(pm.request.url.query.indexOf('bucket') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "pm.test(\"Presigned URL is returned\", function () {", + " var presignedUrl = pm.response.text().trim();", + " pm.environment.set(\"presigned_url\", presignedUrl);", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "DELETE", + "method": "GET", "header": [], - "body": { - "mode": "raw", - "raw": "{\r\n \"keys\": []\r\n}", - "options": { - "raw": { - "language": "json" - } - } - }, "url": { - "raw": "{{s3_api_root_url}}/delete_keys?bucket={{bucket}}", + "raw": "{{baseUrl}}/object/presigned_upload?key={{key}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "delete_keys" + "object", + "presigned_upload" ], "query": [ + { + "key": "key", + "value": "{{key}}" + }, { "key": "bucket", "value": "{{bucket}}" @@ -2321,100 +982,117 @@ "response": [] }, { - "name": "14/prefix/delete", + "name": "use upload presigned URL -200", "event": [ { - "listen": "test", + "listen": "prerequest", "script": { "exec": [ + "// 
Get the presigned URL from the environment variable\r", + "var presignedUrl = pm.environment.get(\"presigned_url\");\r", + "\r", + "// Remove surrounding quotes if present\r", + "if (presignedUrl.startsWith('\"') && presignedUrl.endsWith('\"')) {\r", + " presignedUrl = presignedUrl.substring(1, presignedUrl.length - 1);\r", + "}\r", + "\r", + "// Set the trimmed presigned URL back to the environment variable\r", + "pm.environment.set(\"presigned_url\", presignedUrl);\r", + "\r", + "// Set the URL of the request to the trimmed presigned URL\r", + "pm.request.url.update(presignedUrl);\r", "" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} + } + }, + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {\r", + " pm.response.to.have.status(200);\r", + "});" + ], + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "DELETE", + "method": "PUT", "header": [], + "body": { + "mode": "file", + "file": { + "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" + } + }, "url": { - "raw": "{{s3_api_root_url}}/prefix/delete?bucket={{bucket}}", + "raw": "{{presigned_url}}", "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "prefix", - "delete" - ], - "query": [ - { - "key": "prefix", - "value": "{{e2ePathToObj}}", - "disabled": true - }, - { - "key": "bucket", - "value": "{{bucket}}" - } + "{{presigned_url}}" ] } }, "response": [] }, { - "name": "7/object/move", + "name": "Get Multipart Upload ID - 200,400", "event": [ { "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if key is missing\", function () {", + " if(pm.request.url.query.indexOf('key') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + 
"pm.test(\"Status code is 400 if bucket is missing\", function () {", + " if(pm.request.url.query.indexOf('bucket') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "", + "var jsonData = pm.response.json();", + "pm.environment.set(\"uploadId\", jsonData);" + ], + "type": "text/javascript", + "packages": {} + } + }, + { + "listen": "prerequest", "script": { "exec": [ "" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "PUT", + "method": "GET", "header": [], "url": { - "raw": "{{s3_api_root_url}}/object/move?bucket={{bucket}}", + "raw": "{{baseUrl}}/object/multipart_upload_id?key={{key}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ "object", - "move" + "multipart_upload_id" ], "query": [ { - "key": "src_key", - "value": "{{e2ePathToObj}}{{e2eObjName}}", - "disabled": true - }, - { - "key": "dest_key", - "value": "{{e2ePathToObj}}{{e2ePathToObj}}{{e2eObjName}}", - "disabled": true + "key": "key", + "value": "{{key}}" }, { "key": "bucket", @@ -2426,49 +1104,50 @@ "response": [] }, { - "name": "13/object/delete", + "name": "Get Presigned Upload URL - 200,400 Copy", "event": [ { "listen": "test", "script": { "exec": [ + "", + "var jsonData = pm.response.json()//.replace('minio', 'localhost');", + "pm.environment.set(\"presignedUploadUrl\", jsonData);", "" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "DELETE", + "method": "GET", "header": [], "url": { - "raw": "{{s3_api_root_url}}/object/delete?bucket={{bucket}}", + "raw": "{{baseUrl}}/object/presigned_upload?key={{key}}&bucket={{bucket}}&upload_id={{uploadId}}&part_number=1", "host": 
[ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ "object", - "delete" + "presigned_upload" ], "query": [ { "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}", - "disabled": true + "value": "{{key}}" }, { "key": "bucket", "value": "{{bucket}}" + }, + { + "key": "upload_id", + "value": "{{uploadId}}" + }, + { + "key": "part_number", + "value": "1" } ] } @@ -2476,101 +1155,120 @@ "response": [] }, { - "name": "10.2/prefix/download/script", + "name": "presigned_url_test", "event": [ + { + "listen": "prerequest", + "script": { + "exec": [ + "" + ], + "type": "text/javascript", + "packages": {} + } + }, { "listen": "test", "script": { "exec": [ + "pm.test(\"Status code is 200\", function () {\r", + " pm.response.to.have.status(200);\r", + "});\r", + "\r", + "// Extract ETag from the response header\r", + "var etag = pm.response.headers.get(\"ETag\");\r", + "\r", + "\r", + "// Remove extra quotation marks if present\r", + "etag = etag.replace(/\"/g, '');\r", + "\r", + "\r", + "// Set the ETag as an environment variable\r", + "pm.environment.set(\"etag\", etag);\r", "" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], + "protocolProfileBehavior": { + "disabledSystemHeaders": {} + }, "request": { - "auth": { - "type": "bearer", - "bearer": [ + "method": "PUT", + "header": [], + "body": { + "mode": "formdata", + "formdata": [ { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" + "key": "", + "type": "file", + "src": "FFRMS/missing_huc8s.xlsx" } ] }, - "method": "GET", - "header": [], "url": { - "raw": "{{s3_api_root_url}}/prefix/download/script?bucket={{bucket}}", + "raw": "{{presignedUploadUrl}}", "host": [ - "{{s3_api_root_url}}" - ], - "path": [ - "prefix", - "download", - "script" - ], - "query": [ - { - "key": "prefix", - "value": "{{e2ePathToObj}}", - "disabled": true - }, - { - "key": "bucket", - "value": "{{bucket}}" - } + "{{presignedUploadUrl}}" ] } }, "response": [] }, { - "name": "12/prefix/move", + 
"name": "Complete the Multipart Upload", "event": [ + { + "listen": "prerequest", + "script": { + "exec": [ + "" + ], + "type": "text/javascript", + "packages": {} + } + }, { "listen": "test", "script": { "exec": [ + "pm.test(\"Status code is 200\", function () {\r", + " pm.response.to.have.status(200);\r", + "});\r", "" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" + "method": "POST", + "header": [], + "body": { + "mode": "raw", + "raw": "{\r\n \"uploadId\": \"{{uploadId}}\",\r\n \"parts\": [{\r\n \"partNumber\":1,\r\n \"eTag\":\"{{etag}}\"\r\n }]\r\n}\r\n", + "options": { + "raw": { + "language": "json" } - ] + } }, - "method": "PUT", - "header": [], "url": { - "raw": "{{s3_api_root_url}}/prefix/move?bucket={{bucket}}", + "raw": "{{baseUrl}}/object/complete_multipart_upload?key={{key}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "prefix", - "move" + "object", + "complete_multipart_upload" ], "query": [ { - "key": "src_prefix", - "value": "{{e2ePathToObj}}{{e2ePathToObj}}", - "disabled": true - }, - { - "key": "dest_prefix", - "value": "{{e2ePathToObj}}{{e2ePathToObj}}{{e2ePathToObj}}", - "disabled": true + "key": "key", + "value": "{{key}}" }, { "key": "bucket", @@ -2580,73 +1278,64 @@ } }, "response": [] - } - ], - "event": [ - { - "listen": "prerequest", - "script": { - "type": "text/javascript", - "exec": [ - "" - ] - } }, { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "pm.test('Unprocessable entity executed endpoint', function () {", - " pm.response.to.have.status(422);", - "});" - ] - } - } - ] - }, - { - "name": "NotFound", - "item": [ - { - "name": "14/prefix/delete", + "name": "Get Multipart Upload ID - config", "event": [ { - "listen": "test", + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 
200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if key is missing\", function () {", + " if(pm.request.url.query.indexOf('key') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "pm.test(\"Status code is 400 if bucket is missing\", function () {", + " if(pm.request.url.query.indexOf('bucket') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "", + "pm.test(\"Upload ID is returned\", function () {", + " var uploadId = pm.response.text().trim();", + " pm.environment.set(\"upload_id\", uploadId.replace(/\"/g, ''));", + "});" + ], + "type": "text/javascript", + "packages": {} + } + }, + { + "listen": "prerequest", "script": { "exec": [ "" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "DELETE", + "method": "GET", "header": [], "url": { - "raw": "{{s3_api_root_url}}/prefix/delete?prefix=invalid&bucket={{bucket}}", + "raw": "{{baseUrl}}/object/multipart_upload_id?key={{key}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "prefix", - "delete" + "object", + "multipart_upload_id" ], "query": [ { - "key": "prefix", - "value": "invalid" + "key": "key", + "value": "{{key}}" }, { "key": "bucket", @@ -2658,97 +1347,131 @@ "response": [] }, { - "name": "13/object/delete", + "name": "Abort Multipart Upload", "event": [ { "listen": "test", "script": { "exec": [ - "" + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if key is missing\", function () {", + " if(pm.request.url.query.indexOf('key') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "pm.test(\"Status code is 400 if bucket is missing\", function () {", + " if(pm.request.url.query.indexOf('bucket') === 
-1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "pm.test(\"Status code is 400 if upload_id is missing\", function () {", + " if(pm.request.url.query.indexOf('upload_id') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\"key\": \"{{key}}\", \"bucket\": \"{{bucket}}\", \"uploadID\": \"{{uploadID}}\"}" }, - "method": "DELETE", - "header": [], "url": { - "raw": "{{s3_api_root_url}}/object/delete?key=invalid&bucket={{bucket}}", + "raw": "{{baseUrl}}/object/abort_multipart_upload?key={{key}}&bucket={{bucket}}&upload_id={{upload_id}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ "object", - "delete" + "abort_multipart_upload" ], "query": [ { "key": "key", - "value": "invalid" + "value": "{{key}}" }, { "key": "bucket", "value": "{{bucket}}" + }, + { + "key": "upload_id", + "value": "{{upload_id}}" } ] } }, "response": [] - }, + } + ] + } + ] + }, + { + "name": "Prefix", + "item": [ + { + "name": "List Prefix", + "item": [ { - "name": "4/object/content", + "name": "List Prefix - 200,400", "event": [ { "listen": "test", "script": { "exec": [ - "" + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if bucket is missing\", function () {", + " if(pm.request.url.query.indexOf('bucket') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, "method": 
"GET", "header": [], "url": { - "raw": "{{s3_api_root_url}}/object/content?key=invalid&bucket={{bucket}}", + "raw": "{{baseUrl}}/prefix/list?bucket={{bucket}}&delimiter=true", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "object", - "content" + "prefix", + "list" ], "query": [ { - "key": "key", - "value": "invalid" + "key": "prefix", + "value": "{{prefix}}", + "disabled": true }, { "key": "bucket", "value": "{{bucket}}" + }, + { + "key": "delimiter", + "value": "true" } ] } @@ -2756,97 +1479,96 @@ "response": [] }, { - "name": "17/delete_keys", + "name": "List Prefix - 418", "event": [ { "listen": "test", "script": { "exec": [ - "" + "", + "pm.test(\"Status code is 418 if prefix is poimting to an object\", function () {", + " if(pm.request.url.query.indexOf('bucket') === -1) {", + " pm.response.to.have.status(418);", + " }", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "DELETE", + "method": "GET", "header": [], - "body": { - "mode": "raw", - "raw": "{\r\n \"keys\": [\"invalid\"]\r\n}", - "options": { - "raw": { - "language": "json" - } - } - }, "url": { - "raw": "{{s3_api_root_url}}/delete_keys?bucket={{bucket}}", + "raw": "{{baseUrl}}/prefix/list?prefix={{key}}&bucket={{bucket}}&delimiter=true", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "delete_keys" + "prefix", + "list" ], "query": [ + { + "key": "prefix", + "value": "{{key}}" + }, { "key": "bucket", "value": "{{bucket}}" + }, + { + "key": "delimiter", + "value": "true" } ] } }, "response": [] - }, + } + ] + }, + { + "name": "List Prefix With Detail", + "item": [ { - "name": "5/object/download", + "name": "List Prefix with Details - 200,400", "event": [ { "listen": "test", "script": { "exec": [ - "" + "pm.test(\"Status code is 200\", function () {", + " 
pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if bucket is missing\", function () {", + " if(pm.request.url.query.indexOf('bucket') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, "method": "GET", "header": [], "url": { - "raw": "{{s3_api_root_url}}/object/download?key=invalid&bucket={{bucket}}", + "raw": "{{baseUrl}}/prefix/list_with_details?prefix={{prefix}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "object", - "download" + "prefix", + "list_with_details" ], "query": [ { - "key": "key", - "value": "invalid" + "key": "prefix", + "value": "{{prefix}}" }, { "key": "bucket", @@ -2858,97 +1580,101 @@ "response": [] }, { - "name": "3/object/metadata", + "name": "List Prefix - 418", "event": [ { "listen": "test", "script": { "exec": [ - "" + "", + "pm.test(\"Status code is 418 if bucket is missing\", function () {", + " if(pm.request.url.query.indexOf('bucket') === -1) {", + " pm.response.to.have.status(418);", + " }", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, "method": "GET", "header": [], "url": { - "raw": "{{s3_api_root_url}}/object/metadata?key=invalid&bucket={{bucket}}", + "raw": "{{baseUrl}}/prefix/list_with_details?bucket={{bucket}}&prefix={{key}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "object", - "metadata" + "prefix", + "list_with_details" ], "query": [ - { - "key": "key", - "value": "invalid" - }, { "key": "bucket", "value": "{{bucket}}" + }, + { + "key": "prefix", + "value": "{{key}}" } ] } }, "response": [] - }, + } + ] + }, + 
{ + "name": "Generate Download Script", + "item": [ { - "name": "7/object/move", + "name": "Generate Download Script", "event": [ { "listen": "test", "script": { "exec": [ - "" + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if prefix is missing\", function () {", + " if(pm.request.url.query.indexOf('prefix') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "pm.test(\"Status code is 400 if bucket is missing\", function () {", + " if(pm.request.url.query.indexOf('bucket') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "", + "", + "//unable to test size overload" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "PUT", + "method": "GET", "header": [], "url": { - "raw": "{{s3_api_root_url}}/object/move?src_key=invalid&dest_key={{e2ePathToObj}}{{e2ePathToObj}}{{e2eObjName}}&bucket={{bucket}}", + "raw": "{{baseUrl}}/prefix/download/script?prefix={{prefix}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "object", - "move" + "prefix", + "download", + "script" ], "query": [ { - "key": "src_key", - "value": "invalid" - }, - { - "key": "dest_key", - "value": "{{e2ePathToObj}}{{e2ePathToObj}}{{e2eObjName}}" + "key": "prefix", + "value": "{{prefix}}" }, { "key": "bucket", @@ -2958,54 +1684,64 @@ } }, "response": [] - }, + } + ] + }, + { + "name": "Move Prefix", + "item": [ { - "name": "12/prefix/move", + "name": "Upload Object - config", "event": [ { "listen": "test", "script": { "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", "" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": 
[ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "file", + "file": { + "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" + } }, - "method": "PUT", - "header": [], "url": { - "raw": "{{s3_api_root_url}}/prefix/move?src_prefix=invalid&dest_prefix={{e2ePathToObj}}{{e2ePathToObj}}{{e2ePathToObj}}&bucket={{bucket}}", + "raw": "{{baseUrl}}/object/upload?key={{prefix}}{{key}}&bucket={{bucket}}&override=false", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "prefix", - "move" + "object", + "upload" ], "query": [ { - "key": "src_prefix", - "value": "invalid" - }, - { - "key": "dest_prefix", - "value": "{{e2ePathToObj}}{{e2ePathToObj}}{{e2ePathToObj}}" + "key": "key", + "value": "{{prefix}}{{key}}" }, { "key": "bucket", "value": "{{bucket}}" + }, + { + "key": "override", + "value": "false" } ] } @@ -3013,124 +1749,113 @@ "response": [] }, { - "name": "6/size", + "name": "Move Prefix - 200,400", "event": [ { "listen": "test", "script": { "exec": [ - "" + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if src_prefix is missing\", function () {", + " if(pm.request.url.query.indexOf('src_prefix') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "pm.test(\"Status code is 400 if dest_prefix is missing\", function () {", + " if(pm.request.url.query.indexOf('dest_prefix') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "", + "//might need more testing when this is multibucketd " ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] + "method": "PUT", + "header": [ + { + "key": "Content-Type", + "value": 
"application/json" + } + ], + "body": { + "mode": "raw", + "raw": "" }, - "method": "GET", - "header": [], "url": { - "raw": "{{s3_api_root_url}}/prefix/size?prefix=invalid&bucket={{bucket}}", + "raw": "{{baseUrl}}/prefix/move?bucket={{bucket}}&src_prefix={{prefix}}&dest_prefix={{prefix}}{{prefix}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ "prefix", - "size" + "move" ], "query": [ - { - "key": "prefix", - "value": "invalid" - }, { "key": "bucket", "value": "{{bucket}}" + }, + { + "key": "src_prefix", + "value": "{{prefix}}" + }, + { + "key": "dest_prefix", + "value": "{{prefix}}{{prefix}}" } ] } }, "response": [] } - ], - "event": [ - { - "listen": "prerequest", - "script": { - "type": "text/javascript", - "exec": [ - "" - ] - } - }, - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "pm.test('successfully attempted a not found', function () {", - " pm.response.to.have.status(404);", - "});" - ] - } - } ] }, { - "name": "BadRequest", + "name": "Get Prefix Size", "item": [ { - "name": "/object/move", + "name": "Get Size - 200,400", "event": [ { "listen": "test", "script": { "exec": [ - "" + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if prefix is missing\", function () {", + " if(pm.request.url.query.indexOf('prefix') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});" ], "type": "text/javascript" } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "PUT", + "method": "GET", "header": [], "url": { - "raw": "{{s3_api_root_url}}/object/move?src_key={{e2ePathToObj}}{{e2eObjName}}&dest_key={{e2ePathToObj}}{{e2eObjName}}&bucket={{bucket}}", + "raw": "{{baseUrl}}/prefix/size?prefix={{prefix}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "object", - "move" + "prefix", + 
"size" ], "query": [ { - "key": "src_key", - "value": "{{e2ePathToObj}}{{e2eObjName}}" - }, - { - "key": "dest_key", - "value": "{{e2ePathToObj}}{{e2eObjName}}" + "key": "prefix", + "value": "{{prefix}}" }, { "key": "bucket", @@ -3142,49 +1867,41 @@ "response": [] }, { - "name": "17/delete_keys", + "name": "Get Size - 419", "event": [ { "listen": "test", "script": { "exec": [ - "" + "\r", + "pm.test(\"Status code is 418 if prefix is poimting to an object\", function () {\r", + " if(pm.request.url.query.indexOf('bucket') === -1) {\r", + " pm.response.to.have.status(418);\r", + " }\r", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "DELETE", + "method": "GET", "header": [], - "body": { - "mode": "raw", - "raw": "key:{\"stuff,\"}", - "options": { - "raw": { - "language": "json" - } - } - }, "url": { - "raw": "{{s3_api_root_url}}/delete_keys?bucket={{bucket}}", + "raw": "{{baseUrl}}/prefix/size?prefix={{key}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "delete_keys" + "prefix", + "size" ], "query": [ + { + "key": "prefix", + "value": "{{key}}" + }, { "key": "bucket", "value": "{{bucket}}" @@ -3195,68 +1912,40 @@ "response": [] }, { - "name": "1/object/upload Copy", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - }, + "name": "Get Size - 404", + "event": [ { - "listen": "prerequest", + "listen": "test", "script": { "exec": [ - "" + "\r", + "pm.test(\"Status code is 404 if prefix is not found\", function () {\r", + " if(pm.request.url.query.indexOf('bucket') === -1) {\r", + " pm.response.to.have.status(404);\r", + " }\r", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - 
"key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "POST", + "method": "GET", "header": [], - "body": { - "mode": "formdata", - "formdata": [ - { - "key": "", - "value": "", - "type": "text", - "disabled": true - } - ] - }, "url": { - "raw": "{{s3_api_root_url}}/object/upload?key={{e2ePathToObj}}{{e2eObjName}}&override={{e2eoverride}}&bucket={{bucket}}", + "raw": "{{baseUrl}}/prefix/size?prefix={{prefix}}22&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "object", - "upload" + "prefix", + "size" ], "query": [ { - "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}" - }, - { - "key": "override", - "value": "{{e2eoverride}}" + "key": "prefix", + "value": "{{prefix}}22" }, { "key": "bucket", @@ -3267,76 +1956,53 @@ }, "response": [] } - ], - "event": [ - { - "listen": "prerequest", - "script": { - "type": "text/javascript", - "exec": [ - "" - ] - } - }, - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "pm.test('Bad Request Success', function () {", - " pm.response.to.have.status(400);", - "});" - ] - } - } ] }, { - "name": "conflict", + "name": "Delete Prefix", "item": [ { - "name": "/object/move 2", + "name": "Delete Prefix - 200,400", "event": [ { "listen": "test", "script": { "exec": [ - "" + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if prefix is missing\", function () {", + " if(pm.request.url.query.indexOf('prefix') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});", + "pm.test(\"Status code is 400 if bucket is missing\", function () {", + " if(pm.request.url.query.indexOf('bucket') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": 
"string" - } - ] - }, - "method": "PUT", + "method": "DELETE", "header": [], "url": { - "raw": "{{s3_api_root_url}}/object/move?src_key={{e2ePathToObj}}{{e2eObjName}}&dest_key={{e2ePathToObj}}{{e2eObjName}}2&bucket={{bucket}}", + "raw": "{{baseUrl}}/prefix/delete?prefix={{prefix}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "object", - "move" + "prefix", + "delete" ], "query": [ { - "key": "src_key", - "value": "{{e2ePathToObj}}{{e2eObjName}}" - }, - { - "key": "dest_key", - "value": "{{e2ePathToObj}}{{e2eObjName}}2" + "key": "prefix", + "value": "{{prefix}}" }, { "key": "bucket", @@ -3348,70 +2014,39 @@ "response": [] }, { - "name": "1/object/upload", + "name": "Delete Prefix - 404", "event": [ { "listen": "test", "script": { "exec": [ - "" - ], - "type": "text/javascript" - } - }, - { - "listen": "prerequest", - "script": { - "exec": [ - "" + "pm.test(\"Status code is 404 if bucket is missing\", function () {\r", + " if(pm.request.url.query.indexOf('bucket') === -1) {\r", + " pm.response.to.have.status(404);\r", + " }\r", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "POST", + "method": "DELETE", "header": [], - "body": { - "mode": "formdata", - "formdata": [ - { - "key": "", - "type": "file", - "src": [ - "missing_huc8s.xlsx", - "missing_huc8s.xlsx" - ] - } - ] - }, "url": { - "raw": "{{s3_api_root_url}}/object/upload?key={{e2ePathToObj}}{{e2eObjName}}&override={{e2eoverride}}&bucket={{bucket}}", + "raw": "{{baseUrl}}/prefix/delete?prefix={{non_existent_key}}&bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "object", - "upload" + "prefix", + "delete" ], "query": [ { - "key": "key", - "value": "{{e2ePathToObj}}{{e2eObjName}}" - }, - { - "key": "override", - "value": "{{e2eoverride}}" + 
"key": "prefix", + "value": "{{non_existent_key}}" }, { "key": "bucket", @@ -3422,77 +2057,58 @@ }, "response": [] } - ], - "event": [ - { - "listen": "prerequest", - "script": { - "type": "text/javascript", - "exec": [ - "" - ] - } - }, - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "pm.test('conflict Success', function () {", - " pm.response.to.have.status(409);", - "});" - ] - } - } ] - }, + } + ] + }, + { + "name": "Universal", + "item": [ { - "name": "TeaPot", + "name": "Delete Keys", "item": [ { - "name": "8/prefix/list", + "name": "Delete Keys - 200,400", "event": [ { "listen": "test", "script": { "exec": [ - "" + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "pm.test(\"Status code is 400 if bucket is missing\", function () {", + " if(pm.request.url.query.indexOf('bucket') === -1) {", + " pm.response.to.have.status(400);", + " }", + "});" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] + "method": "DELETE", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\"keys\": [\"{{key}}\"]}" }, - "method": "GET", - "header": [], "url": { - "raw": "{{s3_api_root_url}}/prefix/list?prefix={{e2ePathToObj}}{{e2eObjName}}2&delimiter={{e2eoverride}}&bucket={{bucket}}", + "raw": "{{baseUrl}}/delete_keys?bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "prefix", - "list" + "delete_keys" ], "query": [ - { - "key": "prefix", - "value": "{{e2ePathToObj}}{{e2eObjName}}2" - }, - { - "key": "delimiter", - "value": "{{e2eoverride}}" - }, { "key": "bucket", "value": "{{bucket}}" @@ -3503,45 +2119,43 @@ "response": [] }, { - "name": "9/prefix/list_with_details", + "name": "Delete Keys - 400", "event": [ { "listen": "test", 
"script": { "exec": [ + "pm.test(\"Status code is 400\", function () {", + " pm.response.to.have.status(400);", + "});", "" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] + "method": "DELETE", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\"keys\": []}" }, - "method": "GET", - "header": [], "url": { - "raw": "{{s3_api_root_url}}/prefix/list_with_details?prefix={{e2ePathToObj}}{{e2eObjName}}2&bucket={{bucket}}", + "raw": "{{baseUrl}}/delete_keys?bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "prefix", - "list_with_details" + "delete_keys" ], "query": [ - { - "key": "prefix", - "value": "{{e2ePathToObj}}{{e2eObjName}}2" - }, { "key": "bucket", "value": "{{bucket}}" @@ -3552,45 +2166,43 @@ "response": [] }, { - "name": "6/prefix/size", + "name": "Delete Keys - 404", "event": [ { "listen": "test", "script": { "exec": [ + "pm.test(\"Status code is 404\", function () {", + " pm.response.to.have.status(404);", + "});", "" ], - "type": "text/javascript" + "type": "text/javascript", + "packages": {} } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] + "method": "DELETE", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\"keys\":[\"{{non_existent_key}}\"]}" }, - "method": "GET", - "header": [], "url": { - "raw": "{{s3_api_root_url}}/prefix/size?prefix={{e2ePathToObj}}{{e2eObjName}}2&bucket={{bucket}}", + "raw": "{{baseUrl}}/delete_keys?bucket={{bucket}}", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "prefix", - "size" + "delete_keys" ], "query": [ - { - "key": "prefix", - "value": 
"{{e2ePathToObj}}{{e2eObjName}}2" - }, { "key": "bucket", "value": "{{bucket}}" @@ -3600,77 +2212,41 @@ }, "response": [] } - ], - "event": [ - { - "listen": "prerequest", - "script": { - "type": "text/javascript", - "exec": [ - "" - ] - } - }, - { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "pm.test('TeaPot error Success', function () {", - " pm.response.to.have.status(418);", - "});" - ] - } - } ] - }, + } + ] + }, + { + "name": "Multi-Bucket", + "item": [ { - "name": "clean up", + "name": "List Buckets", "item": [ { - "name": "14/prefix/delete", + "name": "List Buckets", "event": [ { "listen": "test", "script": { "exec": [ - "" + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});" ], "type": "text/javascript" } } ], "request": { - "auth": { - "type": "bearer", - "bearer": [ - { - "key": "token", - "value": "{{bearer_token}}", - "type": "string" - } - ] - }, - "method": "DELETE", + "method": "GET", "header": [], "url": { - "raw": "{{s3_api_root_url}}/prefix/delete?prefix={{e2ePathToObj}}&bucket={{bucket}}", + "raw": "{{baseUrl}}/list_buckets", "host": [ - "{{s3_api_root_url}}" + "{{baseUrl}}" ], "path": [ - "prefix", - "delete" - ], - "query": [ - { - "key": "prefix", - "value": "{{e2ePathToObj}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - } + "list_buckets" ] } }, @@ -3678,33 +2254,58 @@ } ] } - ], - "event": [ - { - "listen": "prerequest", - "script": { - "type": "text/javascript", - "exec": [ - "" - ] - } - }, + ] + }, + { + "name": "Auth", + "item": [ { - "listen": "test", - "script": { - "type": "text/javascript", - "exec": [ - "" - ] - } + "name": "Check User Permission", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "" + ], + "type": "text/javascript", + "packages": {} + } + } + ], + "request": { + "method": "GET", + "header": [], + "url": { + "raw": 
"{{baseUrl}}/check_user_permission?bucket={{bucket}}&prefix={{prefix}}&operation={{operation}}", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "check_user_permission" + ], + "query": [ + { + "key": "bucket", + "value": "{{bucket}}" + }, + { + "key": "prefix", + "value": "{{prefix}}" + }, + { + "key": "operation", + "value": "{{operation}}" + } + ] + } + }, + "response": [] } ] } - ], - "variable": [ - { - "key": "presignedUploadUrl", - "value": "\"\"" - } ] } \ No newline at end of file diff --git a/e2e-test/e2eEnv.json b/e2e-test/e2eEnv.json index f5b8e48..a2cc7b6 100644 --- a/e2e-test/e2eEnv.json +++ b/e2e-test/e2eEnv.json @@ -3,97 +3,73 @@ "name": "S3API", "values": [ { - "key": "s3_api_root_url", + "key": "baseUrl", "value": "localhost:5005", "type": "default", "enabled": true }, { - "key": "auth_url", - "value": "https://auth.dewberryanalytics.com/auth/realms", + "key": "s3_api_root_url", + "value": "localhost:5005", "type": "default", "enabled": true }, { - "key": "auth_username", - "value": "akopti@dewberry.com", + "key": "url", + "value": "localhost:5005", "type": "default", "enabled": true }, { - "key": "auth_password", - "value": "{{auth_password}}", - "type": "secret", - "enabled": true - }, - { - "key": "auth_client_id", - "value": "s3api", + "key": "bucket", + "value": "test-bucket", "type": "default", "enabled": true }, { - "key": "auth_client_secret", - "value": "{{KEYCLOAK_SECRET}}", - "type": "secret", + "key": "uploadId", + "value": "", + "type": "any", "enabled": true }, { - "key": "auth_grant_type", - "value": "password", - "type": "default", + "key": "presignedUploadUrl", + "value": "", + "type": "any", "enabled": true }, { - "key": "bearer_token", + "key": "etag", "value": "", - "type": "secret", + "type": "any", "enabled": true }, { - "key": "e2eObjName", + "key": "key", "value": "missing_huc8s.xlsx", "type": "default", "enabled": true }, { - "key": "e2ePathToObj", - "value": "e2e_testing/", - "type": "default", - "enabled": true - }, - { - 
"key": "e2eoverride", - "value": "false", - "type": "default", - "enabled": true - }, - { - "key": "bucket", - "value": "test-bucket", - "type": "default", - "enabled": true - }, - { - "key": "uploadId", + "key": "non_existent_key", "value": "", "type": "any", "enabled": true }, { - "key": "presignedUploadUrl", - "value": "", - "type": "any", + "key": "prefix", + "value": "temp/", + "type": "default", "enabled": true }, { - "key": "etag", - "value": "", - "type": "any", + "key": "operation", + "value": "write", + "type": "default", "enabled": true } ], "_postman_variable_scope": "environment", - "_postman_exported_at": "2023-10-02T16:40:29.335Z", - "_postman_exported_using": "Postman/10.18.9" + "_postman_exported_at": "2024-06-23T21:30:45.668Z", + "_postman_exported_using": "Postman/11.2.13" } \ No newline at end of file From 959f68350b86d138f2d8683745e4089d32e24adf Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Sun, 23 Jun 2024 17:43:19 -0400 Subject: [PATCH 11/22] update file location --- e2e-test/e2eCollection.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/e2e-test/e2eCollection.json b/e2e-test/e2eCollection.json index 30164a5..b62fd3a 100644 --- a/e2e-test/e2eCollection.json +++ b/e2e-test/e2eCollection.json @@ -56,7 +56,7 @@ "body": { "mode": "file", "file": { - "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" + "src": "missing_huc8s.xlsx" } }, "url": { @@ -116,7 +116,7 @@ "body": { "mode": "file", "file": { - "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" + "src": "missing_huc8s.xlsx" } }, "url": { @@ -1026,7 +1026,7 @@ "body": { "mode": "file", "file": { - "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" + "src": "missing_huc8s.xlsx" } }, "url": { @@ -1718,7 +1718,7 @@ "body": { "mode": "file", "file": { - "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" + "src": "missing_huc8s.xlsx" } }, "url": { From 24216b9671619417aa42a22ce51173655a2bd067 Mon Sep 17 00:00:00 2001 
From: Anton Kopti Date: Sun, 23 Jun 2024 18:37:19 -0400 Subject: [PATCH 12/22] readd non_existent_key as a value with env --- e2e-test/e2eEnv.json | 2 +- e2e-test/e2eEnv.template.json | 81 ----------------------------------- 2 files changed, 1 insertion(+), 82 deletions(-) delete mode 100644 e2e-test/e2eEnv.template.json diff --git a/e2e-test/e2eEnv.json b/e2e-test/e2eEnv.json index a2cc7b6..1e2d621 100644 --- a/e2e-test/e2eEnv.json +++ b/e2e-test/e2eEnv.json @@ -52,7 +52,7 @@ }, { "key": "non_existent_key", - "value": "", + "value": "non_existent_key", "type": "any", "enabled": true }, diff --git a/e2e-test/e2eEnv.template.json b/e2e-test/e2eEnv.template.json deleted file mode 100644 index c3d5218..0000000 --- a/e2e-test/e2eEnv.template.json +++ /dev/null @@ -1,81 +0,0 @@ -{ - "id": "16dc1b6d-ceea-4d23-8c02-c162f5d684c6", - "name": "S3API", - "values": [ - { - "key": "s3_api_root_url", - "value": "localhost:5005", - "type": "default", - "enabled": true - }, - { - "key": "auth_url", - "value": "https://auth.dewberryanalytics.com/auth/realms", - "type": "default", - "enabled": true - }, - { - "key": "auth_username", - "value": "akopti@dewberry.com", - "type": "default", - "enabled": true - }, - { - "key": "auth_password", - "value": "${auth_password}", - "type": "secret", - "enabled": true - }, - { - "key": "auth_client_id", - "value": "s3api", - "type": "default", - "enabled": true - }, - { - "key": "auth_client_secret", - "value": "${KEYCLOAK_SECRET}", - "type": "secret", - "enabled": true - }, - { - "key": "auth_grant_type", - "value": "password", - "type": "default", - "enabled": true - }, - { - "key": "bearer_token", - "value": "", - "type": "secret", - "enabled": true - }, - { - "key": "e2eObjName", - "value": "staff_table3.txt", - "type": "default", - "enabled": true - }, - { - "key": "e2ePathToObj", - "value": "e2e_testing/", - "type": "default", - "enabled": true - }, - { - "key": "e2eoverride", - "value": "false", - "type": "default", - "enabled": true - 
}, - { - "key": "bucket", - "value": "test-bucket", - "type": "default", - "enabled": true - } - ], - "_postman_variable_scope": "environment", - "_postman_exported_at": "2023-10-02T16:40:29.335Z", - "_postman_exported_using": "Postman/10.18.9" -} \ No newline at end of file From 36f8fa49ef0d296ad1668d717b82a5c263a570bd Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Sun, 23 Jun 2024 19:17:00 -0400 Subject: [PATCH 13/22] add checkAndAdjustPrefix to all prefix endpoints --- blobstore/delete.go | 7 +- blobstore/move.go | 20 ++- blobstore/presigned_url.go | 7 + e2e-test/e2eCollection.json | 274 +++++++++++++++++++++++++++++++++++- 4 files changed, 294 insertions(+), 14 deletions(-) diff --git a/blobstore/delete.go b/blobstore/delete.go index ea362b8..677d1ea 100644 --- a/blobstore/delete.go +++ b/blobstore/delete.go @@ -129,9 +129,12 @@ func (bh *BlobHandler) HandleDeletePrefix(c echo.Context) error { return configberry.HandleErrorResponse(c, appErr) } - if !strings.HasSuffix(prefix, "/") { - prefix = prefix + "/" + adjustedPrefix, appErr := s3Ctrl.checkAndAdjustPrefix(bucket, prefix) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) } + prefix = adjustedPrefix var objectsFound bool diff --git a/blobstore/move.go b/blobstore/move.go index 916b423..37f6121 100644 --- a/blobstore/move.go +++ b/blobstore/move.go @@ -108,13 +108,6 @@ func (bh *BlobHandler) HandleMovePrefix(c echo.Context) error { return configberry.HandleErrorResponse(c, appErr) } - if !strings.HasSuffix(params["srcPrefix"], "/") { - params["srcPrefix"] = params["srcPrefix"] + "/" - } - if !strings.HasSuffix(params["destPrefix"], "/") { - params["destPrefix"] = params["destPrefix"] + "/" - } - bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { @@ -122,6 +115,19 @@ func (bh *BlobHandler) HandleMovePrefix(c echo.Context) error { log.Error(configberry.LogErrorFormatter(appErr, true)) return 
configberry.HandleErrorResponse(c, appErr) } + adjustedPrefix, appErr := s3Ctrl.checkAndAdjustPrefix(bucket, params["srcPrefix"]) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + params["srcPrefix"] = adjustedPrefix + fmt.Println(params["srcPrefix"]) + adjustedPrefix, appErr = s3Ctrl.checkAndAdjustPrefix(bucket, params["destPrefix"]) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + params["destPrefix"] = adjustedPrefix err = s3Ctrl.MovePrefix(bucket, params["srcPrefix"], params["destPrefix"]) if err != nil { diff --git a/blobstore/presigned_url.go b/blobstore/presigned_url.go index 9978b91..d47fba6 100644 --- a/blobstore/presigned_url.go +++ b/blobstore/presigned_url.go @@ -83,6 +83,13 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { return configberry.HandleErrorResponse(c, appErr) } + adjustedPrefix, appErr := s3Ctrl.checkAndAdjustPrefix(bucket, prefix) + if appErr != nil { + log.Error(configberry.LogErrorFormatter(appErr, true)) + return configberry.HandleErrorResponse(c, appErr) + } + prefix = adjustedPrefix + var totalSize uint64 var scriptBuilder strings.Builder createdDirs := make(map[string]bool) diff --git a/e2e-test/e2eCollection.json b/e2e-test/e2eCollection.json index b62fd3a..5127b3f 100644 --- a/e2e-test/e2eCollection.json +++ b/e2e-test/e2eCollection.json @@ -56,7 +56,7 @@ "body": { "mode": "file", "file": { - "src": "missing_huc8s.xlsx" + "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" } }, "url": { @@ -116,7 +116,7 @@ "body": { "mode": "file", "file": { - "src": "missing_huc8s.xlsx" + "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" } }, "url": { @@ -1026,7 +1026,7 @@ "body": { "mode": "file", "file": { - "src": "missing_huc8s.xlsx" + "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" } }, "url": { @@ 
-1630,7 +1630,7 @@ "name": "Generate Download Script", "item": [ { - "name": "Generate Download Script", + "name": "Generate Download Script -200,400", "event": [ { "listen": "test", @@ -1684,6 +1684,52 @@ } }, "response": [] + }, + { + "name": "Generate Download Script -200,400 Copy", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 418 if prefix is an object\", function () {", + " if(pm.request.url.query.indexOf('bucket') === -1) {", + " pm.response.to.have.status(418);", + " }", + "});", + "" + ], + "type": "text/javascript", + "packages": {} + } + } + ], + "request": { + "method": "GET", + "header": [], + "url": { + "raw": "{{baseUrl}}/prefix/download/script?prefix={{key}}&bucket={{bucket}}", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "prefix", + "download", + "script" + ], + "query": [ + { + "key": "prefix", + "value": "{{key}}" + }, + { + "key": "bucket", + "value": "{{bucket}}" + } + ] + } + }, + "response": [] } ] }, @@ -1718,7 +1764,7 @@ "body": { "mode": "file", "file": { - "src": "missing_huc8s.xlsx" + "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" } }, "url": { @@ -1748,6 +1794,122 @@ }, "response": [] }, + { + "name": "Move Prefix - 418", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 418 because src_prefix is an object\", function () {", + " pm.response.to.have.status(418);", + "});", + "", + "", + "//might need more testing when this is multibucketd " + ], + "type": "text/javascript", + "packages": {} + } + } + ], + "request": { + "method": "PUT", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "" + }, + "url": { + "raw": "{{baseUrl}}/prefix/move?bucket={{bucket}}&src_prefix={{prefix}}{{key}}&dest_prefix={{prefix}}{{prefix}}", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "prefix", + "move" + ], + "query": [ + { + "key": "bucket", + "value": "{{bucket}}" + }, + { + "key": 
"src_prefix", + "value": "{{prefix}}{{key}}" + }, + { + "key": "dest_prefix", + "value": "{{prefix}}{{prefix}}" + } + ] + } + }, + "response": [] + }, + { + "name": "Move Prefix - 418", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 418 because src_prefix is an object\", function () {", + " pm.response.to.have.status(418);", + "});", + "", + "", + "//might need more testing when this is multibucketd " + ], + "type": "text/javascript", + "packages": {} + } + } + ], + "request": { + "method": "PUT", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "" + }, + "url": { + "raw": "{{baseUrl}}/prefix/move?bucket={{bucket}}&src_prefix={{prefix}}{{key}}&dest_prefix={{prefix}}{{key}}", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "prefix", + "move" + ], + "query": [ + { + "key": "bucket", + "value": "{{bucket}}" + }, + { + "key": "src_prefix", + "value": "{{prefix}}{{key}}" + }, + { + "key": "dest_prefix", + "value": "{{prefix}}{{key}}" + } + ] + } + }, + "response": [] + }, { "name": "Move Prefix - 200,400", "event": [ @@ -2056,6 +2218,108 @@ } }, "response": [] + }, + { + "name": "Upload Object - config", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 200\", function () {", + " pm.response.to.have.status(200);", + "});", + "" + ], + "type": "text/javascript", + "packages": {} + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "file", + "file": { + "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" + } + }, + "url": { + "raw": "{{baseUrl}}/object/upload?key={{key}}&bucket={{bucket}}&override=false", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "upload" + ], + "query": [ + { + "key": "key", + "value": "{{key}}" + }, + { + "key": "bucket", + "value": "{{bucket}}" + }, + { + "key": "override", + 
"value": "false" + } + ] + } + }, + "response": [] + }, + { + "name": "Delete Prefix - 418", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Status code is 418 if prefix is object\", function () {\r", + " if(pm.request.url.query.indexOf('bucket') === -1) {\r", + " pm.response.to.have.status(418);\r", + " }\r", + "});" + ], + "type": "text/javascript", + "packages": {} + } + } + ], + "request": { + "method": "DELETE", + "header": [], + "url": { + "raw": "{{baseUrl}}/prefix/delete?prefix={{key}}&bucket={{bucket}}", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "prefix", + "delete" + ], + "query": [ + { + "key": "prefix", + "value": "{{key}}" + }, + { + "key": "bucket", + "value": "{{bucket}}" + } + ] + } + }, + "response": [] } ] } From 8b81daeda27c69ec438d9773d321e46c0c7446a7 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Sun, 23 Jun 2024 22:01:50 -0400 Subject: [PATCH 14/22] return aws errors from process page --- blobstore/delete.go | 7 ++----- blobstore/presigned_url.go | 9 +++++---- configberry/errors_handling.go | 5 +++++ 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/blobstore/delete.go b/blobstore/delete.go index 677d1ea..ea362b8 100644 --- a/blobstore/delete.go +++ b/blobstore/delete.go @@ -129,12 +129,9 @@ func (bh *BlobHandler) HandleDeletePrefix(c echo.Context) error { return configberry.HandleErrorResponse(c, appErr) } - adjustedPrefix, appErr := s3Ctrl.checkAndAdjustPrefix(bucket, prefix) - if appErr != nil { - log.Error(configberry.LogErrorFormatter(appErr, true)) - return configberry.HandleErrorResponse(c, appErr) + if !strings.HasSuffix(prefix, "/") { + prefix = prefix + "/" } - prefix = adjustedPrefix var objectsFound bool diff --git a/blobstore/presigned_url.go b/blobstore/presigned_url.go index d47fba6..123efd6 100644 --- a/blobstore/presigned_url.go +++ b/blobstore/presigned_url.go @@ -10,6 +10,7 @@ import ( "github.com/Dewberry/s3api/configberry" "github.com/aws/aws-sdk-go/aws" + 
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/labstack/echo/v4" @@ -117,7 +118,7 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { if item.Size != nil { totalSize += uint64(*item.Size) if totalSize > uint64(bh.Config.DefaultScriptDownloadSizeLimit*1024*1024*1024) { - return fmt.Errorf("size limit of %d GB exceeded", bh.Config.DefaultScriptDownloadSizeLimit) + return awserr.New("EntityTooLarge", "Script size exceeds limit", fmt.Errorf("size limit of %d GB exceeded", bh.Config.DefaultScriptDownloadSizeLimit)) } } @@ -136,8 +137,8 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { return fmt.Errorf("error generating presigned URL for object %s: %v", *item.Key, err) } url, err := url.QueryUnescape(presignedURL) - if err != nil { - return fmt.Errorf("error unescaping URL encoding: %v", err) + if err != nil { //ServiceInternalError + return awserr.New("ServiceInternalError", "Script size exceeds limit", fmt.Errorf("error unescaping URL encoding: %v", err)) } encodedURL := strings.ReplaceAll(url, " ", "%20") scriptBuilder.WriteString(fmt.Sprintf("if exist \"%s\" (echo skipping existing file) else (curl -v -o \"%s\" \"%s\")\n", fullPath, fullPath, encodedURL)) @@ -149,7 +150,7 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { // Call GetList with the processPage function err = s3Ctrl.GetListWithCallBack(bucket, prefix, false, processPage) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "error processing objects", err) + appErr := configberry.HandleAWSError(err, "error processing objects") log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } diff --git a/configberry/errors_handling.go b/configberry/errors_handling.go index 9e39243..a2c178d 100644 --- a/configberry/errors_handling.go +++ 
b/configberry/errors_handling.go @@ -34,6 +34,7 @@ var ( TeapotError = ErrorType{Value: 9, Name: "Teapot Error"} ConflictError = ErrorType{Value: 10, Name: "Conflict Error"} BadRequestError = ErrorType{Value: 11, Name: "Bad Request Error"} + EntityTooLargeError = ErrorType{Value: 12, Name: "Entity Too Large Error"} ) // AppError includes the error type, message, and the original error. @@ -90,6 +91,8 @@ func HandleErrorResponse(c echo.Context, err *AppError) error { statusCode = http.StatusTeapot case ConflictError.Value: statusCode = http.StatusConflict + case EntityTooLargeError.Value: + statusCode = http.StatusRequestEntityTooLarge } return c.JSON(statusCode, map[string]string{"Type": err.Type.Name, "Error": responseMessage}) } @@ -168,6 +171,8 @@ func HandleAWSError(err error, errMsg string) *AppError { return NewAppError(InternalServerError, formattedMessage, originalErr) case "InvalidPart": return NewAppError(BadRequestError, formattedMessage, originalErr) + case "EntityTooLarge": + return NewAppError(EntityTooLargeError, formattedMessage, originalErr) default: return NewAppError(AWSError, formattedMessage, originalErr) } From 1fa19639cbd74f309cf15d1e9f47f1516b6dd115 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Sun, 23 Jun 2024 22:04:30 -0400 Subject: [PATCH 15/22] rename to 18 checks --- e2e-test/e2eCollection.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e-test/e2eCollection.json b/e2e-test/e2eCollection.json index 5127b3f..f0082a9 100644 --- a/e2e-test/e2eCollection.json +++ b/e2e-test/e2eCollection.json @@ -1686,7 +1686,7 @@ "response": [] }, { - "name": "Generate Download Script -200,400 Copy", + "name": "Generate Download Script -", "event": [ { "listen": "test", @@ -2029,7 +2029,7 @@ "response": [] }, { - "name": "Get Size - 419", + "name": "Get Size - 418", "event": [ { "listen": "test", From 57ca9a77480a694a236f664a02b033df3f425de3 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Sun, 23 Jun 2024 23:23:45 -0400 
Subject: [PATCH 16/22] repoint to local file --- e2e-test/e2eCollection.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/e2e-test/e2eCollection.json b/e2e-test/e2eCollection.json index f0082a9..1cd5feb 100644 --- a/e2e-test/e2eCollection.json +++ b/e2e-test/e2eCollection.json @@ -56,7 +56,7 @@ "body": { "mode": "file", "file": { - "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" + "src": "missing_huc8s.xlsx" } }, "url": { @@ -116,7 +116,7 @@ "body": { "mode": "file", "file": { - "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" + "src": "missing_huc8s.xlsx" } }, "url": { @@ -1026,7 +1026,7 @@ "body": { "mode": "file", "file": { - "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" + "src": "missing_huc8s.xlsx" } }, "url": { @@ -1686,7 +1686,7 @@ "response": [] }, { - "name": "Generate Download Script -", + "name": "Generate Download Script - 418", "event": [ { "listen": "test", @@ -1764,7 +1764,7 @@ "body": { "mode": "file", "file": { - "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" + "src": "missing_huc8s.xlsx" } }, "url": { @@ -2247,7 +2247,7 @@ "body": { "mode": "file", "file": { - "src": "postman-cloud:///1ef2e68b-caf8-4270-80ad-9e03a8f77311" + "src": "missing_huc8s.xlsx" } }, "url": { From c6886322a1308196edb75ef25f54d8fbef9540f1 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Sun, 23 Jun 2024 23:30:10 -0400 Subject: [PATCH 17/22] remove unnecessary upload endpoint --- e2e-test/e2eCollection.json | 60 +------------------------------------ 1 file changed, 1 insertion(+), 59 deletions(-) diff --git a/e2e-test/e2eCollection.json b/e2e-test/e2eCollection.json index 1cd5feb..099f2f7 100644 --- a/e2e-test/e2eCollection.json +++ b/e2e-test/e2eCollection.json @@ -1,7 +1,7 @@ { "info": { "_postman_id": "db98bd13-6aa3-4624-80fa-66e7d7801aff", - "name": "S3 API Endpoints", + "name": "S3API E2E Test", "description": "Collection to test all S3 API endpoints.", "schema": 
"https://schema.getpostman.com/json/collection/v2.1.0/collection.json", "_exporter_id": "18773467", @@ -2219,64 +2219,6 @@ }, "response": [] }, - { - "name": "Upload Object - config", - "event": [ - { - "listen": "test", - "script": { - "exec": [ - "pm.test(\"Status code is 200\", function () {", - " pm.response.to.have.status(200);", - "});", - "" - ], - "type": "text/javascript", - "packages": {} - } - } - ], - "request": { - "method": "POST", - "header": [ - { - "key": "Content-Type", - "value": "application/json" - } - ], - "body": { - "mode": "file", - "file": { - "src": "missing_huc8s.xlsx" - } - }, - "url": { - "raw": "{{baseUrl}}/object/upload?key={{key}}&bucket={{bucket}}&override=false", - "host": [ - "{{baseUrl}}" - ], - "path": [ - "object", - "upload" - ], - "query": [ - { - "key": "key", - "value": "{{key}}" - }, - { - "key": "bucket", - "value": "{{bucket}}" - }, - { - "key": "override", - "value": "false" - } - ] - } - }, - "response": [] - }, { "name": "Delete Prefix - 418", "event": [ From 0a6c8c3b74b0878dd0a6ab9ca86e598ad5b93a7e Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Sun, 23 Jun 2024 23:39:57 -0400 Subject: [PATCH 18/22] add handleAwsError where appropriate --- blobstore/delete.go | 2 +- blobstore/list.go | 2 +- blobstore/metadata.go | 2 +- blobstore/presigned_url.go | 2 +- blobstore/upload.go | 4 ++-- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/blobstore/delete.go b/blobstore/delete.go index ea362b8..3c1a322 100644 --- a/blobstore/delete.go +++ b/blobstore/delete.go @@ -235,7 +235,7 @@ func (bh *BlobHandler) HandleDeleteObjectsByList(c echo.Context) error { // Delete the objects using the deleteKeys function err = s3Ctrl.DeleteKeys(bucket, keys) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "error deleting objects", err) + appErr := configberry.HandleAWSError(err, "error deleting objects") log.Error(configberry.LogErrorFormatter(appErr, true)) return 
configberry.HandleErrorResponse(c, appErr) } diff --git a/blobstore/list.go b/blobstore/list.go index b12a813..068ec5f 100644 --- a/blobstore/list.go +++ b/blobstore/list.go @@ -245,7 +245,7 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { } err = s3Ctrl.GetListWithCallBack(bucket, prefix, delimiter, processPage) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "error processing objects", err) + appErr := configberry.HandleAWSError(err, "error processing objects") log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } diff --git a/blobstore/metadata.go b/blobstore/metadata.go index 2d699fd..604e81d 100644 --- a/blobstore/metadata.go +++ b/blobstore/metadata.go @@ -85,7 +85,7 @@ func (bh *BlobHandler) HandleGetSize(c echo.Context) error { }) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "error processing objects", err) + appErr := configberry.HandleAWSError(err, "error processing objects") log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } diff --git a/blobstore/presigned_url.go b/blobstore/presigned_url.go index 123efd6..a3c1626 100644 --- a/blobstore/presigned_url.go +++ b/blobstore/presigned_url.go @@ -174,7 +174,7 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { href, err := s3Ctrl.GetDownloadPresignedURL(bucket, outputFile, 1) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, fmt.Sprintf("error generating presigned URL for %s", txtBatFileName), err) + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error generating presigned URL for %s", txtBatFileName)) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } diff --git a/blobstore/upload.go b/blobstore/upload.go index 4bd9839..b8525b0 100644 --- a/blobstore/upload.go +++ 
b/blobstore/upload.go @@ -370,7 +370,7 @@ func (bh *BlobHandler) HandleGetPresignedUploadURL(c echo.Context) error { } presignedURL, err := s3Ctrl.GetUploadPartPresignedURL(bucket, key, uploadID, int64(partNumber), bh.Config.DefaultUploadPresignedUrlExpiration) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "error generating presigned part URL", err) + appErr := configberry.HandleAWSError(err, "error generating presigned part URL") log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -508,7 +508,7 @@ func (bh *BlobHandler) HandleAbortMultipartUpload(c echo.Context) error { err = s3Ctrl.AbortMultipartUpload(bucket, key, uploadID) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, fmt.Sprintf("error aborting the multipart Upload for key %s", key), err) + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error aborting the multipart Upload for key %s", key)) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } From 9a53bc278d55e067e25632c6de45d8d8039fa0e1 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Mon, 24 Jun 2024 09:56:39 -0400 Subject: [PATCH 19/22] add more context to AWS errors --- blobstore/buckets.go | 3 ++- blobstore/config.go | 2 +- blobstore/delete.go | 8 ++++---- blobstore/list.go | 5 +++-- blobstore/metadata.go | 6 +++--- blobstore/move.go | 8 ++++---- blobstore/object_content.go | 2 +- blobstore/presigned_url.go | 2 +- blobstore/upload.go | 8 ++++---- 9 files changed, 23 insertions(+), 21 deletions(-) diff --git a/blobstore/buckets.go b/blobstore/buckets.go index 3082aa4..01cb431 100644 --- a/blobstore/buckets.go +++ b/blobstore/buckets.go @@ -3,6 +3,7 @@ package blobstore // Not implemented import ( + "fmt" "sort" "github.com/Dewberry/s3api/configberry" @@ -27,7 +28,7 @@ func (s3Ctrl *S3Controller) ListBuckets() (*s3.ListBucketsOutput, error) { // Retrieve the list 
of buckets result, err = s3Ctrl.S3Svc.ListBuckets(input) if err != nil { - return nil, err + return nil, fmt.Errorf("error listing buckets: %w", err) } return result, nil } diff --git a/blobstore/config.go b/blobstore/config.go index 9574eb1..7ffba7b 100644 --- a/blobstore/config.go +++ b/blobstore/config.go @@ -46,7 +46,7 @@ func getIntEnvOrDefault(envKey string, defaultValue int) int { } value, err := strconv.Atoi(valueStr) if err != nil { - log.Errorf("error parsing %s, defaulting to %v: %v", envKey, defaultValue, err) + log.Errorf("error parsing %s, defaulting to %v: %v", envKey, defaultValue, err.Error()) return defaultValue } return value diff --git a/blobstore/delete.go b/blobstore/delete.go index 3c1a322..a2760c8 100644 --- a/blobstore/delete.go +++ b/blobstore/delete.go @@ -15,7 +15,7 @@ import ( func (s3Ctrl *S3Controller) DeleteObjectIfExists(bucket, key string) error { // Check if the object exists if _, err := s3Ctrl.GetMetaData(bucket, key); err != nil { - return err + return fmt.Errorf("error getting metadata for the process of deleting object %s, %w", key, err) } // Delete the object @@ -25,7 +25,7 @@ func (s3Ctrl *S3Controller) DeleteObjectIfExists(bucket, key string) error { } _, err := s3Ctrl.S3Svc.DeleteObject(deleteInput) if err != nil { - return err + return fmt.Errorf("error deleting object %s, %w", key, err) } return nil @@ -50,7 +50,7 @@ func (s3Ctrl *S3Controller) DeleteList(page *s3.ListObjectsV2Output, bucket stri }, }) if err != nil { - return err + return fmt.Errorf("error deleting objects while attempting delete list, %w", err) } return nil @@ -76,7 +76,7 @@ func (s3Ctrl *S3Controller) DeleteKeys(bucket string, key []string) error { _, err := s3Ctrl.S3Svc.DeleteObjects(input) if err != nil { - return err + return fmt.Errorf("error deleting objects while attempting delete list, %w", err) } return nil } diff --git a/blobstore/list.go b/blobstore/list.go index 068ec5f..7fb7b77 100644 --- a/blobstore/list.go +++ b/blobstore/list.go @@ 
-1,6 +1,7 @@ package blobstore import ( + "fmt" "path/filepath" "strconv" "strings" @@ -59,7 +60,7 @@ func (s3Ctrl *S3Controller) GetList(bucket, prefix string, delimiter bool) (*s3. return false // Stop pagination }) if err != nil { - return nil, err + return nil, fmt.Errorf("error listing objects: %w", err) } return response, nil @@ -89,7 +90,7 @@ func (s3Ctrl *S3Controller) GetListWithCallBack(bucket, prefix string, delimiter if lastError != nil { return lastError // Return the last error encountered in the processPage function } - return err // Return any errors encountered in the pagination process + return fmt.Errorf("error listing objects: %w", err) // Return any errors encountered in the pagination process } // HandleListByPrefix handles the API endpoint for listing objects by prefix in an S3 bucket. diff --git a/blobstore/metadata.go b/blobstore/metadata.go index 604e81d..3b1ae84 100644 --- a/blobstore/metadata.go +++ b/blobstore/metadata.go @@ -20,7 +20,7 @@ func (s3Ctrl *S3Controller) GetMetaData(bucket, key string) (*s3.HeadObjectOutpu result, err := s3Ctrl.S3Svc.HeadObject(input) if err != nil { - return nil, err + return nil, fmt.Errorf("error getting object's metadata %s, %w", key, err) } return result, nil } @@ -37,10 +37,10 @@ func (s3Ctrl *S3Controller) KeyExists(bucket string, key string) (bool, error) { case "NotFound": // s3.ErrCodeNoSuchKey does not work, aws is missing this error code so we hardwire a string return false, nil default: - return false, err + return false, fmt.Errorf("error checking if object %s exists, %w", key, err) } } - return false, err + return false, fmt.Errorf("error checking if object %s exists, %w", key, err) } return true, nil } diff --git a/blobstore/move.go b/blobstore/move.go index 37f6121..5038522 100644 --- a/blobstore/move.go +++ b/blobstore/move.go @@ -33,7 +33,7 @@ func (s3Ctrl *S3Controller) MovePrefix(bucket, srcPrefix, destPrefix string) err } _, err := s3Ctrl.S3Svc.CopyObject(copyInput) if err != nil { - 
return err + return fmt.Errorf("error copying list, %w", err) } } @@ -41,7 +41,7 @@ func (s3Ctrl *S3Controller) MovePrefix(bucket, srcPrefix, destPrefix string) err // Ensure that your application logic requires this before proceeding err := s3Ctrl.DeleteList(page, bucket) if err != nil { - return err + return fmt.Errorf("error deleting list, %w", err) } return nil } @@ -83,7 +83,7 @@ func (s3Ctrl *S3Controller) CopyObject(bucket, srcObjectKey, destObjectKey strin // Copy the object to the new key (effectively renaming) _, err = s3Ctrl.S3Svc.CopyObject(copyInput) if err != nil { - return err + return fmt.Errorf("error copying object %s, %w", srcObjectKey, err) } // Delete the source object @@ -92,7 +92,7 @@ func (s3Ctrl *S3Controller) CopyObject(bucket, srcObjectKey, destObjectKey strin Key: aws.String(srcObjectKey), }) if err != nil { - return err + return fmt.Errorf("error deleting object %s, %w", srcObjectKey, err) } return nil diff --git a/blobstore/object_content.go b/blobstore/object_content.go index f6b0144..9ce194a 100644 --- a/blobstore/object_content.go +++ b/blobstore/object_content.go @@ -18,7 +18,7 @@ func (s3Ctrl *S3Controller) FetchObjectContent(bucket string, key string) (io.Re } output, err := s3Ctrl.S3Svc.GetObject(input) if err != nil { - return nil, err + return nil, fmt.Errorf("error getting object's content %s, %w", key, err) } return output.Body, nil diff --git a/blobstore/presigned_url.go b/blobstore/presigned_url.go index a3c1626..3a706be 100644 --- a/blobstore/presigned_url.go +++ b/blobstore/presigned_url.go @@ -20,7 +20,7 @@ import ( func (s3Ctrl *S3Controller) GetDownloadPresignedURL(bucket, key string, expDays int) (string, error) { duration := time.Duration(expDays) * 24 * time.Hour if _, err := s3Ctrl.GetMetaData(bucket, key); err != nil { //this is to check if the object exists or not, it will return an AWS error - return "", err + return "", fmt.Errorf("error checking if object %s exists, %w", key, err) } req, _ := 
s3Ctrl.S3Svc.GetObjectRequest(&s3.GetObjectInput{ Bucket: aws.String(bucket), diff --git a/blobstore/upload.go b/blobstore/upload.go index b8525b0..c06fb25 100644 --- a/blobstore/upload.go +++ b/blobstore/upload.go @@ -34,7 +34,7 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC resp, err := s3Ctrl.S3Svc.CreateMultipartUpload(params) if err != nil { - return err + return fmt.Errorf("error creating multipart upload for object %s, %w", key, err) } // Create the variables that will track upload progress @@ -69,7 +69,7 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC result, err := s3Ctrl.S3Svc.UploadPart(params) if err != nil { - return err + return fmt.Errorf("error creating uploading part %v, %w", params, err) } totalBytes += int64(buffer.Len()) @@ -98,7 +98,7 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC result, err := s3Ctrl.S3Svc.UploadPart(params2) if err != nil { - return err + return fmt.Errorf("error creating uploading part %v, %w", params2, err) } totalBytes += int64(buffer.Len()) @@ -116,7 +116,7 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC } _, err = s3Ctrl.S3Svc.CompleteMultipartUpload(completeParams) if err != nil { - return err + return fmt.Errorf("error completing multipart upload %w", err) } return nil From d0cf165a8adafbabceceb24ce7f1aaa40cd673b8 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Mon, 24 Jun 2024 12:33:05 -0400 Subject: [PATCH 20/22] format errors accoridng to best practices --- blobstore/blobstore.go | 9 ++++ blobstore/buckets.go | 5 +- blobstore/delete.go | 94 ++++++++++++++++++---------------- blobstore/fgac.go | 10 ++-- blobstore/list.go | 19 ++++--- blobstore/metadata.go | 36 ++++++------- blobstore/move.go | 48 ++++++----------- blobstore/object_content.go | 14 ++--- blobstore/ping.go | 2 +- blobstore/presigned_url.go | 22 ++++---- blobstore/upload.go | 40 +++++++-------- 
configberry/errors_handling.go | 6 +-- documentation/general.md | 48 ++++++++++++----- 13 files changed, 186 insertions(+), 167 deletions(-) diff --git a/blobstore/blobstore.go b/blobstore/blobstore.go index 9e747b3..c132262 100644 --- a/blobstore/blobstore.go +++ b/blobstore/blobstore.go @@ -185,3 +185,12 @@ func GetListSize(page *s3.ListObjectsV2Output, totalSize *uint64, fileCount *uin return nil } + +//repetitive errors refactored: + +const unableToGetController string = "unable to get `s3controller`" +const parameterKeyRequired string = "parameter `key` is required" +const parameterPrefixRequired string = "parameter `prefix` is required" +const parseingBodyRequestError string = "error parsing request body" +const parsingDelimeterParamError string = "error parsing `delimiter` param" +const listingObjectsAndPrefixError string = "error listing objects and common prefixes" diff --git a/blobstore/buckets.go b/blobstore/buckets.go index 01cb431..a9d5616 100644 --- a/blobstore/buckets.go +++ b/blobstore/buckets.go @@ -3,7 +3,6 @@ package blobstore // Not implemented import ( - "fmt" "sort" "github.com/Dewberry/s3api/configberry" @@ -28,7 +27,7 @@ func (s3Ctrl *S3Controller) ListBuckets() (*s3.ListBucketsOutput, error) { // Retrieve the list of buckets result, err = s3Ctrl.S3Svc.ListBuckets(input) if err != nil { - return nil, fmt.Errorf("error listing buckets: %w", err) + return nil, err } return result, nil } @@ -50,7 +49,7 @@ func (bh *BlobHandler) HandleListBuckets(c echo.Context) error { if bh.AllowAllBuckets { result, err := controller.ListBuckets() if err != nil { - appErr := configberry.HandleAWSError(err, "error retunring list of buckets") + appErr := configberry.HandleAWSError(err, "error listing buckets") log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } diff --git a/blobstore/delete.go b/blobstore/delete.go index a2760c8..b544118 100644 --- a/blobstore/delete.go +++ b/blobstore/delete.go @@ -1,21 
+1,24 @@ package blobstore import ( + "errors" "fmt" "strings" "github.com/Dewberry/s3api/configberry" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/s3" "github.com/go-playground/validator" "github.com/labstack/echo/v4" log "github.com/sirupsen/logrus" ) -func (s3Ctrl *S3Controller) DeleteObjectIfExists(bucket, key string) error { +func (s3Ctrl *S3Controller) DeleteObject(bucket, key string) error { // Check if the object exists if _, err := s3Ctrl.GetMetaData(bucket, key); err != nil { - return fmt.Errorf("error getting metadata for the process of deleting object %s, %w", key, err) + //wrapping error since it does not pertain to the method's main functionality which in this case is deletion + return fmt.Errorf("error checking object's existence while attempting to delete, %w", err) } // Delete the object @@ -25,7 +28,7 @@ func (s3Ctrl *S3Controller) DeleteObjectIfExists(bucket, key string) error { } _, err := s3Ctrl.S3Svc.DeleteObject(deleteInput) if err != nil { - return fmt.Errorf("error deleting object %s, %w", key, err) + return err } return nil @@ -50,7 +53,7 @@ func (s3Ctrl *S3Controller) DeleteList(page *s3.ListObjectsV2Output, bucket stri }, }) if err != nil { - return fmt.Errorf("error deleting objects while attempting delete list, %w", err) + return err } return nil @@ -58,14 +61,39 @@ func (s3Ctrl *S3Controller) DeleteList(page *s3.ListObjectsV2Output, bucket stri func (s3Ctrl *S3Controller) DeleteKeys(bucket string, key []string) error { objects := make([]*s3.ObjectIdentifier, 0, len(key)) + var nonExistentKeys []string + for _, p := range key { s3Path := strings.TrimPrefix(p, "/") + exists, err := s3Ctrl.KeyExists(bucket, p) + if err != nil { + // Wrap error with awserr based on the specific error type + var awsErr error + if errors.As(err, &awsErr) { + // If it's an awserr.Error, return it directly + return awsErr + } else { + // Otherwise, create a new awserr with generic message + return 
awserr.New("S3KeyCheckError", "Error checking object existence", err) + } + } + + if !exists { + nonExistentKeys = append(nonExistentKeys, s3Path) + } + object := &s3.ObjectIdentifier{ Key: aws.String(s3Path), } + objects = append(objects, object) } + if len(nonExistentKeys) > 0 { + // Don't delete anything, return error for non-existent keys + return awserr.New("NoSuchKey", "Objects Not Found", fmt.Errorf("following keys do not exist: %+q", nonExistentKeys)) + } + input := &s3.DeleteObjectsInput{ Bucket: aws.String(bucket), Delete: &s3.Delete{ @@ -76,7 +104,7 @@ func (s3Ctrl *S3Controller) DeleteKeys(bucket string, key []string) error { _, err := s3Ctrl.S3Svc.DeleteObjects(input) if err != nil { - return fmt.Errorf("error deleting objects while attempting delete list, %w", err) + return err } return nil } @@ -88,27 +116,27 @@ func (bh *BlobHandler) HandleDeleteObject(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } key := c.QueryParam("key") if key == "" { - appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + appErr := configberry.NewAppError(configberry.ValidationError, parameterKeyRequired, nil) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } - err = s3Ctrl.DeleteObjectIfExists(bucket, key) + err = s3Ctrl.DeleteObject(bucket, key) if err != nil { - appErr := configberry.HandleAWSError(err, fmt.Sprintf("error deleting object %s", key)) + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error deleting object with `key`: %s", key)) log.Error(configberry.LogErrorFormatter(appErr, true)) 
return configberry.HandleErrorResponse(c, appErr) } - log.Infof("successfully deleted file with key: %s", key) - return configberry.HandleSuccessfulResponse(c, fmt.Sprintf("Successfully deleted object: %s", key)) + log.Infof("successfully deleted file with `key`: %s", key) + return configberry.HandleSuccessfulResponse(c, fmt.Sprintf("Successfully deleted object with `key`: %s", key)) } func (bh *BlobHandler) HandleDeletePrefix(c echo.Context) error { @@ -117,14 +145,14 @@ func (bh *BlobHandler) HandleDeletePrefix(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } prefix := c.QueryParam("prefix") if prefix == "" { - appErr := configberry.NewAppError(configberry.ValidationError, "parameter `prefix` is required", nil) + appErr := configberry.NewAppError(configberry.ValidationError, parameterPrefixRequired, nil) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -164,19 +192,19 @@ func (bh *BlobHandler) HandleDeletePrefix(c echo.Context) error { }) if err != nil { - appErr := configberry.HandleAWSError(err, fmt.Sprintf("failed to delete objects with prefix %s", prefix)) + appErr := configberry.HandleAWSError(err, fmt.Sprintf("failed to delete objects that belong to the `prefix`: %s", prefix)) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } if !objectsFound { - appErr := configberry.NewAppError(configberry.NotFoundError, fmt.Sprintf("prefix %s not found", prefix), nil) + appErr := configberry.NewAppError(configberry.NotFoundError, fmt.Sprintf("`prefix` %s not found", prefix), nil) 
log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } - log.Infof("Successfully deleted prefix and its contents for prefix: %s", prefix) - return configberry.HandleSuccessfulResponse(c, fmt.Sprintf("Successfully deleted prefix and its contents for prefix: %s", prefix)) + log.Infof("Successfully deleted `prefix` %s and its content: ", prefix) + return configberry.HandleSuccessfulResponse(c, fmt.Sprintf("Successfully deleted `prefix` %s and its content: ", prefix)) } func (bh *BlobHandler) HandleDeleteObjectsByList(c echo.Context) error { @@ -189,7 +217,7 @@ func (bh *BlobHandler) HandleDeleteObjectsByList(c echo.Context) error { } var deleteRequest DeleteRequest if err := c.Bind(&deleteRequest); err != nil { - appErr := configberry.NewAppError(configberry.ValidationError, "error parsing request body", err) + appErr := configberry.NewAppError(configberry.ValidationError, parseingBodyRequestError, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -204,42 +232,20 @@ func (bh *BlobHandler) HandleDeleteObjectsByList(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } - // Prepare the keys for deletion - keys := make([]string, 0, len(deleteRequest.Keys)) - for _, p := range deleteRequest.Keys { - s3Path := strings.TrimPrefix(p, "/") - key := aws.String(s3Path) - - // Check if the key exists before appending it to the keys list - keyExists, err := s3Ctrl.KeyExists(bucket, s3Path) - if err != nil { - appErr := configberry.HandleAWSError(err, "error checking if object exists") - 
log.Error(configberry.LogErrorFormatter(appErr, true)) - return configberry.HandleErrorResponse(c, appErr) - } - if !keyExists { - appErr := configberry.NewAppError(configberry.NotFoundError, fmt.Sprintf("object %s not found", s3Path), nil) - log.Error(configberry.LogErrorFormatter(appErr, true)) - return configberry.HandleErrorResponse(c, appErr) - } - - keys = append(keys, *key) - } - // Delete the objects using the deleteKeys function - err = s3Ctrl.DeleteKeys(bucket, keys) + err = s3Ctrl.DeleteKeys(bucket, deleteRequest.Keys) if err != nil { - appErr := configberry.HandleAWSError(err, "error deleting objects") + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error deleting objects: %+q", deleteRequest.Keys)) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } - log.Infof("Successfully deleted objects: %v", deleteRequest.Keys) + log.Infof("Successfully deleted objects: %+q", deleteRequest.Keys) return configberry.HandleSuccessfulResponse(c, "Successfully deleted objects") } diff --git a/blobstore/fgac.go b/blobstore/fgac.go index 7fc74fc..211f890 100644 --- a/blobstore/fgac.go +++ b/blobstore/fgac.go @@ -20,7 +20,7 @@ func (bh *BlobHandler) getS3ReadPermissions(c echo.Context, bucket string) ([]st return nil, false, appError } if !fullAccess && len(permissions) == 0 { - return nil, false, configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the %s bucket", bucket), nil) + return nil, false, configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the %s `bucket`", bucket), nil) } return permissions, fullAccess, nil } @@ -36,7 +36,7 @@ func (bh *BlobHandler) getUserS3ReadListPermission(c echo.Context, bucket string fullAccess := false claims, ok := c.Get("claims").(*auth.Claims) if !ok { - return permissions, fullAccess, configberry.NewAppError(configberry.InternalServerError, "could not get claims from 
request context", nil) + return permissions, fullAccess, configberry.NewAppError(configberry.InternalServerError, "could not get `claims` from request context", nil) } roles := claims.RealmAccess["roles"] @@ -53,7 +53,7 @@ func (bh *BlobHandler) getUserS3ReadListPermission(c echo.Context, bucket string ue := claims.Email permissions, err := bh.DB.GetUserAccessiblePrefixes(ue, bucket, []string{"read", "write"}) if err != nil { - return permissions, fullAccess, configberry.HandleSQLError(err, "error getting common prefix that the user can read and write to") + return permissions, fullAccess, configberry.HandleSQLError(err, "error getting `prefix` that the user can read and write to") } return permissions, fullAccess, nil } @@ -69,7 +69,7 @@ func (bh *BlobHandler) validateUserAccessToPrefix(c echo.Context, bucket, prefix } claims, ok := c.Get("claims").(*auth.Claims) if !ok { - return configberry.NewAppError(configberry.InternalServerError, "could not get claims from request context", nil) + return configberry.NewAppError(configberry.InternalServerError, "could not get `claims` from request context", nil) } roles := claims.RealmAccess["roles"] ue := claims.Email @@ -114,7 +114,7 @@ func (bh *BlobHandler) HandleCheckS3UserPermission(c echo.Context) error { } claims, ok := c.Get("claims").(*auth.Claims) if !ok { - appErr := configberry.NewAppError(configberry.InternalServerError, "could not get claims from request context", nil) + appErr := configberry.NewAppError(configberry.InternalServerError, "could not get `claims` from request context", nil) log.Error(configberry.LogErrorFormatter(appErr, false)) return configberry.HandleErrorResponse(c, appErr) } diff --git a/blobstore/list.go b/blobstore/list.go index 7fb7b77..6d790c4 100644 --- a/blobstore/list.go +++ b/blobstore/list.go @@ -1,7 +1,6 @@ package blobstore import ( - "fmt" "path/filepath" "strconv" "strings" @@ -60,7 +59,7 @@ func (s3Ctrl *S3Controller) GetList(bucket, prefix string, delimiter bool) (*s3. 
return false // Stop pagination }) if err != nil { - return nil, fmt.Errorf("error listing objects: %w", err) + return nil, err } return response, nil @@ -90,7 +89,7 @@ func (s3Ctrl *S3Controller) GetListWithCallBack(bucket, prefix string, delimiter if lastError != nil { return lastError // Return the last error encountered in the processPage function } - return fmt.Errorf("error listing objects: %w", err) // Return any errors encountered in the pagination process + return err // Return any errors encountered in the pagination process } // HandleListByPrefix handles the API endpoint for listing objects by prefix in an S3 bucket. @@ -99,7 +98,7 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -116,7 +115,7 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { if delimiterParam != "" { delimiter, err = strconv.ParseBool(delimiterParam) if err != nil { - appErr := configberry.NewAppError(configberry.ValidationError, "error parsing `delimiter` param", nil) + appErr := configberry.NewAppError(configberry.ValidationError, parsingDelimeterParamError, nil) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -154,7 +153,7 @@ func (bh *BlobHandler) HandleListByPrefix(c echo.Context) error { err = s3Ctrl.GetListWithCallBack(bucket, prefix, delimiter, processPage) if err != nil { - appErr := configberry.HandleAWSError(err, "error processing objects") + appErr := configberry.HandleAWSError(err, listingObjectsAndPrefixError) log.Error(configberry.LogErrorFormatter(appErr, true)) return 
configberry.HandleErrorResponse(c, appErr) } @@ -169,7 +168,7 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -186,7 +185,7 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { if delimiterParam != "" { delimiter, err = strconv.ParseBool(delimiterParam) if err != nil { - appErr := configberry.NewAppError(configberry.ValidationError, "error parsing `delimiter` param", nil) + appErr := configberry.NewAppError(configberry.ValidationError, parsingDelimeterParamError, nil) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -246,11 +245,11 @@ func (bh *BlobHandler) HandleListByPrefixWithDetail(c echo.Context) error { } err = s3Ctrl.GetListWithCallBack(bucket, prefix, delimiter, processPage) if err != nil { - appErr := configberry.HandleAWSError(err, "error processing objects") + appErr := configberry.HandleAWSError(err, listingObjectsAndPrefixError) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } - log.Info("Successfully retrieved list by prefix:", prefix) + log.Info("successfully retrieved list by prefix:", prefix) return configberry.HandleSuccessfulResponse(c, results) } diff --git a/blobstore/metadata.go b/blobstore/metadata.go index 3b1ae84..62ffdd0 100644 --- a/blobstore/metadata.go +++ b/blobstore/metadata.go @@ -20,7 +20,7 @@ func (s3Ctrl *S3Controller) GetMetaData(bucket, key string) (*s3.HeadObjectOutpu result, err := s3Ctrl.S3Svc.HeadObject(input) if err != nil { - return nil, fmt.Errorf("error 
getting object's metadata %s, %w", key, err) + return nil, err } return result, nil } @@ -37,10 +37,10 @@ func (s3Ctrl *S3Controller) KeyExists(bucket string, key string) (bool, error) { case "NotFound": // s3.ErrCodeNoSuchKey does not work, aws is missing this error code so we hardwire a string return false, nil default: - return false, fmt.Errorf("error checking if object %s exists, %w", key, err) + return false, err } } - return false, fmt.Errorf("error checking if object %s exists, %w", key, err) + return false, err } return true, nil } @@ -49,14 +49,14 @@ func (s3Ctrl *S3Controller) KeyExists(bucket string, key string) (bool, error) { func (bh *BlobHandler) HandleGetSize(c echo.Context) error { prefix := c.QueryParam("prefix") if prefix == "" { - appErr := configberry.NewAppError(configberry.ValidationError, "parameter `prefix` is required", nil) + appErr := configberry.NewAppError(configberry.ValidationError, parameterPrefixRequired, nil) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -73,7 +73,7 @@ func (bh *BlobHandler) HandleGetSize(c echo.Context) error { } if !fullAccess && !isPermittedPrefix(bucket, prefix, permissions) { - appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the %s prefix", prefix), err) + appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the `prefix` %s", prefix), err) log.Error(configberry.LogErrorFormatter(appErr, true)) return 
configberry.HandleErrorResponse(c, appErr) } @@ -85,12 +85,12 @@ func (bh *BlobHandler) HandleGetSize(c echo.Context) error { }) if err != nil { - appErr := configberry.HandleAWSError(err, "error processing objects") + appErr := configberry.HandleAWSError(err, listingObjectsAndPrefixError) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } if fileCount == 0 { - appErr := configberry.NewAppError(configberry.NotFoundError, fmt.Sprintf("prefix %s not found", prefix), err) + appErr := configberry.NewAppError(configberry.NotFoundError, fmt.Sprintf("`prefix` %s not found", prefix), err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) @@ -105,14 +105,14 @@ func (bh *BlobHandler) HandleGetSize(c echo.Context) error { Prefix: prefix, } - log.Info("Successfully retrieved size for prefix:", prefix) + log.Infof("Successfully retrieved size for `prefix` %s", prefix) return configberry.HandleSuccessfulResponse(c, response) } func (bh *BlobHandler) HandleGetMetaData(c echo.Context) error { key := c.QueryParam("key") if key == "" { - appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + appErr := configberry.NewAppError(configberry.ValidationError, parameterKeyRequired, nil) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -120,7 +120,7 @@ func (bh *BlobHandler) HandleGetMetaData(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -132,7 +132,7 @@ func (bh *BlobHandler) HandleGetMetaData(c 
echo.Context) error { } if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { - appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the %s key", key), err) + appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read object with `key` %s ", key), err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -144,14 +144,14 @@ func (bh *BlobHandler) HandleGetMetaData(c echo.Context) error { return configberry.HandleErrorResponse(c, appErr) } - log.Info("successfully retrieved metadata for key:", key) + log.Infof("successfully retrieved metadata for `key` %s exists", key) return configberry.HandleSuccessfulResponse(c, result) } func (bh *BlobHandler) HandleGetObjExist(c echo.Context) error { key := c.QueryParam("key") if key == "" { - appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + appErr := configberry.NewAppError(configberry.ValidationError, parameterKeyRequired, nil) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -159,7 +159,7 @@ func (bh *BlobHandler) HandleGetObjExist(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -171,18 +171,18 @@ func (bh *BlobHandler) HandleGetObjExist(c echo.Context) error { } if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { - appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the %s key", key), err) + 
appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read object with `key` %s", key), err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } result, err := s3Ctrl.KeyExists(bucket, key) if err != nil { - appErr := configberry.HandleAWSError(err, "error checking if object exists") + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error checking if object with `key` %s exists", key)) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } - log.Info("successfully retrieved metadata for key:", key) + log.Infof("successfully checked if object with `key` %s exists", key) return configberry.HandleSuccessfulResponse(c, result) } diff --git a/blobstore/move.go b/blobstore/move.go index 5038522..d253ec4 100644 --- a/blobstore/move.go +++ b/blobstore/move.go @@ -7,6 +7,7 @@ import ( "github.com/Dewberry/s3api/configberry" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/s3" "github.com/labstack/echo/v4" log "github.com/sirupsen/logrus" @@ -33,12 +34,10 @@ func (s3Ctrl *S3Controller) MovePrefix(bucket, srcPrefix, destPrefix string) err } _, err := s3Ctrl.S3Svc.CopyObject(copyInput) if err != nil { - return fmt.Errorf("error copying list, %w", err) + return err } } - // Deleting the source objects should be handled carefully - // Ensure that your application logic requires this before proceeding err := s3Ctrl.DeleteList(page, bucket) if err != nil { return fmt.Errorf("error deleting list, %w", err) @@ -53,7 +52,7 @@ func (s3Ctrl *S3Controller) MovePrefix(bucket, srcPrefix, destPrefix string) err // Check if objects were found after processing all pages if !objectsFound { - return errors.New("source prefix not found") + return errors.New("`src_prefix` not found") } return nil @@ -62,7 +61,9 @@ func (s3Ctrl *S3Controller) MovePrefix(bucket, srcPrefix, 
destPrefix string) err func (s3Ctrl *S3Controller) CopyObject(bucket, srcObjectKey, destObjectKey string) error { // Check if the source and destination keys are the same if srcObjectKey == destObjectKey { - return fmt.Errorf("source `%s` and destination `%s` keys are identical; no action taken", srcObjectKey, destObjectKey) + return awserr.New("InvalidParameter", "Source and Destination Keys are Identical", + fmt.Errorf("`src_key` %s and `dest_key` %s cannot be the same for move operation", srcObjectKey, destObjectKey)) + } // Check if the new key already exists in the bucket @@ -71,8 +72,10 @@ func (s3Ctrl *S3Controller) CopyObject(bucket, srcObjectKey, destObjectKey strin return err } if newKeyExists { - return fmt.Errorf(destObjectKey + " already exists in the bucket; duplication will cause an overwrite. Please rename dest_key to a different name") + return awserr.New("AlreadyExists", "Destination Key Already Exists", + fmt.Errorf("%s already exists in the bucket; consider renaming `dest_key`", destObjectKey)) } + // Set up input parameters for the CopyObject API to rename the object copyInput := &s3.CopyObjectInput{ Bucket: aws.String(bucket), @@ -83,7 +86,7 @@ func (s3Ctrl *S3Controller) CopyObject(bucket, srcObjectKey, destObjectKey strin // Copy the object to the new key (effectively renaming) _, err = s3Ctrl.S3Svc.CopyObject(copyInput) if err != nil { - return fmt.Errorf("error copying object %s, %w", srcObjectKey, err) + return err } // Delete the source object @@ -92,7 +95,7 @@ func (s3Ctrl *S3Controller) CopyObject(bucket, srcObjectKey, destObjectKey strin Key: aws.String(srcObjectKey), }) if err != nil { - return fmt.Errorf("error deleting object %s, %w", srcObjectKey, err) + return fmt.Errorf("error deleting object with `src_key` %s, %w", srcObjectKey, err) } return nil @@ -111,7 +114,7 @@ func (bh *BlobHandler) HandleMovePrefix(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := 
configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -131,12 +134,12 @@ func (bh *BlobHandler) HandleMovePrefix(c echo.Context) error { err = s3Ctrl.MovePrefix(bucket, params["srcPrefix"], params["destPrefix"]) if err != nil { - appErr := configberry.HandleAWSError(err, "error moving prefix") + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error moving `src_prefix` %s, to `dest_prefix` %s", params["srcPrefix"], params["destPrefix"])) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } - return configberry.HandleSuccessfulResponse(c, fmt.Sprintf("Successfully moved prefix from %s to %s", params["srcPrefix"], params["destPrefix"])) + return configberry.HandleSuccessfulResponse(c, fmt.Sprintf("Successfully moved `src_prefix` %s, to `dest_prefix` %s", params["srcPrefix"], params["destPrefix"])) } func (bh *BlobHandler) HandleMoveObject(c echo.Context) error { @@ -148,38 +151,21 @@ func (bh *BlobHandler) HandleMoveObject(c echo.Context) error { log.Error(configberry.LogErrorFormatter(appErr, false)) return configberry.HandleErrorResponse(c, appErr) } - if params["srcObjectKey"] == params["destObjectKey"] { - appErr := configberry.NewAppError(configberry.ValidationError, fmt.Sprintf("source `%s` and destination `%s` keys are identical; no action taken", params["srcObjectKey"], params["destObjectKey"]), nil) - log.Error(configberry.LogErrorFormatter(appErr, true)) - return configberry.HandleErrorResponse(c, appErr) - } bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.ValidationError, fmt.Sprintf("`bucket` %s is not available", bucket), err) - 
log.Error(configberry.LogErrorFormatter(appErr, true)) - return configberry.HandleErrorResponse(c, appErr) - } - - newKeyExists, err := s3Ctrl.KeyExists(bucket, params["destObjectKey"]) - if err != nil { - return err - } - - if newKeyExists { - appErr := configberry.NewAppError(configberry.ConflictError, fmt.Sprintf("%s already exists in the bucket; duplication will cause an overwrite. Please rename dest_key to a different name", params["destObjectKey"]), err) + appErr := configberry.NewAppError(configberry.ValidationError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) - } err = s3Ctrl.CopyObject(bucket, params["srcObjectKey"], params["destObjectKey"]) if err != nil { - appErr := configberry.HandleAWSError(err, "error copying prefix") + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error copying object with `src_key` %s to `dest_key` %s", params["srcObjectKey"], params["destObjectKey"])) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } - return configberry.HandleSuccessfulResponse(c, fmt.Sprintf("Succesfully moved object from %s to %s", params["srcObjectKey"], params["destObjectKey"])) + return configberry.HandleSuccessfulResponse(c, fmt.Sprintf("Succesfully moved object with `src_key` %s to `dest_key` %s", params["srcObjectKey"], params["destObjectKey"])) } diff --git a/blobstore/object_content.go b/blobstore/object_content.go index 9ce194a..bf8ad13 100644 --- a/blobstore/object_content.go +++ b/blobstore/object_content.go @@ -18,7 +18,7 @@ func (s3Ctrl *S3Controller) FetchObjectContent(bucket string, key string) (io.Re } output, err := s3Ctrl.S3Svc.GetObject(input) if err != nil { - return nil, fmt.Errorf("error getting object's content %s, %w", key, err) + return nil, err } return output.Body, nil @@ -27,7 +27,7 @@ func (s3Ctrl *S3Controller) FetchObjectContent(bucket string, key string) (io.Re func (bh 
*BlobHandler) HandleObjectContents(c echo.Context) error { key := c.QueryParam("key") if key == "" { - appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + appErr := configberry.NewAppError(configberry.ValidationError, parameterKeyRequired, nil) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -35,7 +35,7 @@ func (bh *BlobHandler) HandleObjectContents(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -47,24 +47,24 @@ func (bh *BlobHandler) HandleObjectContents(c echo.Context) error { } if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { - appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the %s key", key), err) + appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read object with the `key` %s", key), err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } outPutBody, err := s3Ctrl.FetchObjectContent(bucket, key) if err != nil { - appErr := configberry.HandleAWSError(err, "error fetching object's content") + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error fetching object's content with `key` %s", key)) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } body, err := io.ReadAll(outPutBody) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "error reading objects body", err) + appErr := 
configberry.NewAppError(configberry.InternalServerError, fmt.Sprintf("error reading object's body with `key` %s", key), err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } - log.Info("Successfully fetched object data for key:", key) + log.Info("successfully fetched object data for object with `key`:", key) //TODO: add contentType return configberry.HandleSuccessfulResponse(c, body) } diff --git a/blobstore/ping.go b/blobstore/ping.go index 92dd74a..e32601b 100644 --- a/blobstore/ping.go +++ b/blobstore/ping.go @@ -11,7 +11,7 @@ import ( ) func (bh *BlobHandler) HandlePing(c echo.Context) error { - return configberry.HandleSuccessfulResponse(c, "connection without Auth is healthy") + return configberry.HandleSuccessfulResponse(c, "connection without `Auth` is healthy") } func (bh *BlobHandler) HandlePingWithAuth(c echo.Context) error { diff --git a/blobstore/presigned_url.go b/blobstore/presigned_url.go index 3a706be..35ee0c2 100644 --- a/blobstore/presigned_url.go +++ b/blobstore/presigned_url.go @@ -20,7 +20,7 @@ import ( func (s3Ctrl *S3Controller) GetDownloadPresignedURL(bucket, key string, expDays int) (string, error) { duration := time.Duration(expDays) * 24 * time.Hour if _, err := s3Ctrl.GetMetaData(bucket, key); err != nil { //this is to check if the object exists or not, it will return an AWS error - return "", fmt.Errorf("error checking if object %s exists, %w", key, err) + return "", fmt.Errorf("error checking if object with `key` %s exists, %w", key, err) } req, _ := s3Ctrl.S3Svc.GetObjectRequest(&s3.GetObjectInput{ Bucket: aws.String(bucket), @@ -33,14 +33,14 @@ func (bh *BlobHandler) HandleGetPresignedDownloadURL(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := 
configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } key := c.QueryParam("key") if key == "" { - appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + appErr := configberry.NewAppError(configberry.ValidationError, parameterKeyRequired, nil) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -52,14 +52,14 @@ func (bh *BlobHandler) HandleGetPresignedDownloadURL(c echo.Context) error { } if !fullAccess && !isPermittedPrefix(bucket, key, permissions) { - appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the %s key", key), err) + appErr := configberry.NewAppError(configberry.ForbiddenError, fmt.Sprintf("user does not have permission to read the object with `key` %s", key), err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } url, err := s3Ctrl.GetDownloadPresignedURL(bucket, key, bh.Config.DefaultDownloadPresignedUrlExpiration) if err != nil { - appErr := configberry.HandleAWSError(err, fmt.Sprintf("error getting presigned download URL for object %s", key)) + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error getting presigned download URL for object eith `key` %s", key)) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -72,14 +72,14 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) 
log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } prefix := c.QueryParam("prefix") if prefix == "" { - appErr := configberry.NewAppError(configberry.ValidationError, "parameter `prefix` is required", nil) + appErr := configberry.NewAppError(configberry.ValidationError, parameterPrefixRequired, nil) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -134,7 +134,7 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { fullPath := filepath.Join(basePrefix, relativePath) presignedURL, err := s3Ctrl.GetDownloadPresignedURL(bucket, *item.Key, bh.Config.DefaultDownloadPresignedUrlExpiration) if err != nil { - return fmt.Errorf("error generating presigned URL for object %s: %v", *item.Key, err) + return fmt.Errorf("error generating presigned URL for object with `key` %s: %v", *item.Key, err) } url, err := url.QueryUnescape(presignedURL) if err != nil { //ServiceInternalError @@ -150,7 +150,7 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { // Call GetList with the processPage function err = s3Ctrl.GetListWithCallBack(bucket, prefix, false, processPage) if err != nil { - appErr := configberry.HandleAWSError(err, "error processing objects") + appErr := configberry.HandleAWSError(err, listingObjectsAndPrefixError) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -167,14 +167,14 @@ func (bh *BlobHandler) HandleGenerateDownloadScript(c echo.Context) error { ContentType: aws.String("binary/octet-stream"), }) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, fmt.Sprintf("error uploading %s to S3", txtBatFileName), err) + appErr := configberry.NewAppError(configberry.InternalServerError, fmt.Sprintf("error uploading object with `key` %s to S3", txtBatFileName), err) log.Error(configberry.LogErrorFormatter(appErr, true)) 
return configberry.HandleErrorResponse(c, appErr) } href, err := s3Ctrl.GetDownloadPresignedURL(bucket, outputFile, 1) if err != nil { - appErr := configberry.HandleAWSError(err, fmt.Sprintf("error generating presigned URL for %s", txtBatFileName)) + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error generating presigned URL for object with `key` %s", txtBatFileName)) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } diff --git a/blobstore/upload.go b/blobstore/upload.go index c06fb25..a3173f3 100644 --- a/blobstore/upload.go +++ b/blobstore/upload.go @@ -34,7 +34,7 @@ func (s3Ctrl *S3Controller) UploadS3Obj(bucket string, key string, body io.ReadC resp, err := s3Ctrl.S3Svc.CreateMultipartUpload(params) if err != nil { - return fmt.Errorf("error creating multipart upload for object %s, %w", key, err) + return fmt.Errorf("error creating multipart upload for object with `key` %s, %w", key, err) } // Create the variables that will track upload progress @@ -260,7 +260,7 @@ func (bh *BlobHandler) HandleMultipartUpload(c echo.Context) error { // Add overwrite check and parameter key := c.QueryParam("key") if key == "" { - appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + appErr := configberry.NewAppError(configberry.ValidationError, parameterKeyRequired, nil) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -268,7 +268,7 @@ func (bh *BlobHandler) HandleMultipartUpload(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -309,12 
+309,12 @@ func (bh *BlobHandler) HandleMultipartUpload(c echo.Context) error { keyExist, err := s3Ctrl.KeyExists(bucket, key) if err != nil { - appErr := configberry.HandleAWSError(err, "error checking if object exists`") + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error checking if object with `key` %s already exists`", key)) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } if keyExist && !override { - appErr := configberry.NewAppError(configberry.ConflictError, fmt.Sprintf("object %s already exists and override is set to %t", key, override), err) + appErr := configberry.NewAppError(configberry.ConflictError, fmt.Sprintf("object with `key` %s already exists and `override` is set to %t", key, override), err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -330,7 +330,7 @@ func (bh *BlobHandler) HandleMultipartUpload(c echo.Context) error { } - log.Infof("Successfully uploaded file with key: %s", key) + log.Infof("Successfully uploaded object with `key`: %s", key) return configberry.HandleSuccessfulResponse(c, "Successfully uploaded file") } @@ -338,7 +338,7 @@ func (bh *BlobHandler) HandleMultipartUpload(c echo.Context) error { func (bh *BlobHandler) HandleGetPresignedUploadURL(c echo.Context) error { key := c.QueryParam("key") if key == "" { - appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + appErr := configberry.NewAppError(configberry.ValidationError, parameterKeyRequired, nil) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -346,7 +346,7 @@ func (bh *BlobHandler) HandleGetPresignedUploadURL(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := 
configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -370,7 +370,7 @@ func (bh *BlobHandler) HandleGetPresignedUploadURL(c echo.Context) error { } presignedURL, err := s3Ctrl.GetUploadPartPresignedURL(bucket, key, uploadID, int64(partNumber), bh.Config.DefaultUploadPresignedUrlExpiration) if err != nil { - appErr := configberry.HandleAWSError(err, "error generating presigned part URL") + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error generating presigned part URL for object with `key` %s", key)) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -385,12 +385,12 @@ func (bh *BlobHandler) HandleGetPresignedUploadURL(c echo.Context) error { //if the user did not provided both upload_id and part_number then we returned normal presigned URL presignedURL, err := s3Ctrl.GetUploadPresignedURL(bucket, key, bh.Config.DefaultUploadPresignedUrlExpiration) if err != nil { - appErr := configberry.HandleAWSError(err, "error generating presigned URL") + appErr := configberry.HandleAWSError(err, (fmt.Sprintf("error generating presigned URL for object with `key` %s", key))) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } - log.Infof("successfully generated presigned URL for key: %s", key) + log.Infof("successfully generated presigned URL for object with `key`: %s", key) return configberry.HandleSuccessfulResponse(c, presignedURL) } @@ -398,7 +398,7 @@ func (bh *BlobHandler) HandleGetPresignedUploadURL(c echo.Context) error { func (bh *BlobHandler) HandleGetMultipartUploadID(c echo.Context) error { key := c.QueryParam("key") if key == "" { - appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + appErr := configberry.NewAppError(configberry.ValidationError, 
parameterKeyRequired, nil) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -406,7 +406,7 @@ func (bh *BlobHandler) HandleGetMultipartUploadID(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -431,7 +431,7 @@ func (bh *BlobHandler) HandleGetMultipartUploadID(c echo.Context) error { func (bh *BlobHandler) HandleCompleteMultipartUpload(c echo.Context) error { key := c.QueryParam("key") if key == "" { - appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + appErr := configberry.NewAppError(configberry.ValidationError, parameterKeyRequired, nil) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -439,7 +439,7 @@ func (bh *BlobHandler) HandleCompleteMultipartUpload(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -453,7 +453,7 @@ func (bh *BlobHandler) HandleCompleteMultipartUpload(c echo.Context) error { var req completeUploadRequest if err := c.Bind(&req); err != nil { - appErr := configberry.NewAppError(configberry.BadRequestError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.BadRequestError, parseingBodyRequestError, err) 
log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -468,7 +468,7 @@ func (bh *BlobHandler) HandleCompleteMultipartUpload(c echo.Context) error { _, err = s3Ctrl.CompleteMultipartUpload(bucket, key, req.UploadID, s3Parts) if err != nil { - appErr := configberry.HandleAWSError(err, fmt.Sprintf("error completing the multipart Upload for key %s", key)) + appErr := configberry.HandleAWSError(err, fmt.Sprintf("error completing the multipart Upload for object with `key` %s", key)) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -480,7 +480,7 @@ func (bh *BlobHandler) HandleCompleteMultipartUpload(c echo.Context) error { func (bh *BlobHandler) HandleAbortMultipartUpload(c echo.Context) error { key := c.QueryParam("key") if key == "" { - appErr := configberry.NewAppError(configberry.ValidationError, "parameter `key` is required", nil) + appErr := configberry.NewAppError(configberry.ValidationError, parameterKeyRequired, nil) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -488,7 +488,7 @@ func (bh *BlobHandler) HandleAbortMultipartUpload(c echo.Context) error { bucket := c.QueryParam("bucket") s3Ctrl, err := bh.GetController(bucket) if err != nil { - appErr := configberry.NewAppError(configberry.InternalServerError, "unable to get S3 controller", err) + appErr := configberry.NewAppError(configberry.InternalServerError, unableToGetController, err) log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } @@ -512,6 +512,6 @@ func (bh *BlobHandler) HandleAbortMultipartUpload(c echo.Context) error { log.Error(configberry.LogErrorFormatter(appErr, true)) return configberry.HandleErrorResponse(c, appErr) } - log.Infof("succesfully aborted multipart upload for key %s", key) + log.Infof("succesfully aborted multipart upload for object with `key` %s", 
key) return configberry.HandleSuccessfulResponse(c, "succesfully aborted multipart upload") } diff --git a/configberry/errors_handling.go b/configberry/errors_handling.go index a2c178d..d427ceb 100644 --- a/configberry/errors_handling.go +++ b/configberry/errors_handling.go @@ -167,9 +167,9 @@ func HandleAWSError(err error, errMsg string) *AppError { return NewAppError(ConflictError, formattedMessage, originalErr) case "NotUpdatable", "InvalidRequest", "Throttling", "ServiceLimitExceeded", "NotStabilized", "GeneralServiceException", "NetworkFailure", "InvalidTypeConfiguration", "NonCompliant", "Unknown", "UnsupportedTarget": return NewAppError(AWSError, formattedMessage, originalErr) - case "ServiceInternalError", "InternalFailure", "HandlerInternalFailure": + case "ServiceInternalError", "InternalFailure", "HandlerInternalFailure", "S3KeyCheckError": return NewAppError(InternalServerError, formattedMessage, originalErr) - case "InvalidPart": + case "InvalidPart", "InvalidParameter": return NewAppError(BadRequestError, formattedMessage, originalErr) case "EntityTooLarge": return NewAppError(EntityTooLargeError, formattedMessage, originalErr) @@ -191,7 +191,7 @@ func CheckRequiredParams(params map[string]string) *AppError { } } if len(missingParams) > 0 { - errMsg := fmt.Sprintf("The following required parameters are missing: %s", strings.Join(missingParams, ", ")) + errMsg := fmt.Sprintf("The following required parameters are missing: `%s`", strings.Join(missingParams, "`, `")) return NewAppError(ValidationError, errMsg, nil) } return nil diff --git a/documentation/general.md b/documentation/general.md index aa23d2e..85db53c 100644 --- a/documentation/general.md +++ b/documentation/general.md @@ -28,22 +28,42 @@ deprecated functions will be in /utils/deprecated.txt, the file should consist o # errors -Rules: +## Error Handling Best Practices + +These guidelines keep error messages consistent, descriptive, and
easy to debug: + +**Formatting:** - **Use Backticks `` ` `` to reference parameters:** parameters referenced in errors should be encapsulated inside backticks -- **Avoid Capital Letters:** Custom errors should not start with a capital letter (unless they begin with an acronym). -- **Error Messages Should Be Descriptive:** Ensure that error messages are clear and provide enough context to understand the issue. -- **Include Relevant Information:** Include information about the operation that failed, such as key parameters, to aid in debugging. -- **Avoid Punctuation:** Do not end error messages with punctuation marks like periods or exclamation points. -- **Use `errors.New` for Static Errors:** Use `errors.New` when the error message is static and doesn't need any additional context. - ```go - var ErrInvalidInput = errors.New("invalid input") - ``` -- **Use `fmt.Errorf` for Dynamic Errors:** Use `fmt.Errorf` when you need to include dynamic information or wrap an existing error with additional context. - ```go - err := fmt.Errorf("failed to process user ID %d: %w", userID, ErrInvalidInput) - ``` -- Methods that will be used for packages should return naitive go error +- **Lowercase Custom Errors:** Start custom error messages with lowercase letters (unless the message is an acronym). + +**Content:** + +- **Descriptive Messages:** Craft error messages that are clear and informative, explaining the issue encountered. +- **Relevant Information:** Include details about the failing operation, such as key parameters, to assist with debugging. +- **No Punctuation at End:** Avoid ending error messages with periods or exclamation points. + +**Error Creation:** + +- **`errors.New` for Static Errors:** Use `errors.New` for static error messages that don't require dynamic information.
+ +```go +var errInvalidInput = errors.New("invalid input") +``` + +- **`fmt.Errorf` for Dynamic Errors:** Use `fmt.Errorf` when you need to include dynamic information or wrap an existing error with additional context. + +```go +err := fmt.Errorf("failed to process user ID %d: %w", userID, errInvalidInput) +``` + +**Error Return in Packages:** + +- Methods in packages should return native Go error types. + +**Wrapping Errors:** + +- Don't wrap errors returned from your `s3Ctrl` functions unless the error originates from functionality unrelated to the method's main purpose. For example, in `s3Ctrl.DeleteObject`, wrap the existence check error because it doesn't directly concern deletion. # Logging From e9816f7a922124ea5b510a368799b64e3c2fc21d Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Mon, 24 Jun 2024 12:51:56 -0400 Subject: [PATCH 21/22] format error returned from blobhandler --- main.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/main.go b/main.go index 9c696a7..9b4b602 100644 --- a/main.go +++ b/main.go @@ -73,7 +73,8 @@ func main() { bh, err := blobstore.NewBlobHandler(envJson, authLvl) if err != nil { - log.Fatalf("error initializing a new blobhandler: %v", err) + appErr := configberry.NewAppError(configberry.FatalError, "error initializing a new blobhandler", err) + log.Fatal(configberry.LogErrorFormatter(appErr, true)) } e := echo.New() From 9d4e160468408968bc8acd4edb8df2afe0fac937 Mon Sep 17 00:00:00 2001 From: Anton Kopti Date: Mon, 24 Jun 2024 13:02:18 -0400 Subject: [PATCH 22/22] correct error comparison --- blobstore/delete.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blobstore/delete.go b/blobstore/delete.go index b544118..3ebfcf4 100644 --- a/blobstore/delete.go +++ b/blobstore/delete.go @@ -68,7 +68,7 @@ func (s3Ctrl *S3Controller) DeleteKeys(bucket string, key []string) error { exists, err := s3Ctrl.KeyExists(bucket, p) if err != nil { // Wrap error with awserr based on the specific error type - 
var awsErr error + var awsErr awserr.Error if errors.As(err, &awsErr) { // If it's an awserr.Error, return it directly return awsErr