diff --git a/.github/integration/sda-integration.yml b/.github/integration/sda-integration.yml index 60f3bf445..11406c866 100644 --- a/.github/integration/sda-integration.yml +++ b/.github/integration/sda-integration.yml @@ -24,9 +24,9 @@ services: - POSTGRES_PASSWORD=rootpasswd healthcheck: test: ["CMD-SHELL", "pg_isready -U postgres"] - interval: 5s - timeout: 20s - retries: 3 + interval: 10s + timeout: 2s + retries: 6 image: ghcr.io/neicnordic/sensitive-data-archive:PR${PR_NUMBER}-postgres ports: - "15432:5432" @@ -46,9 +46,9 @@ services: "-c", "rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms", ] - interval: 5s - timeout: 20s - retries: 3 + interval: 10s + timeout: 2s + retries: 6 image: ghcr.io/neicnordic/sensitive-data-archive:PR${PR_NUMBER}-rabbitmq ports: - "15672:15672" @@ -66,9 +66,9 @@ services: - MINIO_SERVER_URL=http://127.0.0.1:9000 healthcheck: test: ["CMD", "curl", "-fkq", "http://localhost:9000/minio/health/live"] - interval: 5s - timeout: 20s - retries: 3 + interval: 10s + timeout: 2s + retries: 6 ports: - "19000:9000" - "19001:9001" @@ -122,9 +122,9 @@ services: condition: service_completed_successfully healthcheck: test: ["CMD", "python3", "-c", 'import requests; print(requests.get(url = "http://localhost:8080/jwk").text)'] - interval: 5s - timeout: 20s - retries: 3 + interval: 10s + timeout: 2s + retries: 6 image: python:3.10-slim ports: - "8080:8080" diff --git a/.github/integration/sda/config.yaml b/.github/integration/sda/config.yaml index a1da0ea7d..9669ca001 100644 --- a/.github/integration/sda/config.yaml +++ b/.github/integration/sda/config.yaml @@ -1,7 +1,8 @@ log: format: "json" -aws: - url: "http://s3:9000" +inbox: + url: "http://s3" + port: 9000 readypath: "/minio/health/ready" accessKey: "access" secretKey: "secretKey" diff --git a/charts/sda-svc/templates/_helpers.yaml b/charts/sda-svc/templates/_helpers.yaml index 4e32654f6..c2c5c6e23 100644 --- a/charts/sda-svc/templates/_helpers.yaml +++ 
b/charts/sda-svc/templates/_helpers.yaml @@ -289,14 +289,6 @@ Create chart name and version as used by the chart label. {{- end -}} {{- end -}} -{{- define "S3InboxURL" -}} - {{- if .Values.global.inbox.s3Port }} - {{- printf "%s:%v" .Values.global.inbox.s3Url .Values.global.inbox.s3Port }} - {{- else }} - {{- printf "%s" .Values.global.inbox.s3Url }} - {{- end }} -{{- end -}} - {{- define "S3ArchiveURL" -}} {{- if .Values.global.inbox.s3Port }} {{- printf "%s:%v" .Values.global.inbox.s3Url .Values.global.inbox.s3Port }} diff --git a/charts/sda-svc/templates/s3-inbox-deploy.yaml b/charts/sda-svc/templates/s3-inbox-deploy.yaml index 405e526d2..fa072dbe0 100644 --- a/charts/sda-svc/templates/s3-inbox-deploy.yaml +++ b/charts/sda-svc/templates/s3-inbox-deploy.yaml @@ -86,12 +86,12 @@ spec: {{- end }} env: {{- if not .Values.global.vaultSecrets }} - - name: AWS_ACCESSKEY + - name: INBOX_ACCESSKEY valueFrom: secretKeyRef: name: {{ template "sda.fullname" . }}-inbox key: s3InboxAccessKey - - name: AWS_SECRETKEY + - name: INBOX_SECRETKEY valueFrom: secretKeyRef: name: {{ template "sda.fullname" . }}-inbox @@ -117,20 +117,24 @@ spec: - name: SERVER_CONFFILE value: {{ include "confFile" .}} {{- end }} - - name: AWS_URL - value: {{ template "S3InboxURL" . }} + - name: INBOX_URL + value: {{ .Values.global.inbox.s3Url | quote }} + {{- if .Values.global.inbox.s3Port }} + - name: INBOX_PORT + value: {{ .Values.global.inbox.s3Port | quote }} + {{- end }} {{- if and .Values.global.inbox.s3CaFile .Values.global.tls.enabled }} - - name: AWS_CACERT + - name: INBOX_CACERT value: "{{ include "tlsPath" . 
}}/ca.crt" {{- end }} {{- if .Values.global.inbox.s3Region }} - - name: AWS_REGION + - name: INBOX_REGION value: {{ .Values.global.inbox.s3Region | quote }} {{- end }} - - name: AWS_BUCKET + - name: INBOX_BUCKET value: {{ .Values.global.inbox.s3Bucket | quote }} {{- if .Values.global.inbox.s3ReadyPath }} - - name: AWS_READYPATH + - name: INBOX_READYPATH value: {{ .Values.global.inbox.s3ReadyPath }} {{- end }} - name: BROKER_HOST diff --git a/sda/cmd/s3inbox/bucket.go b/sda/cmd/s3inbox/bucket.go deleted file mode 100644 index c18bcb680..000000000 --- a/sda/cmd/s3inbox/bucket.go +++ /dev/null @@ -1,86 +0,0 @@ -package main - -import ( - "crypto/tls" - "crypto/x509" - "net/http" - "os" - "reflect" - "strings" - - "github.com/neicnordic/sensitive-data-archive/internal/config" - - "github.com/pkg/errors" - log "github.com/sirupsen/logrus" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" -) - -func checkS3Bucket(config config.S3Config) error { - s3Transport := transportConfigS3(config) - client := http.Client{Transport: s3Transport} - s3Session := session.Must(session.NewSession( - &aws.Config{ - Endpoint: aws.String(config.URL), - Region: aws.String(config.Region), - HTTPClient: &client, - S3ForcePathStyle: aws.Bool(true), - DisableSSL: aws.Bool(strings.HasPrefix(config.URL, "http:")), - Credentials: credentials.NewStaticCredentials(config.AccessKey, config.SecretKey, ""), - }, - )) - - _, err := s3.New(s3Session).CreateBucket(&s3.CreateBucketInput{ - Bucket: aws.String(config.Bucket), - }) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - if aerr.Code() != s3.ErrCodeBucketAlreadyOwnedByYou && - aerr.Code() != s3.ErrCodeBucketAlreadyExists { - return errors.Errorf("Unexpected issue while creating bucket: %v", err) - } - - return nil - } - - return errors.New("Verifying bucket failed, check S3 
configuration") - } - - return nil -} - -// transportConfigS3 is a helper method to setup TLS for the S3 client. -func transportConfigS3(config config.S3Config) http.RoundTripper { - cfg := new(tls.Config) - - // Enforce TLS1.2 or higher - cfg.MinVersion = 2 - - // Read system CAs - var systemCAs, _ = x509.SystemCertPool() - if reflect.DeepEqual(systemCAs, x509.NewCertPool()) { - log.Debug("creating new CApool") - systemCAs = x509.NewCertPool() - } - cfg.RootCAs = systemCAs - - if config.CAcert != "" { - cacert, e := os.ReadFile(config.CAcert) // #nosec this file comes from our config - if e != nil { - log.Fatalf("failed to append %q to RootCAs: %v", cacert, e) - } - if ok := cfg.RootCAs.AppendCertsFromPEM(cacert); !ok { - log.Debug("no certs appended, using system certs only") - } - } - - var trConfig http.RoundTripper = &http.Transport{ - TLSClientConfig: cfg, - ForceAttemptHTTP2: true} - - return trConfig -} diff --git a/sda/cmd/s3inbox/bucket_test.go b/sda/cmd/s3inbox/bucket_test.go deleted file mode 100644 index 4a9b4c769..000000000 --- a/sda/cmd/s3inbox/bucket_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package main - -import ( - "net/http/httptest" - "os" - "testing" - - "github.com/neicnordic/sensitive-data-archive/internal/config" - - log "github.com/sirupsen/logrus" - - "github.com/johannesboyne/gofakes3" - "github.com/johannesboyne/gofakes3/backend/s3mem" - "github.com/spf13/viper" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" -) - -var ts *httptest.Server - -type BucketTestSuite struct { - suite.Suite -} - -func (suite *BucketTestSuite) SetupTest() { - err := setupFakeS3() - if err != nil { - log.Error("Setup of fake s3 failed, bailing out") - os.Exit(1) - } - - viper.Set("broker.host", "localhost") - viper.Set("broker.port", "1234") - viper.Set("broker.user", "guest") - viper.Set("broker.password", "guest") - viper.Set("broker.routingkey", "ingest") - viper.Set("broker.exchange", "amq.topic") - viper.Set("broker.vhost", "/") 
- viper.Set("aws.url", ts.URL) - viper.Set("aws.accesskey", "testaccess") - viper.Set("aws.secretkey", "testsecret") - viper.Set("aws.bucket", "testbucket") - viper.Set("server.jwtpubkeypath", "testpath") -} - -func setupFakeS3() (err error) { - // fake s3 - - if ts != nil { - // Setup done already? - return - } - - backend := s3mem.New() - faker := gofakes3.New(backend) - ts = httptest.NewServer(faker.Server()) - - if err != nil { - log.Error("Unexpected error while setting up fake s3") - - return err - } - - return err -} - -func TestBucketTestSuite(t *testing.T) { - suite.Run(t, new(BucketTestSuite)) -} - -func (suite *BucketTestSuite) TestBucketPass() { - config, err := config.NewConfig() - assert.NotNil(suite.T(), config) - assert.NoError(suite.T(), err) - - err = checkS3Bucket(config.S3) - assert.NoError(suite.T(), err) -} - -func (suite *BucketTestSuite) TestBucketFail() { - viper.Set("aws.url", "http://localhost:12345") - config, err := config.NewConfig() - assert.NotNil(suite.T(), config) - assert.NoError(suite.T(), err) - - err = checkS3Bucket(config.S3) - assert.Error(suite.T(), err) -} diff --git a/sda/cmd/s3inbox/healthchecks.go b/sda/cmd/s3inbox/healthchecks.go index e61247d76..6f3e6c166 100644 --- a/sda/cmd/s3inbox/healthchecks.go +++ b/sda/cmd/s3inbox/healthchecks.go @@ -8,9 +8,8 @@ import ( "strconv" "time" - "github.com/neicnordic/sensitive-data-archive/internal/config" - "github.com/heptiolabs/healthcheck" + "github.com/neicnordic/sensitive-data-archive/internal/config" ) // HealthCheck registers and endpoint for healthchecking the service @@ -25,9 +24,12 @@ type HealthCheck struct { // NewHealthCheck creates a new healthchecker. It needs to know where to find // the backend S3 storage and the Message Broker so it can report readiness. 
func NewHealthCheck(port int, db *sql.DB, conf *config.Config, tlsConfig *tls.Config) *HealthCheck { - s3URL := conf.S3.URL - if conf.S3.Readypath != "" { - s3URL = conf.S3.URL + conf.S3.Readypath + s3URL := conf.Inbox.S3.URL + if conf.Inbox.S3.Port != 0 { + s3URL = fmt.Sprintf("%s:%d", s3URL, conf.Inbox.S3.Port) + } + if conf.Inbox.S3.Readypath != "" { + s3URL += conf.Inbox.S3.Readypath } brokerURL := fmt.Sprintf("%s:%d", conf.Broker.Host, conf.Broker.Port) diff --git a/sda/cmd/s3inbox/healthchecks_test.go b/sda/cmd/s3inbox/healthchecks_test.go index 80b38d156..85633d274 100644 --- a/sda/cmd/s3inbox/healthchecks_test.go +++ b/sda/cmd/s3inbox/healthchecks_test.go @@ -34,16 +34,16 @@ func (suite *HealthcheckTestSuite) SetupTest() { viper.Set("broker.routingkey", "ingest") viper.Set("broker.exchange", "sda") viper.Set("broker.vhost", "sda") - viper.Set("aws.url", "http://localhost:8080") - viper.Set("aws.accesskey", "testaccess") - viper.Set("aws.secretkey", "testsecret") - viper.Set("aws.bucket", "testbucket") + viper.Set("inbox.url", "http://localhost:8080") + viper.Set("inbox.accesskey", "testaccess") + viper.Set("inbox.secretkey", "testsecret") + viper.Set("inbox.bucket", "testbucket") viper.Set("server.jwtpubkeypath", "testpath") } func (suite *HealthcheckTestSuite) TestHttpsGetCheck() { db, _, _ := sqlmock.New() - conf, err := config.NewConfig() + conf, err := config.NewConfig("s3inbox") assert.NoError(suite.T(), err) assert.NotNil(suite.T(), conf) h := NewHealthCheck(8888, @@ -68,7 +68,7 @@ func (suite *HealthcheckTestSuite) TestHealthchecks() { db, mock, _ := sqlmock.New(sqlmock.MonitorPingsOption(true)) mock.ExpectPing() - conf, err := config.NewConfig() + conf, err := config.NewConfig("s3inbox") assert.NoError(suite.T(), err) assert.NotNil(suite.T(), conf) h := NewHealthCheck(8888, diff --git a/sda/cmd/s3inbox/proxy.go b/sda/cmd/s3inbox/proxy.go index 1af44b742..e02b9248a 100644 --- a/sda/cmd/s3inbox/proxy.go +++ b/sda/cmd/s3inbox/proxy.go @@ -10,14 +10,14 
@@ import ( "net/http" "net/url" "os" + "regexp" "strconv" "strings" "time" "github.com/neicnordic/sensitive-data-archive/internal/broker" - "github.com/neicnordic/sensitive-data-archive/internal/config" "github.com/neicnordic/sensitive-data-archive/internal/database" - "github.com/neicnordic/sensitive-data-archive/internal/helper" + "github.com/neicnordic/sensitive-data-archive/internal/storage" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -31,7 +31,7 @@ import ( // Proxy represents the toplevel object in this application type Proxy struct { - s3 config.S3Config + s3 storage.S3Conf auth Authenticator messenger *broker.AMQPBroker database *database.SDAdb @@ -72,7 +72,7 @@ const ( ) // NewProxy creates a new S3Proxy. This implements the ServerHTTP interface. -func NewProxy(s3conf config.S3Config, auth Authenticator, messenger *broker.AMQPBroker, database *database.SDAdb, tls *tls.Config) *Proxy { +func NewProxy(s3conf storage.S3Conf, auth Authenticator, messenger *broker.AMQPBroker, database *database.SDAdb, tls *tls.Config) *Proxy { tr := &http.Transport{TLSClientConfig: tls} client := &http.Client{Transport: tr, Timeout: 30 * time.Second} @@ -125,7 +125,7 @@ func (p *Proxy) allowedResponse(w http.ResponseWriter, r *http.Request) { username := fmt.Sprintf("%v", claims["sub"]) rawFilepath := strings.Replace(r.URL.Path, "/"+p.s3.Bucket+"/", "", 1) - filepath, err := helper.FormatUploadFilePath(rawFilepath) + filepath, err := formatUploadFilePath(rawFilepath) if err != nil { log.Debugf(err.Error()) w.WriteHeader(http.StatusNotAcceptable) @@ -177,7 +177,7 @@ func (p *Proxy) allowedResponse(w http.ResponseWriter, r *http.Request) { } case false: - if err = p.messenger.SendMessage(p.fileIds[r.URL.Path], p.messenger.Conf.Exchange, p.messenger.Conf.RoutingKey, true, jsonMessage); err != nil { + if err = p.messenger.SendMessage(p.fileIds[r.URL.Path], p.messenger.Conf.Exchange, p.messenger.Conf.RoutingKey, jsonMessage); err != nil { 
log.Debug("error when sending message") log.Error(err) } @@ -245,10 +245,10 @@ func (p *Proxy) uploadFinishedSuccessfully(req *http.Request, response *http.Res func (p *Proxy) forwardToBackend(r *http.Request) (*http.Response, error) { - p.resignHeader(r, p.s3.AccessKey, p.s3.SecretKey, p.s3.URL) + p.resignHeader(r, p.s3.AccessKey, p.s3.SecretKey, fmt.Sprintf("%s:%d", p.s3.URL, p.s3.Port)) // Redirect request - nr, err := http.NewRequest(r.Method, p.s3.URL+r.URL.String(), r.Body) + nr, err := http.NewRequest(r.Method, fmt.Sprintf("%s:%d", p.s3.URL, p.s3.Port)+r.URL.String(), r.Body) if err != nil { log.Debug("error when redirecting the request") log.Debug(err) @@ -454,7 +454,7 @@ func (p *Proxy) newSession() (*session.Session, error) { CustomCABundle: cacert, Config: aws.Config{ Region: aws.String(p.s3.Region), - Endpoint: aws.String(p.s3.URL), + Endpoint: aws.String(fmt.Sprintf("%s:%d", p.s3.URL, p.s3.Port)), DisableSSL: aws.Bool(strings.HasPrefix(p.s3.URL, "http:")), S3ForcePathStyle: aws.Bool(true), Credentials: credentials.NewStaticCredentials(p.s3.AccessKey, p.s3.SecretKey, ""), @@ -465,7 +465,7 @@ func (p *Proxy) newSession() (*session.Session, error) { } else { mySession, err = session.NewSession(&aws.Config{ Region: aws.String(p.s3.Region), - Endpoint: aws.String(p.s3.URL), + Endpoint: aws.String(fmt.Sprintf("%s:%d", p.s3.URL, p.s3.Port)), DisableSSL: aws.Bool(strings.HasPrefix(p.s3.URL, "http:")), S3ForcePathStyle: aws.Bool(true), Credentials: credentials.NewStaticCredentials(p.s3.AccessKey, p.s3.SecretKey, ""), @@ -477,3 +477,27 @@ func (p *Proxy) newSession() (*session.Session, error) { return mySession, nil } + +// FormatUploadFilePath ensures that path separators are "/", and returns error if the +// filepath contains a disallowed character matched with regex +func formatUploadFilePath(filePath string) (string, error) { + + // Check for mixed "\" and "/" in filepath. 
Stop and throw an error if true so that + // we do not end up with unintended folder structure when applying ReplaceAll below + if strings.Contains(filePath, "\\") && strings.Contains(filePath, "/") { + return filePath, fmt.Errorf("filepath contains mixed '\\' and '/' characters") + } + + // make any windows path separators linux compatible + outPath := strings.ReplaceAll(filePath, "\\", "/") + + // [\x00-\x1F\x7F] is the control character set + re := regexp.MustCompile(`[\\:\*\?"<>\|\x00-\x1F\x7F]`) + + dissallowedChars := re.FindAllString(outPath, -1) + if dissallowedChars != nil { + return outPath, fmt.Errorf("filepath contains disallowed characters: %+v", strings.Join(dissallowedChars, ", ")) + } + + return outPath, nil +} diff --git a/sda/cmd/s3inbox/proxy_test.go b/sda/cmd/s3inbox/proxy_test.go index fb995b0ed..0b58d8989 100644 --- a/sda/cmd/s3inbox/proxy_test.go +++ b/sda/cmd/s3inbox/proxy_test.go @@ -12,8 +12,8 @@ import ( "testing" "github.com/neicnordic/sensitive-data-archive/internal/broker" - "github.com/neicnordic/sensitive-data-archive/internal/config" "github.com/neicnordic/sensitive-data-archive/internal/database" + "github.com/neicnordic/sensitive-data-archive/internal/storage" "github.com/golang-jwt/jwt/v4" "github.com/stretchr/testify/assert" @@ -22,7 +22,7 @@ import ( type ProxyTests struct { suite.Suite - S3conf config.S3Config + S3conf storage.S3Conf DBConf database.DBConf fakeServer *FakeServer MQConf broker.MQConf @@ -40,8 +40,9 @@ func (suite *ProxyTests) SetupTest() { suite.fakeServer = startFakeServer("9024") // Create an s3config for the fake server - suite.S3conf = config.S3Config{ - URL: "http://127.0.0.1:9024", + suite.S3conf = storage.S3Conf{ + URL: "http://127.0.0.1", + Port: 9024, AccessKey: "someAccess", SecretKey: "someSecret", Bucket: "buckbuck", @@ -210,7 +211,7 @@ func (suite *ProxyTests) TestServeHTTP_disallowed() { } func (suite *ProxyTests) TestServeHTTPS3Unresponsive() { - s3conf := config.S3Config{ + s3conf := 
storage.S3Conf{ URL: "http://localhost:40211", AccessKey: "someAccess", SecretKey: "someSecret", @@ -401,3 +402,21 @@ func (suite *ProxyTests) TestDatabaseConnection() { assert.Equal(suite.T(), exists, 1, "File '%v' event does not exist", status) } } + +func (suite *ProxyTests) TestFormatUploadFilePath() { + unixPath := "a/b/c.c4gh" + testPath := "a\\b\\c.c4gh" + uploadPath, err := formatUploadFilePath(testPath) + assert.NoError(suite.T(), err) + assert.Equal(suite.T(), unixPath, uploadPath) + + // mixed "\" and "/" + weirdPath := `dq\sw:*?"<>|\t\s/df.c4gh` + _, err = formatUploadFilePath(weirdPath) + assert.EqualError(suite.T(), err, "filepath contains mixed '\\' and '/' characters") + + // no mixed "\" and "/" but not allowed + weirdPath = `dq\sw:*?"<>|\t\sdf.c4gh` + _, err = formatUploadFilePath(weirdPath) + assert.EqualError(suite.T(), err, "filepath contains disallowed characters: :, *, ?, \", <, >, |") +} diff --git a/sda/cmd/s3inbox/main.go b/sda/cmd/s3inbox/s3inbox.go similarity index 89% rename from sda/cmd/s3inbox/main.go rename to sda/cmd/s3inbox/s3inbox.go index 9daf013fe..c6e62f692 100644 --- a/sda/cmd/s3inbox/main.go +++ b/sda/cmd/s3inbox/s3inbox.go @@ -10,6 +10,7 @@ import ( "github.com/neicnordic/sensitive-data-archive/internal/broker" "github.com/neicnordic/sensitive-data-archive/internal/config" "github.com/neicnordic/sensitive-data-archive/internal/database" + "github.com/neicnordic/sensitive-data-archive/internal/storage" log "github.com/sirupsen/logrus" ) @@ -28,13 +29,12 @@ func main() { } }() - c, err := config.NewConfig() + Conf, err := config.NewConfig("s3inbox") if err != nil { log.Error(err) sigc <- syscall.SIGINT panic(err) } - Conf = c tlsProxy, err := config.TLSConfigProxy(Conf) if err != nil { @@ -43,7 +43,7 @@ func main() { panic(err) } - sdaDB, err := database.NewSDAdb(Conf.DB) + sdaDB, err := database.NewSDAdb(Conf.Database) if err != nil { log.Error(err) sigc <- syscall.SIGINT @@ -57,7 +57,7 @@ func main() { log.Debugf("Connected 
to sda-db (v%v)", sdaDB.Version) - err = checkS3Bucket(Conf.S3) + err = storage.CheckS3Bucket(Conf.Inbox.S3.Bucket, storage.CreateS3Session(Conf.Inbox.S3)) if err != nil { log.Error(err) sigc <- syscall.SIGINT @@ -94,7 +94,7 @@ func main() { log.Panicf("Error while getting key %s: %v", Conf.Server.Jwtpubkeypath, err) } } - proxy := NewProxy(Conf.S3, auth, messenger, sdaDB, tlsProxy) + proxy := NewProxy(Conf.Inbox.S3, auth, messenger, sdaDB, tlsProxy) log.Debug("got the proxy ", proxy) diff --git a/sda/cmd/s3inbox/main_test.go b/sda/cmd/s3inbox/s3inbox_test.go similarity index 100% rename from sda/cmd/s3inbox/main_test.go rename to sda/cmd/s3inbox/s3inbox_test.go diff --git a/sda/go.mod b/sda/go.mod index 49b0d081f..55b10dcc8 100644 --- a/sda/go.mod +++ b/sda/go.mod @@ -4,31 +4,39 @@ go 1.20 require ( github.com/DATA-DOG/go-sqlmock v1.5.0 - github.com/aws/aws-sdk-go v1.44.253 + github.com/aws/aws-sdk-go v1.45.10 + github.com/gliderlabs/ssh v0.3.5 github.com/golang-jwt/jwt/v4 v4.5.0 + github.com/google/uuid v1.1.2 github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb github.com/johannesboyne/gofakes3 v0.0.0-20230310080033-c0edf658332b github.com/lestrrat-go/jwx v1.2.26 github.com/lib/pq v1.10.9 github.com/minio/minio-go/v6 v6.0.57 + github.com/neicnordic/crypt4gh v1.8.2 github.com/ory/dockertest/v3 v3.10.0 github.com/pkg/errors v0.9.1 - github.com/rabbitmq/amqp091-go v1.8.0 - github.com/santhosh-tekuri/jsonschema/v5 v5.3.0 - github.com/sirupsen/logrus v1.9.0 - github.com/spf13/viper v1.15.0 + github.com/pkg/sftp v1.13.6 + github.com/rabbitmq/amqp091-go v1.8.1 + github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 + github.com/sirupsen/logrus v1.9.3 + github.com/spf13/viper v1.16.0 github.com/stretchr/testify v1.8.4 + golang.org/x/crypto v0.13.0 ) require ( + filippo.io/edwards25519 v1.0.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Nvveen/Gotty 
v0.0.0-20120604004816-cd527374f1e5 // indirect + github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.1.3 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/continuity v0.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/docker/cli v20.10.17+incompatible // indirect github.com/docker/docker v20.10.24+incompatible // indirect @@ -37,46 +45,47 @@ require ( github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/kr/fs v0.1.0 // indirect github.com/lestrrat-go/backoff/v2 v2.0.8 // indirect github.com/lestrrat-go/blackmagic v1.0.1 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect github.com/lestrrat-go/iter v1.0.2 // indirect github.com/lestrrat-go/option v1.0.1 // indirect github.com/magiconair/properties v1.8.7 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/minio/sha256-simd v0.1.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 
// indirect github.com/opencontainers/runc v1.1.7 // indirect - github.com/pelletier/go-toml/v2 v2.0.6 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.13.1 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.11.1 // indirect github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63 // indirect - github.com/spf13/afero v1.9.3 // indirect - github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/subosito/gotenv v1.4.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect - golang.org/x/crypto v0.9.0 // indirect golang.org/x/mod v0.9.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/sys v0.12.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/tools v0.7.0 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/sda/go.sum b/sda/go.sum index c94ed9fa4..60f288132 100644 --- a/sda/go.sum +++ b/sda/go.sum @@ -36,6 +36,8 @@ 
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= +filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= @@ -52,9 +54,13 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/aws/aws-sdk-go v1.33.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.44.253 h1:iqDd0okcH4ShfFexz2zzf4VmeDFf6NOMm07pHnEb8iY= github.com/aws/aws-sdk-go v1.44.253/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.45.10 h1:GoqAm25t0qrs4rrXAjqt3luZnM9mV0lzfNwzgaCKpm4= +github.com/aws/aws-sdk-go v1.45.10/go.mod 
h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -65,6 +71,8 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -79,6 +87,8 @@ github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a h1:saTgr5tMLFnmy/yg3qDTft4rE5DY2uJ/cCxCe3q0XTU= +github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a/go.mod h1:Bw9BbhOJVNR+t0jCqx2GC6zv0TGBsShs56Y3gfSCvl0= github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= 
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= @@ -100,6 +110,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= +github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -149,6 +161,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -179,6 +193,7 @@ github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLe github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -214,9 +229,13 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -243,11 +262,15 @@ github.com/magiconair/properties v1.8.7 
h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= github.com/minio/minio-go/v6 v6.0.57 h1:ixPkbKkyD7IhnluRgQpGSpHdpvNVaW6OD5R9IAO/9Tw= github.com/minio/minio-go/v6 v6.0.57/go.mod h1:5+R/nM9Pwrh0vqF+HbYYDQ84wdUFPyXHkrdT4AIkifM= github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -260,6 +283,10 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/neicnordic/crypt4gh v1.7.6 h1:Vqcb8Yb950oaBBJFepDK1oLeu9rZzpywYWVHLmO0oI8= 
+github.com/neicnordic/crypt4gh v1.7.6/go.mod h1:rqmVXsprDFBRRLJkm1cK9kLETBPGEZmft9lHD/V40wk= +github.com/neicnordic/crypt4gh v1.8.2 h1:KNqYBBDU0qW296I6yLoA7l0GoNA/lfzhpy9RDkzNrRM= +github.com/neicnordic/crypt4gh v1.8.2/go.mod h1:VftsV+iUntv40/EB9TbnBnQ3/IDH40zEAqcMajrFVVg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -270,11 +297,16 @@ github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4a github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1 h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= +github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -284,17 +316,23 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.1 h1:3gMjIY2+/hzmqhtUC/aQNYldJA6DtH3CgQvwS+02K1c= github.com/prometheus/client_golang v1.13.1/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.44.0 
h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= @@ -302,14 +340,20 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/rabbitmq/amqp091-go v1.8.0 h1:GBFy5PpLQ5jSVVSYv8ecHGqeX7UTLYR4ItQbDCss9MM= github.com/rabbitmq/amqp091-go v1.8.0/go.mod h1:+jPrT9iY2eLjRaMSRHUhc3z14E/l85kv/f+6luSD3pc= +github.com/rabbitmq/amqp091-go v1.8.1 h1:RejT1SBUim5doqcL6s7iN6SBmsQqyTgXb1xMlH0h1hA= +github.com/rabbitmq/amqp091-go v1.8.1/go.mod h1:+jPrT9iY2eLjRaMSRHUhc3z14E/l85kv/f+6luSD3pc= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= github.com/santhosh-tekuri/jsonschema/v5 v5.3.0 h1:uIkTLo0AGRc8l7h5l9r+GcYi9qfVPt6lD4/bhmzfiKo= github.com/santhosh-tekuri/jsonschema/v5 v5.3.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0= 
+github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63 h1:J6qvD6rbmOil46orKqJaRPG+zTpoGlBTUdyv8ki63L0= github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63/go.mod h1:n+VKSARF5y/tS9XFSP7vWDfS+GUC5vs/YT7M5XDTUEM= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -318,13 +362,19 @@ github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast 
v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -332,6 +382,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= +github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -349,6 +401,8 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 
h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= @@ -379,8 +433,14 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -453,9 +513,11 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= @@ -534,16 +596,23 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -554,8 +623,11 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -711,6 +783,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 h1:FVCohIoYO7IJoDDVpV2pdq7SgrMH6wHnuTyrdrxJNoY= gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0/go.mod h1:OdE7CF6DbADk7lN8LIKRzRJTTZXIjtWgA5THM5lhBAw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= diff --git a/sda/internal/broker/broker.go b/sda/internal/broker/broker.go index 2186e0448..4145fdf92 100644 --- a/sda/internal/broker/broker.go +++ b/sda/internal/broker/broker.go @@ -22,23 +22,22 @@ type AMQPBroker struct { // MQConf stores information about the message broker type MQConf struct { - Host string - Port int - User string - Password string - Vhost string - Queue string - Exchange string - RoutingKey string - RoutingError string - Ssl bool - VerifyPeer bool - CACert string - ClientCert string - ClientKey string - ServerName string - Durable bool - SchemasPath string + Host string + Port int + User string + Password string + Vhost string + Queue string + Exchange string + RoutingKey string + Ssl bool + VerifyPeer bool + CACert string + ClientCert string + ClientKey string + ServerName string + SchemasPath string + PrefetchCount int } // buildMQURI builds 
the MQ connection URI @@ -129,20 +128,6 @@ func NewMQ(config MQConf) (*AMQPBroker, error) { if err != nil { return nil, err } - if config.Queue != "" { - // The queues already exists so we can safely do a passive declaration - _, err = channel.QueueDeclarePassive( - config.Queue, // name - true, // durable - false, // auto-deleted - false, // internal - false, // noWait - nil, // arguments - ) - if err != nil { - return nil, err - } - } if e := channel.Confirm(false); e != nil { fmt.Printf("channel could not be put into confirm mode: %s", e) @@ -150,6 +135,14 @@ func NewMQ(config MQConf) (*AMQPBroker, error) { return nil, fmt.Errorf("channel could not be put into confirm mode: %s", e) } + if config.PrefetchCount > 0 { + // limit the number of messages retrieved from the queue + log.Debugf("prefetch count: %v", config.PrefetchCount) + if err := channel.Qos(config.PrefetchCount, 0, false); err != nil { + log.Errorf("failed to set Channel QoS to %d, reason: %v", config.PrefetchCount, err) + } + } + confirms := channel.NotifyPublish(make(chan amqp.Confirmation, 1)) return &AMQPBroker{connection, channel, config, confirms}, nil @@ -162,6 +155,12 @@ func (broker *AMQPBroker) ConnectionWatcher() *amqp.Error { return amqpError } +func (broker *AMQPBroker) ChannelWatcher() *amqp.Error { + amqpError := <-broker.Channel.NotifyClose(make(chan *amqp.Error)) + + return amqpError +} + // GetMessages reads messages from the queue func (broker *AMQPBroker) GetMessages(queue string) (<-chan amqp.Delivery, error) { ch := broker.Channel @@ -178,7 +177,7 @@ func (broker *AMQPBroker) GetMessages(queue string) (<-chan amqp.Delivery, error } // SendMessage sends a message to RabbitMQ -func (broker *AMQPBroker) SendMessage(corrID, exchange, routingKey string, _ bool, body []byte) error { +func (broker *AMQPBroker) SendMessage(corrID, exchange, routingKey string, body []byte) error { ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() diff --git 
a/sda/internal/broker/broker_test.go b/sda/internal/broker/broker_test.go index 509a61525..890e64a19 100644 --- a/sda/internal/broker/broker_test.go +++ b/sda/internal/broker/broker_test.go @@ -120,15 +120,14 @@ func (suite *BrokerTestSuite) SetupTest() { "ingest", "amq.default", "ingest", - "error", false, false, certPath + "/ca.crt", certPath + "/tls.crt", certPath + "/tls.key", "mq", - true, "", + 2, } } @@ -231,7 +230,7 @@ func (suite *BrokerTestSuite) TestSendMessage() { assert.NotNil(suite.T(), b, "NewMQ without ssl did not return a broker") assert.False(suite.T(), b.Connection.IsClosed()) - err = b.SendMessage("1", "", "ingest", true, []byte("test message")) + err = b.SendMessage("1", "", "ingest", []byte("test message")) assert.NoError(suite.T(), err) b.Channel.Close() diff --git a/sda/internal/config/config.go b/sda/internal/config/config.go index 6b8aeea5c..132e7772c 100644 --- a/sda/internal/config/config.go +++ b/sda/internal/config/config.go @@ -6,34 +6,24 @@ import ( "fmt" "os" "reflect" - "strconv" "strings" + "time" "github.com/neicnordic/sensitive-data-archive/internal/broker" "github.com/neicnordic/sensitive-data-archive/internal/database" + "github.com/neicnordic/sensitive-data-archive/internal/storage" + "github.com/neicnordic/crypt4gh/keys" "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/spf13/viper" ) -var ( - requiredConfVars = []string{ - "aws.url", "aws.accessKey", "aws.secretKey", "aws.bucket", - "broker.host", "broker.port", "broker.user", "broker.password", "broker.vhost", "broker.exchange", "broker.routingKey", - } -) +const POSIX = "posix" +const S3 = "s3" +const SFTP = "sftp" -// S3Config stores information about the S3 backend -type S3Config struct { - URL string - Readypath string - AccessKey string - SecretKey string - Bucket string - Region string - CAcert string -} +var requiredConfVars []string // ServerConfig stores general server information type ServerConfig struct { @@ -45,40 +35,282 @@ type ServerConfig 
struct { // Config is a parent object for all the different configuration parts type Config struct { - S3 S3Config - Broker broker.MQConf - Server ServerConfig - DB database.DBConf + Archive storage.Conf + Broker broker.MQConf + Database database.DBConf + Inbox storage.Conf + Backup storage.Conf + Server ServerConfig + API APIConf + Notify SMTPConf + Orchestrator OrchestratorConf +} + +type APIConf struct { + CACert string + ServerCert string + ServerKey string + Host string + Port int + Session SessionConfig + DB *database.SDAdb + MQ *broker.AMQPBroker +} + +type SessionConfig struct { + Expiration time.Duration + Domain string + Secure bool + HTTPOnly bool + Name string +} + +type SMTPConf struct { + Password string + FromAddr string + Host string + Port int +} + +type OrchestratorConf struct { + ProjectFQDN string + QueueVerify string + QueueInbox string + QueueComplete string + QueueBackup string + QueueMapping string + QueueIngest string + QueueAccession string + ReleaseDelay time.Duration } // NewConfig initializes and parses the config file and/or environment using // the viper library. 
-func NewConfig() (*Config, error) { +func NewConfig(app string) (*Config, error) { viper.SetConfigName("config") viper.AddConfigPath(".") viper.AutomaticEnv() viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) viper.SetConfigType("yaml") - if viper.IsSet("server.confPath") { - cp := viper.GetString("server.confPath") + viper.SetDefault("schema.type", "federated") + + if viper.IsSet("configPath") { + cp := viper.GetString("configPath") + log.Infof("configPath: %s", cp) if !strings.HasSuffix(cp, "/") { cp += "/" } viper.AddConfigPath(cp) } - if viper.IsSet("server.confFile") { - viper.SetConfigFile(viper.GetString("server.confFile")) + if viper.IsSet("configFile") { + viper.SetConfigFile(viper.GetString("configFile")) } + log.Infoln("reading config") if err := viper.ReadInConfig(); err != nil { + log.Infoln(err.Error()) if _, ok := err.(viper.ConfigFileNotFoundError); ok { log.Infoln("No config file found, using ENVs only") } else { + log.Infoln("ReadInConfig Error") + return nil, err } } - requiredConfVars = []string{ - "broker.host", "broker.port", "broker.user", "broker.password", "broker.exchange", "broker.routingkey", "aws.url", "aws.accesskey", "aws.secretkey", "aws.bucket", + switch app { + case "api": + requiredConfVars = []string{ + "broker.host", + "broker.port", + "broker.user", + "broker.password", + "broker.routingkey", + "db.host", + "db.port", + "db.user", + "db.password", + "db.database", + } + case "backup": + requiredConfVars = []string{ + "broker.host", + "broker.port", + "broker.user", + "broker.password", + "broker.queue", + "broker.routingkey", + "db.host", + "db.port", + "db.user", + "db.password", + "db.database", + } + + switch viper.GetString("archive.type") { + case S3: + requiredConfVars = append(requiredConfVars, []string{"archive.url", "archive.accesskey", "archive.secretkey", "archive.bucket"}...) + case POSIX: + requiredConfVars = append(requiredConfVars, []string{"archive.location"}...) 
+ default: + return nil, fmt.Errorf("archive.type not set") + } + + switch viper.GetString("backup.type") { + case S3: + requiredConfVars = append(requiredConfVars, []string{"backup.url", "backup.accesskey", "backup.secretkey", "backup.bucket"}...) + case POSIX: + requiredConfVars = append(requiredConfVars, []string{"backup.location"}...) + case SFTP: + requiredConfVars = append(requiredConfVars, []string{"backup.sftp.host", "backup.sftp.port", "backup.sftp.userName", "backup.sftp.pemKeyPath", "backup.sftp.pemKeyPass"}...) + default: + return nil, fmt.Errorf("backup.type not set") + } + case "ingest": + requiredConfVars = []string{ + "broker.host", + "broker.port", + "broker.user", + "broker.password", + "broker.queue", + "broker.routingkey", + "db.host", + "db.port", + "db.user", + "db.password", + "db.database", + } + + switch viper.GetString("archive.type") { + case S3: + requiredConfVars = append(requiredConfVars, []string{"archive.url", "archive.accesskey", "archive.secretkey", "archive.bucket"}...) + case POSIX: + requiredConfVars = append(requiredConfVars, []string{"archive.location"}...) + default: + return nil, fmt.Errorf("archive.type not set") + } + + switch viper.GetString("inbox.type") { + case S3: + requiredConfVars = append(requiredConfVars, []string{"inbox.url", "inbox.accesskey", "inbox.secretkey", "inbox.bucket"}...) + case POSIX: + requiredConfVars = append(requiredConfVars, []string{"inbox.location"}...) 
+ default: + return nil, fmt.Errorf("inbox.type not set") + } + case "finalize": + requiredConfVars = []string{ + "broker.host", + "broker.port", + "broker.user", + "broker.password", + "broker.queue", + "broker.routingkey", + "db.host", + "db.port", + "db.user", + "db.password", + "db.database", + } + case "intercept": + // Intercept does not require these extra settings + requiredConfVars = []string{ + "broker.host", + "broker.port", + "broker.user", + "broker.password", + "broker.queue", + } + case "mapper": + // Mapper does not require broker.routingkey thus we remove it + requiredConfVars = []string{ + "broker.host", + "broker.port", + "broker.user", + "broker.password", + "broker.queue", + "db.host", + "db.port", + "db.user", + "db.password", + "db.database", + } + + switch viper.GetString("inbox.type") { + case S3: + requiredConfVars = append(requiredConfVars, []string{"inbox.url", "inbox.accesskey", "inbox.secretkey", "inbox.bucket"}...) + case POSIX: + requiredConfVars = append(requiredConfVars, []string{"inbox.location"}...) + } + case "notify": + requiredConfVars = []string{ + "broker.host", + "broker.port", + "broker.user", + "broker.password", + "broker.queue", + "smtp.host", + "smtp.port", + "smtp.password", + "smtp.from", + } + case "orchestrate": + // Orchestrate requires broker connection, a series of + // queues, and the project FQDN. 
+ requiredConfVars = []string{ + "broker.host", + "broker.port", + "broker.user", + "broker.password", + "project.fqdn", + } + case "s3inbox": + requiredConfVars = []string{ + "broker.host", + "broker.port", + "broker.user", + "broker.password", + "broker.routingkey", + "inbox.url", + "inbox.accesskey", + "inbox.secretkey", + "inbox.bucket", + } + viper.Set("inbox.type", S3) + case "verify": + requiredConfVars = []string{ + "broker.host", + "broker.port", + "broker.user", + "broker.password", + "broker.queue", + "broker.routingkey", + "db.host", + "db.port", + "db.user", + "db.password", + "db.database", + } + + switch viper.GetString("archive.type") { + case S3: + requiredConfVars = append(requiredConfVars, []string{"archive.url", "archive.accesskey", "archive.secretkey", "archive.bucket"}...) + case POSIX: + requiredConfVars = append(requiredConfVars, []string{"archive.location"}...) + default: + return nil, fmt.Errorf("archive.type not set") + } + + switch viper.GetString("inbox.type") { + case S3: + requiredConfVars = append(requiredConfVars, []string{"inbox.url", "inbox.accesskey", "inbox.secretkey", "inbox.bucket"}...) + case POSIX: + requiredConfVars = append(requiredConfVars, []string{"inbox.location"}...) 
+ default: + return nil, fmt.Errorf("inbox.type not set") + } + + default: + return nil, fmt.Errorf("application '%s' doesn't exist", app) } for _, s := range requiredConfVars { @@ -106,99 +338,461 @@ func NewConfig() (*Config, error) { } c := &Config{} - err := c.readConfig() - if err != nil { - return nil, err + switch app { + case "api": + err := c.configBroker() + if err != nil { + return nil, err + } + + err = c.configDatabase() + if err != nil { + return nil, err + } + + err = c.configAPI() + if err != nil { + return nil, err + } + case "backup": + c.configArchive() + c.configBackup() + + err := c.configBroker() + if err != nil { + return nil, err + } + + err = c.configDatabase() + if err != nil { + return nil, err + } + + c.configSchemas() + case "finalize": + err := c.configBroker() + if err != nil { + return nil, err + } + err = c.configDatabase() + if err != nil { + return nil, err + } + + c.configSchemas() + case "ingest": + c.configArchive() + err := c.configBroker() + if err != nil { + return nil, err + } + err = c.configDatabase() + if err != nil { + return nil, err + } + + c.configInbox() + c.configSchemas() + case "intercept": + err := c.configBroker() + if err != nil { + return nil, err + } + + c.configSchemas() + case "mapper": + err := c.configBroker() + if err != nil { + return nil, err + } + + err = c.configDatabase() + if err != nil { + return nil, err + } + + c.configInbox() + c.configSchemas() + case "notify": + c.configSMTP() + case "orchestrate": + err := c.configBroker() + if err != nil { + return nil, err + } + + c.configOrchestrator() + case "s3inbox": + err := c.configBroker() + if err != nil { + return nil, err + } + + err = c.configDatabase() + if err != nil { + return nil, err + } + + c.configInbox() + + err = c.configServer() + if err != nil { + return nil, err + } + case "verify": + c.configArchive() + + err := c.configBroker() + if err != nil { + return nil, err + } + + err = c.configDatabase() + if err != nil { + return nil, err 
+ } + + c.configSchemas() } return c, nil } -func (c *Config) readConfig() error { - s3 := S3Config{} +// configDatabase provides configuration for the database +func (c *Config) configAPI() error { + c.apiDefaults() + api := APIConf{} - // All these are required - s3.URL = viper.GetString("aws.url") - s3.AccessKey = viper.GetString("aws.accessKey") - s3.SecretKey = viper.GetString("aws.secretKey") - s3.Bucket = viper.GetString("aws.bucket") + api.Session.Expiration = time.Duration(viper.GetInt("api.session.expiration")) * time.Second + api.Session.Domain = viper.GetString("api.session.domain") + api.Session.Secure = viper.GetBool("api.session.secure") + api.Session.HTTPOnly = viper.GetBool("api.session.httponly") + api.Session.Name = viper.GetString("api.session.name") - // Optional settings - if viper.IsSet("aws.readypath") { - s3.Readypath = viper.GetString("aws.readypath") - } - if viper.IsSet("aws.region") { - s3.Region = viper.GetString("aws.region") + api.Host = viper.GetString("api.host") + api.Port = viper.GetInt("api.port") + api.ServerKey = viper.GetString("api.serverKey") + api.ServerCert = viper.GetString("api.serverCert") + api.CACert = viper.GetString("api.CACert") + + c.API = api + + return nil +} + +// apiDefaults set default values for web server and session +func (c *Config) apiDefaults() { + viper.SetDefault("api.host", "0.0.0.0") + viper.SetDefault("api.port", 8080) + viper.SetDefault("api.session.expiration", -1) + viper.SetDefault("api.session.secure", true) + viper.SetDefault("api.session.httponly", true) + viper.SetDefault("api.session.name", "api_session_key") +} + +// configArchive provides configuration for the archive storage +func (c *Config) configArchive() { + if viper.GetString("archive.type") == S3 { + c.Archive.Type = S3 + c.Archive.S3 = configS3Storage("archive") } else { - s3.Region = "us-east-1" - } - if viper.IsSet("aws.cacert") { - s3.CAcert = viper.GetString("aws.cacert") + c.Archive.Type = POSIX + c.Archive.Posix.Location = 
viper.GetString("archive.location") } +} - c.S3 = s3 +// configBackup provides configuration for the backup storage +func (c *Config) configBackup() { + switch viper.GetString("backup.type") { + case S3: + c.Backup.Type = S3 + c.Backup.S3 = configS3Storage("backup") + case SFTP: + c.Backup.Type = SFTP + c.Backup.SFTP = configSFTP("backup") + default: + c.Backup.Type = POSIX + c.Backup.Posix.Location = viper.GetString("backup.location") + } +} +// configBroker provides configuration for the message broker +func (c *Config) configBroker() error { // Setup broker - b := broker.MQConf{} + broker := broker.MQConf{} - b.Host = viper.GetString("broker.host") - b.Port, _ = strconv.Atoi(viper.GetString("broker.port")) - b.User = viper.GetString("broker.user") - b.Password = viper.GetString("broker.password") - b.Exchange = viper.GetString("broker.exchange") - b.RoutingKey = viper.GetString("broker.routingKey") - b.ServerName = viper.GetString("broker.serverName") + broker.Host = viper.GetString("broker.host") + broker.Port = viper.GetInt("broker.port") + broker.User = viper.GetString("broker.user") + broker.Password = viper.GetString("broker.password") + + broker.Queue = viper.GetString("broker.queue") + + if viper.IsSet("broker.serverName") { + broker.ServerName = viper.GetString("broker.serverName") + } + + if viper.IsSet("broker.routingkey") { + broker.RoutingKey = viper.GetString("broker.routingkey") + } + + if viper.IsSet("broker.exchange") { + broker.Exchange = viper.GetString("broker.exchange") + } if viper.IsSet("broker.vhost") { if strings.HasPrefix(viper.GetString("broker.vhost"), "/") { - b.Vhost = viper.GetString("broker.vhost") + broker.Vhost = viper.GetString("broker.vhost") } else { - b.Vhost = "/" + viper.GetString("broker.vhost") + broker.Vhost = "/" + viper.GetString("broker.vhost") } } else { - b.Vhost = "/" + broker.Vhost = "/" } if viper.IsSet("broker.ssl") { - b.Ssl = viper.GetBool("broker.ssl") + broker.Ssl = viper.GetBool("broker.ssl") } + if 
viper.IsSet("broker.verifyPeer") { - b.VerifyPeer = viper.GetBool("broker.verifyPeer") - if b.VerifyPeer { + broker.VerifyPeer = viper.GetBool("broker.verifyPeer") + if broker.VerifyPeer { // Since verifyPeer is specified, these are required. if !(viper.IsSet("broker.clientCert") && viper.IsSet("broker.clientKey")) { return errors.New("when broker.verifyPeer is set both broker.clientCert and broker.clientKey is needed") } - b.ClientCert = viper.GetString("broker.clientCert") - b.ClientKey = viper.GetString("broker.clientKey") + broker.ClientCert = viper.GetString("broker.clientCert") + broker.ClientKey = viper.GetString("broker.clientKey") } } if viper.IsSet("broker.cacert") { - b.CACert = viper.GetString("broker.cacert") + broker.CACert = viper.GetString("broker.cacert") } - c.Broker = b - - // Setup psql db - c.DB.Host = viper.GetString("db.host") - c.DB.Port = viper.GetInt("db.port") - c.DB.User = viper.GetString("db.user") - c.DB.Password = viper.GetString("db.password") - c.DB.Database = viper.GetString("db.database") - if viper.IsSet("db.cacert") { - c.DB.CACert = viper.GetString("db.cacert") + broker.PrefetchCount = 2 + if viper.IsSet("broker.prefetchCount") { + broker.PrefetchCount = viper.GetInt("broker.prefetchCount") } - c.DB.SslMode = viper.GetString("db.sslmode") - if c.DB.SslMode == "verify-full" { + + c.Broker = broker + + return nil +} + +// configDatabase provides configuration for the database +func (c *Config) configDatabase() error { + db := database.DBConf{} + + // All these are required + db.Host = viper.GetString("db.host") + db.Port = viper.GetInt("db.port") + db.User = viper.GetString("db.user") + db.Password = viper.GetString("db.password") + db.Database = viper.GetString("db.database") + db.SslMode = viper.GetString("db.sslmode") + + // Optional settings + if db.SslMode == "verify-full" { // Since verify-full is specified, these are required. 
if !(viper.IsSet("db.clientCert") && viper.IsSet("db.clientKey")) { return errors.New("when db.sslMode is set to verify-full both db.clientCert and db.clientKey are needed") } - c.DB.ClientCert = viper.GetString("db.clientcert") - c.DB.ClientKey = viper.GetString("db.clientkey") + } + if viper.IsSet("db.clientKey") { + db.ClientKey = viper.GetString("db.clientKey") + } + if viper.IsSet("db.clientCert") { + db.ClientCert = viper.GetString("db.clientCert") + } + if viper.IsSet("db.cacert") { + db.CACert = viper.GetString("db.cacert") + } + + c.Database = db + + return nil +} + +// configInbox provides configuration for the inbox storage +func (c *Config) configInbox() { + if viper.GetString("inbox.type") == S3 { + c.Inbox.Type = S3 + c.Inbox.S3 = configS3Storage("inbox") + } else { + c.Inbox.Type = POSIX + c.Inbox.Posix.Location = viper.GetString("inbox.location") + } +} + +// configOrchestrator provides the configuration for the standalone orchestator. +func (c *Config) configOrchestrator() { + c.Orchestrator = OrchestratorConf{} + if viper.IsSet("broker.dataset.releasedelay") { + c.Orchestrator.ReleaseDelay = time.Duration(viper.GetInt("broker.dataset.releasedelay")) + } else { + c.Orchestrator.ReleaseDelay = 1 + } + c.Orchestrator.ProjectFQDN = viper.GetString("project.fqdn") + if viper.IsSet("broker.queue.verified") { + c.Orchestrator.QueueVerify = viper.GetString("broker.queue.verified") + } else { + c.Orchestrator.QueueVerify = "verified" + } + + if viper.IsSet("broker.queue.inbox") { + c.Orchestrator.QueueInbox = viper.GetString("broker.queue.inbox") + } else { + c.Orchestrator.QueueInbox = "inbox" } - // Setup server + if viper.IsSet("broker.queue.completed") { + c.Orchestrator.QueueComplete = viper.GetString("broker.queue.completed") + } else { + c.Orchestrator.QueueComplete = "completed" + } + + if viper.IsSet("broker.queue.backup") { + c.Orchestrator.QueueBackup = viper.GetString("broker.queue.backup") + } else { + c.Orchestrator.QueueBackup = "backup" + } 
+ + if viper.IsSet("broker.queue.mappings") { + c.Orchestrator.QueueMapping = viper.GetString("broker.queue.mappings") + } else { + c.Orchestrator.QueueMapping = "mappings" + } + + if viper.IsSet("broker.queue.ingest") { + c.Orchestrator.QueueIngest = viper.GetString("broker.queue.ingest") + } else { + c.Orchestrator.QueueIngest = "ingest" + } + + if viper.IsSet("broker.queue.accessionIDs") { + c.Orchestrator.QueueAccession = viper.GetString("broker.queue.accessionIDs") + } else { + c.Orchestrator.QueueAccession = "accessionIDs" + } +} + +// configSchemas configures the schemas to load depending on +// the type IDs of connection Federated EGA or isolate (stand-alone) +func (c *Config) configSchemas() { + if viper.GetString("schema.type") == "federated" { + c.Broker.SchemasPath = "/schemas/federated/" + } else { + c.Broker.SchemasPath = "/schemas/isolated/" + } +} + +// configS3Storage populates and returns a S3Conf from the +// configuration +func configS3Storage(prefix string) storage.S3Conf { + s3 := storage.S3Conf{} + // All these are required + s3.URL = viper.GetString(prefix + ".url") + s3.AccessKey = viper.GetString(prefix + ".accesskey") + s3.SecretKey = viper.GetString(prefix + ".secretkey") + s3.Bucket = viper.GetString(prefix + ".bucket") + + // Defaults (move to viper?) 
+ + s3.Region = "us-east-1" + s3.NonExistRetryTime = 2 * time.Minute + + if viper.IsSet(prefix + ".port") { + s3.Port = viper.GetInt(prefix + ".port") + } + + if viper.IsSet(prefix + ".region") { + s3.Region = viper.GetString(prefix + ".region") + } + + if viper.IsSet(prefix + ".readypath") { + s3.Readypath = viper.GetString(prefix + ".readypath") + } + + if viper.IsSet(prefix + ".chunksize") { + s3.Chunksize = viper.GetInt(prefix+".chunksize") * 1024 * 1024 + } + + if viper.IsSet(prefix + ".cacert") { + s3.CAcert = viper.GetString(prefix + ".cacert") + } + + return s3 +} + +// configSFTP populates and returns a sftpConf with sftp backend configuration +func configSFTP(prefix string) storage.SftpConf { + sftpConf := storage.SftpConf{} + if viper.IsSet(prefix + ".sftp.hostKey") { + sftpConf.HostKey = viper.GetString(prefix + ".sftp.hostKey") + } else { + sftpConf.HostKey = "" + } + // All these are required + sftpConf.Host = viper.GetString(prefix + ".sftp.host") + sftpConf.Port = viper.GetString(prefix + ".sftp.port") + sftpConf.UserName = viper.GetString(prefix + ".sftp.userName") + sftpConf.PemKeyPath = viper.GetString(prefix + ".sftp.pemKeyPath") + sftpConf.PemKeyPass = viper.GetString(prefix + ".sftp.pemKeyPass") + + return sftpConf +} + +// configNotify provides configuration for the backup storage +func (c *Config) configSMTP() { + c.Notify = SMTPConf{} + c.Notify.Host = viper.GetString("smtp.host") + c.Notify.Port = viper.GetInt("smtp.port") + c.Notify.Password = viper.GetString("smtp.password") + c.Notify.FromAddr = viper.GetString("smtp.from") +} + +// GetC4GHKey reads and decrypts and returns the c4gh key +func GetC4GHKey() (*[32]byte, error) { + keyPath := viper.GetString("c4gh.filepath") + passphrase := viper.GetString("c4gh.passphrase") + + // Make sure the key path and passphrase is valid + keyFile, err := os.Open(keyPath) + if err != nil { + return nil, err + } + + key, err := keys.ReadPrivateKey(keyFile, []byte(passphrase)) + if err != nil { + 
return nil, err + } + + keyFile.Close() + + return &key, nil +} + +// GetC4GHPublicKey reads the c4gh public key +func GetC4GHPublicKey() (*[32]byte, error) { + keyPath := viper.GetString("c4gh.backupPubKey") + + // Make sure the key path and passphrase is valid + keyFile, err := os.Open(keyPath) + if err != nil { + return nil, err + } + + key, err := keys.ReadPublicKey(keyFile) + if err != nil { + return nil, err + } + + keyFile.Close() + + return &key, nil +} + +func (c *Config) configServer() error { s := ServerConfig{} if !(viper.IsSet("server.jwtpubkeypath") || viper.IsSet("server.jwtpubkeyurl")) { @@ -243,13 +837,8 @@ func TLSConfigBroker(c *Config) (*tls.Config, error) { } cfg.RootCAs = systemCAs - // Add CAs for broker and s3 - for _, cacert := range []string{c.Broker.CACert, c.S3.CAcert} { - if cacert == "" { - continue - } - - cacert, e := os.ReadFile(cacert) // #nosec this file comes from our configuration + if c.Broker.CACert != "" { + cacert, e := os.ReadFile(c.Broker.CACert) // #nosec this file comes from our configuration if e != nil { return nil, fmt.Errorf("failed to append %q to RootCAs: %v", cacert, e) } @@ -297,8 +886,8 @@ func TLSConfigProxy(c *Config) (*tls.Config, error) { } cfg.RootCAs = systemCAs - if c.S3.CAcert != "" { - cacert, e := os.ReadFile(c.S3.CAcert) // #nosec this file comes from our configuration + if c.Inbox.S3.CAcert != "" { + cacert, e := os.ReadFile(c.Inbox.S3.CAcert) // #nosec this file comes from our configuration if e != nil { return nil, fmt.Errorf("failed to append %q to RootCAs: %v", cacert, e) } @@ -309,3 +898,12 @@ func TLSConfigProxy(c *Config) (*tls.Config, error) { return cfg, nil } + +// CopyHeader reads the config and returns if the header will be copied +func CopyHeader() bool { + if viper.IsSet("backup.copyHeader") { + return viper.GetBool("backup.copyHeader") + } + + return false +} diff --git a/sda/internal/config/config_test.go b/sda/internal/config/config_test.go index 7bfb8be2a..08c841f59 100644 --- 
a/sda/internal/config/config_test.go +++ b/sda/internal/config/config_test.go @@ -1,12 +1,14 @@ package config import ( + "errors" "fmt" "os" "path" "path/filepath" "runtime" "testing" + "time" helper "github.com/neicnordic/sensitive-data-archive/internal/helper" @@ -37,11 +39,18 @@ func (suite *ConfigTestSuite) SetupTest() { viper.Set("broker.routingkey", "routingtest") viper.Set("broker.exchange", "testexchange") viper.Set("broker.vhost", "testvhost") - viper.Set("aws.url", "testurl") - viper.Set("aws.accesskey", "testaccess") - viper.Set("aws.secretkey", "testsecret") - viper.Set("aws.bucket", "testbucket") + viper.Set("broker.queue", "testqueue") + viper.Set("db.host", "test") + viper.Set("db.port", 123) + viper.Set("db.user", "test") + viper.Set("db.password", "test") + viper.Set("db.database", "test") + viper.Set("inbox.url", "testurl") + viper.Set("inbox.accesskey", "testaccess") + viper.Set("inbox.secretkey", "testsecret") + viper.Set("inbox.bucket", "testbucket") viper.Set("server.jwtpubkeypath", "testpath") + viper.Set("log.level", "debug") } func (suite *ConfigTestSuite) TearDownTest() { @@ -53,9 +62,18 @@ func TestConfigTestSuite(t *testing.T) { suite.Run(t, new(ConfigTestSuite)) } +func (suite *ConfigTestSuite) TestNonExistingApplication() { + expectedError := errors.New("application 'test' doesn't exist") + config, err := NewConfig("test") + assert.Nil(suite.T(), config) + if assert.Error(suite.T(), err) { + assert.Equal(suite.T(), expectedError, err) + } +} + func (suite *ConfigTestSuite) TestConfigFile() { - viper.Set("server.confFile", rootDir+"/.github/integration/sda/config.yaml") - config, err := NewConfig() + viper.Set("configFile", rootDir+"/.github/integration/sda/config.yaml") + config, err := NewConfig("s3inbox") assert.NotNil(suite.T(), config) assert.NoError(suite.T(), err) absPath, _ := filepath.Abs(rootDir + "/.github/integration/sda/config.yaml") @@ -63,8 +81,8 @@ func (suite *ConfigTestSuite) TestConfigFile() { } func (suite 
*ConfigTestSuite) TestWrongConfigFile() { - viper.Set("server.confFile", rootDir+"/.github/integration/rabbitmq/cega.conf") - config, err := NewConfig() + viper.Set("configFile", rootDir+"/.github/integration/rabbitmq/cega.conf") + config, err := NewConfig("s3inbox") assert.Nil(suite.T(), config) assert.Error(suite.T(), err) absPath, _ := filepath.Abs(rootDir + "/.github/integration/rabbitmq/cega.conf") @@ -73,8 +91,8 @@ func (suite *ConfigTestSuite) TestWrongConfigFile() { func (suite *ConfigTestSuite) TestConfigPath() { viper.Reset() - viper.Set("server.confPath", rootDir+"/.github/integration/sda/") - config, err := NewConfig() + viper.Set("configPath", rootDir+"/.github/integration/sda/") + config, err := NewConfig("s3inbox") assert.NotNil(suite.T(), config) assert.NoError(suite.T(), err) absPath, _ := filepath.Abs(rootDir + "/.github/integration/sda/config.yaml") @@ -83,7 +101,7 @@ func (suite *ConfigTestSuite) TestConfigPath() { func (suite *ConfigTestSuite) TestNoConfig() { viper.Reset() - config, err := NewConfig() + config, err := NewConfig("s3inbox") assert.Nil(suite.T(), config) assert.Error(suite.T(), err) } @@ -93,7 +111,7 @@ func (suite *ConfigTestSuite) TestMissingRequiredConfVar() { requiredConfVarValue := viper.Get(requiredConfVar) viper.Set(requiredConfVar, nil) expectedError := fmt.Errorf("%s not set", requiredConfVar) - config, err := NewConfig() + config, err := NewConfig("s3inbox") assert.Nil(suite.T(), config) if assert.Error(suite.T(), err) { assert.Equal(suite.T(), expectedError, err) @@ -103,35 +121,35 @@ func (suite *ConfigTestSuite) TestMissingRequiredConfVar() { } func (suite *ConfigTestSuite) TestConfigS3Storage() { - config, err := NewConfig() + config, err := NewConfig("s3inbox") assert.NotNil(suite.T(), config) assert.NoError(suite.T(), err) - assert.NotNil(suite.T(), config.S3) - assert.Equal(suite.T(), "testurl", config.S3.URL) - assert.Equal(suite.T(), "testaccess", config.S3.AccessKey) - assert.Equal(suite.T(), "testsecret", 
config.S3.SecretKey) - assert.Equal(suite.T(), "testbucket", config.S3.Bucket) + assert.NotNil(suite.T(), config.Inbox.S3) + assert.Equal(suite.T(), "testurl", config.Inbox.S3.URL) + assert.Equal(suite.T(), "testaccess", config.Inbox.S3.AccessKey) + assert.Equal(suite.T(), "testsecret", config.Inbox.S3.SecretKey) + assert.Equal(suite.T(), "testbucket", config.Inbox.S3.Bucket) } func (suite *ConfigTestSuite) TestConfigBroker() { - config, err := NewConfig() + config, err := NewConfig("s3inbox") assert.NotNil(suite.T(), config) assert.NoError(suite.T(), err) - assert.NotNil(suite.T(), config.S3) + assert.NotNil(suite.T(), config.Inbox.S3) assert.Equal(suite.T(), "/testvhost", config.Broker.Vhost) assert.Equal(suite.T(), false, config.Broker.Ssl) viper.Set("broker.ssl", true) viper.Set("broker.verifyPeer", true) - _, err = NewConfig() + _, err = NewConfig("s3inbox") assert.Error(suite.T(), err, "Error expected") viper.Set("broker.clientCert", "dummy-value") viper.Set("broker.clientKey", "dummy-value") - _, err = NewConfig() + _, err = NewConfig("s3inbox") assert.NoError(suite.T(), err) viper.Set("broker.vhost", nil) - config, err = NewConfig() + config, err = NewConfig("s3inbox") assert.NotNil(suite.T(), config) assert.NoError(suite.T(), err) assert.Equal(suite.T(), "/", config.Broker.Vhost) @@ -141,7 +159,7 @@ func (suite *ConfigTestSuite) TestTLSConfigBroker() { viper.Set("broker.serverName", "broker") viper.Set("broker.ssl", true) viper.Set("broker.cacert", certPath+"/ca.crt") - config, err := NewConfig() + config, err := NewConfig("s3inbox") assert.NotNil(suite.T(), config) assert.NoError(suite.T(), err) tlsBroker, err := TLSConfigBroker(config) @@ -151,7 +169,7 @@ func (suite *ConfigTestSuite) TestTLSConfigBroker() { viper.Set("broker.verifyPeer", true) viper.Set("broker.clientCert", certPath+"/tls.crt") viper.Set("broker.clientKey", certPath+"/tls.key") - config, err = NewConfig() + config, err = NewConfig("s3inbox") assert.NotNil(suite.T(), config) 
assert.NoError(suite.T(), err) tlsBroker, err = TLSConfigBroker(config) @@ -160,7 +178,7 @@ func (suite *ConfigTestSuite) TestTLSConfigBroker() { viper.Set("broker.clientCert", certPath+"tls.crt") viper.Set("broker.clientKey", certPath+"/tls.key") - config, err = NewConfig() + config, err = NewConfig("s3inbox") assert.NotNil(suite.T(), config) assert.NoError(suite.T(), err) tlsBroker, err = TLSConfigBroker(config) @@ -169,8 +187,8 @@ func (suite *ConfigTestSuite) TestTLSConfigBroker() { } func (suite *ConfigTestSuite) TestTLSConfigProxy() { - viper.Set("aws.cacert", certPath+"/ca.crt") - config, err := NewConfig() + viper.Set("inbox.cacert", certPath+"/ca.crt") + config, err := NewConfig("s3inbox") assert.NotNil(suite.T(), config) assert.NoError(suite.T(), err) tlsProxy, err := TLSConfigProxy(config) @@ -180,8 +198,77 @@ func (suite *ConfigTestSuite) TestTLSConfigProxy() { func (suite *ConfigTestSuite) TestDefaultLogLevel() { viper.Set("log.level", "test") - config, err := NewConfig() + config, err := NewConfig("s3inbox") assert.NotNil(suite.T(), config) assert.NoError(suite.T(), err) assert.Equal(suite.T(), log.TraceLevel, log.GetLevel()) } + +func (suite *ConfigTestSuite) TestAPIConfiguration() { + // At this point we should fail because we lack configuration + viper.Reset() + config, err := NewConfig("api") + assert.Error(suite.T(), err) + assert.Nil(suite.T(), config) + + // testing deafult values + suite.SetupTest() + config, err = NewConfig("api") + assert.NotNil(suite.T(), config) + assert.NoError(suite.T(), err) + assert.NotNil(suite.T(), config.API) + assert.Equal(suite.T(), "0.0.0.0", config.API.Host) + assert.Equal(suite.T(), 8080, config.API.Port) + assert.Equal(suite.T(), true, config.API.Session.Secure) + assert.Equal(suite.T(), true, config.API.Session.HTTPOnly) + assert.Equal(suite.T(), "api_session_key", config.API.Session.Name) + assert.Equal(suite.T(), -1*time.Second, config.API.Session.Expiration) + + viper.Reset() + suite.SetupTest() + // over 
write defaults + viper.Set("api.port", 8443) + viper.Set("api.session.secure", false) + viper.Set("api.session.domain", "test") + viper.Set("api.session.expiration", 60) + + config, err = NewConfig("api") + assert.NotNil(suite.T(), config) + assert.NoError(suite.T(), err) + assert.NotNil(suite.T(), config.API) + assert.Equal(suite.T(), "0.0.0.0", config.API.Host) + assert.Equal(suite.T(), 8443, config.API.Port) + assert.Equal(suite.T(), false, config.API.Session.Secure) + assert.Equal(suite.T(), "test", config.API.Session.Domain) + assert.Equal(suite.T(), 60*time.Second, config.API.Session.Expiration) +} + +func (suite *ConfigTestSuite) TestNotifyConfiguration() { + // At this point we should fail because we lack configuration + config, err := NewConfig("notify") + assert.Error(suite.T(), err) + assert.Nil(suite.T(), config) + + viper.Set("broker.host", "test") + viper.Set("broker.port", 123) + viper.Set("broker.user", "test") + viper.Set("broker.password", "test") + viper.Set("broker.queue", "test") + viper.Set("broker.routingkey", "test") + viper.Set("broker.exchange", "test") + + viper.Set("smtp.host", "test") + viper.Set("smtp.port", 456) + viper.Set("smtp.password", "test") + viper.Set("smtp.from", "noreply") + + config, err = NewConfig("notify") + assert.NoError(suite.T(), err) + assert.NotNil(suite.T(), config) +} + +func (suite *ConfigTestSuite) TestCopyHeader() { + viper.Set("backup.copyHeader", "true") + cHeader := CopyHeader() + assert.Equal(suite.T(), cHeader, true, "The CopyHeader does not work") +} diff --git a/sda/internal/database/database.go b/sda/internal/database/database.go index 4a0c12f0e..26348cf19 100644 --- a/sda/internal/database/database.go +++ b/sda/internal/database/database.go @@ -62,6 +62,15 @@ var FastConnectRate = 5 * time.Second // database during the after FastConnectTimeout. 
var SlowConnectRate = 1 * time.Minute +// dbRetryTimes is the number of times to retry the same function if it fails +var RetryTimes = 5 + +// hashType returns the identification string for the hash type +func hashType(_ hash.Hash) string { + // TODO: Support/check type + return "SHA256" +} + // NewSDAdb creates a new DB connection from the given DBConf variables. // Currently, only postgresql connections are supported. func NewSDAdb(config DBConf) (*SDAdb, error) { @@ -101,8 +110,7 @@ func (dbs *SDAdb) Connect() error { err := fmt.Errorf("failed to connect within reconnect time") log.Infoln("Connecting to database") - log.Debugf("host: %s:%d, database: %s, user: %s", dbs.Config.Host, - dbs.Config.Port, dbs.Config.Database, dbs.Config.User) + log.Debugf("host: %s:%d, database: %s, user: %s", dbs.Config.Host, dbs.Config.Port, dbs.Config.Database, dbs.Config.User) for ConnectTimeout <= 0 || ConnectTimeout > time.Since(start) { dbs.DB, err = sql.Open(dbs.Config.PgDataSource()) diff --git a/sda/internal/database/db_functions.go b/sda/internal/database/db_functions.go index 2298f9ed0..7a450e614 100644 --- a/sda/internal/database/db_functions.go +++ b/sda/internal/database/db_functions.go @@ -3,7 +3,9 @@ package database import ( + "encoding/hex" "errors" + "fmt" ) // RegisterFile inserts a file in the database, along with a "registered" log @@ -41,3 +43,248 @@ func (dbs *SDAdb) UpdateFileEventLog(fileID, event, userID, message string) erro return err } + +func (dbs *SDAdb) GetFileID(corrID string) (string, error) { + var ( + err error + count int + ID string + ) + + for count == 0 || (err != nil && count < RetryTimes) { + ID, err = dbs.getFileID(corrID) + count++ + } + + return ID, err +} +func (dbs *SDAdb) getFileID(corrID string) (string, error) { + dbs.checkAndReconnectIfNeeded() + db := dbs.DB + const getFileID = "SELECT DISTINCT file_id FROM sda.file_event_log where correlation_id = $1;" + + var fileID string + err := db.QueryRow(getFileID, corrID).Scan(&fileID) + 
if err != nil { + return "", err + } + + return fileID, nil +} + +func (dbs *SDAdb) UpdateFileStatus(fileUUID, event, corrID, user, message string) error { + var ( + err error + count int + ) + + for count == 0 || (err != nil && count < RetryTimes) { + err = dbs.updateFileStatus(fileUUID, event, corrID, user, message) + count++ + } + + return err +} +func (dbs *SDAdb) updateFileStatus(fileUUID, event, corrID, user, message string) error { + dbs.checkAndReconnectIfNeeded() + + db := dbs.DB + const query = "INSERT INTO sda.file_event_log(file_id, event, correlation_id, user_id, message) VALUES($1, $2, $3, $4, $5);" + + result, err := db.Exec(query, fileUUID, event, corrID, user, message) + if err != nil { + return err + } + if rowsAffected, _ := result.RowsAffected(); rowsAffected == 0 { + return errors.New("something went wrong with the query zero rows were changed") + } + + return nil +} + +// StoreHeader stores the file header in the database +func (dbs *SDAdb) StoreHeader(header []byte, id string) error { + var ( + err error + count int + ) + + for count == 0 || (err != nil && count < RetryTimes) { + err = dbs.storeHeader(header, id) + count++ + } + + return err +} +func (dbs *SDAdb) storeHeader(header []byte, id string) error { + dbs.checkAndReconnectIfNeeded() + + db := dbs.DB + const query = "UPDATE sda.files SET header = $1 WHERE id = $2;" + result, err := db.Exec(query, hex.EncodeToString(header), id) + if err != nil { + return err + } + if rowsAffected, _ := result.RowsAffected(); rowsAffected == 0 { + return errors.New("something went wrong with the query zero rows were changed") + } + + return nil +} + +// SetArchived marks the file as 'ARCHIVED' +func (dbs *SDAdb) SetArchived(file FileInfo, fileID, corrID string) error { + var ( + err error + count int + ) + + for count == 0 || (err != nil && count < RetryTimes) { + err = dbs.setArchived(file, fileID, corrID) + count++ + } + + return err +} +func (dbs *SDAdb) setArchived(file FileInfo, fileID, corrID 
string) error { + dbs.checkAndReconnectIfNeeded() + + db := dbs.DB + const query = "SELECT sda.set_archived($1, $2, $3, $4, $5, $6);" + _, err := db.Exec(query, + fileID, + corrID, + file.Path, + file.Size, + fmt.Sprintf("%x", file.Checksum.Sum(nil)), + hashType(file.Checksum), + ) + + return err +} + +func (dbs *SDAdb) GetFileStatus(corrID string) (string, error) { + var ( + err error + count int + status string + ) + + for count == 0 || (err != nil && count < RetryTimes) { + status, err = dbs.getFileStatus(corrID) + count++ + } + + return status, err +} +func (dbs *SDAdb) getFileStatus(corrID string) (string, error) { + dbs.checkAndReconnectIfNeeded() + db := dbs.DB + const getFileID = "SELECT event from sda.file_event_log WHERE correlation_id = $1 ORDER BY id DESC LIMIT 1;" + + var status string + err := db.QueryRow(getFileID, corrID).Scan(&status) + if err != nil { + return "", err + } + + return status, nil +} + +// GetHeader retrieves the file header +func (dbs *SDAdb) GetHeader(fileID string) ([]byte, error) { + var ( + r []byte + err error + count int + ) + + for count == 0 || (err != nil && count < RetryTimes) { + r, err = dbs.getHeader(fileID) + count++ + } + + return r, err +} +func (dbs *SDAdb) getHeader(fileID string) ([]byte, error) { + dbs.checkAndReconnectIfNeeded() + + db := dbs.DB + const query = "SELECT header from sda.files WHERE id = $1" + + var hexString string + if err := db.QueryRow(query, fileID).Scan(&hexString); err != nil { + return nil, err + } + + header, err := hex.DecodeString(hexString) + if err != nil { + return nil, err + } + + return header, nil +} + +// MarkCompleted marks the file as "COMPLETED" +func (dbs *SDAdb) MarkCompleted(file FileInfo, fileID, corrID string) error { + var ( + err error + count int + ) + + for count == 0 || (err != nil && count < RetryTimes) { + err = dbs.markCompleted(file, fileID, corrID) + count++ + } + + return err +} +func (dbs *SDAdb) markCompleted(file FileInfo, fileID, corrID string) error { + 
dbs.checkAndReconnectIfNeeded() + + db := dbs.DB + const completed = "SELECT sda.set_verified($1, $2, $3, $4, $5, $6, $7);" + _, err := db.Exec(completed, + fileID, + corrID, + fmt.Sprintf("%x", file.Checksum.Sum(nil)), + hashType(file.Checksum), + file.DecryptedSize, + fmt.Sprintf("%x", file.DecryptedChecksum.Sum(nil)), + hashType(file.DecryptedChecksum), + ) + + return err +} + +// GetArchived retrieves the location and size of archive +func (dbs *SDAdb) GetArchived(user, filepath, checksum string) (string, int, error) { + var ( + filePath string + fileSize int + err error + count int + ) + + for count == 0 || (err != nil && count < RetryTimes) { + filePath, fileSize, err = dbs.getArchived(user, filepath, checksum) + count++ + } + + return filePath, fileSize, err +} +func (dbs *SDAdb) getArchived(user, filepath, checksum string) (string, int, error) { + dbs.checkAndReconnectIfNeeded() + + db := dbs.DB + const query = "SELECT archive_path, archive_filesize from local_ega.files WHERE " + + "elixir_id = $1 and inbox_path = $2 and decrypted_file_checksum = $3 and status in ('COMPLETED', 'READY');" + + var filePath string + var fileSize int + if err := db.QueryRow(query, user, filepath, checksum).Scan(&filePath, &fileSize); err != nil { + return "", 0, err + } + + return filePath, fileSize, nil +} diff --git a/sda/internal/database/db_functions_test.go b/sda/internal/database/db_functions_test.go index b307a4b95..3095a10a2 100644 --- a/sda/internal/database/db_functions_test.go +++ b/sda/internal/database/db_functions_test.go @@ -1,8 +1,11 @@ package database import ( + "crypto/sha256" + "fmt" "regexp" + "github.com/google/uuid" "github.com/stretchr/testify/assert" ) @@ -11,16 +14,11 @@ func (suite *DatabaseTests) TestRegisterFile() { // create database connection db, err := NewSDAdb(suite.dbConf) - assert.Nil(suite.T(), err, "got %v when creating new connection", err) + assert.NoError(suite.T(), err, "got %v when creating new connection", err) // register a file in 
the database fileID, err := db.RegisterFile("/testuser/file1.c4gh", "testuser") - if db.Version < 4 { - assert.NotNil(suite.T(), err, "RegisterFile() should not work in db version %v", db.Version) - - return - } - assert.Nil(suite.T(), err, "failed to register file in database") + assert.NoError(suite.T(), err, "failed to register file in database") // check that the returning fileID is a uuid uuidPattern := "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" @@ -30,31 +28,25 @@ func (suite *DatabaseTests) TestRegisterFile() { // check that the file is in the database exists := false err = db.DB.QueryRow("SELECT EXISTS(SELECT 1 FROM sda.files WHERE id=$1)", fileID).Scan(&exists) - assert.Nil(suite.T(), err, "Failed to check if registered file exists") + assert.NoError(suite.T(), err, "Failed to check if registered file exists") assert.True(suite.T(), exists, "RegisterFile() did not insert a row into sda.files with id: "+fileID) // check that there is a "registered" file event connected to the file err = db.DB.QueryRow("SELECT EXISTS(SELECT 1 FROM sda.file_event_log WHERE file_id=$1 AND event='registered')", fileID).Scan(&exists) - assert.Nil(suite.T(), err, "Failed to check if registered file event exists") + assert.NoError(suite.T(), err, "Failed to check if registered file event exists") assert.True(suite.T(), exists, "RegisterFile() did not insert a row into sda.file_event_log with id: "+fileID) } // TestMarkFileAsUploaded tests that MarkFileAsUploaded() behaves as intended -func (suite *DatabaseTests) UpdateFileEventLog() { - +func (suite *DatabaseTests) TestUpdateFileEventLog() { // create database connection db, err := NewSDAdb(suite.dbConf) - assert.Nil(suite.T(), err, "got %v when creating new connection", err) + assert.NoError(suite.T(), err, "got %v when creating new connection", err) // register a file in the database fileID, err := db.RegisterFile("/testuser/file2.c4gh", "testuser") - if db.Version < 4 { - 
assert.NotNil(suite.T(), err, "MarkFileAsUploaded() should not work in db version %v", db.Version) - - return - } - assert.Nil(suite.T(), err, "failed to register file in database") + assert.NoError(suite.T(), err, "failed to register file in database") // Attempt to mark a file that doesn't exist as uploaded err = db.UpdateFileEventLog("00000000-0000-0000-0000-000000000000", "uploaded", "testuser", "{}") @@ -62,11 +54,156 @@ func (suite *DatabaseTests) UpdateFileEventLog() { // mark file as uploaded err = db.UpdateFileEventLog(fileID, "uploaded", "testuser", "{}") - assert.Nil(suite.T(), err, "failed to set file as uploaded in database") + assert.NoError(suite.T(), err, "failed to set file as uploaded in database") + + exists := false + // check that there is an "uploaded" file event connected to the file + err = db.DB.QueryRow("SELECT EXISTS(SELECT 1 FROM sda.file_event_log WHERE file_id=$1 AND event='uploaded')", fileID).Scan(&exists) + assert.NoError(suite.T(), err, "Failed to check if uploaded file event exists") + assert.True(suite.T(), exists, "UpdateFileEventLog() did not insert a row into sda.file_event_log with id: "+fileID) +} + +func (suite *DatabaseTests) TestGetFileID() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got %v when creating new connection", err) + + fileID, err := db.RegisterFile("/testuser/file3.c4gh", "testuser") + assert.NoError(suite.T(), err, "failed to register file in database") + + corrID := uuid.New().String() + err = db.UpdateFileStatus(fileID, "uploaded", corrID, "testuser", "{}") + assert.NoError(suite.T(), err, "failed to update file status") + + fID, err := db.GetFileID(corrID) + assert.NoError(suite.T(), err, "GetFileId failed") + assert.Equal(suite.T(), fileID, fID) +} + +func (suite *DatabaseTests) TestUpdateFileStatus() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got %v when creating new connection", err) + + // register a file in the database + fileID, err := 
db.RegisterFile("/testuser/file4.c4gh", "testuser") + assert.Nil(suite.T(), err, "failed to register file in database") + + corrID := uuid.New().String() + // Attempt to mark a file that doesn't exist as uploaded + err = db.UpdateFileStatus("00000000-0000-0000-0000-000000000000", "uploaded", corrID, "testuser", "{}") + assert.NotNil(suite.T(), err, "Unknown file could be marked as uploaded in database") + + // mark file as uploaded + err = db.UpdateFileStatus(fileID, "uploaded", corrID, "testuser", "{}") + assert.NoError(suite.T(), err, "failed to set file as uploaded in database") exists := false // check that there is an "uploaded" file event connected to the file err = db.DB.QueryRow("SELECT EXISTS(SELECT 1 FROM sda.file_event_log WHERE file_id=$1 AND event='uploaded')", fileID).Scan(&exists) - assert.Nil(suite.T(), err, "Failed to check if uploaded file event exists") + assert.NoError(suite.T(), err, "Failed to check if uploaded file event exists") assert.True(suite.T(), exists, "UpdateFileEventLog() did not insert a row into sda.file_event_log with id: "+fileID) } + +func (suite *DatabaseTests) TestStoreHeader() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got %v when creating new connection", err) + + // register a file in the database + fileID, err := db.RegisterFile("/testuser/TestStoreHeader.c4gh", "testuser") + assert.NoError(suite.T(), err, "failed to register file in database") + + err = db.StoreHeader([]byte{15, 45, 20, 40, 48}, fileID) + assert.NoError(suite.T(), err, "failed to store file header") + + // store header for non existing entry + err = db.StoreHeader([]byte{15, 45, 20, 40, 48}, "00000000-0000-0000-0000-000000000000") + assert.EqualError(suite.T(), err, "something went wrong with the query zero rows were changed") +} + +func (suite *DatabaseTests) TestSetArchived() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got %v when creating new connection", err) + + // register a file in the 
database + fileID, err := db.RegisterFile("/testuser/TestSetArchived.c4gh", "testuser") + assert.NoError(suite.T(), err, "failed to register file in database") + + fileInfo := FileInfo{sha256.New(), 1000, "/tmp/TestSetArchived.c4gh", sha256.New(), -1} + corrID := uuid.New().String() + err = db.SetArchived(fileInfo, fileID, corrID) + assert.NoError(suite.T(), err, "failed to mark file as Archived") + + err = db.SetArchived(fileInfo, "00000000-0000-0000-0000-000000000000", corrID) + assert.ErrorContains(suite.T(), err, "violates foreign key constraint") + + err = db.SetArchived(fileInfo, fileID, "00000000-0000-0000-0000-000000000000") + assert.ErrorContains(suite.T(), err, "duplicate key value violates unique constraint") +} + +func (suite *DatabaseTests) TestGetFileStatus() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got %v when creating new connection", err) + + // register a file in the database + fileID, err := db.RegisterFile("/testuser/TestGetFileStatus.c4gh", "testuser") + assert.NoError(suite.T(), err, "failed to register file in database") + + corrID := uuid.New().String() + err = db.UpdateFileStatus(fileID, "downloaded", corrID, "testuser", "{}") + assert.NoError(suite.T(), err, "failed to set file as downloaded in database") + + status, err := db.GetFileStatus(corrID) + assert.NoError(suite.T(), err, "failed to get file status") + assert.Equal(suite.T(), "downloaded", status) +} + +func (suite *DatabaseTests) TestGetHeader() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got %v when creating new connection", err) + + // register a file in the database + fileID, err := db.RegisterFile("/testuser/TestGetHeader.c4gh", "testuser") + assert.NoError(suite.T(), err, "failed to register file in database") + + err = db.StoreHeader([]byte{15, 45, 20, 40, 48}, fileID) + assert.NoError(suite.T(), err, "failed to store file header") + + header, err := db.GetHeader(fileID) + assert.NoError(suite.T(), err, "failed to 
get file header") + assert.Equal(suite.T(), []byte{15, 45, 20, 40, 48}, header) +} + +func (suite *DatabaseTests) TestMarkCompleted() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) + + // register a file in the database + fileID, err := db.RegisterFile("/testuser/TestMarkCompleted.c4gh", "testuser") + assert.NoError(suite.T(), err, "failed to register file in database") + + corrID := uuid.New().String() + fileInfo := FileInfo{sha256.New(), 1000, "/testuser/TestMarkCompleted.c4gh", sha256.New(), 948} + err = db.MarkCompleted(fileInfo, fileID, corrID) + assert.NoError(suite.T(), err, "got (%v) when marking file as completed", err) +} + +func (suite *DatabaseTests) TestGetArchived() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) + + // register a file in the database + fileID, err := db.RegisterFile("/testuser/TestGetArchived.c4gh", "testuser") + assert.NoError(suite.T(), err, "failed to register file in database") + + decSha := sha256.New() + fileInfo := FileInfo{sha256.New(), 1000, "/tmp/TestGetArchived.c4gh", decSha, 987} + corrID := uuid.New().String() + err = db.SetArchived(fileInfo, fileID, corrID) + assert.NoError(suite.T(), err, "got (%v) when marking file as Archived", err) + err = db.MarkCompleted(fileInfo, fileID, corrID) + assert.NoError(suite.T(), err, "got (%v) when marking file as completed", err) + + filePath, fileSize, err := db.GetArchived("testuser", "/testuser/TestGetArchived.c4gh", fmt.Sprintf("%x", decSha.Sum(nil))) + assert.NoError(suite.T(), err, "got (%v) when getting file archive information", err) + assert.Equal(suite.T(), 1000, fileSize) + assert.Equal(suite.T(), "/tmp/TestGetArchived.c4gh", filePath) +} diff --git a/sda/internal/helper/helper.go b/sda/internal/helper/helper.go index eb67299cf..aac69199b 100644 --- a/sda/internal/helper/helper.go +++ b/sda/internal/helper/helper.go @@ -8,17 +8,15 @@ import ( 
"crypto/x509" "crypto/x509/pkix" "encoding/pem" - "fmt" "log" "math/big" "net" "os" "path/filepath" - "regexp" - "strings" "time" "github.com/golang-jwt/jwt/v4" + "golang.org/x/crypto/ssh" ) // Global variables for test token creation @@ -153,6 +151,50 @@ func CreateRSAkeys(prPath, pubPath string) error { return nil } +func CreateSSHKey(path string) error { + privatekey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return err + } + pk := &privatekey.PublicKey + + // dump private key to file + privateKeyBytes := x509.MarshalPKCS1PrivateKey(privatekey) + privateKeyBlock := &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: privateKeyBytes, + } + encPrivateKeyBlock, err := x509.EncryptPEMBlock(rand.Reader, privateKeyBlock.Type, privateKeyBlock.Bytes, []byte("password"), x509.PEMCipherAES256) //nolint:staticcheck + if err != nil { + return err + } + privatePem, err := os.Create(path + "/id_rsa") + if err != nil { + return err + } + err = pem.Encode(privatePem, encPrivateKeyBlock) + if err != nil { + return err + } + + err = os.Chmod(path+"/id_rsa", 0600) + if err != nil { + return err + } + + publicKey, err := ssh.NewPublicKey(pk) + if err != nil { + return err + } + pubKeyBytes := ssh.MarshalAuthorizedKey(publicKey) + err = os.WriteFile(path+"/id_rsa.pub", pubKeyBytes, 0600) + if err != nil { + return err + } + + return nil +} + // CreateRSAToken creates an RSA token func CreateRSAToken(key *rsa.PrivateKey, headerAlg, headerType string, tokenClaims map[string]interface{}) (string, error) { token := jwt.New(jwt.SigningMethodRS256) @@ -366,27 +408,3 @@ func TLScertToFile(filename string, derBytes []byte) error { return err } - -// FormatUploadFilePath ensures that path separators are "/", and returns error if the -// filepath contains a disallowed character matched with regex -func FormatUploadFilePath(filePath string) (string, error) { - - // Check for mixed "\" and "/" in filepath. 
Stop and throw an error if true so that - // we do not end up with unintended folder structure when applying ReplaceAll below - if strings.Contains(filePath, "\\") && strings.Contains(filePath, "/") { - return filePath, fmt.Errorf("filepath contains mixed '\\' and '/' characters") - } - - // make any windows path separators linux compatible - outPath := strings.ReplaceAll(filePath, "\\", "/") - - // [\x00-\x1F\x7F] is the control character set - re := regexp.MustCompile(`[\\:\*\?"<>\|\x00-\x1F\x7F]`) - - dissallowedChars := re.FindAllString(outPath, -1) - if dissallowedChars != nil { - return outPath, fmt.Errorf("filepath contains disallowed characters: %+v", strings.Join(dissallowedChars, ", ")) - } - - return outPath, nil -} diff --git a/sda/internal/helper/helper_test.go b/sda/internal/helper/helper_test.go index 94826d779..358ffcbd9 100644 --- a/sda/internal/helper/helper_test.go +++ b/sda/internal/helper/helper_test.go @@ -70,23 +70,3 @@ func TestCreateECToken(t *testing.T) { defer os.RemoveAll("dummy-folder") } - -func TestFormatUploadFilePath(t *testing.T) { - - unixPath := "a/b/c.c4gh" - testPath := "a\\b\\c.c4gh" - uploadPath, err := FormatUploadFilePath(testPath) - assert.NoError(t, err) - assert.Equal(t, unixPath, uploadPath) - - // mixed "\" and "/" - weirdPath := `dq\sw:*?"<>|\t\s/df.c4gh` - _, err = FormatUploadFilePath(weirdPath) - assert.EqualError(t, err, "filepath contains mixed '\\' and '/' characters") - - // no mixed "\" and "/" but not allowed - weirdPath = `dq\sw:*?"<>|\t\sdf.c4gh` - _, err = FormatUploadFilePath(weirdPath) - assert.EqualError(t, err, "filepath contains disallowed characters: :, *, ?, \", <, >, |") - -} diff --git a/sda/internal/storage/storage.go b/sda/internal/storage/storage.go new file mode 100644 index 000000000..05a354dcb --- /dev/null +++ b/sda/internal/storage/storage.go @@ -0,0 +1,527 @@ +// Package storage provides interface for storage areas, e.g. s3 or POSIX file system. 
+package storage + +import ( + "crypto/tls" + "crypto/x509" + "encoding/base64" + "fmt" + "io" + "net" + "net/http" + "os" + "path/filepath" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/pkg/sftp" + "golang.org/x/crypto/ssh" + + log "github.com/sirupsen/logrus" +) + +// Backend defines methods to be implemented by PosixBackend, S3Backend and sftpBackend +type Backend interface { + GetFileSize(filePath string) (int64, error) + RemoveFile(filePath string) error + NewFileReader(filePath string) (io.ReadCloser, error) + NewFileWriter(filePath string) (io.WriteCloser, error) +} + +// Conf is a wrapper for the storage config +type Conf struct { + Type string + S3 S3Conf + Posix posixConf + SFTP SftpConf +} + +type posixBackend struct { + FileReader io.Reader + FileWriter io.Writer + Location string +} + +type posixConf struct { + Location string +} + +// NewBackend initiates a storage backend +func NewBackend(config Conf) (Backend, error) { + switch config.Type { + case "s3": + return newS3Backend(config.S3) + case "sftp": + return newSftpBackend(config.SFTP) + default: + return newPosixBackend(config.Posix) + } +} + +func newPosixBackend(config posixConf) (*posixBackend, error) { + fileInfo, err := os.Stat(config.Location) + + if err != nil { + return nil, err + } + + if !fileInfo.IsDir() { + return nil, fmt.Errorf("%s is not a directory", config.Location) + } + + return &posixBackend{Location: config.Location}, nil +} + +// NewFileReader returns an io.Reader instance +func (pb *posixBackend) NewFileReader(filePath string) (io.ReadCloser, error) { + if pb == nil { + return nil, fmt.Errorf("invalid posixBackend") + } + + file, err := os.Open(filepath.Join(filepath.Clean(pb.Location), filePath)) + if err != nil { + 
log.Error(err) + + return nil, err + } + + return file, nil +} + +// NewFileWriter returns an io.Writer instance +func (pb *posixBackend) NewFileWriter(filePath string) (io.WriteCloser, error) { + if pb == nil { + return nil, fmt.Errorf("invalid posixBackend") + } + + file, err := os.OpenFile(filepath.Join(filepath.Clean(pb.Location), filePath), os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0640) + if err != nil { + log.Error(err) + + return nil, err + } + + return file, nil +} + +// GetFileSize returns the size of the file +func (pb *posixBackend) GetFileSize(filePath string) (int64, error) { + if pb == nil { + return 0, fmt.Errorf("invalid posixBackend") + } + + stat, err := os.Stat(filepath.Join(filepath.Clean(pb.Location), filePath)) + if err != nil { + log.Error(err) + + return 0, err + } + + return stat.Size(), nil +} + +// RemoveFile removes a file from a given path +func (pb *posixBackend) RemoveFile(filePath string) error { + if pb == nil { + return fmt.Errorf("invalid posixBackend") + } + + err := os.Remove(filepath.Join(filepath.Clean(pb.Location), filePath)) + if err != nil { + log.Error(err) + + return err + } + + return nil +} + +type s3Backend struct { + Client *s3.S3 + Uploader *s3manager.Uploader + Bucket string + Conf *S3Conf +} + +// S3Conf stores information about the S3 storage backend +type S3Conf struct { + URL string + Port int + AccessKey string + SecretKey string + Bucket string + Region string + UploadConcurrency int + Chunksize int + CAcert string + NonExistRetryTime time.Duration + Readypath string +} + +func newS3Backend(config S3Conf) (*s3Backend, error) { + s3Session := CreateS3Session(config) + if err := CheckS3Bucket(config.Bucket, s3Session); err != nil { + return nil, err + } + + sb := &s3Backend{ + Bucket: config.Bucket, + Uploader: s3manager.NewUploader(s3Session, func(u *s3manager.Uploader) { + u.PartSize = int64(config.Chunksize) + u.Concurrency = config.UploadConcurrency + u.LeavePartsOnError = false + }), + Client: s3.New(s3Session), + 
Conf: &config} + + _, err := sb.Client.ListObjectsV2(&s3.ListObjectsV2Input{Bucket: &config.Bucket}) + if err != nil { + return nil, err + } + + return sb, nil +} + +func CreateS3Session(config S3Conf) *session.Session { + s3Transport := transportConfigS3(config) + client := http.Client{Transport: s3Transport} + s3Session := session.Must(session.NewSession( + &aws.Config{ + Endpoint: aws.String(fmt.Sprintf("%s:%d", config.URL, config.Port)), + Region: aws.String(config.Region), + HTTPClient: &client, + S3ForcePathStyle: aws.Bool(true), + DisableSSL: aws.Bool(strings.HasPrefix(config.URL, "http:")), + Credentials: credentials.NewStaticCredentials(config.AccessKey, config.SecretKey, ""), + }, + )) + + return s3Session +} + +func CheckS3Bucket(bucket string, s3Session *session.Session) error { + _, err := s3.New(s3Session).CreateBucket(&s3.CreateBucketInput{ + Bucket: aws.String(bucket), + }) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + if aerr.Code() != s3.ErrCodeBucketAlreadyOwnedByYou && + aerr.Code() != s3.ErrCodeBucketAlreadyExists { + return fmt.Errorf("unexpected issue while creating bucket: %v", err) + } + + return nil + } + + return fmt.Errorf("verifying bucket failed, check S3 configuration") + } + + return nil +} + +// NewFileReader returns an io.Reader instance +func (sb *s3Backend) NewFileReader(filePath string) (io.ReadCloser, error) { + if sb == nil { + return nil, fmt.Errorf("invalid s3Backend") + } + + r, err := sb.Client.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(sb.Bucket), + Key: aws.String(filePath), + }) + + retryTime := 2 * time.Minute + if sb.Conf != nil { + retryTime = sb.Conf.NonExistRetryTime + } + + start := time.Now() + for err != nil && time.Since(start) < retryTime { + r, err = sb.Client.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(sb.Bucket), + Key: aws.String(filePath), + }) + time.Sleep(1 * time.Second) + } + + if err != nil { + log.Error(err) + + return nil, err + } + + return r.Body, nil +} + +// 
NewFileWriter uploads the contents of an io.Reader to a S3 bucket +func (sb *s3Backend) NewFileWriter(filePath string) (io.WriteCloser, error) { + if sb == nil { + return nil, fmt.Errorf("invalid s3Backend") + } + + reader, writer := io.Pipe() + go func() { + + _, err := sb.Uploader.Upload(&s3manager.UploadInput{ + Body: reader, + Bucket: aws.String(sb.Bucket), + Key: aws.String(filePath), + ContentEncoding: aws.String("application/octet-stream"), + }) + + if err != nil { + _ = reader.CloseWithError(err) + } + }() + + return writer, nil +} + +// GetFileSize returns the size of a specific object +func (sb *s3Backend) GetFileSize(filePath string) (int64, error) { + if sb == nil { + return 0, fmt.Errorf("invalid s3Backend") + } + + r, err := sb.Client.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(sb.Bucket), + Key: aws.String(filePath)}) + + start := time.Now() + + retryTime := 2 * time.Minute + if sb.Conf != nil { + retryTime = sb.Conf.NonExistRetryTime + } + + // Retry on error up to five minutes to allow for + // "slow writes' or s3 eventual consistency + for err != nil && time.Since(start) < retryTime { + r, err = sb.Client.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(sb.Bucket), + Key: aws.String(filePath)}) + + time.Sleep(1 * time.Second) + + } + + if err != nil { + log.Errorln(err) + + return 0, err + } + + return *r.ContentLength, nil +} + +// RemoveFile removes an object from a bucket +func (sb *s3Backend) RemoveFile(filePath string) error { + if sb == nil { + return fmt.Errorf("invalid s3Backend") + } + + _, err := sb.Client.DeleteObject(&s3.DeleteObjectInput{ + Bucket: aws.String(sb.Bucket), + Key: aws.String(filePath)}) + if err != nil { + log.Error(err) + + return err + } + + err = sb.Client.WaitUntilObjectNotExists(&s3.HeadObjectInput{ + Bucket: aws.String(sb.Bucket), + Key: aws.String(filePath)}) + if err != nil { + return err + } + + return nil +} + +// transportConfigS3 is a helper method to setup TLS for the S3 client. 
+func transportConfigS3(config S3Conf) http.RoundTripper { + cfg := new(tls.Config) + + // Enforce TLS1.2 or higher + cfg.MinVersion = tls.VersionTLS12 + + // Read system CAs + var systemCAs, _ = x509.SystemCertPool() + if reflect.DeepEqual(systemCAs, x509.NewCertPool()) { + log.Debug("creating new CApool") + systemCAs = x509.NewCertPool() + } + cfg.RootCAs = systemCAs + + if config.CAcert != "" { + cacert, e := os.ReadFile(config.CAcert) // #nosec this file comes from our config + if e != nil { + log.Fatalf("failed to append %q to RootCAs: %v", cacert, e) + } + if ok := cfg.RootCAs.AppendCertsFromPEM(cacert); !ok { + log.Debug("no certs appended, using system certs only") + } + } + + var trConfig http.RoundTripper = &http.Transport{ + TLSClientConfig: cfg, + ForceAttemptHTTP2: true} + + return trConfig +} + +type sftpBackend struct { + Connection *ssh.Client + Client *sftp.Client + Conf *SftpConf +} + +// SftpConf stores information about the sftp storage backend +type SftpConf struct { + Host string + Port string + UserName string + PemKeyPath string + PemKeyPass string + HostKey string +} + +func newSftpBackend(config SftpConf) (*sftpBackend, error) { + // read in and parse pem key + key, err := os.ReadFile(config.PemKeyPath) + if err != nil { + return nil, fmt.Errorf("failed to read from key file, %v", err) + } + + var signer ssh.Signer + if config.PemKeyPass == "" { + signer, err = ssh.ParsePrivateKey(key) + } else { + signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(config.PemKeyPass)) + } + if err != nil { + return nil, fmt.Errorf("failed to parse private key, %v", err) + } + + // connect + conn, err := ssh.Dial("tcp", config.Host+":"+config.Port, + &ssh.ClientConfig{ + User: config.UserName, + Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, + HostKeyCallback: TrustedHostKeyCallback(config.HostKey), + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to start ssh connection, %v", err) + } + + // create new SFTP client + client, err := 
sftp.NewClient(conn) + if err != nil { + return nil, fmt.Errorf("failed to start sftp client, %v", err) + } + + sfb := &sftpBackend{ + Connection: conn, + Client: client, + Conf: &config, + } + + _, err = client.ReadDir("./") + + if err != nil { + return nil, fmt.Errorf("failed to list files with sftp, %v", err) + } + + return sfb, nil +} + +// NewFileWriter returns an io.Writer instance for the sftp remote +func (sfb *sftpBackend) NewFileWriter(filePath string) (io.WriteCloser, error) { + if sfb == nil { + return nil, fmt.Errorf("invalid sftpBackend") + } + // Make remote directories + parent := filepath.Dir(filePath) + err := sfb.Client.MkdirAll(parent) + if err != nil { + return nil, fmt.Errorf("failed to create dir with sftp, %v", err) + } + + file, err := sfb.Client.OpenFile(filePath, os.O_CREATE|os.O_TRUNC|os.O_RDWR) + if err != nil { + return nil, fmt.Errorf("failed to create file with sftp, %v", err) + } + + return file, nil +} + +// GetFileSize returns the size of the file +func (sfb *sftpBackend) GetFileSize(filePath string) (int64, error) { + if sfb == nil { + return 0, fmt.Errorf("invalid sftpBackend") + } + + stat, err := sfb.Client.Lstat(filePath) + if err != nil { + return 0, fmt.Errorf("failed to get file size with sftp, %v", err) + } + + return stat.Size(), nil +} + +// NewFileReader returns an io.Reader instance +func (sfb *sftpBackend) NewFileReader(filePath string) (io.ReadCloser, error) { + if sfb == nil { + return nil, fmt.Errorf("invalid sftpBackend") + } + + file, err := sfb.Client.Open(filePath) + if err != nil { + return nil, fmt.Errorf("failed to open file with sftp, %v", err) + } + + return file, nil +} + +// RemoveFile removes a file or an empty directory. 
+func (sfb *sftpBackend) RemoveFile(filePath string) error { + if sfb == nil { + return fmt.Errorf("invalid sftpBackend") + } + + err := sfb.Client.Remove(filePath) + if err != nil { + return fmt.Errorf("failed to remove file with sftp, %v", err) + } + + return nil +} + +func TrustedHostKeyCallback(key string) ssh.HostKeyCallback { + if key == "" { + return func(_ string, _ net.Addr, k ssh.PublicKey) error { + keyString := k.Type() + " " + base64.StdEncoding.EncodeToString(k.Marshal()) + log.Warningf("host key verification is not in effect (Fix by adding trustedKey: %q)", keyString) + + return nil + } + } + + return func(_ string, _ net.Addr, k ssh.PublicKey) error { + keyString := k.Type() + " " + base64.StdEncoding.EncodeToString(k.Marshal()) + if ks := keyString; key != ks { + return fmt.Errorf("host key verification expected %q but got %q", key, ks) + } + + return nil + } +} diff --git a/sda/internal/storage/storage_test.go b/sda/internal/storage/storage_test.go new file mode 100644 index 000000000..a249b2f36 --- /dev/null +++ b/sda/internal/storage/storage_test.go @@ -0,0 +1,466 @@ +package storage + +import ( + "bytes" + "fmt" + "io" + "net/http" + "os" + "strconv" + "testing" + "time" + + "github.com/neicnordic/sensitive-data-archive/internal/helper" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + + log "github.com/sirupsen/logrus" +) + +type StorageTestSuite struct { + suite.Suite +} + +var testConf = Conf{} +var sshPath string +var s3Port, sftpPort int +var writeData = []byte("this is a test") + +const posixType = "posix" +const s3Type = "s3" +const sftpType = "sftp" + +func TestMain(m *testing.M) { + sshPath, _ = os.MkdirTemp("", "ssh") + defer os.RemoveAll(sshPath) + if err := helper.CreateSSHKey(sshPath); err != nil { + log.Panicf("Failed to create SSH keys, reason: %v", err.Error()) + } + + defer func() { + if r := recover(); r != nil { + 
log.Infoln("Recovered") + } + }() + // uses a sensible default on windows (tcp/http) and linux/osx (socket) + pool, err := dockertest.NewPool("") + if err != nil { + log.Panicf("Could not construct pool: %s", err) + } + + // uses pool to try to connect to Docker + err = pool.Client.Ping() + if err != nil { + log.Panicf("Could not connect to Docker: %s", err) + } + + // pulls an image, creates a container based on it and runs it + sftp, err := pool.RunWithOptions(&dockertest.RunOptions{ + Name: "sftp", + Repository: "atmoz/sftp", + Tag: "latest", + Cmd: []string{"user:test:1001::share"}, + Mounts: []string{ + fmt.Sprintf("%s/id_rsa.pub:/home/user/.ssh/keys/id_rsa.pub", sshPath), + fmt.Sprintf("%s/id_rsa:/etc/ssh/ssh_host_rsa_key", sshPath), + }, + }, func(config *docker.HostConfig) { + // set AutoRemove to true so that stopped container goes away by itself + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{ + Name: "no", + } + }) + if err != nil { + log.Panicf("Could not start resource: %s", err) + } + + // sftpHostAndPort := sftp.GetHostPort("22/tcp") + sftpPort, _ = strconv.Atoi(sftp.GetPort("22/tcp")) + + // pulls an image, creates a container based on it and runs it + minio, err := pool.RunWithOptions(&dockertest.RunOptions{ + Name: "s3", + Repository: "minio/minio", + Tag: "RELEASE.2023-05-18T00-05-36Z", + Cmd: []string{"server", "/data"}, + Env: []string{ + "MINIO_ROOT_USER=access", + "MINIO_ROOT_PASSWORD=secretKey", + "MINIO_SERVER_URL=http://127.0.0.1:9000", + }, + }, func(config *docker.HostConfig) { + // set AutoRemove to true so that stopped container goes away by itself + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{ + Name: "no", + } + }) + if err != nil { + log.Panicf("Could not start resource: %s", err) + } + + s3HostAndPort := minio.GetHostPort("9000/tcp") + s3Port, _ = strconv.Atoi(minio.GetPort("9000/tcp")) + + client := http.Client{Timeout: 5 * time.Second} + req, err := 
http.NewRequest(http.MethodGet, "http://"+s3HostAndPort+"/minio/health/live", http.NoBody) + if err != nil { + log.Panic(err) + } + + // exponential backoff-retry, because the application in the container might not be ready to accept connections yet + if err := pool.Retry(func() error { + res, err := client.Do(req) + if err != nil { + return err + } + res.Body.Close() + + return nil + }); err != nil { + if err := pool.Purge(minio); err != nil { + log.Panicf("Could not purge resource: %s", err) + } + log.Panicf("Could not connect to minio: %s", err) + } + + _ = m.Run() + + log.Println("tests completed") + if err := pool.Purge(minio); err != nil { + log.Panicf("Could not purge resource: %s", err) + } + if err := pool.Purge(sftp); err != nil { + log.Panicf("Could not purge resource: %s", err) + } +} + +func TestStorageTestSuite(t *testing.T) { + suite.Run(t, new(StorageTestSuite)) +} + +func (suite *StorageTestSuite) SetupTest() { + testS3Conf := S3Conf{ + "http://127.0.0.1", + s3Port, + "access", + "secretKey", + "bucket", + "region", + 10, + 5 * 1024 * 1024, + "", + 2 * time.Second, + "", + } + + testSftpConf := SftpConf{ + "localhost", + strconv.Itoa(sftpPort), + "user", + fmt.Sprintf("%s/id_rsa", sshPath), + "password", + "", + } + + testPosixConf := posixConf{ + os.TempDir(), + } + + testConf = Conf{posixType, testS3Conf, testPosixConf, testSftpConf} +} + +func (suite *StorageTestSuite) TestNewBackend() { + testConf.Type = posixType + p, err := NewBackend(testConf) + assert.NoError(suite.T(), err, "Backend posix failed") + assert.IsType(suite.T(), p, &posixBackend{}, "Wrong type from NewBackend with posix") + + var buf bytes.Buffer + log.SetOutput(&buf) + + testConf.Type = sftpType + sf, err := NewBackend(testConf) + assert.NoError(suite.T(), err, "Backend sftp failed") + assert.NotZero(suite.T(), buf.Len(), "Expected warning missing") + assert.IsType(suite.T(), sf, &sftpBackend{}, "Wrong type from NewBackend with SFTP") + buf.Reset() + + testConf.Type = s3Type + 
s, err := NewBackend(testConf) + assert.NoError(suite.T(), err, "Backend s3 failed") + assert.IsType(suite.T(), s, &s3Backend{}, "Wrong type from NewBackend with S3") + + // test some extra ssl handling + testConf.S3.CAcert = "/dev/null" + s, err = NewBackend(testConf) + assert.NoError(suite.T(), err, "Backend s3 failed") + assert.IsType(suite.T(), s, &s3Backend{}, "Wrong type from NewBackend with S3") +} + +func (suite *StorageTestSuite) TestCheckS3Bucket() { + err := CheckS3Bucket(testConf.S3.Bucket, CreateS3Session(testConf.S3)) + assert.NoError(suite.T(), err) + + testConf.S3.URL = "file://tmp/" + err = CheckS3Bucket(testConf.S3.Bucket, CreateS3Session(testConf.S3)) + assert.Error(suite.T(), err) +} + +func (suite *StorageTestSuite) TestPosixBackend() { + posixPath, _ := os.MkdirTemp("", "posix") + defer os.RemoveAll(posixPath) + testConf.Type = posixType + testConf.Posix = posixConf{posixPath} + backend, err := NewBackend(testConf) + assert.Nil(suite.T(), err, "POSIX backend failed unexpectedly") + + log.SetOutput(os.Stdout) + + writer, err := backend.NewFileWriter("testFile") + assert.NotNil(suite.T(), writer, "Got a nil reader for writer from posix") + assert.NoError(suite.T(), err, "posix NewFileWriter failed when it shouldn't") + + written, err := writer.Write(writeData) + assert.NoError(suite.T(), err, "Failure when writing to posix writer") + assert.Equal(suite.T(), len(writeData), written, "Did not write all writeData") + writer.Close() + + reader, err := backend.NewFileReader("testFile") + assert.Nil(suite.T(), err, "posix NewFileReader failed when it should work") + assert.NotNil(suite.T(), reader, "Reader that should be usable is not, bailing out") + + var buf bytes.Buffer + log.SetOutput(&buf) + writer, err = backend.NewFileWriter("posix/Not/Creatable") + assert.Nil(suite.T(), writer, "Got a non-nil reader for writer from posix") + assert.Error(suite.T(), err, "posix NewFileWriter worked when it shouldn't") + assert.NotZero(suite.T(), 
buf.Len(), "Expected warning missing") + buf.Reset() + log.SetOutput(os.Stdout) + + var readBackBuffer [4096]byte + readBack, err := reader.Read(readBackBuffer[0:4096]) + + assert.Equal(suite.T(), len(writeData), readBack, "did not read back data as expected") + assert.Equal(suite.T(), writeData, readBackBuffer[:readBack], "did not read back data as expected") + assert.Nil(suite.T(), err, "unexpected error when reading back data") + + size, err := backend.GetFileSize("testFile") + assert.Nil(suite.T(), err, "posix NewFileReader failed when it should work") + assert.NotNil(suite.T(), size, "Got a nil size for posix") + + err = backend.RemoveFile("testFile") + assert.Nil(suite.T(), err, "posix RemoveFile failed when it should work") + + log.SetOutput(&buf) + reader, err = backend.NewFileReader("posixDoesNotExist") + assert.Error(suite.T(), err, "posix NewFileReader worked when it should not") + assert.Nil(suite.T(), reader, "Got a non-nil reader for posix") + assert.NotZero(suite.T(), buf.Len(), "Expected warning missing") + + buf.Reset() + _, err = backend.GetFileSize("posixDoesNotExist") + assert.Error(suite.T(), err, "posix GetFileSize worked when it should not") + assert.NotZero(suite.T(), buf.Len(), "Expected warning missing") + + log.SetOutput(os.Stdout) + + testConf.Posix.Location = "/thisdoesnotexist" + backEnd, err := NewBackend(testConf) + assert.NotNil(suite.T(), err, "Backend worked when it should not") + assert.Nil(suite.T(), backEnd, "Got a backend when expected not to") + + testConf.Posix.Location = "/etc/passwd" + + backEnd, err = NewBackend(testConf) + assert.NotNil(suite.T(), err, "Backend worked when it should not") + assert.Nil(suite.T(), backEnd, "Got a backend when expected not to") + + var dummyBackend *posixBackend + failReader, err := dummyBackend.NewFileReader("/") + assert.NotNil(suite.T(), err, "NewFileReader worked when it should not") + assert.Nil(suite.T(), failReader, "Got a Reader when expected not to") + + failWriter, err := 
dummyBackend.NewFileWriter("/") + assert.NotNil(suite.T(), err, "NewFileWriter worked when it should not") + assert.Nil(suite.T(), failWriter, "Got a Writer when expected not to") + + _, err = dummyBackend.GetFileSize("/") + assert.NotNil(suite.T(), err, "GetFileSize worked when it should not") + + err = dummyBackend.RemoveFile("/") + assert.NotNil(suite.T(), err, "RemoveFile worked when it should not") +} + +func (suite *StorageTestSuite) TestS3Backend() { + testConf.Type = s3Type + s3back, err := NewBackend(testConf) + assert.NoError(suite.T(), err, "Backend failed") + + writer, err := s3back.NewFileWriter("s3Creatable") + assert.NotNil(suite.T(), writer, "Got a nil reader for writer from s3") + assert.Nil(suite.T(), err, "s3 NewFileWriter failed when it shouldn't") + + written, err := writer.Write(writeData) + assert.Nil(suite.T(), err, "Failure when writing to s3 writer") + assert.Equal(suite.T(), len(writeData), written, "Did not write all writeData") + writer.Close() + + reader, err := s3back.NewFileReader("s3Creatable") + assert.Nil(suite.T(), err, "s3 NewFileReader failed when it should work") + assert.NotNil(suite.T(), reader, "Reader that should be usable is not, bailing out") + + size, err := s3back.GetFileSize("s3Creatable") + assert.Nil(suite.T(), err, "s3 GetFileSize failed when it should work") + assert.NotNil(suite.T(), size, "Got a nil size for s3") + assert.Equal(suite.T(), int64(len(writeData)), size, "Got an incorrect file size") + + err = s3back.RemoveFile("s3Creatable") + assert.Nil(suite.T(), err, "s3 RemoveFile failed when it should work") + + var readBackBuffer [4096]byte + readBack, err := reader.Read(readBackBuffer[0:4096]) + + assert.Equal(suite.T(), len(writeData), readBack, "did not read back data as expected") + assert.Equal(suite.T(), writeData, readBackBuffer[:readBack], "did not read back data as expected") + + if err != nil && err != io.EOF { + assert.Nil(suite.T(), err, "unexpected error when reading back data") + } + + var buf 
bytes.Buffer + log.SetOutput(&buf) + + _, err = s3back.GetFileSize("s3DoesNotExist") + assert.NotNil(suite.T(), err, "s3 GetFileSize worked when it should not") + assert.NotZero(suite.T(), buf.Len(), "Expected warning missing") + buf.Reset() + + reader, err = s3back.NewFileReader("s3DoesNotExist") + assert.NotNil(suite.T(), err, "s3 NewFileReader worked when it should not") + assert.Nil(suite.T(), reader, "Got a non-nil reader for s3") + assert.NotZero(suite.T(), buf.Len(), "Expected warning missing") + + log.SetOutput(os.Stdout) + + testConf.S3.URL = "file://tmp/" + _, err = NewBackend(testConf) + assert.Error(suite.T(), err, "Backend worked when it should not") + + var dummyBackend *s3Backend + failReader, err := dummyBackend.NewFileReader("/") + assert.NotNil(suite.T(), err, "NewFileReader worked when it should not") + assert.Nil(suite.T(), failReader, "Got a Reader when expected not to") + + failWriter, err := dummyBackend.NewFileWriter("/") + assert.NotNil(suite.T(), err, "NewFileWriter worked when it should not") + assert.Nil(suite.T(), failWriter, "Got a Writer when expected not to") + + _, err = dummyBackend.GetFileSize("/") + assert.NotNil(suite.T(), err, "GetFileSize worked when it should not") + + err = dummyBackend.RemoveFile("/") + assert.NotNil(suite.T(), err, "RemoveFile worked when it should not") + +} + +func (suite *StorageTestSuite) TestSftpBackend() { + var buf bytes.Buffer + log.SetOutput(&buf) + testConf.Type = sftpType + sftpBack, err := NewBackend(testConf) + assert.NoError(suite.T(), err, "Backend failed") + buf.Reset() + + var sftpDoesNotExist = "nonexistent/file" + var sftpCreatable = "/share/file/exists" + + writer, err := sftpBack.NewFileWriter(sftpCreatable) + assert.NotNil(suite.T(), writer, "Got a nil reader for writer from sftp") + assert.Nil(suite.T(), err, "sftp NewFileWriter failed when it shouldn't") + + written, err := writer.Write(writeData) + assert.Nil(suite.T(), err, "Failure when writing to sftp writer") + 
assert.Equal(suite.T(), len(writeData), written, "Did not write all writeData") + writer.Close() + + reader, err := sftpBack.NewFileReader(sftpCreatable) + assert.Nil(suite.T(), err, "sftp NewFileReader failed when it should work") + assert.NotNil(suite.T(), reader, "Reader that should be usable is not, bailing out") + + size, err := sftpBack.GetFileSize(sftpCreatable) + assert.Nil(suite.T(), err, "sftp GetFileSize failed when it should work") + assert.NotNil(suite.T(), size, "Got a nil size for sftp") + assert.Equal(suite.T(), int64(len(writeData)), size, "Got an incorrect file size") + + err = sftpBack.RemoveFile(sftpCreatable) + assert.Nil(suite.T(), err, "sftp RemoveFile failed when it should work") + + err = sftpBack.RemoveFile(sftpDoesNotExist) + assert.EqualError(suite.T(), err, "failed to remove file with sftp, file does not exist") + + var readBackBuffer [4096]byte + readBack, err := reader.Read(readBackBuffer[0:4096]) + + assert.Equal(suite.T(), len(writeData), readBack, "did not read back data as expected") + assert.Equal(suite.T(), writeData, readBackBuffer[:readBack], "did not read back data as expected") + + if err != nil && err != io.EOF { + assert.Nil(suite.T(), err, "unexpected error when reading back data") + } + + _, err = sftpBack.GetFileSize(sftpDoesNotExist) + assert.EqualError(suite.T(), err, "failed to get file size with sftp, file does not exist") + reader, err = sftpBack.NewFileReader(sftpDoesNotExist) + assert.EqualError(suite.T(), err, "failed to open file with sftp, file does not exist") + assert.Nil(suite.T(), reader, "Got a non-nil reader for sftp") + + // wrong host key + testConf.SFTP.HostKey = "wronghostkey" + _, err = NewBackend(testConf) + assert.ErrorContains(suite.T(), err, "failed to start ssh connection, ssh: handshake failed: host key verification expected") + + // wrong key password + testConf.SFTP.PemKeyPass = "wrongkey" + _, err = NewBackend(testConf) + assert.EqualError(suite.T(), err, "failed to parse private key, x509: 
decryption password incorrect") + + // missing key password + testConf.SFTP.PemKeyPass = "" + _, err = NewBackend(testConf) + assert.EqualError(suite.T(), err, "failed to parse private key, ssh: this private key is passphrase protected") + + // wrong key + testConf.SFTP.PemKeyPath = "nonexistentkey" + _, err = NewBackend(testConf) + assert.EqualError(suite.T(), err, "failed to read from key file, open nonexistentkey: no such file or directory") + + f, _ := os.CreateTemp(sshPath, "dummy") + testConf.SFTP.PemKeyPath = f.Name() + _, err = NewBackend(testConf) + assert.EqualError(suite.T(), err, "failed to parse private key, ssh: no key found") + + testConf.SFTP.Host = "nonexistenthost" + _, err = NewBackend(testConf) + assert.NotNil(suite.T(), err, "Backend worked when it should not") + + var dummyBackend *sftpBackend + failReader, err := dummyBackend.NewFileReader("/") + assert.NotNil(suite.T(), err, "NewFileReader worked when it should not") + assert.Nil(suite.T(), failReader, "Got a Reader when expected not to") + + failWriter, err := dummyBackend.NewFileWriter("/") + assert.NotNil(suite.T(), err, "NewFileWriter worked when it should not") + assert.Nil(suite.T(), failWriter, "Got a Writer when expected not to") + + _, err = dummyBackend.GetFileSize("/") + assert.NotNil(suite.T(), err, "GetFileSize worked when it should not") + + err = dummyBackend.RemoveFile("/") + assert.NotNil(suite.T(), err, "RemoveFile worked when it should not") + assert.EqualError(suite.T(), err, "invalid sftpBackend") +}