Skip to content
This repository has been archived by the owner on Mar 4, 2024. It is now read-only.

Commit

Permalink
Merge branch 'main' into dependabot/github_actions/aquasecurity/trivy-action-0.16.1
Browse files Browse the repository at this point in the history
  • Loading branch information
recharte authored Feb 2, 2024
2 parents 9362f41 + cd7c23c commit b11034f
Show file tree
Hide file tree
Showing 9 changed files with 447 additions and 242 deletions.
216 changes: 130 additions & 86 deletions api-tests/package-lock.json

Large diffs are not rendered by default.

8 changes: 4 additions & 4 deletions api-tests/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -15,15 +15,15 @@
"author": "",
"license": "ISC",
"devDependencies": {
"@typescript-eslint/parser": "^6.15.0",
"dotenv": "^16.1.4",
"@typescript-eslint/parser": "^6.20.0",
"dotenv": "^16.4.1",
"eslint": "^8.56.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-airbnb-typescript": "^17.1.0",
"eslint-plugin-playwright": "^0.20.0"
"eslint-plugin-playwright": "^0.22.1"
},
"dependencies": {
"@playwright/test": "^1.40.1",
"@playwright/test": "^1.41.2",
"shelljs": "^0.8.5"
}
}
87 changes: 47 additions & 40 deletions api/monitoring_instance.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
package api

import (
"context"
"fmt"
"net/http"

Expand All @@ -31,7 +32,7 @@ import (
)

// CreateMonitoringInstance creates a new monitoring instance.
func (e *EverestServer) CreateMonitoringInstance(ctx echo.Context) error { //nolint:funlen,cyclop
func (e *EverestServer) CreateMonitoringInstance(ctx echo.Context) error {
params, err := validateCreateMonitoringInstanceRequest(ctx)
if err != nil {
return ctx.JSON(http.StatusBadRequest, Error{Message: pointer.ToString(err.Error())})
Expand All @@ -51,23 +52,45 @@ func (e *EverestServer) CreateMonitoringInstance(ctx echo.Context) error { //nol
e.l.Error(err)
return ctx.JSON(http.StatusConflict, Error{Message: pointer.ToString(err.Error())})
}
var apiKey string
if params.Pmm != nil && params.Pmm.ApiKey != "" {
apiKey = params.Pmm.ApiKey

apiKey, err := e.getPMMApiKey(c, params)
if err != nil {
e.l.Error(err)
return ctx.JSON(http.StatusInternalServerError, Error{
Message: pointer.ToString("Could not create an API key in PMM"),
})
}
if params.Pmm != nil && params.Pmm.ApiKey == "" && params.Pmm.User != "" && params.Pmm.Password != "" {
e.l.Debug("Getting PMM API key by username and password")
apiKey, err = pmm.CreatePMMApiKey(
c, params.Url, fmt.Sprintf("everest-%s-%s", params.Name, uuid.NewString()),
params.Pmm.User, params.Pmm.Password,
)
if err != nil {
e.l.Error(err)
return ctx.JSON(http.StatusInternalServerError, Error{
Message: pointer.ToString("Could not create an API key in PMM"),
})
}

if err := e.createMonitoringK8sResources(c, params, apiKey); err != nil {
return ctx.JSON(http.StatusInternalServerError, Error{
Message: pointer.ToString(err.Error()),
})
}

result := MonitoringInstance{
Type: MonitoringInstanceBaseWithNameType(params.Type),
Name: params.Name,
Url: params.Url,
}

return ctx.JSON(http.StatusOK, result)
}

// getPMMApiKey returns the API key to use for a PMM monitoring instance.
// When the request already carries an API key it is returned unchanged;
// otherwise a new key is created in PMM with the supplied user credentials,
// under a unique name derived from the instance name.
func (e *EverestServer) getPMMApiKey(ctx context.Context, params *CreateMonitoringInstanceJSONRequestBody) (string, error) {
	if pmmParams := params.Pmm; pmmParams != nil && pmmParams.ApiKey != "" {
		return pmmParams.ApiKey, nil
	}

	e.l.Debug("Getting PMM API key by username and password")
	// The random suffix keeps repeated creations from colliding on key name.
	keyName := fmt.Sprintf("everest-%s-%s", params.Name, uuid.NewString())
	return pmm.CreatePMMApiKey(ctx, params.Url, keyName, params.Pmm.User, params.Pmm.Password)
}

func (e *EverestServer) createMonitoringK8sResources(
c context.Context, params *CreateMonitoringInstanceJSONRequestBody, apiKey string,
) error {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: params.Name,
Expand All @@ -76,24 +99,19 @@ func (e *EverestServer) CreateMonitoringInstance(ctx echo.Context) error { //nol
Type: corev1.SecretTypeOpaque,
StringData: e.monitoringConfigSecretData(apiKey),
}
_, err = e.kubeClient.CreateSecret(c, secret)
if err != nil {
if _, err := e.kubeClient.CreateSecret(c, secret); err != nil {
if k8serrors.IsAlreadyExists(err) {
_, err = e.kubeClient.UpdateSecret(c, secret)
if err != nil {
e.l.Error(err)
return ctx.JSON(http.StatusInternalServerError, Error{
Message: pointer.ToString(fmt.Sprintf("Could not update k8s secret %s", params.Name)),
})
return fmt.Errorf("could not update k8s secret %s", params.Name)
}
} else {
e.l.Error(err)
return ctx.JSON(http.StatusInternalServerError, Error{
Message: pointer.ToString("Failed creating secret in the Kubernetes cluster"),
})
return fmt.Errorf("failed creating secret in the Kubernetes cluster")
}
}
err = e.kubeClient.CreateMonitoringConfig(c, &everestv1alpha1.MonitoringConfig{
err := e.kubeClient.CreateMonitoringConfig(c, &everestv1alpha1.MonitoringConfig{
ObjectMeta: metav1.ObjectMeta{
Name: params.Name,
Namespace: e.kubeClient.Namespace(),
Expand All @@ -108,24 +126,13 @@ func (e *EverestServer) CreateMonitoringInstance(ctx echo.Context) error { //nol
})
if err != nil {
e.l.Error(err)
// TODO: Move this logic to the operator
dErr := e.kubeClient.DeleteSecret(c, params.Name)
if dErr != nil {
return ctx.JSON(http.StatusInternalServerError, Error{
Message: pointer.ToString("Failing cleaning up the secret because failed creating backup storage"),
})
if dErr := e.kubeClient.DeleteSecret(c, params.Name); dErr != nil {
return fmt.Errorf("failed cleaning up the secret because failed creating backup storage")
}
return ctx.JSON(http.StatusInternalServerError, Error{
Message: pointer.ToString("Failed creating monitoring instance"),
})
}
result := MonitoringInstance{
Type: MonitoringInstanceBaseWithNameType(params.Type),
Name: params.Name,
Url: params.Url,
return fmt.Errorf("failed creating monitoring instance")
}

return ctx.JSON(http.StatusOK, result)
return nil
}

// ListMonitoringInstances lists all monitoring instances.
Expand Down
140 changes: 117 additions & 23 deletions api/validation.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ import (
"fmt"
"net/url"
"regexp"
"time"

"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/aws/aws-sdk-go/aws"
Expand All @@ -44,32 +45,40 @@ const (
pxcDeploymentName = "percona-xtradb-cluster-operator"
psmdbDeploymentName = "percona-server-mongodb-operator"
pgDeploymentName = "percona-postgresql-operator"
dateFormat = "2006-01-02T15:04:05Z"
)

var (
minStorageQuantity = resource.MustParse("1G") //nolint:gochecknoglobals
minCPUQuantity = resource.MustParse("600m") //nolint:gochecknoglobals
minMemQuantity = resource.MustParse("512M") //nolint:gochecknoglobals

errDBCEmptyMetadata = errors.New("databaseCluster's Metadata should not be empty")
errDBCNameEmpty = errors.New("databaseCluster's metadata.name should not be empty")
errDBCNameWrongFormat = errors.New("databaseCluster's metadata.name should be a string")
errNotEnoughMemory = fmt.Errorf("memory limits should be above %s", minMemQuantity.String())
errInt64NotSupported = errors.New("specifying resources using int64 data type is not supported. Please use string format for that")
errNotEnoughCPU = fmt.Errorf("CPU limits should be above %s", minCPUQuantity.String())
errNotEnoughDiskSize = fmt.Errorf("storage size should be above %s", minStorageQuantity.String())
errUnsupportedPXCProxy = errors.New("you can use either HAProxy or Proxy SQL for PXC clusters")
errUnsupportedPGProxy = errors.New("you can use only PGBouncer as a proxy type for Postgres clusters")
errUnsupportedPSMDBProxy = errors.New("you can use only Mongos as a proxy type for MongoDB clusters")
errNoSchedules = errors.New("please specify at least one backup schedule")
errNoNameInSchedule = errors.New("'name' field for the backup schedules cannot be empty")
errScheduleNoBackupStorageName = errors.New("'backupStorageName' field cannot be empty when schedule is enabled")
errPitrNoBackupStorageName = errors.New("'backupStorageName' field cannot be empty when pitr is enabled")
errNoResourceDefined = errors.New("please specify resource limits for the cluster")
errPitrUploadInterval = errors.New("'uploadIntervalSec' should be more than 0")
errPXCPitrS3Only = errors.New("point-in-time recovery only supported for s3 compatible storages")
errPSMDBMultipleStorages = errors.New("can't use more than one backup storage for PSMDB clusters")
errPSMDBViolateActiveStorage = errors.New("can't change the active storage for PSMDB clusters")
errDBCEmptyMetadata = errors.New("databaseCluster's Metadata should not be empty")
errDBCNameEmpty = errors.New("databaseCluster's metadata.name should not be empty")
errDBCNameWrongFormat = errors.New("databaseCluster's metadata.name should be a string")
errNotEnoughMemory = fmt.Errorf("memory limits should be above %s", minMemQuantity.String())
errInt64NotSupported = errors.New("specifying resources using int64 data type is not supported. Please use string format for that")
errNotEnoughCPU = fmt.Errorf("CPU limits should be above %s", minCPUQuantity.String())
errNotEnoughDiskSize = fmt.Errorf("storage size should be above %s", minStorageQuantity.String())
errUnsupportedPXCProxy = errors.New("you can use either HAProxy or Proxy SQL for PXC clusters")
errUnsupportedPGProxy = errors.New("you can use only PGBouncer as a proxy type for Postgres clusters")
errUnsupportedPSMDBProxy = errors.New("you can use only Mongos as a proxy type for MongoDB clusters")
errNoSchedules = errors.New("please specify at least one backup schedule")
errNoNameInSchedule = errors.New("'name' field for the backup schedules cannot be empty")
errScheduleNoBackupStorageName = errors.New("'backupStorageName' field cannot be empty when schedule is enabled")
errPitrNoBackupStorageName = errors.New("'backupStorageName' field cannot be empty when pitr is enabled")
errNoResourceDefined = errors.New("please specify resource limits for the cluster")
errPitrUploadInterval = errors.New("'uploadIntervalSec' should be more than 0")
errPXCPitrS3Only = errors.New("point-in-time recovery only supported for s3 compatible storages")
errPSMDBMultipleStorages = errors.New("can't use more than one backup storage for PSMDB clusters")
errPSMDBViolateActiveStorage = errors.New("can't change the active storage for PSMDB clusters")
errDataSourceConfig = errors.New("either DBClusterBackupName or BackupSource must be specified in the DataSource field")
errDataSourceNoPitrDateSpecified = errors.New("pitr Date must be specified for type Date")
errDataSourceWrongDateFormat = errors.New("failed to parse .Spec.DataSource.Pitr.Date as 2006-01-02T15:04:05Z")
errDataSourceNoBackupStorageName = errors.New("'backupStorageName' should be specified in .Spec.DataSource.BackupSource")
errDataSourceNoPath = errors.New("'path' should be specified in .Spec.DataSource.BackupSource")
errIncorrectDataSourceStruct = errors.New("incorrect data source struct")
errUnsupportedPitrType = errors.New("the given point-in-time recovery type is not supported")
//nolint:gochecknoglobals
operatorEngine = map[everestv1alpha1.EngineType]string{
everestv1alpha1.DatabaseEnginePXC: pxcDeploymentName,
Expand Down Expand Up @@ -251,11 +260,13 @@ func validateUpdateBackupStorageRequest(ctx echo.Context, bs *everestv1alpha1.Ba
return nil, err
}

url := &bs.Spec.EndpointURL
if params.Url != nil {
if ok := validateURL(*params.Url); !ok {
err := ErrInvalidURL("url")
return nil, err
}
url = params.Url
}
accessKey := string(secret.Data["AWS_ACCESS_KEY_ID"])
if params.AccessKey != nil {
Expand All @@ -270,12 +281,16 @@ func validateUpdateBackupStorageRequest(ctx echo.Context, bs *everestv1alpha1.Ba
if params.BucketName != nil {
bucketName = *params.BucketName
}
region := bs.Spec.Region
if params.Region != nil {
region = *params.Region
}
switch string(bs.Spec.Type) {
case string(BackupStorageTypeS3):
if params.Region != nil && *params.Region == "" {
return nil, errors.New("region is required when using S3 storage type")
}
if err := s3Access(l, &bs.Spec.EndpointURL, accessKey, secretKey, bucketName, bs.Spec.Region); err != nil {
if err := s3Access(l, url, accessKey, secretKey, bucketName, region); err != nil {
return nil, err
}
case string(BackupStorageTypeAzure):
Expand Down Expand Up @@ -341,8 +356,8 @@ func validateCreateMonitoringInstanceRequest(ctx echo.Context) (*CreateMonitorin
return nil, fmt.Errorf("pmm key is required for type %s", params.Type)
}

if params.Pmm.ApiKey == "" && params.Pmm.User == "" && params.Pmm.Password == "" {
return nil, errors.New("one of pmm.apiKey, pmm.user or pmm.password fields is required")
if params.Pmm.ApiKey == "" && (params.Pmm.User == "" || params.Pmm.Password == "") {
return nil, errors.New("pmm.apiKey or pmm.user with pmm.password fields are required")
}
default:
return nil, fmt.Errorf("monitoring type %s is not supported", params.Type)
Expand Down Expand Up @@ -409,7 +424,7 @@ func validateCreateDatabaseClusterRequest(dbc DatabaseCluster) error {
return validateRFC1035(strName, "metadata.name")
}

func (e *EverestServer) validateDatabaseClusterCR(ctx echo.Context, databaseCluster *DatabaseCluster) error {
func (e *EverestServer) validateDatabaseClusterCR(ctx echo.Context, databaseCluster *DatabaseCluster) error { //nolint:cyclop
if err := validateCreateDatabaseClusterRequest(*databaseCluster); err != nil {
return err
}
Expand Down Expand Up @@ -446,6 +461,12 @@ func (e *EverestServer) validateDatabaseClusterCR(ctx echo.Context, databaseClus
return err
}

if databaseCluster.Spec.DataSource != nil {
if err := validateDBDataSource(databaseCluster); err != nil {
return err
}
}

return validateResourceLimits(databaseCluster)
}

Expand Down Expand Up @@ -615,6 +636,64 @@ func validateResourceLimits(cluster *DatabaseCluster) error {
return validateStorageSize(cluster)
}

// validateDBDataSource validates the data source of a database cluster by
// marshaling it to JSON and running it through the shared data-source
// validation (see validateCommonDataSourceStruct).
func validateDBDataSource(db *DatabaseCluster) error {
	// Renamed the local from `bytes` so it no longer shadows the standard
	// library package of the same name.
	data, err := json.Marshal(db.Spec.DataSource)
	if err != nil {
		return errIncorrectDataSourceStruct
	}
	return validateCommonDataSourceStruct(data)
}

// validateRestoreDataSource validates the data source of a database cluster
// restore by marshaling it to JSON and running it through the shared
// data-source validation (see validateCommonDataSourceStruct).
func validateRestoreDataSource(restore *DatabaseClusterRestore) error {
	// Renamed the local from `bytes` so it no longer shadows the standard
	// library package of the same name.
	data, err := json.Marshal(restore.Spec.DataSource)
	if err != nil {
		return errIncorrectDataSourceStruct
	}
	return validateCommonDataSourceStruct(data)
}

// validateCommonDataSourceStruct decodes a JSON-encoded data source into the
// shared dataSourceStruct representation and validates it. Marshal/unmarshal
// round-tripping lets the same validation cover both the DB-cluster and the
// restore data-source shapes.
func validateCommonDataSourceStruct(data []byte) error {
	var ds dataSourceStruct
	if err := json.Unmarshal(data, &ds); err != nil {
		return errIncorrectDataSourceStruct
	}
	return validateDataSource(ds)
}

// validateDataSource checks that a data source names exactly one origin —
// either an existing DB cluster backup or an explicit backup source — and
// that any point-in-time recovery settings are complete and well-formed.
func validateDataSource(dataSource dataSourceStruct) error {
	backupNameSet := dataSource.DbClusterBackupName != nil && *dataSource.DbClusterBackupName != ""
	switch {
	case dataSource.DbClusterBackupName == nil && dataSource.BackupSource == nil:
		// No origin specified at all.
		return errDataSourceConfig
	case backupNameSet && dataSource.BackupSource != nil:
		// Both origins at once are ambiguous.
		return errDataSourceConfig
	}

	if src := dataSource.BackupSource; src != nil {
		if src.BackupStorageName == "" {
			return errDataSourceNoBackupStorageName
		}
		if src.Path == "" {
			return errDataSourceNoPath
		}
	}

	pitr := dataSource.Pitr
	if pitr == nil {
		return nil
	}
	if pitr.Type != nil && *pitr.Type != string(DatabaseClusterSpecDataSourcePitrTypeDate) {
		return errUnsupportedPitrType
	}
	// An absent type defaults to the "date" type, which requires a parsable date.
	if pitr.Date == nil {
		return errDataSourceNoPitrDateSpecified
	}
	if _, err := time.Parse(dateFormat, *pitr.Date); err != nil {
		return errDataSourceWrongDateFormat
	}
	return nil
}

func ensureNonEmptyResources(cluster *DatabaseCluster) error {
if cluster.Spec.Engine.Resources == nil {
return errNoResourceDefined
Expand Down Expand Up @@ -790,5 +869,20 @@ func validateDatabaseClusterRestore(ctx context.Context, restore *DatabaseCluste
}
return err
}
if err = validateRestoreDataSource(restore); err != nil {
return err
}
return err
}

// dataSourceStruct is a unified JSON representation of a data source. Both
// the DB-cluster and the restore data sources are marshaled into it so a
// single validation routine (validateDataSource) can handle both shapes.
type dataSourceStruct struct {
	// BackupSource points at an explicit backup location in a storage.
	BackupSource *struct {
		BackupStorageName string `json:"backupStorageName"`
		Path string `json:"path"`
	} `json:"backupSource,omitempty"`
	// DbClusterBackupName references an existing DB cluster backup by name.
	DbClusterBackupName *string `json:"dbClusterBackupName,omitempty"` //nolint:stylecheck
	// Pitr carries optional point-in-time recovery settings; when Type is
	// nil it is treated as the "date" type, which requires Date to be set.
	Pitr *struct {
		Date *string `json:"date,omitempty"`
		Type *string `json:"type,omitempty"`
	} `json:"pitr,omitempty"`
}
Loading

0 comments on commit b11034f

Please sign in to comment.