From ca6f895a03e4afbbb21c8ca5773545befe490737 Mon Sep 17 00:00:00 2001 From: Oksana Grishchenko Date: Thu, 8 Feb 2024 12:29:59 +0200 Subject: [PATCH 01/11] EVEREST-827-pg-repos-validation --- api/validation.go | 88 +++++++++++++++++++++++-- api/validation_test.go | 144 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 228 insertions(+), 4 deletions(-) diff --git a/api/validation.go b/api/validation.go index 8e845eb0..7bdf0009 100644 --- a/api/validation.go +++ b/api/validation.go @@ -36,6 +36,7 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/percona/percona-everest-backend/cmd/config" "github.com/percona/percona-everest-backend/pkg/kubernetes" @@ -46,6 +47,7 @@ const ( psmdbDeploymentName = "percona-server-mongodb-operator" pgDeploymentName = "percona-postgresql-operator" dateFormat = "2006-01-02T15:04:05Z" + pgReposLimit = 3 ) var ( @@ -79,6 +81,9 @@ var ( errDataSourceNoPath = errors.New("'path' should be specified in .Spec.DataSource.BackupSource") errIncorrectDataSourceStruct = errors.New("incorrect data source struct") errUnsupportedPitrType = errors.New("the given point-in-time recovery type is not supported") + errTooManyPGSchedules = fmt.Errorf("only %d schedules are allowed", pgReposLimit) + errTooManyPGStorages = fmt.Errorf("only %d different storages are allowed to use for a postgres cluster", pgReposLimit) + //nolint:gochecknoglobals operatorEngine = map[everestv1alpha1.EngineType]string{ everestv1alpha1.DatabaseEnginePXC: pxcDeploymentName, @@ -406,22 +411,31 @@ func validateUpdateMonitoringInstanceType(params UpdateMonitoringInstanceJSONReq } func validateCreateDatabaseClusterRequest(dbc DatabaseCluster) error { + strName, err := nameFromDatabaseCluster(dbc) + if err != nil { + return err + } + + return validateRFC1035(strName, "metadata.name") +} + +func nameFromDatabaseCluster(dbc DatabaseCluster) (string, error) { if dbc.Metadata == nil { - return errDBCEmptyMetadata + return "", errDBCEmptyMetadata } md := *dbc.Metadata name, ok := md["name"] if !ok { - return errDBCNameEmpty + return "", errDBCNameEmpty } strName, ok := name.(string) if !ok { - return errDBCNameWrongFormat + return "", errDBCNameWrongFormat } - return validateRFC1035(strName, "metadata.name") + return strName, nil } func (e *EverestServer) validateDatabaseClusterCR(ctx echo.Context, databaseCluster *DatabaseCluster) error { //nolint:cyclop @@ -467,6 +481,12 @@ func (e *EverestServer) validateDatabaseClusterCR(ctx echo.Context, databaseClus } } + if databaseCluster.Spec.Engine.Type == DatabaseClusterSpecEngineType(everestv1alpha1.DatabaseEnginePostgresql) { + if err = validatePGReposForAPIDB(ctx.Request().Context(), databaseCluster, e.kubeClient.ListDatabaseClusterBackups); err != nil { + return err + } + } + return validateResourceLimits(databaseCluster) } @@ -819,6 +839,10 @@ func validateDatabaseClusterBackup(ctx context.Context, backup *DatabaseClusterB return err } + if err = validatePGRepos(ctx, *db, kubeClient); err != nil { + return err + } + if db.Spec.Engine.Type == everestv1alpha1.DatabaseEnginePSMDB { if db.Status.ActiveStorage != "" && db.Status.ActiveStorage != b.Spec.BackupStorageName { return errPSMDBViolateActiveStorage @@ -827,6 +851,26 @@ func validateDatabaseClusterBackup(ctx context.Context, backup *DatabaseClusterB return nil } +func validatePGRepos(ctx context.Context, db everestv1alpha1.DatabaseCluster, kubeClient *kubernetes.Kubernetes) 
error { + if db.Spec.Engine.Type != everestv1alpha1.DatabaseEnginePostgresql { + return nil + } + + // convert between k8s and api structure + str, err := json.Marshal(db) + if err != nil { + return err + } + apiDB := &DatabaseCluster{} + if err := json.Unmarshal(str, apiDB); err != nil { + return err + } + if err = validatePGReposForAPIDB(ctx, apiDB, kubeClient.ListDatabaseClusterBackups); err != nil { + return err + } + return nil +} + func validateDatabaseClusterRestore(ctx context.Context, restore *DatabaseClusterRestore, kubeClient *kubernetes.Kubernetes) error { if restore == nil { return errors.New("restore cannot be empty") @@ -886,3 +930,39 @@ type dataSourceStruct struct { Type *string `json:"type,omitempty"` } `json:"pitr,omitempty"` } + +func validatePGReposForAPIDB(ctx context.Context, dbc *DatabaseCluster, getBackupsFunc func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error)) error { + bs := make(map[string]bool) + if dbc.Spec != nil && dbc.Spec.Backup != nil && dbc.Spec.Backup.Schedules != nil { + for _, shed := range *dbc.Spec.Backup.Schedules { + bs[shed.BackupStorageName] = true + } + + // first check if there are too many schedules. Each schedule is configured in a separate repo. + if len(*dbc.Spec.Backup.Schedules) > pgReposLimit { + return errTooManyPGSchedules + } + } + + dbcName, err := nameFromDatabaseCluster(*dbc) + if err != nil { + return err + } + backups, err := getBackupsFunc(ctx, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("clusterName=%s", dbcName), + }) + if err != nil { + return err + } + + for _, backup := range backups.Items { + bs[backup.Spec.BackupStorageName] = true + } + + // second check if there are too many schedules used. + if len(bs) > pgReposLimit { + return errTooManyPGStorages + } + + return nil +} diff --git a/api/validation_test.go b/api/validation_test.go index 4990f94b..596ac73e 100644 --- a/api/validation_test.go +++ b/api/validation_test.go @@ -24,6 +24,7 @@ import ( everestv1alpha1 "github.com/percona/everest-operator/api/v1alpha1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestValidateRFC1035(t *testing.T) { @@ -720,3 +721,146 @@ func TestValidateDataSource(t *testing.T) { }) } } + +func TestValidatePGReposForAPIDB(t *testing.T) { + t.Parallel() + cases := []struct { + name string + cluster []byte + getBackupsFunc func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) + err error + }{ + { + name: "ok: no schedules no backups", + cluster: []byte(`{"metaData":{"name":"some"}}`), + getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + return &everestv1alpha1.DatabaseClusterBackupList{ + Items: []everestv1alpha1.DatabaseClusterBackup{}, + }, nil + }, + err: nil, + }, + { + name: "ok: 2 schedules 2 backups with the same storages", + cluster: []byte(`{"metaData":{"name":"some"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"}]}}}`), + getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + return &everestv1alpha1.DatabaseClusterBackupList{ + Items: []everestv1alpha1.DatabaseClusterBackup{ + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}}, + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs2"}}, + }, + }, nil + }, + err: nil, 
+ }, + { + name: "ok: 3 schedules", + cluster: []byte(`{"metaData":{"name":"some"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"},{"backupStorageName":"bs3"}]}}}`), + getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + return &everestv1alpha1.DatabaseClusterBackupList{ + Items: []everestv1alpha1.DatabaseClusterBackup{}, + }, nil + }, + err: nil, + }, + { + name: "ok: 3 backups with different storages", + cluster: []byte(`{"metaData":{"name":"some"}}`), + getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + return &everestv1alpha1.DatabaseClusterBackupList{ + Items: []everestv1alpha1.DatabaseClusterBackup{ + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}}, + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs2"}}, + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs3"}}, + }, + }, nil + }, + err: nil, + }, + { + name: "ok: 5 backups with repeating storages", + cluster: []byte(`{"metaData":{"name":"some"}}`), + getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + return &everestv1alpha1.DatabaseClusterBackupList{ + Items: []everestv1alpha1.DatabaseClusterBackup{ + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}}, + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs2"}}, + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs3"}}, + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}}, + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs2"}}, + }, + }, nil + }, + err: nil, + }, + { + name: "error: 4 backups with different storages", + cluster: []byte(`{"metaData":{"name":"some"}}`), + getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + return &everestv1alpha1.DatabaseClusterBackupList{ + Items: []everestv1alpha1.DatabaseClusterBackup{ + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}}, + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs2"}}, + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs3"}}, + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs4"}}, + }, + }, nil + }, + err: errTooManyPGStorages, + }, + { + name: "ok: 4 backups with same storages", + cluster: []byte(`{"metaData":{"name":"some"}}`), + getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + return &everestv1alpha1.DatabaseClusterBackupList{ + Items: []everestv1alpha1.DatabaseClusterBackup{ + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}}, + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs2"}}, + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs2"}}, + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}}, + }, + }, nil + }, + err: nil, + }, + { + name: "error: 4 schedules", + cluster: []byte(`{"metaData":{"name":"some"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"},{"backupStorageName":"bs3"},{"backupStorageName":"bs4"}]}}}`), + getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) 
(*everestv1alpha1.DatabaseClusterBackupList, error) { + return &everestv1alpha1.DatabaseClusterBackupList{ + Items: []everestv1alpha1.DatabaseClusterBackup{}, + }, nil + }, + err: errTooManyPGSchedules, + }, + { + name: "error: 2 schedules 2 backups with different storages", + cluster: []byte(`{"metaData":{"name":"some"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"}]}}}`), + getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + return &everestv1alpha1.DatabaseClusterBackupList{ + Items: []everestv1alpha1.DatabaseClusterBackup{ + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs3"}}, + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs4"}}, + }, + }, nil + }, + err: errTooManyPGStorages, + }, + } + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + db := &DatabaseCluster{} + err := json.Unmarshal(tc.cluster, db) + require.NoError(t, err) + err = validatePGReposForAPIDB(context.Background(), db, tc.getBackupsFunc) + if tc.err == nil { + require.NoError(t, err) + return + } + require.Error(t, err) + assert.Equal(t, tc.err.Error(), err.Error()) + }) + } +} From ca8cab2ac223ee8c8a6d0bd986c529af2772d00a Mon Sep 17 00:00:00 2001 From: Oksana Grishchenko Date: Thu, 8 Feb 2024 12:50:15 +0200 Subject: [PATCH 02/11] EVEREST-827-adopt-to-namespaces --- api/validation.go | 34 ++++++++++++++++++++++++---------- api/validation_test.go | 38 +++++++++++++++++++------------------- 2 files changed, 43 insertions(+), 29 deletions(-) diff --git a/api/validation.go b/api/validation.go index 0e78659f..c0151b8c 100644 --- a/api/validation.go +++ b/api/validation.go @@ -57,7 +57,9 @@ var ( errDBCEmptyMetadata = errors.New("databaseCluster's Metadata should not be empty") errDBCNameEmpty = errors.New("databaseCluster's metadata.name should not be empty") + errDBCNamespaceEmpty = errors.New("databaseCluster's metadata.namespace should not be empty") errDBCNameWrongFormat = errors.New("databaseCluster's metadata.name should be a string") + errDBCNamespaceWrongFormat = errors.New("databaseCluster's metadata.namespace should be a string") errNotEnoughMemory = fmt.Errorf("memory limits should be above %s", minMemQuantity.String()) errInt64NotSupported = errors.New("specifying resources using int64 data type is not supported. 
Please use string format for that") errNotEnoughCPU = fmt.Errorf("CPU limits should be above %s", minCPUQuantity.String()) @@ -458,31 +460,42 @@ func validateUpdateMonitoringInstanceType(params UpdateMonitoringInstanceJSONReq } func validateCreateDatabaseClusterRequest(dbc DatabaseCluster) error { - strName, err := nameFromDatabaseCluster(dbc) + name, _, err := nameFromDatabaseCluster(dbc) if err != nil { return err } - return validateRFC1035(strName, "metadata.name") + return validateRFC1035(name, "metadata.name") } -func nameFromDatabaseCluster(dbc DatabaseCluster) (string, error) { +func nameFromDatabaseCluster(dbc DatabaseCluster) (string, string, error) { if dbc.Metadata == nil { - return "", errDBCEmptyMetadata + return "", "", errDBCEmptyMetadata } md := *dbc.Metadata name, ok := md["name"] if !ok { - return "", errDBCNameEmpty + return "", "", errDBCNameEmpty } strName, ok := name.(string) if !ok { - return "", errDBCNameWrongFormat + return "", "", errDBCNameWrongFormat } - return strName, nil + md = *dbc.Metadata + ns, ok := md["namespace"] + if !ok { + return "", "", errDBCNamespaceEmpty + } + + strNS, ok := ns.(string) + if !ok { + return "", "", errDBCNamespaceWrongFormat + } + + return strName, strNS, nil } func (e *EverestServer) validateDatabaseClusterCR(ctx echo.Context, namespace string, databaseCluster *DatabaseCluster) error { //nolint:cyclop @@ -978,7 +991,7 @@ type dataSourceStruct struct { } `json:"pitr,omitempty"` } -func validatePGReposForAPIDB(ctx context.Context, dbc *DatabaseCluster, getBackupsFunc func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error)) error { +func validatePGReposForAPIDB(ctx context.Context, dbc *DatabaseCluster, getBackupsFunc func(context.Context, string, metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error)) error { bs := make(map[string]bool) if dbc.Spec != nil && dbc.Spec.Backup != nil && dbc.Spec.Backup.Schedules != nil { for _, shed := range *dbc.Spec.Backup.Schedules { @@ -991,11 +1004,12 @@ func validatePGReposForAPIDB(ctx context.Context, dbc *DatabaseCluster, getBacku } } - dbcName, err := nameFromDatabaseCluster(*dbc) + dbcName, dbcNamespace, err := nameFromDatabaseCluster(*dbc) if err != nil { return err } - backups, err := getBackupsFunc(ctx, metav1.ListOptions{ + + backups, err := getBackupsFunc(ctx, dbcNamespace, metav1.ListOptions{ LabelSelector: fmt.Sprintf("clusterName=%s", dbcName), }) if err != nil { diff --git a/api/validation_test.go b/api/validation_test.go index 596ac73e..e775f906 100644 --- a/api/validation_test.go +++ b/api/validation_test.go @@ -727,13 +727,13 @@ func TestValidatePGReposForAPIDB(t *testing.T) { cases := []struct { name string cluster []byte - getBackupsFunc func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) + getBackupsFunc func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) err error }{ { name: "ok: no schedules no backups", - cluster: []byte(`{"metaData":{"name":"some"}}`), - getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"}}`), + getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { return &everestv1alpha1.DatabaseClusterBackupList{ Items: []everestv1alpha1.DatabaseClusterBackup{}, }, nil @@ 
-742,8 +742,8 @@ func TestValidatePGReposForAPIDB(t *testing.T) { }, { name: "ok: 2 schedules 2 backups with the same storages", - cluster: []byte(`{"metaData":{"name":"some"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"}]}}}`), - getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"}]}}}`), + getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { return &everestv1alpha1.DatabaseClusterBackupList{ Items: []everestv1alpha1.DatabaseClusterBackup{ {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}}, @@ -755,8 +755,8 @@ func TestValidatePGReposForAPIDB(t *testing.T) { }, { name: "ok: 3 schedules", - cluster: []byte(`{"metaData":{"name":"some"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"},{"backupStorageName":"bs3"}]}}}`), - getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"},{"backupStorageName":"bs3"}]}}}`), + getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { return &everestv1alpha1.DatabaseClusterBackupList{ Items: []everestv1alpha1.DatabaseClusterBackup{}, }, nil @@ -765,8 +765,8 @@ func TestValidatePGReposForAPIDB(t *testing.T) { }, { name: "ok: 3 backups with different storages", - cluster: []byte(`{"metaData":{"name":"some"}}`), - getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"}}`), + getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { return &everestv1alpha1.DatabaseClusterBackupList{ Items: []everestv1alpha1.DatabaseClusterBackup{ {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}}, @@ -779,8 +779,8 @@ func TestValidatePGReposForAPIDB(t *testing.T) { }, { name: "ok: 5 backups with repeating storages", - cluster: []byte(`{"metaData":{"name":"some"}}`), - getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"}}`), + getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { return &everestv1alpha1.DatabaseClusterBackupList{ Items: []everestv1alpha1.DatabaseClusterBackup{ {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}}, @@ -795,8 +795,8 @@ func TestValidatePGReposForAPIDB(t *testing.T) { }, { name: "error: 4 backups with different storages", - cluster: []byte(`{"metaData":{"name":"some"}}`), - getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"}}`), + getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) 
{ return &everestv1alpha1.DatabaseClusterBackupList{ Items: []everestv1alpha1.DatabaseClusterBackup{ {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}}, @@ -810,8 +810,8 @@ func TestValidatePGReposForAPIDB(t *testing.T) { }, { name: "ok: 4 backups with same storages", - cluster: []byte(`{"metaData":{"name":"some"}}`), - getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"}}`), + getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { return &everestv1alpha1.DatabaseClusterBackupList{ Items: []everestv1alpha1.DatabaseClusterBackup{ {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}}, @@ -825,8 +825,8 @@ func TestValidatePGReposForAPIDB(t *testing.T) { }, { name: "error: 4 schedules", - cluster: []byte(`{"metaData":{"name":"some"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"},{"backupStorageName":"bs3"},{"backupStorageName":"bs4"}]}}}`), - getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"},{"backupStorageName":"bs3"},{"backupStorageName":"bs4"}]}}}`), + getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { return &everestv1alpha1.DatabaseClusterBackupList{ Items: []everestv1alpha1.DatabaseClusterBackup{}, }, nil @@ -835,8 +835,8 @@ func TestValidatePGReposForAPIDB(t *testing.T) { }, { name: "error: 2 schedules 2 backups with different storages", - cluster: []byte(`{"metaData":{"name":"some"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"}]}}}`), - getBackupsFunc: func(ctx context.Context, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"}]}}}`), + getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { return &everestv1alpha1.DatabaseClusterBackupList{ Items: []everestv1alpha1.DatabaseClusterBackup{ {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs3"}}, From 993a6d4f5c1287a3aa6e0e1cf81cfce59e2de6cd Mon Sep 17 00:00:00 2001 From: Oksana Grishchenko Date: Thu, 8 Feb 2024 16:53:33 +0200 Subject: [PATCH 03/11] operator version --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2e3b6d91..a1257506 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/labstack/echo/v4 v4.11.4 github.com/oapi-codegen/echo-middleware v1.0.1 github.com/oapi-codegen/runtime v1.1.1 - github.com/percona/everest-operator v0.6.0-dev1.0.20240207144724-d5253b875e28 + github.com/percona/everest-operator v0.6.0-dev1.0.20240207193854-cdd70b8eb1e6 github.com/stretchr/testify v1.8.4 go.uber.org/zap v1.26.0 golang.org/x/crypto v0.18.0 diff --git a/go.sum b/go.sum index 258256d5..a399ac90 100644 --- a/go.sum +++ b/go.sum @@ -420,8 +420,8 @@ github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8P 
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/percona/everest-operator v0.6.0-dev1.0.20240207144724-d5253b875e28 h1:N9dZVyeXzUTK+xRdz9DBcaWj3X6oUPpezEwdH2jT4cg= -github.com/percona/everest-operator v0.6.0-dev1.0.20240207144724-d5253b875e28/go.mod h1:45pGpvWrPy495qiQqxNuOJor4wif+vTTTJP4Qee8qZk= +github.com/percona/everest-operator v0.6.0-dev1.0.20240207193854-cdd70b8eb1e6 h1:leGa/XuWVstdYyj61r92xByjuT52jsbVroHB0fj4j7A= +github.com/percona/everest-operator v0.6.0-dev1.0.20240207193854-cdd70b8eb1e6/go.mod h1:45pGpvWrPy495qiQqxNuOJor4wif+vTTTJP4Qee8qZk= github.com/percona/percona-backup-mongodb v1.8.1-0.20230920143330-3b1c2e263901 h1:BDgsZRCjEuxl2/z4yWBqB0s8d20shuIDks7/RVdZiLs= github.com/percona/percona-backup-mongodb v1.8.1-0.20230920143330-3b1c2e263901/go.mod h1:fZRCMpUqkWlLVdRKqqaj001LoVP2eo6F0ZhoMPeXDng= github.com/percona/percona-postgresql-operator v0.0.0-20231220140959-ad5eef722609 h1:+UOK4gcHrRgqjo4smgfwT7/0apF6PhAJdQIdAV4ub/M= From e5000b5051af1d294f6dbf2dca9654784ac748d7 Mon Sep 17 00:00:00 2001 From: Oksana Grishchenko Date: Thu, 8 Feb 2024 16:57:08 +0200 Subject: [PATCH 04/11] ns tests --- api/validation_test.go | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/api/validation_test.go b/api/validation_test.go index e775f906..28eb8b30 100644 --- a/api/validation_test.go +++ b/api/validation_test.go @@ -112,35 +112,40 @@ func TestValidateCreateDatabaseClusterRequest(t *testing.T) { { name: "empty dbCluster name", value: DatabaseCluster{Metadata: &map[string]interface{}{ - "name": "", + "name": "", + "namespace": "ns", }}, err: ErrNameNotRFC1035Compatible("metadata.name"), }, { name: "starts with -", value: DatabaseCluster{Metadata: &map[string]interface{}{ - "name": "-sdfasa", + "name": "-sdfasa", + "namespace": "ns", }}, err: ErrNameNotRFC1035Compatible("metadata.name"), }, { name: "ends with -", value: DatabaseCluster{Metadata: &map[string]interface{}{ - "name": "sdfasa-", + "name": "sdfasa-", + "namespace": "ns", }}, err: ErrNameNotRFC1035Compatible("metadata.name"), }, { name: "contains uppercase", value: DatabaseCluster{Metadata: &map[string]interface{}{ - "name": "AAsdf", + "name": "AAsdf", + "namespace": "ns", }}, err: ErrNameNotRFC1035Compatible("metadata.name"), }, { name: "valid", value: DatabaseCluster{Metadata: &map[string]interface{}{ - "name": "amsdf-sllla", + "name": "amsdf-sllla", + "namespace": "ns", }}, err: nil, }, @@ -154,7 +159,8 @@ func TestValidateCreateDatabaseClusterRequest(t *testing.T) { { name: "dbCluster name too long", value: DatabaseCluster{Metadata: &map[string]interface{}{ - "name": "a123456789a123456789a12", + "name": "a123456789a123456789a12", + "namespace": "ns", }}, err: ErrNameTooLong("metadata.name"), }, From 3f7ad68c3168328693fccdf1415aec0114be2eba Mon Sep 17 00:00:00 2001 From: Oksana Grishchenko Date: Fri, 9 Feb 2024 09:54:46 +0200 Subject: [PATCH 05/11] remove name --- .github/workflows/ci.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2e10da63..b8434a6a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -356,7 +356,6 @@ jobs: echo $(git describe --always) make build ./bin/everest install \ - --name minikube \ --operator.mongodb \ --operator.postgresql \ --operator.xtradb-cluster \ From 
6bed86ea637960c9270eb399c39a37c333083e8f Mon Sep 17 00:00:00 2001 From: Oksana Grishchenko Date: Fri, 9 Feb 2024 11:13:53 +0200 Subject: [PATCH 06/11] consider the being created backup by validation --- api/validation.go | 33 +++++++++++++++++++++++++-------- api/validation_test.go | 12 ++++++++++++ 2 files changed, 37 insertions(+), 8 deletions(-) diff --git a/api/validation.go b/api/validation.go index c0151b8c..fb804204 100644 --- a/api/validation.go +++ b/api/validation.go @@ -899,7 +899,7 @@ func validateDatabaseClusterBackup(ctx context.Context, namespace string, backup return err } - if err = validatePGRepos(ctx, *db, kubeClient); err != nil { + if err = validatePGReposForBackup(ctx, *db, kubeClient, *b); err != nil { return err } @@ -911,7 +911,7 @@ func validateDatabaseClusterBackup(ctx context.Context, namespace string, backup return nil } -func validatePGRepos(ctx context.Context, db everestv1alpha1.DatabaseCluster, kubeClient *kubernetes.Kubernetes) error { +func validatePGReposForBackup(ctx context.Context, db everestv1alpha1.DatabaseCluster, kubeClient *kubernetes.Kubernetes, newBackup everestv1alpha1.DatabaseClusterBackup) error { if db.Spec.Engine.Type != everestv1alpha1.DatabaseEnginePostgresql { return nil } @@ -925,7 +925,18 @@ func validatePGRepos(ctx context.Context, db everestv1alpha1.DatabaseCluster, ku if err := json.Unmarshal(str, apiDB); err != nil { return err } - if err = validatePGReposForAPIDB(ctx, apiDB, kubeClient.ListDatabaseClusterBackups); err != nil { + + // put the backup that being validated to the list of all backups to calculate if the limitations are respected + getBackupsFunc := func(ctx context.Context, namespace string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + list, err := kubeClient.ListDatabaseClusterBackups(ctx, namespace, options) + if err != nil { + return nil, err + } + list.Items = append(list.Items, newBackup) + return list, nil + } + + if err = validatePGReposForAPIDB(ctx, apiDB, getBackupsFunc); err != nil { return err } return nil @@ -993,13 +1004,15 @@ type dataSourceStruct struct { func validatePGReposForAPIDB(ctx context.Context, dbc *DatabaseCluster, getBackupsFunc func(context.Context, string, metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error)) error { bs := make(map[string]bool) + var reposCount int if dbc.Spec != nil && dbc.Spec.Backup != nil && dbc.Spec.Backup.Schedules != nil { for _, shed := range *dbc.Spec.Backup.Schedules { bs[shed.BackupStorageName] = true } - + // each schedule counts as a separate repo regardless of the BS used in it + reposCount = len(*dbc.Spec.Backup.Schedules) // first check if there are too many schedules. Each schedule is configured in a separate repo. - if len(*dbc.Spec.Backup.Schedules) > pgReposLimit { + if reposCount > pgReposLimit { return errTooManyPGSchedules } } @@ -1017,11 +1030,15 @@ func validatePGReposForAPIDB(ctx context.Context, dbc *DatabaseCluster, getBacku } for _, backup := range backups.Items { - bs[backup.Spec.BackupStorageName] = true + // repos count is increased only if there wasn't such a BS used + if _, ok := bs[backup.Spec.BackupStorageName]; !ok { + bs[backup.Spec.BackupStorageName] = true + reposCount++ + } } - // second check if there are too many schedules used. - if len(bs) > pgReposLimit { + // second check if there are too many repos used. 
+ if reposCount > pgReposLimit { return errTooManyPGStorages } diff --git a/api/validation_test.go b/api/validation_test.go index 28eb8b30..290b51ef 100644 --- a/api/validation_test.go +++ b/api/validation_test.go @@ -759,6 +759,18 @@ func TestValidatePGReposForAPIDB(t *testing.T) { }, err: nil, }, + { + name: "error: 3 schedules in one bs and 1 backup in other", + cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs1"},{"backupStorageName":"bs1"}]}}}`), + getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) { + return &everestv1alpha1.DatabaseClusterBackupList{ + Items: []everestv1alpha1.DatabaseClusterBackup{ + {Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs2"}}, + }, + }, nil + }, + err: errTooManyPGStorages, + }, { name: "ok: 3 schedules", cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"},{"backupStorageName":"bs3"}]}}}`), From c76ce23c07d1ca43ce0c4f45beb7b47285beb3f0 Mon Sep 17 00:00:00 2001 From: Oksana Grishchenko <91597950+oksana-grishchenko@users.noreply.github.com> Date: Fri, 9 Feb 2024 13:24:54 +0200 Subject: [PATCH 07/11] Update api/validation.go Co-authored-by: Diogo Recharte --- api/validation.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/validation.go b/api/validation.go index fb804204..e7e38d71 100644 --- a/api/validation.go +++ b/api/validation.go @@ -83,8 +83,8 @@ var ( errDataSourceNoPath = errors.New("'path' should be specified in .Spec.DataSource.BackupSource") errIncorrectDataSourceStruct = errors.New("incorrect data source struct") errUnsupportedPitrType = errors.New("the given point-in-time recovery type is not supported") - errTooManyPGSchedules = fmt.Errorf("only %d schedules are allowed", pgReposLimit) - errTooManyPGStorages = fmt.Errorf("only %d different storages are allowed to use for a postgres cluster", pgReposLimit) + errTooManyPGSchedules = fmt.Errorf("only %d schedules are allowed in a PostgreSQL cluster", pgReposLimit) + errTooManyPGStorages = fmt.Errorf("only %d different storages are allowed in a PostgreSQL cluster", pgReposLimit) //nolint:gochecknoglobals operatorEngine = map[everestv1alpha1.EngineType]string{ From a474d23cae89dc2b36e354a3944fce2d0e293714 Mon Sep 17 00:00:00 2001 From: Oksana Grishchenko Date: Mon, 12 Feb 2024 10:00:28 +0200 Subject: [PATCH 08/11] dbc namespaces --- api-tests/tests/database-cluster.spec.ts | 4 ++++ api-tests/tests/pg-clusters.spec.ts | 3 +++ api-tests/tests/psmdb-clusters.spec.ts | 3 +++ api-tests/tests/pxc-clusters.spec.ts | 3 +++ 4 files changed, 13 insertions(+) diff --git a/api-tests/tests/database-cluster.spec.ts b/api-tests/tests/database-cluster.spec.ts index bb7c0109..097bb8a3 100644 --- a/api-tests/tests/database-cluster.spec.ts +++ b/api-tests/tests/database-cluster.spec.ts @@ -59,6 +59,7 @@ test('create db cluster with monitoring config', async ({ request }) => { kind: 'DatabaseCluster', metadata: { name: clusterName, + namespace: 'ns', }, spec: { monitoring: { @@ -113,6 +114,7 @@ test('update db cluster with a new monitoring config', async ({ request }) => { kind: 'DatabaseCluster', metadata: { name: clusterName, + namespace: 'ns', }, spec: { monitoring: { @@ -181,6 +183,7 @@ test('update db cluster without monitoring config with a new monitoring config', kind: 'DatabaseCluster', 
metadata: { name: clusterName, + namespace: 'ns', }, spec: { engine: { @@ -246,6 +249,7 @@ test('update db cluster monitoring config with an empty monitoring config', asyn kind: 'DatabaseCluster', metadata: { name: clusterName, + namespace: 'ns', }, spec: { monitoring: { diff --git a/api-tests/tests/pg-clusters.spec.ts b/api-tests/tests/pg-clusters.spec.ts index b0c000b0..c758bab4 100644 --- a/api-tests/tests/pg-clusters.spec.ts +++ b/api-tests/tests/pg-clusters.spec.ts @@ -36,6 +36,7 @@ test('create/edit/delete single node pg cluster', async ({ request, page }) => { kind: 'DatabaseCluster', metadata: { name: clusterName, + namespace: 'ns', }, spec: { engine: { @@ -118,6 +119,7 @@ test('expose pg cluster after creation', async ({ request, page }) => { kind: 'DatabaseCluster', metadata: { name: clusterName, + namespace: 'ns', }, spec: { engine: { @@ -196,6 +198,7 @@ test('expose pg cluster on EKS to the public internet and scale up', async ({ re kind: 'DatabaseCluster', metadata: { name: clusterName, + namespace: 'ns', }, spec: { engine: { diff --git a/api-tests/tests/psmdb-clusters.spec.ts b/api-tests/tests/psmdb-clusters.spec.ts index 1c56eeae..5d73d201 100644 --- a/api-tests/tests/psmdb-clusters.spec.ts +++ b/api-tests/tests/psmdb-clusters.spec.ts @@ -27,6 +27,7 @@ test('create/edit/delete single node psmdb cluster', async ({ request, page }) = kind: 'DatabaseCluster', metadata: { name: clusterName, + namespace: 'ns', }, spec: { engine: { @@ -109,6 +110,7 @@ test('expose psmdb cluster after creation', async ({ request, page }) => { kind: 'DatabaseCluster', metadata: { name: clusterName, + namespace: 'ns', }, spec: { engine: { @@ -193,6 +195,7 @@ test('expose psmdb cluster on EKS to the public internet and scale up', async ({ kind: 'DatabaseCluster', metadata: { name: clusterName, + namespace: 'ns', }, spec: { engine: { diff --git a/api-tests/tests/pxc-clusters.spec.ts b/api-tests/tests/pxc-clusters.spec.ts index d8143779..9ec33290 100644 --- a/api-tests/tests/pxc-clusters.spec.ts +++ b/api-tests/tests/pxc-clusters.spec.ts @@ -40,6 +40,7 @@ test('create/edit/delete pxc single node cluster', async ({ request, page }) => kind: 'DatabaseCluster', metadata: { name: clusterName, + namespace: 'ns', }, spec: { engine: { @@ -130,6 +131,7 @@ test('expose pxc cluster after creation', async ({ request, page }) => { kind: 'DatabaseCluster', metadata: { name: clusterName, + namespace: 'ns', }, spec: { engine: { @@ -209,6 +211,7 @@ test('expose pxc cluster on EKS to the public internet and scale up', async ({ r kind: 'DatabaseCluster', metadata: { name: clusterName, + namespace: 'ns', }, spec: { engine: { From 159abce70aea7824218ab397a2207f9d97b2ec86 Mon Sep 17 00:00:00 2001 From: Oksana Grishchenko Date: Mon, 12 Feb 2024 10:13:41 +0200 Subject: [PATCH 09/11] dbc namespaces --- api-tests/tests/database-cluster.spec.ts | 8 ++++---- api-tests/tests/pg-clusters.spec.ts | 6 +++--- api-tests/tests/psmdb-clusters.spec.ts | 6 +++--- api-tests/tests/pxc-clusters.spec.ts | 6 +++--- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/api-tests/tests/database-cluster.spec.ts b/api-tests/tests/database-cluster.spec.ts index a28e03e3..f698595e 100644 --- a/api-tests/tests/database-cluster.spec.ts +++ b/api-tests/tests/database-cluster.spec.ts @@ -59,7 +59,7 @@ test('create db cluster with monitoring config', async ({ request }) => { kind: 'DatabaseCluster', metadata: { name: clusterName, - namespace: 'ns', + namespace: testsNs, }, spec: { monitoring: { @@ -114,7 +114,7 @@ test('update db cluster with a 
new monitoring config', async ({ request }) => { kind: 'DatabaseCluster', metadata: { name: clusterName, - namespace: 'ns', + namespace: testsNs, }, spec: { monitoring: { @@ -183,7 +183,7 @@ test('update db cluster without monitoring config with a new monitoring config', kind: 'DatabaseCluster', metadata: { name: clusterName, - namespace: 'ns', + namespace: testsNs, }, spec: { engine: { @@ -249,7 +249,7 @@ test('update db cluster monitoring config with an empty monitoring config', asyn kind: 'DatabaseCluster', metadata: { name: clusterName, - namespace: 'ns', + namespace: testsNs, }, spec: { monitoring: { diff --git a/api-tests/tests/pg-clusters.spec.ts b/api-tests/tests/pg-clusters.spec.ts index a1dba795..32ae61f5 100644 --- a/api-tests/tests/pg-clusters.spec.ts +++ b/api-tests/tests/pg-clusters.spec.ts @@ -37,7 +37,7 @@ test('create/edit/delete single node pg cluster', async ({ request, page }) => { kind: 'DatabaseCluster', metadata: { name: clusterName, - namespace: 'ns', + namespace: testsNs, }, spec: { engine: { @@ -120,7 +120,7 @@ test('expose pg cluster after creation', async ({ request, page }) => { kind: 'DatabaseCluster', metadata: { name: clusterName, - namespace: 'ns', + namespace: testsNs, }, spec: { engine: { @@ -199,7 +199,7 @@ test('expose pg cluster on EKS to the public internet and scale up', async ({ re kind: 'DatabaseCluster', metadata: { name: clusterName, - namespace: 'ns', + namespace: testsNs, }, spec: { engine: { diff --git a/api-tests/tests/psmdb-clusters.spec.ts b/api-tests/tests/psmdb-clusters.spec.ts index 51954649..0186a935 100644 --- a/api-tests/tests/psmdb-clusters.spec.ts +++ b/api-tests/tests/psmdb-clusters.spec.ts @@ -28,7 +28,7 @@ test('create/edit/delete single node psmdb cluster', async ({ request, page }) = kind: 'DatabaseCluster', metadata: { name: clusterName, - namespace: 'ns', + namespace: testsNs, }, spec: { engine: { @@ -111,7 +111,7 @@ test('expose psmdb cluster after creation', async ({ request, page }) => { kind: 'DatabaseCluster', metadata: { name: clusterName, - namespace: 'ns', + namespace: testsNs, }, spec: { engine: { @@ -196,7 +196,7 @@ test('expose psmdb cluster on EKS to the public internet and scale up', async ({ kind: 'DatabaseCluster', metadata: { name: clusterName, - namespace: 'ns', + namespace: testsNs, }, spec: { engine: { diff --git a/api-tests/tests/pxc-clusters.spec.ts b/api-tests/tests/pxc-clusters.spec.ts index beef6d27..4ef651a9 100644 --- a/api-tests/tests/pxc-clusters.spec.ts +++ b/api-tests/tests/pxc-clusters.spec.ts @@ -41,7 +41,7 @@ test('create/edit/delete pxc single node cluster', async ({ request, page }) => kind: 'DatabaseCluster', metadata: { name: clusterName, - namespace: 'ns', + namespace: testsNs, }, spec: { engine: { @@ -132,7 +132,7 @@ test('expose pxc cluster after creation', async ({ request, page }) => { kind: 'DatabaseCluster', metadata: { name: clusterName, - namespace: 'ns', + namespace: testsNs, }, spec: { engine: { @@ -212,7 +212,7 @@ test('expose pxc cluster on EKS to the public internet and scale up', async ({ r kind: 'DatabaseCluster', metadata: { name: clusterName, - namespace: 'ns', + namespace: testsNs, }, spec: { engine: { From f09ec21096e53381702287e8239619c0dbea5425 Mon Sep 17 00:00:00 2001 From: Oksana Grishchenko Date: Mon, 12 Feb 2024 10:27:03 +0200 Subject: [PATCH 10/11] dbc namespaces --- api-tests/tests/helpers.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/api-tests/tests/helpers.ts b/api-tests/tests/helpers.ts index 50b63ab0..c5a604a2 100644 --- 
a/api-tests/tests/helpers.ts +++ b/api-tests/tests/helpers.ts @@ -22,7 +22,8 @@ export const createDBCluster = async (request, name) => { apiVersion: 'everest.percona.com/v1alpha1', kind: 'DatabaseCluster', metadata: { - name, + name: name, + namespace: testsNs }, spec: { engine: { From 6a5392c5beb031290ad165c913af68e339d80401 Mon Sep 17 00:00:00 2001 From: Oksana Grishchenko Date: Mon, 12 Feb 2024 10:27:24 +0200 Subject: [PATCH 11/11] dbc namespaces --- api-tests/tests/helpers.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api-tests/tests/helpers.ts b/api-tests/tests/helpers.ts index c5a604a2..d46c97ff 100644 --- a/api-tests/tests/helpers.ts +++ b/api-tests/tests/helpers.ts @@ -23,7 +23,7 @@ export const createDBCluster = async (request, name) => { kind: 'DatabaseCluster', metadata: { name: name, - namespace: testsNs + namespace: testsNs, }, spec: { engine: {
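
The rule this series enforces (in its final form, after patches 06 and 07) is easiest to see in isolation: a PostgreSQL cluster may use at most pgReposLimit (3) backup repositories, each schedule always occupies its own repo, and an on-demand backup consumes an extra repo only when it targets a storage that no schedule or previously counted backup already uses. The sketch below is a minimal, self-contained illustration of that counting logic; the function name countPGRepos and the plain string slices are stand-ins for the real validatePGReposForAPIDB signature and the Everest CRD types, and are not part of the patches. The limit and the error wording are taken from the series itself.

package main

import "fmt"

// Simplified stand-ins for the constant and errors defined in api/validation.go.
const pgReposLimit = 3

var (
	errTooManySchedules = fmt.Errorf("only %d schedules are allowed in a PostgreSQL cluster", pgReposLimit)
	errTooManyStorages  = fmt.Errorf("only %d different storages are allowed in a PostgreSQL cluster", pgReposLimit)
)

// countPGRepos is a hypothetical helper mirroring the counting logic of
// validatePGReposForAPIDB: scheduleStorages holds the backup storage name of
// every schedule, backupStorages holds the storage name of every existing
// (and, per patch 06, pending) backup.
func countPGRepos(scheduleStorages, backupStorages []string) error {
	used := make(map[string]bool)

	// Every schedule occupies its own repo, even if several schedules share a storage.
	reposCount := len(scheduleStorages)
	if reposCount > pgReposLimit {
		return errTooManySchedules
	}
	for _, s := range scheduleStorages {
		used[s] = true
	}

	// A backup consumes an additional repo only for a storage not seen before.
	for _, s := range backupStorages {
		if !used[s] {
			used[s] = true
			reposCount++
		}
	}

	if reposCount > pgReposLimit {
		return errTooManyStorages
	}
	return nil
}

func main() {
	// Three schedules on one storage plus a backup on another storage: 4 repos, rejected.
	fmt.Println(countPGRepos([]string{"bs1", "bs1", "bs1"}, []string{"bs2"}))
	// Two schedules and two backups reusing the same storages: 2 repos, allowed (prints <nil>).
	fmt.Println(countPGRepos([]string{"bs1", "bs2"}, []string{"bs1", "bs2"}))
}

Under this rule the new test case from patch 06 behaves as expected: three schedules on bs1 plus one backup on bs2 add up to four repos and are rejected with the too-many-storages error, while backups that reuse storages already referenced by schedules do not increase the count.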