This repository has been archived by the owner on Mar 4, 2024. It is now read-only.

EVEREST-827-pg-repos-validation #429

Merged: 16 commits, Feb 12, 2024
Changes from 8 commits
102 changes: 98 additions & 4 deletions api/validation.go
@@ -36,6 +36,7 @@ import (
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/percona/percona-everest-backend/cmd/config"
"github.com/percona/percona-everest-backend/pkg/kubernetes"
@@ -46,6 +47,7 @@ const (
psmdbDeploymentName = "percona-server-mongodb-operator"
pgDeploymentName = "percona-postgresql-operator"
dateFormat = "2006-01-02T15:04:05Z"
pgReposLimit = 3
)

var (
@@ -55,7 +57,9 @@ var (

errDBCEmptyMetadata = errors.New("databaseCluster's Metadata should not be empty")
errDBCNameEmpty = errors.New("databaseCluster's metadata.name should not be empty")
errDBCNamespaceEmpty = errors.New("databaseCluster's metadata.namespace should not be empty")
errDBCNameWrongFormat = errors.New("databaseCluster's metadata.name should be a string")
errDBCNamespaceWrongFormat = errors.New("databaseCluster's metadata.namespace should be a string")
errNotEnoughMemory = fmt.Errorf("memory limits should be above %s", minMemQuantity.String())
errInt64NotSupported = errors.New("specifying resources using int64 data type is not supported. Please use string format for that")
errNotEnoughCPU = fmt.Errorf("CPU limits should be above %s", minCPUQuantity.String())
@@ -79,6 +83,9 @@ var (
errDataSourceNoPath = errors.New("'path' should be specified in .Spec.DataSource.BackupSource")
errIncorrectDataSourceStruct = errors.New("incorrect data source struct")
errUnsupportedPitrType = errors.New("the given point-in-time recovery type is not supported")
errTooManyPGSchedules = fmt.Errorf("only %d schedules are allowed", pgReposLimit)
errTooManyPGStorages = fmt.Errorf("only %d different storages are allowed for a postgres cluster", pgReposLimit)

//nolint:gochecknoglobals
operatorEngine = map[everestv1alpha1.EngineType]string{
everestv1alpha1.DatabaseEnginePXC: pxcDeploymentName,
@@ -453,22 +460,42 @@ func validateUpdateMonitoringInstanceType(params UpdateMonitoringInstanceJSONReq
}

func validateCreateDatabaseClusterRequest(dbc DatabaseCluster) error {
name, _, err := nameFromDatabaseCluster(dbc)
if err != nil {
return err
}

return validateRFC1035(name, "metadata.name")
}

func nameFromDatabaseCluster(dbc DatabaseCluster) (string, string, error) {
if dbc.Metadata == nil {
-return errDBCEmptyMetadata
return "", "", errDBCEmptyMetadata
}

md := *dbc.Metadata
name, ok := md["name"]
if !ok {
-return errDBCNameEmpty
return "", "", errDBCNameEmpty
}

strName, ok := name.(string)
if !ok {
-return errDBCNameWrongFormat
return "", "", errDBCNameWrongFormat
}

-return validateRFC1035(strName, "metadata.name")
md = *dbc.Metadata
ns, ok := md["namespace"]
if !ok {
return "", "", errDBCNamespaceEmpty
}

strNS, ok := ns.(string)
if !ok {
return "", "", errDBCNamespaceWrongFormat
}

return strName, strNS, nil
}

func (e *EverestServer) validateDatabaseClusterCR(ctx echo.Context, namespace string, databaseCluster *DatabaseCluster) error { //nolint:cyclop
@@ -514,6 +541,12 @@ func (e *EverestServer) validateDatabaseClusterCR(ctx echo.Context, namespace st
}
}

if databaseCluster.Spec.Engine.Type == DatabaseClusterSpecEngineType(everestv1alpha1.DatabaseEnginePostgresql) {
if err = validatePGReposForAPIDB(ctx.Request().Context(), databaseCluster, e.kubeClient.ListDatabaseClusterBackups); err != nil {
return err
}
}

return validateResourceLimits(databaseCluster)
}

@@ -866,6 +899,10 @@ func validateDatabaseClusterBackup(ctx context.Context, namespace string, backup
return err
}

if err = validatePGRepos(ctx, *db, kubeClient); err != nil {
return err
}

if db.Spec.Engine.Type == everestv1alpha1.DatabaseEnginePSMDB {
if db.Status.ActiveStorage != "" && db.Status.ActiveStorage != b.Spec.BackupStorageName {
return errPSMDBViolateActiveStorage
@@ -874,6 +911,26 @@
return nil
}

func validatePGRepos(ctx context.Context, db everestv1alpha1.DatabaseCluster, kubeClient *kubernetes.Kubernetes) error {
if db.Spec.Engine.Type != everestv1alpha1.DatabaseEnginePostgresql {
return nil
}

// convert between the k8s and api structures
str, err := json.Marshal(db)
if err != nil {
return err
}
apiDB := &DatabaseCluster{}
if err := json.Unmarshal(str, apiDB); err != nil {
return err
}
if err = validatePGReposForAPIDB(ctx, apiDB, kubeClient.ListDatabaseClusterBackups); err != nil {
return err
}
return nil
}

func validateDatabaseClusterRestore(ctx context.Context, namespace string, restore *DatabaseClusterRestore, kubeClient *kubernetes.Kubernetes) error {
if restore == nil {
return errors.New("restore cannot be empty")
@@ -933,3 +990,40 @@ type dataSourceStruct struct {
Type *string `json:"type,omitempty"`
} `json:"pitr,omitempty"`
}

func validatePGReposForAPIDB(ctx context.Context, dbc *DatabaseCluster, getBackupsFunc func(context.Context, string, metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error)) error {
bs := make(map[string]bool)
if dbc.Spec != nil && dbc.Spec.Backup != nil && dbc.Spec.Backup.Schedules != nil {
for _, shed := range *dbc.Spec.Backup.Schedules {
bs[shed.BackupStorageName] = true
}

// first check if there are too many schedules. Each schedule is configured in a separate repo.
if len(*dbc.Spec.Backup.Schedules) > pgReposLimit {
return errTooManyPGSchedules
}
}

dbcName, dbcNamespace, err := nameFromDatabaseCluster(*dbc)
if err != nil {
return err
}

backups, err := getBackupsFunc(ctx, dbcNamespace, metav1.ListOptions{
LabelSelector: fmt.Sprintf("clusterName=%s", dbcName),
})
if err != nil {
return err
}

for _, backup := range backups.Items {
bs[backup.Spec.BackupStorageName] = true
}

// second check if there are too many storages used.
if len(bs) > pgReposLimit {
return errTooManyPGStorages
}

return nil
}
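
For orientation, the rule the new validator enforces can be reduced to a few lines. The sketch below is a simplified model, not code from this PR: plain string slices stand in for the real DatabaseCluster and DatabaseClusterBackupList types, and checkPGRepos plus its error messages are hypothetical names.

package main

import (
	"errors"
	"fmt"
)

// Mirrors the pgReposLimit constant added above: a PostgreSQL cluster has a
// fixed number of pgBackRest repos available for user backup storages.
const pgReposLimit = 3

// checkPGRepos is an illustrative stand-in for validatePGReposForAPIDB.
// scheduleStorages holds the storage names referenced by schedules;
// backupStorages holds those referenced by already existing backups.
func checkPGRepos(scheduleStorages, backupStorages []string) error {
	// Each schedule is configured in a separate repo, so the number of
	// schedules alone must not exceed the limit.
	if len(scheduleStorages) > pgReposLimit {
		return errors.New("too many schedules")
	}
	// Every distinct storage also occupies a repo, so count the unique
	// storages across both schedules and existing backups.
	distinct := make(map[string]struct{})
	for _, s := range scheduleStorages {
		distinct[s] = struct{}{}
	}
	for _, s := range backupStorages {
		distinct[s] = struct{}{}
	}
	if len(distinct) > pgReposLimit {
		return errors.New("too many storages")
	}
	return nil
}

func main() {
	// Two schedules plus backups on two further storages give four distinct
	// storages, one more than the three available repos, so this fails.
	fmt.Println(checkPGRepos([]string{"bs1", "bs2"}, []string{"bs3", "bs4"}))
}

The real validatePGReposForAPIDB gathers the second set by listing DatabaseClusterBackup objects with the clusterName=<name> label selector, which is why nameFromDatabaseCluster was extended to return the namespace as well.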
162 changes: 156 additions & 6 deletions api/validation_test.go
@@ -24,6 +24,7 @@ import (
everestv1alpha1 "github.com/percona/everest-operator/api/v1alpha1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestValidateRFC1035(t *testing.T) {
@@ -111,35 +112,40 @@ func TestValidateCreateDatabaseClusterRequest(t *testing.T) {
{
name: "empty dbCluster name",
value: DatabaseCluster{Metadata: &map[string]interface{}{
"name": "",
"name": "",
"namespace": "ns",
}},
err: ErrNameNotRFC1035Compatible("metadata.name"),
},
{
name: "starts with -",
value: DatabaseCluster{Metadata: &map[string]interface{}{
"name": "-sdfasa",
"name": "-sdfasa",
"namespace": "ns",
}},
err: ErrNameNotRFC1035Compatible("metadata.name"),
},
{
name: "ends with -",
value: DatabaseCluster{Metadata: &map[string]interface{}{
"name": "sdfasa-",
"name": "sdfasa-",
"namespace": "ns",
}},
err: ErrNameNotRFC1035Compatible("metadata.name"),
},
{
name: "contains uppercase",
value: DatabaseCluster{Metadata: &map[string]interface{}{
"name": "AAsdf",
"name": "AAsdf",
"namespace": "ns",
}},
err: ErrNameNotRFC1035Compatible("metadata.name"),
},
{
name: "valid",
value: DatabaseCluster{Metadata: &map[string]interface{}{
"name": "amsdf-sllla",
"name": "amsdf-sllla",
"namespace": "ns",
}},
err: nil,
},
@@ -153,7 +159,8 @@
{
name: "dbCluster name too long",
value: DatabaseCluster{Metadata: &map[string]interface{}{
"name": "a123456789a123456789a12",
"name": "a123456789a123456789a12",
"namespace": "ns",
}},
err: ErrNameTooLong("metadata.name"),
},
@@ -720,3 +727,146 @@ func TestValidateDataSource(t *testing.T) {
})
}
}

func TestValidatePGReposForAPIDB(t *testing.T) {
t.Parallel()
cases := []struct {
name string
cluster []byte
getBackupsFunc func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error)
err error
}{
{
name: "ok: no schedules no backups",
cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"}}`),
getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) {
return &everestv1alpha1.DatabaseClusterBackupList{
Items: []everestv1alpha1.DatabaseClusterBackup{},
}, nil
},
err: nil,
},
{
name: "ok: 2 schedules 2 backups with the same storages",
cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"}]}}}`),
getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) {
return &everestv1alpha1.DatabaseClusterBackupList{
Items: []everestv1alpha1.DatabaseClusterBackup{
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}},
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs2"}},
},
}, nil
},
err: nil,
},
{
name: "ok: 3 schedules",
cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"},{"backupStorageName":"bs3"}]}}}`),
getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) {
return &everestv1alpha1.DatabaseClusterBackupList{
Items: []everestv1alpha1.DatabaseClusterBackup{},
}, nil
},
err: nil,
},
{
name: "ok: 3 backups with different storages",
cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"}}`),
getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) {
return &everestv1alpha1.DatabaseClusterBackupList{
Items: []everestv1alpha1.DatabaseClusterBackup{
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}},
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs2"}},
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs3"}},
},
}, nil
},
err: nil,
},
{
name: "ok: 5 backups with repeating storages",
cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"}}`),
getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) {
return &everestv1alpha1.DatabaseClusterBackupList{
Items: []everestv1alpha1.DatabaseClusterBackup{
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}},
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs2"}},
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs3"}},
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}},
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs2"}},
},
}, nil
},
err: nil,
},
{
name: "error: 4 backups with different storages",
cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"}}`),
getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) {
return &everestv1alpha1.DatabaseClusterBackupList{
Items: []everestv1alpha1.DatabaseClusterBackup{
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}},
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs2"}},
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs3"}},
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs4"}},
},
}, nil
},
err: errTooManyPGStorages,
},
{
name: "ok: 4 backups with same storages",
cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"}}`),
getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) {
return &everestv1alpha1.DatabaseClusterBackupList{
Items: []everestv1alpha1.DatabaseClusterBackup{
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}},
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs2"}},
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs2"}},
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs1"}},
},
}, nil
},
err: nil,
},
{
name: "error: 4 schedules",
cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"},{"backupStorageName":"bs3"},{"backupStorageName":"bs4"}]}}}`),
getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) {
return &everestv1alpha1.DatabaseClusterBackupList{
Items: []everestv1alpha1.DatabaseClusterBackup{},
}, nil
},
err: errTooManyPGSchedules,
},
{
name: "error: 2 schedules 2 backups with different storages",
cluster: []byte(`{"metaData":{"name":"some","namespace":"ns"},"spec":{"backup":{"schedules":[{"backupStorageName":"bs1"},{"backupStorageName":"bs2"}]}}}`),
getBackupsFunc: func(ctx context.Context, ns string, options metav1.ListOptions) (*everestv1alpha1.DatabaseClusterBackupList, error) {
return &everestv1alpha1.DatabaseClusterBackupList{
Items: []everestv1alpha1.DatabaseClusterBackup{
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs3"}},
{Spec: everestv1alpha1.DatabaseClusterBackupSpec{BackupStorageName: "bs4"}},
},
}, nil
},
err: errTooManyPGStorages,
},
}
for _, tc := range cases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
db := &DatabaseCluster{}
err := json.Unmarshal(tc.cluster, db)
require.NoError(t, err)
err = validatePGReposForAPIDB(context.Background(), db, tc.getBackupsFunc)
if tc.err == nil {
require.NoError(t, err)
return
}
require.Error(t, err)
assert.Equal(t, tc.err.Error(), err.Error())
})
}
}
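
To run just this new table-driven test locally, assuming the package layout implied by the file paths above, something like the following should work:

go test ./api -run TestValidatePGReposForAPIDB -v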