This repository has been archived by the owner on Mar 4, 2024. It is now read-only.

EVEREST-520 Update PITR API (#386)
Co-authored-by: Diogo Recharte <[email protected]>
oksana-grishchenko and recharte authored Jan 18, 2024
1 parent 68b701e commit 74fd87c
Showing 7 changed files with 436 additions and 295 deletions.
267 changes: 135 additions & 132 deletions api/everest-server.gen.go

Large diffs are not rendered by default.

58 changes: 48 additions & 10 deletions api/validation.go
@@ -68,7 +68,9 @@ var (
errPitrNoBackupStorageName = errors.New("'backupStorageName' field cannot be empty when pitr is enabled")
errNoResourceDefined = errors.New("please specify resource limits for the cluster")
errPitrUploadInterval = errors.New("'uploadIntervalSec' should be more than 0")
errPitrS3Only = errors.New("point-in-time recovery only supported for s3 compatible storages")
errPXCPitrS3Only = errors.New("point-in-time recovery only supported for s3 compatible storages")
errPSMDBMultipleStorages = errors.New("can't use more than one backup storage for PSMDB clusters")
errPSMDBViolateActiveStorage = errors.New("can't change the active storage for PSMDB clusters")
//nolint:gochecknoglobals
operatorEngine = map[everestv1alpha1.EngineType]string{
everestv1alpha1.DatabaseEnginePXC: pxcDeploymentName,
@@ -449,34 +451,63 @@ func (e *EverestServer) validateDatabaseClusterCR(ctx echo.Context, databaseClus
return err
}

if err = e.validateBackupStoragesFor(ctx.Request().Context(), databaseCluster); err != nil {
if err = validateBackupStoragesFor(ctx.Request().Context(), databaseCluster, e.validateBackupStoragesAccess); err != nil {
return err
}

return validateResourceLimits(databaseCluster)
}

func (e *EverestServer) validateBackupStoragesFor(ctx context.Context, databaseCluster *DatabaseCluster) error {
func validateBackupStoragesFor( //nolint:cyclop
ctx context.Context,
databaseCluster *DatabaseCluster,
validateBackupStorageAccessFunc func(context.Context, string) (*everestv1alpha1.BackupStorage, error),
) error {
if databaseCluster.Spec.Backup == nil {
return nil
}
storages := make(map[string]bool)
if databaseCluster.Spec.Backup.Schedules != nil {
for _, schedule := range *databaseCluster.Spec.Backup.Schedules {
_, err := e.validateBackupStoragesAccess(ctx, schedule.BackupStorageName)
_, err := validateBackupStorageAccessFunc(ctx, schedule.BackupStorageName)
if err != nil {
return err
}
storages[schedule.BackupStorageName] = true
}
}

if databaseCluster.Spec.Backup.Pitr != nil && databaseCluster.Spec.Backup.Pitr.Enabled {
storage, err := e.validateBackupStoragesAccess(ctx, databaseCluster.Spec.Backup.Pitr.BackupStorageName)
if databaseCluster.Spec.Engine.Type == DatabaseClusterSpecEngineType(everestv1alpha1.DatabaseEnginePSMDB) {
// attempt to configure more than one storage for psmdb
if len(storages) > 1 {
return errPSMDBMultipleStorages
}
// attempt to use a storage other than the active one
if databaseCluster.Status != nil {
activeStorage := databaseCluster.Status.ActiveStorage
for name := range storages {
if activeStorage != nil && name != *activeStorage {
return errPSMDBViolateActiveStorage
}
}
}
}

if databaseCluster.Spec.Backup.Pitr == nil || !databaseCluster.Spec.Backup.Pitr.Enabled {
return nil
}

if databaseCluster.Spec.Engine.Type == DatabaseClusterSpecEngineType(everestv1alpha1.DatabaseEnginePXC) {
if databaseCluster.Spec.Backup.Pitr.BackupStorageName == nil || *databaseCluster.Spec.Backup.Pitr.BackupStorageName == "" {
return errPitrNoBackupStorageName
}
storage, err := validateBackupStorageAccessFunc(ctx, *databaseCluster.Spec.Backup.Pitr.BackupStorageName)
if err != nil {
return err
}
// pxc only supports s3 for pitr
if databaseCluster.Spec.Engine.Type == DatabaseClusterSpecEngineType(everestv1alpha1.DatabaseEnginePXC) && storage.Spec.Type != everestv1alpha1.BackupStorageTypeS3 {
return errPitrS3Only
if storage.Spec.Type != everestv1alpha1.BackupStorageTypeS3 {
return errPXCPitrS3Only
}
}

@@ -568,7 +599,8 @@ func validatePitrSpec(cluster *DatabaseCluster) error {
return nil
}

if cluster.Spec.Backup.Pitr.BackupStorageName == "" {
if cluster.Spec.Engine.Type == DatabaseClusterSpecEngineType(everestv1alpha1.DatabaseEnginePXC) &&
(cluster.Spec.Backup.Pitr.BackupStorageName == nil || *cluster.Spec.Backup.Pitr.BackupStorageName == "") {
return errPitrNoBackupStorageName
}

@@ -702,7 +734,7 @@ func validateDatabaseClusterBackup(ctx context.Context, backup *DatabaseClusterB
if b.Spec.DBClusterName == "" {
return errors.New(".spec.dbClusterName cannot be empty")
}
_, err = kubeClient.GetDatabaseCluster(ctx, b.Spec.DBClusterName)
db, err := kubeClient.GetDatabaseCluster(ctx, b.Spec.DBClusterName)
if err != nil {
if k8serrors.IsNotFound(err) {
return fmt.Errorf("database cluster %s does not exist", b.Spec.DBClusterName)
@@ -716,6 +748,12 @@ func validateDatabaseClusterBackup(ctx context.Context, backup *DatabaseClusterB
}
return err
}

if db.Spec.Engine.Type == everestv1alpha1.DatabaseEnginePSMDB {
if db.Status.ActiveStorage != b.Spec.BackupStorageName {
return errPSMDBViolateActiveStorage
}
}
return nil
}

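Illustration (not part of the commit): because validateBackupStoragesFor is now a plain function that receives the storage-access callback as a parameter, it can be exercised without an EverestServer or a Kubernetes client by injecting a stub. The minimal sketch below follows the JSON-unmarshal pattern used by the new tests further down; the schedule names are arbitrary, and it assumes the package and imports of api/validation_test.go.

// Sketch: exercising the refactored validator with a stubbed storage lookup.
cluster := &DatabaseCluster{}
clusterJSON := []byte(`{"spec": {"backup": {"enabled": true, "schedules": [
	{"enabled": true, "name": "daily", "backupStorageName": "storage1"},
	{"enabled": true, "name": "weekly", "backupStorageName": "storage2"}
]}, "engine": {"type": "psmdb"}}}`)
if err := json.Unmarshal(clusterJSON, cluster); err != nil {
	panic(err)
}

storage := &everestv1alpha1.BackupStorage{}
if err := json.Unmarshal([]byte(`{"spec": {"type": "s3"}}`), storage); err != nil {
	panic(err)
}

// The stub stands in for e.validateBackupStoragesAccess, so no cluster access is needed.
stub := func(_ context.Context, _ string) (*everestv1alpha1.BackupStorage, error) {
	return storage, nil
}

// Two schedules pointing at different storages on a PSMDB cluster are rejected.
if err := validateBackupStoragesFor(context.Background(), cluster, stub); !errors.Is(err, errPSMDBMultipleStorages) {
	panic("expected errPSMDBMultipleStorages")
}
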
85 changes: 81 additions & 4 deletions api/validation_test.go
@@ -15,6 +15,7 @@
package api

import (
"context"
"encoding/json"
"errors"
"testing"
@@ -384,7 +385,7 @@ func TestValidateVersion(t *testing.T) {
}
}

func TestValidateBackupSpec(t *testing.T) { //nolint:dupl
func TestValidateBackupSpec(t *testing.T) {
t.Parallel()
cases := []struct {
name string
@@ -439,7 +440,73 @@ func TestValidateBackupSpec(t *testing.T) { //nolint:dupl
}
}

func TestValidatePitrSpec(t *testing.T) { //nolint:dupl
func TestValidateBackupStoragesFor(t *testing.T) {
t.Parallel()
cases := []struct {
name string
cluster []byte
storage []byte
err error
}{
{
name: "errPSMDBMultipleStorages",
cluster: []byte(`{"spec": {"backup": {"enabled": true, "schedules": [{"enabled": true, "name": "name", "backupStorageName": "storage1"}, {"enabled": true, "name": "name2", "backupStorageName": "storage2"}]}, "engine": {"type": "psmdb"}}}`),
storage: []byte(`{"spec": {"type": "s3"}}`),
err: errPSMDBMultipleStorages,
},
{
name: "errPSMDBViolateActiveStorage",
cluster: []byte(`{"status": {"activeStorage": "storage1"}, "spec": {"backup": {"enabled": true, "schedules": [{"enabled": true, "name": "otherName", "backupStorageName": "storage2"}]}, "engine": {"type": "psmdb"}}}`),
storage: []byte(`{"spec": {"type": "s3"}}`),
err: errPSMDBViolateActiveStorage,
},
{
name: "errPXCPitrS3Only",
cluster: []byte(`{"status":{},"spec": {"backup": {"enabled": true, "pitr": {"enabled": true, "backupStorageName": "storage"}, "schedules": [{"enabled": true, "name": "otherName", "backupStorageName": "storage"}]}, "engine": {"type": "pxc"}}}`),
storage: []byte(`{"spec": {"type": "azure"}}`),
err: errPXCPitrS3Only,
},
{
name: "errPitrNoBackupStorageName",
cluster: []byte(`{"status":{},"spec": {"backup": {"enabled": true, "pitr": {"enabled": true}, "schedules": [{"enabled": true, "name": "otherName", "backupStorageName": "storage"}]}, "engine": {"type": "pxc"}}}`),
storage: []byte(`{"spec": {"type": "s3"}}`),
err: errPitrNoBackupStorageName,
},
{
name: "valid",
cluster: []byte(`{"status":{},"spec": {"backup": {"enabled": true, "pitr": {"enabled": true, "backupStorageName": "storage2"}, "schedules": [{"enabled": true, "name": "otherName", "backupStorageName": "storage"}]}, "engine": {"type": "pxc"}}}`),
storage: []byte(`{"spec": {"type": "s3"}}`),
err: nil,
},
}
for _, tc := range cases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
cluster := &DatabaseCluster{}
err := json.Unmarshal(tc.cluster, cluster)
require.NoError(t, err)

storage := &everestv1alpha1.BackupStorage{}
err = json.Unmarshal(tc.storage, storage)
require.NoError(t, err)

err = validateBackupStoragesFor(
context.Background(),
cluster,
func(ctx context.Context, s string) (*everestv1alpha1.BackupStorage, error) { return storage, nil },
)
if tc.err == nil {
require.NoError(t, err)
return
}
require.Error(t, err)
assert.Equal(t, err.Error(), tc.err.Error())
})
}
}

func TestValidatePitrSpec(t *testing.T) {
t.Parallel()

cases := []struct {
@@ -463,10 +530,20 @@ func TestValidatePitrSpec(t *testing.T) { //nolint:dupl
err: nil,
},
{
name: "no backup storage",
cluster: []byte(`{"spec": {"backup": {"enabled": true, "pitr": {"enabled": true}}}}`),
name: "no backup storage pxc",
cluster: []byte(`{"spec": {"backup": {"enabled": true, "pitr": {"enabled": true}}, "engine": {"type": "pxc"}}}`),
err: errPitrNoBackupStorageName,
},
{
name: "no backup storage psmdb",
cluster: []byte(`{"spec": {"backup": {"enabled": true, "pitr": {"enabled": true}}, "engine": {"type": "psmdb"}}}`),
err: nil,
},
{
name: "no backup storage pg",
cluster: []byte(`{"spec": {"backup": {"enabled": true, "pitr": {"enabled": true}}, "engine": {"type": "postgresql"}}}`),
err: nil,
},
{
name: "zero upload interval",
cluster: []byte(`{"spec": {"backup": {"enabled": true, "pitr": {"enabled": true, "backupStorageName": "name", "uploadIntervalSec": 0}}}}`),
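
Illustration (not part of the commit): the behavioural change to validatePitrSpec in one place. Under the updated API, pitr.backupStorageName is a pointer and is mandatory only for PXC; PSMDB and PostgreSQL clusters may omit it. A minimal sketch mirroring the test payloads above, again assuming the package and imports of api/validation_test.go.

// PXC still requires an explicit PITR backup storage name.
pxc := &DatabaseCluster{}
if err := json.Unmarshal([]byte(`{"spec": {"backup": {"enabled": true, "pitr": {"enabled": true}}, "engine": {"type": "pxc"}}}`), pxc); err != nil {
	panic(err)
}
if err := validatePitrSpec(pxc); !errors.Is(err, errPitrNoBackupStorageName) {
	panic("expected errPitrNoBackupStorageName")
}

// For PSMDB (and PostgreSQL) the field may be omitted; validation passes.
psmdb := &DatabaseCluster{}
if err := json.Unmarshal([]byte(`{"spec": {"backup": {"enabled": true, "pitr": {"enabled": true}}, "engine": {"type": "psmdb"}}}`), psmdb); err != nil {
	panic(err)
}
if err := validatePitrSpec(psmdb); err != nil {
	panic(err)
}
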
(Diffs for the remaining 4 changed files are not shown.)
