This repository has been archived by the owner on Mar 4, 2024. It is now read-only.

EVEREST-832 Fix access to namespaces (#444)
Co-authored-by: Oksana Grishchenko <[email protected]>
Authored by recharte and oksana-grishchenko on Feb 12, 2024
Parent: 3fac690 · Commit: d762746
Showing 3 changed files with 78 additions and 51 deletions.
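In short, the change scopes cross-namespace resource access: validateBackupStoragesAccess now receives the request namespace and only accepts a BackupStorage whose spec.targetNamespaces includes it, and the new validateMonitoringConfigAccess applies the same rule to MonitoringConfig. The snippet below is a minimal, self-contained sketch of that target-namespace check, not Everest code; the package, type, and function names are illustrative stand-ins.

// namespace_check_sketch.go: illustrative stand-in for the pattern this commit
// adds to api/validation.go; resourceSpec and allowedInNamespace are hypothetical.
package main

import "fmt"

// resourceSpec mimics the relevant part of a BackupStorage or MonitoringConfig spec.
type resourceSpec struct {
    TargetNamespaces []string
}

// allowedInNamespace succeeds only if the resource targets the given namespace,
// mirroring the loop over Spec.TargetNamespaces in the diff below.
func allowedInNamespace(spec resourceSpec, namespace string) error {
    for _, ns := range spec.TargetNamespaces {
        if ns == namespace {
            return nil
        }
    }
    return fmt.Errorf("resource is not allowed for namespace %s", namespace)
}

func main() {
    spec := resourceSpec{TargetNamespaces: []string{"everest"}}
    fmt.Println(allowedInNamespace(spec, "everest")) // <nil>
    fmt.Println(allowedInNamespace(spec, "other"))   // resource is not allowed for namespace other
}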
api/database_cluster_backup.go (2 changes: 1 addition & 1 deletion)
@@ -59,7 +59,7 @@ func (e *EverestServer) CreateDatabaseClusterBackup(ctx echo.Context, namespace
         })
     }
     // TODO: Improve returns status code in EVEREST-616
-    if err := validateDatabaseClusterBackup(ctx.Request().Context(), namespace, dbb, e.kubeClient); err != nil {
+    if err := e.validateDatabaseClusterBackup(ctx.Request().Context(), namespace, dbb); err != nil {
         e.l.Error(err)
         return ctx.JSON(http.StatusBadRequest, Error{Message: pointer.ToString(err.Error())})
     }
api/validation.go (61 changes: 40 additions & 21 deletions)
@@ -515,11 +515,8 @@ func (e *EverestServer) validateDatabaseClusterCR(ctx echo.Context, namespace st
         return err
     }
     if databaseCluster.Spec != nil && databaseCluster.Spec.Monitoring != nil && databaseCluster.Spec.Monitoring.MonitoringConfigName != nil {
-        if _, err := e.kubeClient.GetMonitoringConfig(context.Background(), MonitoringNamespace, *databaseCluster.Spec.Monitoring.MonitoringConfigName); err != nil {
-            if k8serrors.IsNotFound(err) {
-                return fmt.Errorf("monitoring config %s does not exist", *databaseCluster.Spec.Monitoring.MonitoringConfigName)
-            }
-            return fmt.Errorf("failed getting monitoring config %s", *databaseCluster.Spec.Monitoring.MonitoringConfigName)
+        if _, err := e.validateMonitoringConfigAccess(context.Background(), namespace, *databaseCluster.Spec.Monitoring.MonitoringConfigName); err != nil {
+            return err
         }
     }
     if databaseCluster.Spec.Proxy != nil && databaseCluster.Spec.Proxy.Type != nil {
@@ -531,7 +528,7 @@ func (e *EverestServer) validateDatabaseClusterCR(ctx echo.Context, namespace st
         return err
     }
 
-    if err = validateBackupStoragesFor(ctx.Request().Context(), databaseCluster, e.validateBackupStoragesAccess); err != nil {
+    if err = validateBackupStoragesFor(ctx.Request().Context(), namespace, databaseCluster, e.validateBackupStoragesAccess); err != nil {
         return err
     }
 
@@ -552,16 +549,17 @@ func (e *EverestServer) validateDatabaseClusterCR(ctx echo.Context, namespace st
 
 func validateBackupStoragesFor( //nolint:cyclop
     ctx context.Context,
+    namespace string,
     databaseCluster *DatabaseCluster,
-    validateBackupStorageAccessFunc func(context.Context, string) (*everestv1alpha1.BackupStorage, error),
+    validateBackupStorageAccessFunc func(context.Context, string, string) (*everestv1alpha1.BackupStorage, error),
 ) error {
     if databaseCluster.Spec.Backup == nil {
         return nil
     }
     storages := make(map[string]bool)
     if databaseCluster.Spec.Backup.Schedules != nil {
         for _, schedule := range *databaseCluster.Spec.Backup.Schedules {
-            _, err := validateBackupStorageAccessFunc(ctx, schedule.BackupStorageName)
+            _, err := validateBackupStorageAccessFunc(ctx, namespace, schedule.BackupStorageName)
             if err != nil {
                 return err
             }
@@ -593,7 +591,7 @@ func validateBackupStoragesFor( //nolint:cyclop
         if databaseCluster.Spec.Backup.Pitr.BackupStorageName == nil || *databaseCluster.Spec.Backup.Pitr.BackupStorageName == "" {
             return errPitrNoBackupStorageName
         }
-        storage, err := validateBackupStorageAccessFunc(ctx, *databaseCluster.Spec.Backup.Pitr.BackupStorageName)
+        storage, err := validateBackupStorageAccessFunc(ctx, namespace, *databaseCluster.Spec.Backup.Pitr.BackupStorageName)
         if err != nil {
             return err
         }
@@ -606,15 +604,38 @@
     return nil
 }
 
-func (e *EverestServer) validateBackupStoragesAccess(ctx context.Context, name string) (*everestv1alpha1.BackupStorage, error) {
+func (e *EverestServer) validateBackupStoragesAccess(ctx context.Context, namespace, name string) (*everestv1alpha1.BackupStorage, error) {
     bs, err := e.kubeClient.GetBackupStorage(ctx, name)
-    if err == nil {
-        return bs, nil
-    }
     if k8serrors.IsNotFound(err) {
         return nil, fmt.Errorf("backup storage %s does not exist", name)
     }
-    return nil, fmt.Errorf("could not validate backup storage %s", name)
+    if err != nil {
+        return nil, fmt.Errorf("could not validate backup storage %s", name)
+    }
+
+    for _, ns := range bs.Spec.TargetNamespaces {
+        if ns == namespace {
+            return bs, nil
+        }
+    }
+    return nil, fmt.Errorf("backup storage %s is not allowed for namespace %s", name, namespace)
 }
 
+func (e *EverestServer) validateMonitoringConfigAccess(ctx context.Context, namespace, name string) (*everestv1alpha1.MonitoringConfig, error) {
+    mc, err := e.kubeClient.GetMonitoringConfig(ctx, MonitoringNamespace, name)
+    if err != nil {
+        if k8serrors.IsNotFound(err) {
+            return nil, fmt.Errorf("monitoring config %s does not exist", name)
+        }
+        return nil, fmt.Errorf("failed getting monitoring config %s", name)
+    }
+
+    for _, ns := range mc.Spec.TargetNamespaces {
+        if ns == namespace {
+            return mc, nil
+        }
+    }
+    return nil, fmt.Errorf("monitoring config %s is not allowed for namespace %s", name, namespace)
+}
+
 func validateVersion(version *string, engine *everestv1alpha1.DatabaseEngine) error {
@@ -863,7 +884,7 @@ func validateDatabaseClusterOnUpdate(dbc *DatabaseCluster, oldDB *everestv1alpha
     return nil
 }
 
-func validateDatabaseClusterBackup(ctx context.Context, namespace string, backup *DatabaseClusterBackup, kubeClient *kubernetes.Kubernetes) error {
+func (e *EverestServer) validateDatabaseClusterBackup(ctx context.Context, namespace string, backup *DatabaseClusterBackup) error {
     if backup == nil {
         return errors.New("backup cannot be empty")
     }
@@ -884,22 +905,20 @@ func validateDatabaseClusterBackup(ctx context.Context, namespace string, backup
     if b.Spec.DBClusterName == "" {
         return errors.New(".spec.dbClusterName cannot be empty")
     }
-    db, err := kubeClient.GetDatabaseCluster(ctx, namespace, b.Spec.DBClusterName)
+    db, err := e.kubeClient.GetDatabaseCluster(ctx, namespace, b.Spec.DBClusterName)
     if err != nil {
         if k8serrors.IsNotFound(err) {
             return fmt.Errorf("database cluster %s does not exist", b.Spec.DBClusterName)
         }
         return err
     }
-    _, err = kubeClient.GetBackupStorage(ctx, b.Spec.BackupStorageName)
+
+    _, err = e.validateBackupStoragesAccess(ctx, namespace, b.Spec.BackupStorageName)
     if err != nil {
-        if k8serrors.IsNotFound(err) {
-            return fmt.Errorf("backup storage %s does not exist", b.Spec.BackupStorageName)
-        }
         return err
     }
 
-    if err = validatePGReposForBackup(ctx, *db, kubeClient, *b); err != nil {
+    if err = validatePGReposForBackup(ctx, *db, e.kubeClient, *b); err != nil {
         return err
     }
 
api/validation_test.go (66 changes: 37 additions & 29 deletions)
@@ -450,46 +450,53 @@ func TestValidateBackupSpec(t *testing.T) {
 func TestValidateBackupStoragesFor(t *testing.T) {
     t.Parallel()
     cases := []struct {
-        name    string
-        cluster []byte
-        storage []byte
-        err     error
+        name      string
+        namespace string
+        cluster   []byte
+        storage   []byte
+        err       error
     }{
         {
-            name:    "errPSMDBMultipleStorages",
-            cluster: []byte(`{"spec": {"backup": {"enabled": true, "schedules": [{"enabled": true, "name": "name", "backupStorageName": "storage1"}, {"enabled": true, "name": "name2", "backupStorageName": "storage2"}]}, "engine": {"type": "psmdb"}}}`),
-            storage: []byte(`{"spec": {"type": "s3"}}`),
-            err:     errPSMDBMultipleStorages,
+            name:      "errPSMDBMultipleStorages",
+            namespace: "everest",
+            cluster:   []byte(`{"spec": {"backup": {"enabled": true, "schedules": [{"enabled": true, "name": "name", "backupStorageName": "storage1"}, {"enabled": true, "name": "name2", "backupStorageName": "storage2"}]}, "engine": {"type": "psmdb"}}}`),
+            storage:   []byte(`{"spec": {"type": "s3"}}`),
+            err:       errPSMDBMultipleStorages,
         },
         {
-            name:    "errPSMDBViolateActiveStorage",
-            cluster: []byte(`{"status": {"activeStorage": "storage1"}, "spec": {"backup": {"enabled": true, "schedules": [{"enabled": true, "name": "otherName", "backupStorageName": "storage2"}]}, "engine": {"type": "psmdb"}}}`),
-            storage: []byte(`{"spec": {"type": "s3"}}`),
-            err:     errPSMDBViolateActiveStorage,
+            name:      "errPSMDBViolateActiveStorage",
+            namespace: "everest",
+            cluster:   []byte(`{"status": {"activeStorage": "storage1"}, "spec": {"backup": {"enabled": true, "schedules": [{"enabled": true, "name": "otherName", "backupStorageName": "storage2"}]}, "engine": {"type": "psmdb"}}}`),
+            storage:   []byte(`{"spec": {"type": "s3"}}`),
+            err:       errPSMDBViolateActiveStorage,
         },
         {
-            name:    "no errPSMDBViolateActiveStorage",
-            cluster: []byte(`{"status": {"activeStorage": ""}, "spec": {"backup": {"enabled": true, "schedules": [{"enabled": true, "name": "otherName", "backupStorageName": "storage2"}]}, "engine": {"type": "psmdb"}}}`),
-            storage: []byte(`{"spec": {"type": "s3"}}`),
-            err:     nil,
+            name:      "no errPSMDBViolateActiveStorage",
+            namespace: "everest",
+            cluster:   []byte(`{"status": {"activeStorage": ""}, "spec": {"backup": {"enabled": true, "schedules": [{"enabled": true, "name": "otherName", "backupStorageName": "storage2"}]}, "engine": {"type": "psmdb"}}}`),
+            storage:   []byte(`{"spec": {"type": "s3"}}`),
+            err:       nil,
         },
         {
-            name:    "errPXCPitrS3Only",
-            cluster: []byte(`{"status":{},"spec": {"backup": {"enabled": true, "pitr": {"enabled": true, "backupStorageName": "storage"}, "schedules": [{"enabled": true, "name": "otherName", "backupStorageName": "storage"}]}, "engine": {"type": "pxc"}}}`),
-            storage: []byte(`{"spec": {"type": "azure"}}`),
-            err:     errPXCPitrS3Only,
+            name:      "errPXCPitrS3Only",
+            namespace: "everest",
+            cluster:   []byte(`{"status":{},"spec": {"backup": {"enabled": true, "pitr": {"enabled": true, "backupStorageName": "storage"}, "schedules": [{"enabled": true, "name": "otherName", "backupStorageName": "storage"}]}, "engine": {"type": "pxc"}}}`),
+            storage:   []byte(`{"spec": {"type": "azure"}}`),
+            err:       errPXCPitrS3Only,
         },
         {
-            name:    "errPitrNoBackupStorageName",
-            cluster: []byte(`{"status":{},"spec": {"backup": {"enabled": true, "pitr": {"enabled": true}, "schedules": [{"enabled": true, "name": "otherName", "backupStorageName": "storage"}]}, "engine": {"type": "pxc"}}}`),
-            storage: []byte(`{"spec": {"type": "s3"}}`),
-            err:     errPitrNoBackupStorageName,
+            name:      "errPitrNoBackupStorageName",
+            namespace: "everest",
+            cluster:   []byte(`{"status":{},"spec": {"backup": {"enabled": true, "pitr": {"enabled": true}, "schedules": [{"enabled": true, "name": "otherName", "backupStorageName": "storage"}]}, "engine": {"type": "pxc"}}}`),
+            storage:   []byte(`{"spec": {"type": "s3"}}`),
+            err:       errPitrNoBackupStorageName,
         },
         {
-            name:    "valid",
-            cluster: []byte(`{"status":{},"spec": {"backup": {"enabled": true, "pitr": {"enabled": true, "backupStorageName": "storage2"}, "schedules": [{"enabled": true, "name": "otherName", "backupStorageName": "storage"}]}, "engine": {"type": "pxc"}}}`),
-            storage: []byte(`{"spec": {"type": "s3"}}`),
-            err:     nil,
+            name:      "valid",
+            namespace: "everest",
+            cluster:   []byte(`{"status":{},"spec": {"backup": {"enabled": true, "pitr": {"enabled": true, "backupStorageName": "storage2"}, "schedules": [{"enabled": true, "name": "otherName", "backupStorageName": "storage"}]}, "engine": {"type": "pxc"}}}`),
+            storage:   []byte(`{"spec": {"type": "s3"}}`),
+            err:       nil,
         },
     }
     for _, tc := range cases {
@@ -506,8 +513,9 @@ func TestValidateBackupStoragesFor(t *testing.T) {
 
             err = validateBackupStoragesFor(
                 context.Background(),
+                tc.namespace,
                 cluster,
-                func(ctx context.Context, s string) (*everestv1alpha1.BackupStorage, error) { return storage, nil },
+                func(context.Context, string, string) (*everestv1alpha1.BackupStorage, error) { return storage, nil },
             )
             if tc.err == nil {
                 require.NoError(t, err)
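Note that the stub passed as validateBackupStorageAccessFunc in the test above always returns the storage, so these cases exercise the storage-selection rules rather than the new namespace gate. A standalone, hypothetical table-driven test of the namespace rule itself could look like the sketch below; it only mirrors the pattern and does not use the Everest types.

// namespace_check_sketch_test.go: hypothetical test, not part of the repository.
package main

import "testing"

// allowed reports whether a namespace is listed in a resource's target namespaces.
func allowed(targetNamespaces []string, namespace string) bool {
    for _, ns := range targetNamespaces {
        if ns == namespace {
            return true
        }
    }
    return false
}

func TestNamespaceScoping(t *testing.T) {
    t.Parallel()
    cases := []struct {
        name      string
        targets   []string
        namespace string
        want      bool
    }{
        {name: "allowed", targets: []string{"everest"}, namespace: "everest", want: true},
        {name: "not allowed", targets: []string{"everest"}, namespace: "other", want: false},
        {name: "empty target list", targets: nil, namespace: "everest", want: false},
    }
    for _, tc := range cases {
        tc := tc
        t.Run(tc.name, func(t *testing.T) {
            t.Parallel()
            if got := allowed(tc.targets, tc.namespace); got != tc.want {
                t.Errorf("allowed(%v, %q) = %v, want %v", tc.targets, tc.namespace, got, tc.want)
            }
        })
    }
}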
