diff --git a/.golangci.yml b/.golangci.yml index 7b3c9ba6..00da60a2 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -9,8 +9,6 @@ linters-settings: - $all - "!$test" deny: - - pkg: errors - desc: use "github.com/pkg/errors" instead - pkg: github.com/gogo/protobuf/proto desc: use "github.com/golang/protobuf/proto" instead @@ -33,6 +31,7 @@ linters-settings: linters: enable-all: true disable: + - goerr113 # not useful after migration to the standard errors - exhaustruct # not useful - exhaustivestruct # annoying and duplicates exhaustruct - godox # fails to be nolint-ed when necessary diff --git a/api/database_cluster.go b/api/database_cluster.go index 4798e76d..89eb04a9 100644 --- a/api/database_cluster.go +++ b/api/database_cluster.go @@ -38,7 +38,7 @@ func (e *EverestServer) CreateDatabaseCluster(ctx echo.Context, kubernetesID str }) } - if err := validateCreateDatabaseClusterRequest(*dbc); err != nil { + if err := e.validateDatabaseClusterCR(ctx, kubernetesID, dbc); err != nil { return ctx.JSON(http.StatusBadRequest, Error{Message: pointer.ToString(err.Error())}) } @@ -125,7 +125,7 @@ func (e *EverestServer) GetDatabaseCluster(ctx echo.Context, kubernetesID string } // UpdateDatabaseCluster replaces the specified database cluster on the specified kubernetes cluster. 
-func (e *EverestServer) UpdateDatabaseCluster(ctx echo.Context, kubernetesID string, name string) error { //nolint:funlen +func (e *EverestServer) UpdateDatabaseCluster(ctx echo.Context, kubernetesID string, name string) error { //nolint:funlen,cyclop dbc := &DatabaseCluster{} if err := e.getBodyFromContext(ctx, dbc); err != nil { e.l.Error(err) @@ -134,6 +134,10 @@ func (e *EverestServer) UpdateDatabaseCluster(ctx echo.Context, kubernetesID str }) } + if err := e.validateDatabaseClusterCR(ctx, kubernetesID, dbc); err != nil { + return ctx.JSON(http.StatusBadRequest, Error{Message: pointer.ToString(err.Error())}) + } + _, kubeClient, code, err := e.initKubeClient(ctx.Request().Context(), kubernetesID) if err != nil { return ctx.JSON(code, Error{Message: pointer.ToString(err.Error())}) diff --git a/api/validation.go b/api/validation.go index 3c1f07a5..d381ec83 100644 --- a/api/validation.go +++ b/api/validation.go @@ -17,6 +17,7 @@ package api import ( + "errors" "fmt" "net/http" "net/url" @@ -28,45 +29,73 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/labstack/echo/v4" - "github.com/pkg/errors" + everestv1alpha1 "github.com/percona/everest-operator/api/v1alpha1" "go.uber.org/zap" k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" "github.com/percona/percona-everest-backend/cmd/config" "github.com/percona/percona-everest-backend/model" ) +const ( + pxcDeploymentName = "percona-xtradb-cluster-operator" + psmdbDeploymentName = "percona-server-mongodb-operator" + pgDeploymentName = "percona-postgresql-operator" +) + var ( - errDBCEmptyMetadata = errors.New("DatabaseCluster's Metadata should not be empty") - errDBCNameEmpty = errors.New("DatabaseCluster's metadata.name should not be empty") - errDBCNameWrongFormat = errors.New("DatabaseCluster's metadata.name should be a string") + minStorageQuantity = resource.MustParse("1G") //nolint:gochecknoglobals + minCPUQuantity = 
resource.MustParse("600m") //nolint:gochecknoglobals + minMemQuantity = resource.MustParse("512M") //nolint:gochecknoglobals + + errDBCEmptyMetadata = errors.New("DatabaseCluster's Metadata should not be empty") + errDBCNameEmpty = errors.New("DatabaseCluster's metadata.name should not be empty") + errDBCNameWrongFormat = errors.New("DatabaseCluster's metadata.name should be a string") + errNotEnoughMemory = fmt.Errorf("Memory limits should be above %s", minMemQuantity.String()) //nolint:stylecheck + errInt64NotSupported = errors.New("Specifying resources using int64 data type is not supported. Please use string format for that") //nolint:stylecheck + errNotEnoughCPU = fmt.Errorf("CPU limits should be above %s", minCPUQuantity.String()) //nolint:stylecheck + errNotEnoughDiskSize = fmt.Errorf("Storage size should be above %s", minStorageQuantity.String()) //nolint:stylecheck + errUnsupportedPXCProxy = errors.New("You can use either HAProxy or Proxy SQL for PXC clusters") //nolint:stylecheck + errUnsupportedPGProxy = errors.New("You can use only PGBouncer as a proxy type for Postgres clusters") //nolint:stylecheck + errUnsupportedPSMDBProxy = errors.New("You can use only Mongos as a proxy type for MongoDB clusters") //nolint:stylecheck + errNoSchedules = errors.New("Please specify at least one backup schedule") //nolint:stylecheck + errNoNameInSchedule = errors.New("'name' field for the backup schedules cannot be empty") + errNoBackupStorageName = errors.New("'backupStorageName' field cannot be empty when schedule is enabled") + errNoResourceDefined = errors.New("Please specify resource limits for the cluster") //nolint:stylecheck + //nolint:gochecknoglobals + operatorEngine = map[everestv1alpha1.EngineType]string{ + everestv1alpha1.DatabaseEnginePXC: pxcDeploymentName, + everestv1alpha1.DatabaseEnginePSMDB: psmdbDeploymentName, + everestv1alpha1.DatabaseEnginePostgresql: pgDeploymentName, + } ) // ErrNameNotRFC1035Compatible when the given fieldName doesn't contain 
RFC 1035 compatible string. func ErrNameNotRFC1035Compatible(fieldName string) error { - return errors.Errorf(`'%s' is not RFC 1035 compatible. The name should contain only lowercase alphanumeric characters or '-', start with an alphabetic character, end with an alphanumeric character`, + return fmt.Errorf(`'%s' is not RFC 1035 compatible. The name should contain only lowercase alphanumeric characters or '-', start with an alphabetic character, end with an alphanumeric character`, fieldName, ) } // ErrNameTooLong when the given fieldName is longer than expected. func ErrNameTooLong(fieldName string) error { - return errors.Errorf("'%s' can be at most 22 characters long", fieldName) + return fmt.Errorf("'%s' can be at most 22 characters long", fieldName) } // ErrCreateStorageNotSupported appears when trying to create a storage of a type that is not supported. func ErrCreateStorageNotSupported(storageType string) error { - return errors.Errorf("Creating storage is not implemented for '%s'", storageType) + return fmt.Errorf("Creating storage is not implemented for '%s'", storageType) //nolint:stylecheck } // ErrUpdateStorageNotSupported appears when trying to update a storage of a type that is not supported. func ErrUpdateStorageNotSupported(storageType string) error { - return errors.Errorf("Updating storage is not implemented for '%s'", storageType) + return fmt.Errorf("Updating storage is not implemented for '%s'", storageType) //nolint:stylecheck } // ErrInvalidURL when the given fieldName contains invalid URL. 
func ErrInvalidURL(fieldName string) error { - return errors.Errorf("'%s' is an invalid URL", fieldName) + return fmt.Errorf("'%s' is an invalid URL", fieldName) } // validates names to be RFC-1035 compatible https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#rfc-1035-label-names @@ -208,7 +237,7 @@ func validateCreateBackupStorageRequest(ctx echo.Context, l *zap.SugaredLogger) // check data access if err := validateStorageAccessByCreate(params); err != nil { l.Error(err) - return nil, errors.New("Could not connect to the backup storage, please check the new credentials are correct") + return nil, errors.New("Could not connect to the backup storage, please check the new credentials are correct") //nolint:stylecheck } return ¶ms, nil @@ -231,14 +260,14 @@ func validateCreateMonitoringInstanceRequest(ctx echo.Context) (*CreateMonitorin switch params.Type { case MonitoringInstanceCreateParamsTypePmm: if params.Pmm == nil { - return nil, errors.Errorf("pmm key is required for type %s", params.Type) + return nil, fmt.Errorf("pmm key is required for type %s", params.Type) } if params.Pmm.ApiKey == "" && params.Pmm.User == "" && params.Pmm.Password == "" { return nil, errors.New("one of pmm.apiKey, pmm.user or pmm.password fields is required") } default: - return nil, errors.Errorf("monitoring type %s is not supported", params.Type) + return nil, fmt.Errorf("monitoring type %s is not supported", params.Type) } return ¶ms, nil @@ -274,7 +303,7 @@ func validateUpdateMonitoringInstanceType(params UpdateMonitoringInstanceJSONReq return nil case MonitoringInstanceUpdateParamsTypePmm: if params.Pmm == nil { - return errors.Errorf("pmm key is required for type %s", params.Type) + return fmt.Errorf("pmm key is required for type %s", params.Type) } default: return errors.New("this monitoring type is not supported") @@ -319,3 +348,180 @@ func (e *EverestServer) validateDBClusterAccess(ctx echo.Context, kubernetesID, return nil } + +func (e *EverestServer) 
validateDatabaseClusterCR(ctx echo.Context, kubernetesID string, databaseCluster *DatabaseCluster) error { + if err := validateCreateDatabaseClusterRequest(*databaseCluster); err != nil { + return err + } + + _, kubeClient, _, err := e.initKubeClient(ctx.Request().Context(), kubernetesID) + if err != nil { + return err + } + engineName, ok := operatorEngine[everestv1alpha1.EngineType(databaseCluster.Spec.Engine.Type)] + if !ok { + return errors.New("Unsupported database engine") //nolint:stylecheck + } + engine, err := kubeClient.GetDatabaseEngine(ctx.Request().Context(), engineName) + if err != nil { + return err + } + if err := validateVersion(databaseCluster.Spec.Engine.Version, engine); err != nil { + return err + } + if databaseCluster.Spec.Proxy != nil && databaseCluster.Spec.Proxy.Type != nil { + if err := validateProxy(databaseCluster.Spec.Engine.Type, string(*databaseCluster.Spec.Proxy.Type)); err != nil { + return err + } + } + if err := validateBackupSpec(databaseCluster); err != nil { + return err + } + return validateResourceLimits(databaseCluster) +} + +func validateVersion(version *string, engine *everestv1alpha1.DatabaseEngine) error { + if version != nil { + if len(engine.Spec.AllowedVersions) != 0 { + if !containsVersion(*version, engine.Spec.AllowedVersions) { + return fmt.Errorf("Using %s version for %s is not allowed", *version, engine.Spec.Type) //nolint:stylecheck + } + return nil + } + if _, ok := engine.Status.AvailableVersions.Engine[*version]; !ok { + return fmt.Errorf("%s is not in available versions list", *version) + } + } + return nil +} + +func containsVersion(version string, versions []string) bool { + if version == "" { + return true + } + for _, allowedVersion := range versions { + if version == allowedVersion { + return true + } + } + return false +} + +func validateProxy(engineType, proxyType string) error { + if engineType == string(everestv1alpha1.DatabaseEnginePXC) { + if proxyType != string(everestv1alpha1.ProxyTypeProxySQL) 
&& proxyType != string(everestv1alpha1.ProxyTypeHAProxy) { + return errUnsupportedPXCProxy + } + } + + if engineType == string(everestv1alpha1.DatabaseEnginePostgresql) && proxyType != string(everestv1alpha1.ProxyTypePGBouncer) { + return errUnsupportedPGProxy + } + if engineType == string(everestv1alpha1.DatabaseEnginePSMDB) && proxyType != string(everestv1alpha1.ProxyTypeMongos) { + return errUnsupportedPSMDBProxy + } + return nil +} + +func validateBackupSpec(cluster *DatabaseCluster) error { + if cluster.Spec.Backup == nil { + return nil + } + if !cluster.Spec.Backup.Enabled { + return nil + } + if cluster.Spec.Backup.Schedules == nil { + return errNoSchedules + } + + for _, schedule := range *cluster.Spec.Backup.Schedules { + if schedule.Name == "" { + return errNoNameInSchedule + } + if schedule.Enabled && schedule.BackupStorageName == "" { + return errNoBackupStorageName + } + } + return nil +} + +func validateResourceLimits(cluster *DatabaseCluster) error { + if err := ensureNonEmptyResources(cluster); err != nil { + return err + } + if err := validateCPU(cluster); err != nil { + return err + } + if err := validateMemory(cluster); err != nil { + return err + } + return validateStorageSize(cluster) +} + +func ensureNonEmptyResources(cluster *DatabaseCluster) error { + if cluster.Spec.Engine.Resources == nil { + return errNoResourceDefined + } + if cluster.Spec.Engine.Resources.Cpu == nil { + return errNotEnoughCPU + } + if cluster.Spec.Engine.Resources.Memory == nil { + return errNotEnoughMemory + } + return nil +} + +func validateCPU(cluster *DatabaseCluster) error { + cpuStr, err := cluster.Spec.Engine.Resources.Cpu.AsDatabaseClusterSpecEngineResourcesCpu1() + if err == nil { + cpu, err := resource.ParseQuantity(cpuStr) + if err != nil { + return err + } + if cpu.Cmp(minCPUQuantity) == -1 { + return errNotEnoughCPU + } + } + _, err = cluster.Spec.Engine.Resources.Cpu.AsDatabaseClusterSpecEngineResourcesCpu0() + if err == nil { + return errInt64NotSupported 
+ } + return nil +} + +func validateMemory(cluster *DatabaseCluster) error { + _, err := cluster.Spec.Engine.Resources.Memory.AsDatabaseClusterSpecEngineResourcesMemory0() + if err == nil { + return errInt64NotSupported + } + memStr, err := cluster.Spec.Engine.Resources.Memory.AsDatabaseClusterSpecEngineResourcesMemory1() + if err == nil { + mem, err := resource.ParseQuantity(memStr) + if err != nil { + return err + } + if mem.Cmp(minMemQuantity) == -1 { + return errNotEnoughMemory + } + } + return nil +} + +func validateStorageSize(cluster *DatabaseCluster) error { + _, err := cluster.Spec.Engine.Storage.Size.AsDatabaseClusterSpecEngineStorageSize0() + if err == nil { + return errInt64NotSupported + } + sizeStr, err := cluster.Spec.Engine.Storage.Size.AsDatabaseClusterSpecEngineStorageSize1() + + if err == nil { + size, err := resource.ParseQuantity(sizeStr) + if err != nil { + return err + } + if size.Cmp(minStorageQuantity) == -1 { + return errNotEnoughDiskSize + } + } + return nil +} diff --git a/api/validation_test.go b/api/validation_test.go index 388fe713..fd9b255a 100644 --- a/api/validation_test.go +++ b/api/validation_test.go @@ -15,8 +15,13 @@ package api import ( + "encoding/json" "testing" + "github.com/AlekSi/pointer" + everestv1alpha1 "github.com/percona/everest-operator/api/v1alpha1" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -166,3 +171,345 @@ func TestValidateCreateDatabaseClusterRequest(t *testing.T) { }) } } + +func TestValidateProxy(t *testing.T) { + t.Parallel() + cases := []struct { + name string + engineType string + proxyType string + err error + }{ + { + name: "PXC with mongos", + engineType: "pxc", + proxyType: "mongos", + err: errUnsupportedPXCProxy, + }, + { + name: "PXC with pgbouncer", + engineType: "pxc", + proxyType: "pgbouncer", + err: errUnsupportedPXCProxy, + }, + { + name: "PXC with haproxy", + engineType: "pxc", + proxyType: "haproxy", + err: nil, + }, + { + name: 
"PXC with proxysql", + engineType: "pxc", + proxyType: "proxysql", + err: nil, + }, + { + name: "psmdb with mongos", + engineType: "psmdb", + proxyType: "mongos", + err: nil, + }, + { + name: "psmdb with pgbouncer", + engineType: "psmdb", + proxyType: "pgbouncer", + err: errUnsupportedPSMDBProxy, + }, + { + name: "psmdb with haproxy", + engineType: "psmdb", + proxyType: "haproxy", + err: errUnsupportedPSMDBProxy, + }, + { + name: "psmdb with proxysql", + engineType: "psmdb", + proxyType: "proxysql", + err: errUnsupportedPSMDBProxy, + }, + { + name: "postgresql with mongos", + engineType: "postgresql", + proxyType: "mongos", + err: errUnsupportedPGProxy, + }, + { + name: "postgresql with pgbouncer", + engineType: "postgresql", + proxyType: "pgbouncer", + err: nil, + }, + { + name: "postgresql with haproxy", + engineType: "postgresql", + proxyType: "haproxy", + err: errUnsupportedPGProxy, + }, + { + name: "postgresql with proxysql", + engineType: "postgresql", + proxyType: "proxysql", + err: errUnsupportedPGProxy, + }, + } + for _, tc := range cases { + c := tc + t.Run(c.name, func(t *testing.T) { + t.Parallel() + err := validateProxy(c.engineType, c.proxyType) + if c.err == nil { + require.Nil(t, err) + return + } + assert.Equal(t, c.err.Error(), err.Error()) + }) + } +} + +func TestContainsVersion(t *testing.T) { + t.Parallel() + cases := []struct { + version string + versions []string + result bool + }{ + { + version: "1", + versions: []string{}, + result: false, + }, + { + version: "1", + versions: []string{"1", "2"}, + result: true, + }, + { + version: "1", + versions: []string{"1"}, + result: true, + }, + { + version: "1", + versions: []string{"12", "23"}, + result: false, + }, + } + for _, tc := range cases { + tc := tc + t.Run(tc.version, func(t *testing.T) { + t.Parallel() + res := containsVersion(tc.version, tc.versions) + assert.Equal(t, res, tc.result) + }) + } +} + +func TestValidateVersion(t *testing.T) { + t.Parallel() + cases := []struct { + name 
string + version *string + engine *everestv1alpha1.DatabaseEngine + err error + }{ + { + name: "empty version is allowed", + version: nil, + engine: nil, + err: nil, + }, + { + name: "shall exist in availableVersions", + version: pointer.ToString("8.0.32"), + engine: &everestv1alpha1.DatabaseEngine{ + Status: everestv1alpha1.DatabaseEngineStatus{ + AvailableVersions: everestv1alpha1.Versions{ + Engine: everestv1alpha1.ComponentsMap{ + "8.0.32": &everestv1alpha1.Component{}, + }, + }, + }, + }, + err: nil, + }, + { + name: "shall not exist in availableVersions", + version: pointer.ToString("8.0.32"), + engine: &everestv1alpha1.DatabaseEngine{ + Status: everestv1alpha1.DatabaseEngineStatus{ + AvailableVersions: everestv1alpha1.Versions{ + Engine: everestv1alpha1.ComponentsMap{ + "8.0.31": &everestv1alpha1.Component{}, + }, + }, + }, + }, + err: errors.New("8.0.32 is not in available versions list"), + }, + { + name: "shall exist in allowedVersions", + version: pointer.ToString("8.0.32"), + engine: &everestv1alpha1.DatabaseEngine{ + Spec: everestv1alpha1.DatabaseEngineSpec{ + Type: "pxc", + AllowedVersions: []string{"8.0.32"}, + }, + }, + err: nil, + }, + { + name: "shall not exist in allowedVersions", + version: pointer.ToString("8.0.32"), + engine: &everestv1alpha1.DatabaseEngine{ + Spec: everestv1alpha1.DatabaseEngineSpec{ + Type: "pxc", + AllowedVersions: []string{"8.0.31"}, + }, + }, + err: errors.New("Using 8.0.32 version for pxc is not allowed"), + }, + } + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + err := validateVersion(tc.version, tc.engine) + if tc.err == nil { + require.Nil(t, err) + return + } + assert.Equal(t, err.Error(), tc.err.Error()) + }) + } +} + +func TestValidateBackupSpec(t *testing.T) { + t.Parallel() + cases := []struct { + name string + cluster []byte + err error + }{ + { + name: "empty backup is allowed", + cluster: []byte(`{"spec": {"backup": null}}`), + err: nil, + }, + { + name: "disabled 
backup is allowed", + cluster: []byte(`{"spec": {"backup": {"enabled": false}}}`), + err: nil, + }, + { + name: "errNoSchedules", + cluster: []byte(`{"spec": {"backup": {"enabled": true}}}`), + err: errNoSchedules, + }, + { + name: "errNoNameInSchedule", + cluster: []byte(`{"spec": {"backup": {"enabled": true, "schedules": [{"enabled": true}]}}}`), + err: errNoNameInSchedule, + }, + { + name: "errNoBackupStorageName", + cluster: []byte(`{"spec": {"backup": {"enabled": true, "schedules": [{"enabled": true, "name": "name"}]}}}`), + err: errNoBackupStorageName, + }, + { + name: "valid spec", + cluster: []byte(`{"spec": {"backup": {"enabled": true, "schedules": [{"enabled": true, "name": "name", "backupStorageName": "some"}]}}}`), + err: nil, + }, + } + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + cluster := &DatabaseCluster{} + err := json.Unmarshal(tc.cluster, cluster) + require.NoError(t, err) + err = validateBackupSpec(cluster) + if tc.err == nil { + require.Nil(t, err) + return + } + assert.Equal(t, err.Error(), tc.err.Error()) + }) + } +} + +func TestValidateResourceLimits(t *testing.T) { + t.Parallel() + cases := []struct { + name string + cluster []byte + err error + }{ + { + name: "success", + cluster: []byte(`{"spec": {"engine": {"resources": {"cpu": "600m", "memory":"1G"}, "storage": {"size": "2G"}}}}`), + err: nil, + }, + { + name: "errNoResourceDefined", + cluster: []byte(`{"spec": {"engine": {"resources":null, "storage": {"size": "2G"}}}}`), + err: errNoResourceDefined, + }, + { + name: "Not enough CPU", + cluster: []byte(`{"spec": {"engine": {"resources": {"cpu": null, "memory":"1G"}, "storage": {"size": "2G"}}}}`), + err: errNotEnoughCPU, + }, + { + name: "Not enough memory", + cluster: []byte(`{"spec": {"engine": {"resources": {"cpu": "600m", "memory":null}, "storage": {"size": "2G"}}}}`), + err: errNotEnoughMemory, + }, + { + name: "No int64 for CPU", + cluster: []byte(`{"spec": {"engine": {"resources": 
{"cpu": 6000, "memory": "1G"}, "storage": {"size": "2G"}}}}`), + err: errInt64NotSupported, + }, + { + name: "No int64 for Memory", + cluster: []byte(`{"spec": {"engine": {"resources": {"cpu": "600m", "memory": 1000000}, "storage": {"size": "2G"}}}}`), + err: errInt64NotSupported, + }, + { + name: "No int64 for storage", + cluster: []byte(`{"spec": {"engine": {"resources": {"cpu": "600m", "memory": "1G"}, "storage": {"size": 20000}}}}`), + err: errInt64NotSupported, + }, + { + name: "not enough disk size", + cluster: []byte(`{"spec": {"engine": {"resources": {"cpu": "600m", "memory": "1G"}, "storage": {"size": "512M"}}}}`), + err: errNotEnoughDiskSize, + }, + { + name: "not enough CPU", + cluster: []byte(`{"spec": {"engine": {"resources": {"cpu": "200m", "memory": "1G"}, "storage": {"size": "2G"}}}}`), + err: errNotEnoughCPU, + }, + { + name: "not enough Mem", + cluster: []byte(`{"spec": {"engine": {"resources": {"cpu": "600m", "memory": "400M"}, "storage": {"size": "2G"}}}}`), + err: errNotEnoughMemory, + }, + } + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + cluster := &DatabaseCluster{} + err := json.Unmarshal(tc.cluster, cluster) + require.NoError(t, err) + err = validateResourceLimits(cluster) + if tc.err == nil { + require.Nil(t, err) + return + } + assert.Equal(t, err.Error(), tc.err.Error()) + }) + } +} diff --git a/pkg/kubernetes/client/customresources/databaseclusters.go b/pkg/kubernetes/client/customresources/databaseclusters.go index 9db7e9c0..2ddac12d 100644 --- a/pkg/kubernetes/client/customresources/databaseclusters.go +++ b/pkg/kubernetes/client/customresources/databaseclusters.go @@ -1,3 +1,4 @@ +// Package customresources ... // percona-everest-backend // Copyright (C) 2023 Percona LLC // @@ -12,7 +13,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
- +// +//nolint:dupl package customresources import ( diff --git a/pkg/kubernetes/client/customresources/databaseengines.go b/pkg/kubernetes/client/customresources/databaseengines.go new file mode 100644 index 00000000..77be0dd3 --- /dev/null +++ b/pkg/kubernetes/client/customresources/databaseengines.go @@ -0,0 +1,97 @@ +// Package customresources ... +// percona-everest-backend +// Copyright (C) 2023 Percona LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//nolint:dupl +package customresources + +import ( + "context" + + everestv1alpha1 "github.com/percona/everest-operator/api/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" +) + +const ( + dbEnginesAPIKind = "databaseengines" +) + +// DBEngines returns a client for db engines. +func (c *Client) DBEngines(namespace string) DBEngineInterface { //nolint:ireturn + return &dbEngineClient{ + restClient: c.restClient, + namespace: namespace, + } +} + +type dbEngineClient struct { + restClient rest.Interface + namespace string +} + +// DBEngineInterface supports list, get and watch methods. 
+type DBEngineInterface interface { + List(ctx context.Context, opts metav1.ListOptions) (*everestv1alpha1.DatabaseEngineList, error) + Get(ctx context.Context, name string, options metav1.GetOptions) (*everestv1alpha1.DatabaseEngine, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) +} + +// List lists database engines based on opts. +func (c *dbEngineClient) List(ctx context.Context, opts metav1.ListOptions) (*everestv1alpha1.DatabaseEngineList, error) { + result := &everestv1alpha1.DatabaseEngineList{} + err := c.restClient. + Get(). + Namespace(c.namespace). + Resource(dbEnginesAPIKind). + VersionedParams(&opts, scheme.ParameterCodec). + Do(ctx). + Into(result) + return result, err +} + +// Get retrieves a database engine based on opts. +func (c *dbEngineClient) Get( + ctx context.Context, + name string, + opts metav1.GetOptions, +) (*everestv1alpha1.DatabaseEngine, error) { + result := &everestv1alpha1.DatabaseEngine{} + err := c.restClient. + Get(). + Namespace(c.namespace). + Resource(dbEnginesAPIKind). + VersionedParams(&opts, scheme.ParameterCodec). + Name(name). + Do(ctx). + Into(result) + return result, err +} + +// Watch starts a watch based on opts. +func (c *dbEngineClient) Watch( //nolint:ireturn + ctx context.Context, + opts metav1.ListOptions, +) (watch.Interface, error) { + opts.Watch = true + return c.restClient. + Get(). + Namespace(c.namespace). + Resource(dbEnginesAPIKind). + VersionedParams(&opts, scheme.ParameterCodec). + Watch(ctx) +} diff --git a/pkg/kubernetes/client/database_engine.go b/pkg/kubernetes/client/database_engine.go new file mode 100644 index 00000000..452598df --- /dev/null +++ b/pkg/kubernetes/client/database_engine.go @@ -0,0 +1,18 @@ +package client + +import ( + "context" + + everestv1alpha1 "github.com/percona/everest-operator/api/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ListDatabaseEngines returns list of managed database engines. 
+func (c *Client) ListDatabaseEngines(ctx context.Context) (*everestv1alpha1.DatabaseEngineList, error) { + return c.customClientSet.DBEngines(c.namespace).List(ctx, metav1.ListOptions{}) +} + +// GetDatabaseEngine returns database engine by provided name. +func (c *Client) GetDatabaseEngine(ctx context.Context, name string) (*everestv1alpha1.DatabaseEngine, error) { + return c.customClientSet.DBEngines(c.namespace).Get(ctx, name, metav1.GetOptions{}) +} diff --git a/pkg/kubernetes/client/gen.go b/pkg/kubernetes/client/gen.go index b9082a79..4eeedab8 100644 --- a/pkg/kubernetes/client/gen.go +++ b/pkg/kubernetes/client/gen.go @@ -15,5 +15,5 @@ package client -//go:generate ../../../bin/ifacemaker -f backup_storage.go -f client.go -f database_cluster.go -f monitoring_config.go -f namespace.go -f node.go -f pod.go -f resource.go -f secret.go -f storage.go -s Client -i KubeClientConnector -p client -o kubeclient_interface.go +//go:generate ../../../bin/ifacemaker -f backup_storage.go -f client.go -f database_cluster.go -f database_engine.go -f monitoring_config.go -f namespace.go -f node.go -f pod.go -f resource.go -f secret.go -f storage.go -s Client -i KubeClientConnector -p client -o kubeclient_interface.go //go:generate ../../../bin/mockery --name=KubeClientConnector --case=snake --inpackage diff --git a/pkg/kubernetes/client/kubeclient_interface.go b/pkg/kubernetes/client/kubeclient_interface.go index f915d960..6b6d0399 100644 --- a/pkg/kubernetes/client/kubeclient_interface.go +++ b/pkg/kubernetes/client/kubeclient_interface.go @@ -41,6 +41,10 @@ type KubeClientConnector interface { ListDatabaseClusters(ctx context.Context) (*everestv1alpha1.DatabaseClusterList, error) // GetDatabaseCluster returns database clusters by provided name. GetDatabaseCluster(ctx context.Context, name string) (*everestv1alpha1.DatabaseCluster, error) + // ListDatabaseEngines returns list of managed database engines. 
+ ListDatabaseEngines(ctx context.Context) (*everestv1alpha1.DatabaseEngineList, error) + // GetDatabaseEngine returns database engine by provided name. + GetDatabaseEngine(ctx context.Context, name string) (*everestv1alpha1.DatabaseEngine, error) // CreateMonitoringConfig creates an MonitoringConfig. CreateMonitoringConfig(ctx context.Context, mc *everestv1alpha1.MonitoringConfig) error // GetMonitoringConfig returns the MonitoringConfig. diff --git a/pkg/kubernetes/client/mock_kube_client_connector.go b/pkg/kubernetes/client/mock_kube_client_connector.go index a94914bd..cefd0896 100644 --- a/pkg/kubernetes/client/mock_kube_client_connector.go +++ b/pkg/kubernetes/client/mock_kube_client_connector.go @@ -238,6 +238,32 @@ func (_m *MockKubeClientConnector) GetDatabaseCluster(ctx context.Context, name return r0, r1 } +// GetDatabaseEngine provides a mock function with given fields: ctx, name +func (_m *MockKubeClientConnector) GetDatabaseEngine(ctx context.Context, name string) (*v1alpha1.DatabaseEngine, error) { + ret := _m.Called(ctx, name) + + var r0 *v1alpha1.DatabaseEngine + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*v1alpha1.DatabaseEngine, error)); ok { + return rf(ctx, name) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *v1alpha1.DatabaseEngine); ok { + r0 = rf(ctx, name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1alpha1.DatabaseEngine) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, name) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetMonitoringConfig provides a mock function with given fields: ctx, name func (_m *MockKubeClientConnector) GetMonitoringConfig(ctx context.Context, name string) (*v1alpha1.MonitoringConfig, error) { ret := _m.Called(ctx, name) @@ -500,6 +526,32 @@ func (_m *MockKubeClientConnector) ListDatabaseClusters(ctx context.Context) (*v return r0, r1 } +// ListDatabaseEngines provides a mock function with given 
fields: ctx +func (_m *MockKubeClientConnector) ListDatabaseEngines(ctx context.Context) (*v1alpha1.DatabaseEngineList, error) { + ret := _m.Called(ctx) + + var r0 *v1alpha1.DatabaseEngineList + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*v1alpha1.DatabaseEngineList, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *v1alpha1.DatabaseEngineList); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1alpha1.DatabaseEngineList) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // ListMonitoringConfigs provides a mock function with given fields: ctx func (_m *MockKubeClientConnector) ListMonitoringConfigs(ctx context.Context) (*v1alpha1.MonitoringConfigList, error) { ret := _m.Called(ctx) diff --git a/pkg/kubernetes/database_engine.go b/pkg/kubernetes/database_engine.go new file mode 100644 index 00000000..b14ee9be --- /dev/null +++ b/pkg/kubernetes/database_engine.go @@ -0,0 +1,33 @@ +// percona-everest-backend +// Copyright (C) 2023 Percona LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package kubernetes ... +package kubernetes + +import ( + "context" + + everestv1alpha1 "github.com/percona/everest-operator/api/v1alpha1" +) + +// ListDatabaseEngines returns list of managed database engines. 
+func (k *Kubernetes) ListDatabaseEngines(ctx context.Context) (*everestv1alpha1.DatabaseEngineList, error) { + return k.client.ListDatabaseEngines(ctx) +} + +// GetDatabaseEngine returns database engine by provided name. +func (k *Kubernetes) GetDatabaseEngine(ctx context.Context, name string) (*everestv1alpha1.DatabaseEngine, error) { + return k.client.GetDatabaseEngine(ctx, name) +}