diff --git a/Makefile b/Makefile index 132ef14bc..ec6a87219 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -# # /bin/sh does not support source command needed in make test +# # /bin/sh does not support source command needed in make all-test #SHELL := /bin/bash ROOT_DIR=$(shell git rev-parse --show-toplevel) @@ -113,6 +113,9 @@ help: ## Display this help. manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases cp $(ROOT_DIR)/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml $(ROOT_DIR)/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml + cp $(ROOT_DIR)/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml $(ROOT_DIR)/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml + cp $(ROOT_DIR)/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml $(ROOT_DIR)/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml + cp $(ROOT_DIR)/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml $(ROOT_DIR)/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml .PHONY: generate generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. @@ -137,10 +140,27 @@ $(GOLANGCI_LINT): $(LOCALBIN) go-lint: golanci-lint ## Run golangci-lint against code. $(GOLANGCI_LINT) run -.PHONY: test -test: manifests generate fmt vet envtest ## Run tests. +.PHONY: all-test +all-test: manifests generate fmt vet envtest ## Run tests. 
+ KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" cd $(shell pwd)/test; go run github.com/onsi/ginkgo/v2/ginkgo -coverprofile cover.out -r --keep-going -show-node-events -v -timeout=12h0m0s --junit-report="junit.xml" -- ${ARGS} + +.PHONY: cluster-test +cluster-test: manifests generate fmt vet envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" cd $(shell pwd)/test; go run github.com/onsi/ginkgo/v2/ginkgo -coverprofile cover.out --keep-separate-coverprofiles -v . ./cluster -show-node-events -timeout=12h0m0s --junit-report="junit.xml" -- ${ARGS} + + +.PHONY: backup-service-test +backup-service-test: manifests generate fmt vet envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" cd $(shell pwd)/test; go run github.com/onsi/ginkgo/v2/ginkgo -coverprofile cover.out --keep-separate-coverprofiles -v . ./backup_service -show-node-events -timeout=1h0m0s --junit-report="junit.xml" -- ${ARGS} + +.PHONY: backup-test +backup-test: manifests generate fmt vet envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" cd $(shell pwd)/test; go run github.com/onsi/ginkgo/v2/ginkgo -coverprofile cover.out --keep-separate-coverprofiles -v . ./backup -show-node-events -timeout=1h0m0s --junit-report="junit.xml" -- ${ARGS} + +.PHONY: restore-test +restore-test: manifests generate fmt vet envtest ## Run tests. # KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... 
-coverprofile cover.out - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" cd $(shell pwd)/test; go run github.com/onsi/ginkgo/v2/ginkgo -coverprofile cover.out -show-node-events -v -timeout=12h0m0s -focus=${FOCUS} --junit-report="junit.xml" -- ${ARGS} + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" cd $(shell pwd)/test; go run github.com/onsi/ginkgo/v2/ginkgo -coverprofile cover.out --keep-separate-coverprofiles -v . ./restore -show-node-events -timeout=1h0m0s --junit-report="junit.xml" -- ${ARGS} ##@ Build diff --git a/PROJECT b/PROJECT index 4f6c25950..c81d8f89e 100644 --- a/PROJECT +++ b/PROJECT @@ -28,4 +28,43 @@ resources: defaulting: true validation: true webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: aerospike.com + group: asdb + kind: AerospikeBackup + path: github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1 + version: v1beta1 + webhooks: + defaulting: false + validation: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: aerospike.com + group: asdb + kind: AerospikeRestore + path: github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1 + version: v1beta1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: aerospike.com + group: asdb + kind: AerospikeBackupService + path: github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1 + version: v1beta1 + webhooks: + defaulting: false + validation: true + webhookVersion: v1 version: "3" diff --git a/api/v1beta1/aerospikebackup_types.go b/api/v1beta1/aerospikebackup_types.go new file mode 100644 index 000000000..a54928a93 --- /dev/null +++ b/api/v1beta1/aerospikebackup_types.go @@ -0,0 +1,110 @@ +/* +Copyright 2021. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// AerospikeBackupSpec defines the desired state of AerospikeBackup for a given AerospikeCluster +// +k8s:openapi-gen=true +type AerospikeBackupSpec struct { + // BackupService is the backup service reference i.e. name and namespace. + // It is used to communicate to the backup service to trigger backups. This field is immutable + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Backup Service" + BackupService BackupService `json:"backupService"` + + // Config is the free form configuration for the backup in YAML format. + // This config is used to trigger backups. It includes: aerospike-cluster, backup-routines. + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Backup Config" + Config runtime.RawExtension `json:"config"` + + // OnDemandBackups is the configuration for on-demand backups. 
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="On Demand Backups" + // +kubebuilder:validation:MaxItems:=1 + OnDemandBackups []OnDemandBackupSpec `json:"onDemandBackups,omitempty"` +} + +type BackupService struct { + // Backup service name + Name string `json:"name"` + + // Backup service namespace + Namespace string `json:"namespace"` +} + +func (b *BackupService) String() string { + return fmt.Sprintf("%s/%s", b.Namespace, b.Name) +} + +type OnDemandBackupSpec struct { + // ID is the unique identifier for the on-demand backup. + // +kubebuilder:validation:MinLength=1 + ID string `json:"id"` + + // RoutineName is the routine name used to trigger on-demand backup. + RoutineName string `json:"routineName"` + + // Delay is the interval before starting the on-demand backup. + Delay metav1.Duration `json:"delay,omitempty"` +} + +// AerospikeBackupStatus defines the observed state of AerospikeBackup +type AerospikeBackupStatus struct { + // BackupService is the backup service reference i.e. name and namespace. + BackupService BackupService `json:"backupService"` + + // Config is the configuration for the backup in YAML format. + // This config is used to trigger backups. It includes: aerospike-cluster, backup-routines. + Config runtime.RawExtension `json:"config"` + + // OnDemandBackups is the configuration for on-demand backups. 
+ OnDemandBackups []OnDemandBackupSpec `json:"onDemandBackups,omitempty"` + + // TODO: finalize the status and phase +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Backup Service Name",type=string,JSONPath=`.spec.backupService.name` +// +kubebuilder:printcolumn:name="Backup Service Namespace",type=string,JSONPath=`.spec.backupService.namespace` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// AerospikeBackup is the Schema for the aerospikebackup API +type AerospikeBackup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AerospikeBackupSpec `json:"spec,omitempty"` + Status AerospikeBackupStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AerospikeBackupList contains a list of AerospikeBackup +type AerospikeBackupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AerospikeBackup `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AerospikeBackup{}, &AerospikeBackupList{}) +} diff --git a/api/v1beta1/aerospikebackup_webhook.go b/api/v1beta1/aerospikebackup_webhook.go new file mode 100644 index 000000000..d0774f5b9 --- /dev/null +++ b/api/v1beta1/aerospikebackup_webhook.go @@ -0,0 +1,382 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + "fmt" + "reflect" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilRuntime "k8s.io/apimachinery/pkg/util/runtime" + clientGoScheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "sigs.k8s.io/yaml" + + "github.com/abhishekdwivedi3060/aerospike-backup-service/pkg/model" + asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" +) + +func (r *AerospikeBackup) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// Implemented Defaulter interface for future reference +var _ webhook.Defaulter = &AerospikeBackup{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *AerospikeBackup) Default() { + abLog := logf.Log.WithName(namespacedName(r)) + + abLog.Info("Setting defaults for aerospikeBackup") +} + +//nolint:lll // for readability +//+kubebuilder:webhook:path=/validate-asdb-aerospike-com-v1beta1-aerospikebackup,mutating=false,failurePolicy=fail,sideEffects=None,groups=asdb.aerospike.com,resources=aerospikebackups,verbs=create;update,versions=v1beta1,name=vaerospikebackup.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &AerospikeBackup{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *AerospikeBackup) ValidateCreate() (admission.Warnings, error) { + abLog := logf.Log.WithName(namespacedName(r)) + + abLog.Info("Validate create") + + if len(r.Spec.OnDemandBackups) != 0 { + return nil, fmt.Errorf("onDemand backups config cannot be specified while creating backup") + } + + if err := 
r.validateBackupConfig(); err != nil { + return nil, err + } + + return nil, nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *AerospikeBackup) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + abLog := logf.Log.WithName(namespacedName(r)) + + abLog.Info("Validate update") + + oldObj := old.(*AerospikeBackup) + + if !reflect.DeepEqual(r.Spec.BackupService, oldObj.Spec.BackupService) { + return nil, fmt.Errorf("backup service cannot be updated") + } + + if err := r.validateBackupConfig(); err != nil { + return nil, err + } + + if err := r.validateAerospikeClusterUpdate(oldObj); err != nil { + return nil, err + } + + if err := r.validateOnDemandBackupsUpdate(oldObj); err != nil { + return nil, err + } + + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *AerospikeBackup) ValidateDelete() (admission.Warnings, error) { + abLog := logf.Log.WithName(namespacedName(r)) + + abLog.Info("Validate delete") + + // TODO(user): fill in your validation logic upon object deletion. 
+ return nil, nil +} + +func (r *AerospikeBackup) validateBackupConfig() error { + backupConfig := make(map[string]interface{}) + + if err := yaml.Unmarshal(r.Spec.Config.Raw, &backupConfig); err != nil { + return err + } + + if _, ok := backupConfig[common.ServiceKey]; ok { + return fmt.Errorf("service field cannot be specified in backup config") + } + + if _, ok := backupConfig[common.BackupPoliciesKey]; ok { + return fmt.Errorf("backup-policies field cannot be specified in backup config") + } + + if _, ok := backupConfig[common.StorageKey]; ok { + return fmt.Errorf("storage field cannot be specified in backup config") + } + + if _, ok := backupConfig[common.SecretAgentsKey]; ok { + return fmt.Errorf("secret-agent field cannot be specified in backup config") + } + + var backupSvc AerospikeBackupService + + cl, gErr := getK8sClient() + if gErr != nil { + return gErr + } + + if err := cl.Get(context.TODO(), + types.NamespacedName{Name: r.Spec.BackupService.Name, Namespace: r.Spec.BackupService.Namespace}, + &backupSvc); err != nil { + return err + } + + var backupSvcConfig model.Config + + if err := yaml.UnmarshalStrict(backupSvc.Spec.Config.Raw, &backupSvcConfig); err != nil { + return err + } + + aeroClusters, err := r.getValidatedAerospikeClusters(backupConfig) + if err != nil { + return err + } + + backupRoutines, err := r.getValidatedBackupRoutines(backupConfig, aeroClusters) + if err != nil { + return err + } + + err = updateValidateBackupSvcConfig(aeroClusters, backupRoutines, &backupSvcConfig) + if err != nil { + return err + } + + // Validate on-demand backup + if len(r.Spec.OnDemandBackups) > 0 { + if _, ok := backupSvcConfig.BackupRoutines[r.Spec.OnDemandBackups[0].RoutineName]; !ok { + return fmt.Errorf("invalid onDemand config, backup routine %s not found", + r.Spec.OnDemandBackups[0].RoutineName) + } + } + + return nil +} + +func getK8sClient() (client.Client, error) { + restConfig := ctrl.GetConfigOrDie() + + scheme := runtime.NewScheme() + + 
utilRuntime.Must(asdbv1.AddToScheme(scheme)) + utilRuntime.Must(clientGoScheme.AddToScheme(scheme)) + utilRuntime.Must(AddToScheme(scheme)) + + cl, err := client.New(restConfig, client.Options{ + Scheme: scheme, + }) + if err != nil { + return nil, err + } + + return cl, nil +} + +func (r *AerospikeBackup) getValidatedAerospikeClusters(backupConfig map[string]interface{}, +) (map[string]*model.AerospikeCluster, error) { + if _, ok := backupConfig[common.AerospikeClusterKey]; !ok { + return nil, fmt.Errorf("aerospike-cluster field is required field in backup config") + } + + cluster, ok := backupConfig[common.AerospikeClusterKey].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("aerospike-cluster field is not in the right format") + } + + clusterBytes, cErr := yaml.Marshal(cluster) + if cErr != nil { + return nil, cErr + } + + aeroClusters := make(map[string]*model.AerospikeCluster) + + if err := yaml.UnmarshalStrict(clusterBytes, &aeroClusters); err != nil { + return nil, err + } + + if len(aeroClusters) != 1 { + return aeroClusters, fmt.Errorf("only one aerospike cluster is allowed in backup config") + } + + for clusterName := range aeroClusters { + if err := validateName(r.NamePrefix(), clusterName); err != nil { + return nil, fmt.Errorf("invalid cluster name %s, %s", clusterName, err.Error()) + } + } + + return aeroClusters, nil +} + +func (r *AerospikeBackup) validateOnDemandBackupsUpdate(oldObj *AerospikeBackup) error { + // Check if backup config is updated along with onDemand backup add/update + if !reflect.DeepEqual(r.Spec.OnDemandBackups, r.Status.OnDemandBackups) && + !reflect.DeepEqual(r.Spec.Config.Raw, r.Status.Config.Raw) { + return fmt.Errorf("can not add/update onDemand backup along with backup config change") + } + + if len(r.Spec.OnDemandBackups) > 0 && len(oldObj.Spec.OnDemandBackups) > 0 { + // Check if onDemand backup spec is updated + if r.Spec.OnDemandBackups[0].ID == oldObj.Spec.OnDemandBackups[0].ID && + 
!reflect.DeepEqual(r.Spec.OnDemandBackups[0], oldObj.Spec.OnDemandBackups[0]) { + return fmt.Errorf("existing onDemand backup cannot be updated. " + + "However, It can be removed and a new onDemand backup can be added") + } + + // Check if previous onDemand backup is completed before allowing new onDemand backup + if r.Spec.OnDemandBackups[0].ID != oldObj.Spec.OnDemandBackups[0].ID && (len(r.Status.OnDemandBackups) == 0 || + r.Status.OnDemandBackups[0].ID != oldObj.Spec.OnDemandBackups[0].ID) { + return fmt.Errorf("can not add new onDemand backup when previous onDemand backup is not completed") + } + } + + return nil +} + +func (r *AerospikeBackup) validateAerospikeClusterUpdate(oldObj *AerospikeBackup) error { + oldObjConfig := make(map[string]interface{}) + currentConfig := make(map[string]interface{}) + + if err := yaml.Unmarshal(oldObj.Spec.Config.Raw, &oldObjConfig); err != nil { + return err + } + + if err := yaml.Unmarshal(r.Spec.Config.Raw, ¤tConfig); err != nil { + return err + } + + oldCluster := oldObjConfig[common.AerospikeClusterKey].(map[string]interface{}) + newCluster := currentConfig[common.AerospikeClusterKey].(map[string]interface{}) + + for clusterName := range newCluster { + if _, ok := oldCluster[clusterName]; !ok { + return fmt.Errorf("aerospike-cluster name cannot be updated") + } + } + + return nil +} + +func (r *AerospikeBackup) getValidatedBackupRoutines( + backupConfig map[string]interface{}, + aeroClusters map[string]*model.AerospikeCluster, +) (map[string]*model.BackupRoutine, error) { + if _, ok := backupConfig[common.BackupRoutinesKey]; !ok { + return nil, fmt.Errorf("backup-routines field is required in backup config") + } + + routines, ok := backupConfig[common.BackupRoutinesKey].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("backup-routines field is not in the right format") + } + + routineBytes, rErr := yaml.Marshal(routines) + if rErr != nil { + return nil, rErr + } + + backupRoutines := 
make(map[string]*model.BackupRoutine) + + if err := yaml.UnmarshalStrict(routineBytes, &backupRoutines); err != nil { + return nil, err + } + + if len(backupRoutines) == 0 { + return nil, fmt.Errorf("backup-routines field cannot be empty") + } + + // validate: + // 1. if the correct format name is given + // 2. if only correct aerospike cluster (the one referred in Backup CR) is used in backup routines + for routineName, routine := range backupRoutines { + if err := validateName(r.NamePrefix(), routineName); err != nil { + return nil, fmt.Errorf("invalid backup routine name %s, %s", routineName, err.Error()) + } + + if _, ok := aeroClusters[routine.SourceCluster]; !ok { + return nil, fmt.Errorf("cluster %s not found in backup aerospike-cluster config", routine.SourceCluster) + } + } + + return backupRoutines, nil +} + +func updateValidateBackupSvcConfig( + clusters map[string]*model.AerospikeCluster, + routines map[string]*model.BackupRoutine, + backupSvcConfig *model.Config, +) error { + if len(backupSvcConfig.AerospikeClusters) == 0 { + backupSvcConfig.AerospikeClusters = make(map[string]*model.AerospikeCluster) + } + + for name, cluster := range clusters { + backupSvcConfig.AerospikeClusters[name] = cluster + } + + if len(backupSvcConfig.BackupRoutines) == 0 { + backupSvcConfig.BackupRoutines = make(map[string]*model.BackupRoutine) + } + + for name, routine := range routines { + backupSvcConfig.BackupRoutines[name] = routine + } + + // Add empty placeholders for missing backupSvcConfig sections. This is required for validation to work. 
+ if backupSvcConfig.ServiceConfig == nil { + backupSvcConfig.ServiceConfig = &model.BackupServiceConfig{} + } + + if backupSvcConfig.ServiceConfig.HTTPServer == nil { + backupSvcConfig.ServiceConfig.HTTPServer = &model.HTTPServerConfig{} + } + + if backupSvcConfig.ServiceConfig.Logger == nil { + backupSvcConfig.ServiceConfig.Logger = &model.LoggerConfig{} + } + + return backupSvcConfig.Validate() +} + +func (r *AerospikeBackup) NamePrefix() string { + return r.Namespace + "-" + r.Name +} + +func validateName(reqPrefix, name string) error { + if name == "" { + return fmt.Errorf("name cannot be empty") + } + + if !strings.HasPrefix(name, reqPrefix) { + return fmt.Errorf("name should start with %s", reqPrefix) + } + + return nil +} diff --git a/api/v1beta1/aerospikebackupservice_types.go b/api/v1beta1/aerospikebackupservice_types.go new file mode 100644 index 000000000..bec63bee8 --- /dev/null +++ b/api/v1beta1/aerospikebackupservice_types.go @@ -0,0 +1,147 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:validation:Enum=InProgress;Completed;Error +type AerospikeBackupServicePhase string + +// These are the valid phases of Aerospike Backup Service reconcile flow. 
+const ( + // AerospikeBackupServiceInProgress means the AerospikeBackupService CR is being reconciled and operations are + // in-progress state. This phase denotes that AerospikeBackupService resources are gradually getting deployed. + AerospikeBackupServiceInProgress AerospikeBackupServicePhase = "InProgress" + // AerospikeBackupServiceCompleted means the AerospikeBackupService CR has been reconciled. + // This phase denotes that the AerospikeBackupService resources have been deployed/upgraded successfully and is + // ready to use. + AerospikeBackupServiceCompleted AerospikeBackupServicePhase = "Completed" + // AerospikeBackupServiceError means the AerospikeBackupService operation is in error state because of some reason + // like incorrect backup service config, incorrect image, etc. + AerospikeBackupServiceError AerospikeBackupServicePhase = "Error" +) + +// AerospikeBackupServiceSpec defines the desired state of AerospikeBackupService +// +//nolint:govet // for readability +type AerospikeBackupServiceSpec struct { + // Image is the image for the backup service. + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Backup Service Image" + Image string `json:"image"` + + // Config is the free form configuration for the backup service in YAML format. + // This config is used to start the backup service. The config is passed as a file to the backup service. + // It includes: service, backup-policies, storage, secret-agent. + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Backup Service Config" + Config runtime.RawExtension `json:"config"` + + // Resources defines the requests and limits for the backup service container. + // Resources.Limits should be more than Resources.Requests. + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Resources" + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + + // SecretMounts is the list of secret to be mounted in the backup service. 
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Backup Service SecretMounts" + SecretMounts []SecretMount `json:"secrets,omitempty"` + + // Service defines the Kubernetes service configuration for the backup service. + // It is used to expose the backup service deployment. By default, the service type is ClusterIP. + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="K8s Service" + Service *Service `json:"service,omitempty"` +} + +// AerospikeBackupServiceStatus defines the observed state of AerospikeBackupService +// +//nolint:govet // for readability +type AerospikeBackupServiceStatus struct { + // Image is the image for the backup service. + Image string `json:"image,omitempty"` + + // Config is the free form configuration for the backup service in YAML format. + // This config is used to start the backup service. The config is passed as a file to the backup service. + // It includes: service, backup-policies, storage, secret-agent. + Config runtime.RawExtension `json:"config,omitempty"` + + // Resources defines the requests and limits for the backup service container. + // Resources.Limits should be more than Resources.Requests. + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + + // SecretMounts is the list of secret to be mounted in the backup service. + SecretMounts []SecretMount `json:"secrets,omitempty"` + + // Service defines the Kubernetes service configuration for the backup service. + // It is used to expose the backup service deployment. By default, the service type is ClusterIP. 
+ Service *Service `json:"service,omitempty"` + + // ContextPath is the backup service API context path + ContextPath string `json:"contextPath,omitempty"` + + // Phase denotes Backup service phase + Phase AerospikeBackupServicePhase `json:"phase"` + + // Port is the listening port of backup service + Port int32 `json:"port,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Image",type=string,JSONPath=`.spec.image` +// +kubebuilder:printcolumn:name="Service Type",type=string,JSONPath=`.spec.service.type` +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// AerospikeBackupService is the Schema for the aerospikebackupservices API +type AerospikeBackupService struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AerospikeBackupServiceSpec `json:"spec,omitempty"` + Status AerospikeBackupServiceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AerospikeBackupServiceList contains a list of AerospikeBackupService +type AerospikeBackupServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AerospikeBackupService `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AerospikeBackupService{}, &AerospikeBackupServiceList{}) +} + +// SecretMount specifies the secret and its corresponding volume mount options. +type SecretMount struct { + // SecretName is the name of the secret to be mounted. + SecretName string `json:"secretName"` + + // VolumeMount is the volume mount options for the secret. + VolumeMount corev1.VolumeMount `json:"volumeMount"` +} + +// Service specifies the Kubernetes service related configuration. +type Service struct { + // Type is the Kubernetes service type. 
+ Type corev1.ServiceType `json:"type"` +} diff --git a/api/v1beta1/aerospikebackupservice_webhook.go b/api/v1beta1/aerospikebackupservice_webhook.go new file mode 100644 index 000000000..ba81310d3 --- /dev/null +++ b/api/v1beta1/aerospikebackupservice_webhook.go @@ -0,0 +1,118 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "sigs.k8s.io/yaml" + + "github.com/abhishekdwivedi3060/aerospike-backup-service/pkg/model" +) + +func (r *AerospikeBackupService) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +// Implemented Defaulter interface for future reference +var _ webhook.Defaulter = &AerospikeBackupService{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *AerospikeBackupService) Default() { + absLog := logf.Log.WithName(namespacedName(r)) + + absLog.Info("Setting defaults for aerospikeBackupService") +} + +//nolint:lll // for readability +//+kubebuilder:webhook:path=/validate-asdb-aerospike-com-v1beta1-aerospikebackupservice,mutating=false,failurePolicy=fail,sideEffects=None,groups=asdb.aerospike.com,resources=aerospikebackupservices,verbs=create;update,versions=v1beta1,name=vaerospikebackupservice.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &AerospikeBackupService{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *AerospikeBackupService) ValidateCreate() (admission.Warnings, error) { + absLog := logf.Log.WithName(namespacedName(r)) + + absLog.Info("Validate create") + + if err := r.validateBackupServiceConfig(); err != nil { + return nil, err + } + + return nil, nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *AerospikeBackupService) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) { + absLog := logf.Log.WithName(namespacedName(r)) + + absLog.Info("Validate update") + + if err := r.validateBackupServiceConfig(); err != nil { + return nil, err + } + + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *AerospikeBackupService) ValidateDelete() (admission.Warnings, error) { + absLog := logf.Log.WithName(namespacedName(r)) + + absLog.Info("Validate delete") + + // TODO(user): fill in your validation logic upon object deletion. 
+ return nil, nil +} + +func (r *AerospikeBackupService) validateBackupServiceConfig() error { + var config model.Config + + if err := yaml.UnmarshalStrict(r.Spec.Config.Raw, &config); err != nil { + return err + } + + if len(config.BackupRoutines) != 0 { + return fmt.Errorf("backup-routines field cannot be specified in backup service config") + } + + if len(config.AerospikeClusters) != 0 { + return fmt.Errorf("aerospike-clusters field cannot be specified in backup service config") + } + + // Add empty placeholders for missing config sections. This is required for validation to work. + if config.ServiceConfig == nil { + config.ServiceConfig = &model.BackupServiceConfig{} + } + + if config.ServiceConfig.HTTPServer == nil { + config.ServiceConfig.HTTPServer = &model.HTTPServerConfig{} + } + + if config.ServiceConfig.Logger == nil { + config.ServiceConfig.Logger = &model.LoggerConfig{} + } + + return config.Validate() +} diff --git a/api/v1beta1/aerospikerestore_types.go b/api/v1beta1/aerospikerestore_types.go new file mode 100644 index 000000000..fe64b8402 --- /dev/null +++ b/api/v1beta1/aerospikerestore_types.go @@ -0,0 +1,114 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +kubebuilder:validation:Enum=InProgress;Completed;Failed +type AerospikeRestorePhase string + +// These are the valid phases of Aerospike restore operation. 
+const ( + // AerospikeRestoreInProgress means the AerospikeRestore CR is being reconciled and restore operation is going on. + AerospikeRestoreInProgress AerospikeRestorePhase = "InProgress" + // AerospikeRestoreCompleted means the AerospikeRestore CR has been reconciled and restore operation is completed. + AerospikeRestoreCompleted AerospikeRestorePhase = "Completed" + // AerospikeRestoreFailed means the AerospikeRestore CR has been reconciled and restore operation is failed. + AerospikeRestoreFailed AerospikeRestorePhase = "Failed" +) + +type RestoreType string + +const ( + Full RestoreType = "Full" + Incremental RestoreType = "Incremental" + Timestamp RestoreType = "Timestamp" +) + +// AerospikeRestoreSpec defines the desired state of AerospikeRestore +// +k8s:openapi-gen=true +// +//nolint:govet // for readability +type AerospikeRestoreSpec struct { + // BackupService is the backup service reference i.e. name and namespace. + // It is used to communicate to the backup service to trigger restores. This field is immutable + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Backup Service" + BackupService BackupService `json:"backupService"` + + // Type is the type of restore. It can of type Full, Incremental, and Timestamp. + // Based on the restore type, the relevant restore config should be given. + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Restore Type" + // +kubebuilder:validation:Enum=Full;Incremental;Timestamp + Type RestoreType `json:"type"` + + // Config is the free form configuration for the restore in YAML format. + // This config is used to trigger restores. It includes: destination, policy, source, secret-agent, time and routine. + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Restore Config" + Config runtime.RawExtension `json:"config"` + + // PollingPeriod is the polling period for restore operation status. 
+ // It is used to poll the restore service to fetch restore operation status. + // Default is 60 seconds. + // +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Restore Service Polling Period" + PollingPeriod metav1.Duration `json:"pollingPeriod,omitempty"` +} + +// AerospikeRestoreStatus defines the observed state of AerospikeRestore +type AerospikeRestoreStatus struct { + // JobID is the restore operation job id. + JobID *int64 `json:"job-id,omitempty"` + + // RestoreResult is the result of the restore operation. + RestoreResult runtime.RawExtension `json:"restoreResult,omitempty"` + + // Phase denotes the current phase of Aerospike restore operation. + Phase AerospikeRestorePhase `json:"phase"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Backup Service Name",type=string,JSONPath=`.spec.backupService.name` +// +kubebuilder:printcolumn:name="Backup Service Namespace",type=string,JSONPath=`.spec.backupService.namespace` +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// AerospikeRestore is the Schema for the aerospikerestores API +// +//nolint:govet // auto-generated +type AerospikeRestore struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AerospikeRestoreSpec `json:"spec,omitempty"` + Status AerospikeRestoreStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// AerospikeRestoreList contains a list of AerospikeRestore +type AerospikeRestoreList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AerospikeRestore `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AerospikeRestore{}, &AerospikeRestoreList{}) +} diff --git a/api/v1beta1/aerospikerestore_webhook.go b/api/v1beta1/aerospikerestore_webhook.go new file mode 100644 index 
000000000..0c8f4bf22 --- /dev/null +++ b/api/v1beta1/aerospikerestore_webhook.go @@ -0,0 +1,144 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + "reflect" + "time" + + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "sigs.k8s.io/yaml" + + "github.com/abhishekdwivedi3060/aerospike-backup-service/pkg/model" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" +) + +const defaultPollingPeriod time.Duration = 60 * time.Second + +func (r *AerospikeRestore) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +//nolint:lll // for readability +//+kubebuilder:webhook:path=/mutate-asdb-aerospike-com-v1beta1-aerospikerestore,mutating=true,failurePolicy=fail,sideEffects=None,groups=asdb.aerospike.com,resources=aerospikerestores,verbs=create;update,versions=v1beta1,name=maerospikerestore.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &AerospikeRestore{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *AerospikeRestore) Default() { + arLog := logf.Log.WithName(namespacedName(r)) + + arLog.Info("Setting defaults for aerospikeRestore") + + if r.Spec.PollingPeriod.Duration.Seconds() == 0 { + r.Spec.PollingPeriod.Duration = defaultPollingPeriod + } +} + +//nolint:lll // for readability +//+kubebuilder:webhook:path=/validate-asdb-aerospike-com-v1beta1-aerospikerestore,mutating=false,failurePolicy=fail,sideEffects=None,groups=asdb.aerospike.com,resources=aerospikerestores,verbs=create;update,versions=v1beta1,name=vaerospikerestore.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &AerospikeRestore{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *AerospikeRestore) ValidateCreate() (admission.Warnings, error) { + arLog := logf.Log.WithName(namespacedName(r)) + + arLog.Info("Validate create") + + if err := r.validateRestoreConfig(); err != nil { + return nil, err + } + + return nil, nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *AerospikeRestore) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + arLog := logf.Log.WithName(namespacedName(r)) + + arLog.Info("Validate update") + + oldRestore := old.(*AerospikeRestore) + + if !reflect.DeepEqual(oldRestore.Spec, r.Spec) { + return nil, fmt.Errorf("aerospikeRestore Spec is immutable") + } + + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func 
(r *AerospikeRestore) ValidateDelete() (admission.Warnings, error) { + arLog := logf.Log.WithName(namespacedName(r)) + + arLog.Info("Validate delete") + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} + +func (r *AerospikeRestore) validateRestoreConfig() error { + restoreConfig := make(map[string]interface{}) + + if err := yaml.Unmarshal(r.Spec.Config.Raw, &restoreConfig); err != nil { + return err + } + + switch r.Spec.Type { + case Full, Incremental: + var restoreRequest model.RestoreRequest + + if _, ok := restoreConfig[common.RoutineKey]; ok { + return fmt.Errorf("routine field is not allowed in restore config for restore type %s", r.Spec.Type) + } + + if _, ok := restoreConfig[common.TimeKey]; ok { + return fmt.Errorf("time field is not allowed in restore config for restore type %s", r.Spec.Type) + } + + if err := yaml.UnmarshalStrict(r.Spec.Config.Raw, &restoreRequest); err != nil { + return err + } + + return restoreRequest.Validate() + + case Timestamp: + var restoreRequest model.RestoreTimestampRequest + + if _, ok := restoreConfig[common.SourceKey]; ok { + return fmt.Errorf("source field is not allowed in restore config for restore type %s", r.Spec.Type) + } + + if err := yaml.UnmarshalStrict(r.Spec.Config.Raw, &restoreRequest); err != nil { + return err + } + + return restoreRequest.Validate() + + default: + // Code flow should not come here + return fmt.Errorf("unknown restore type %s", r.Spec.Type) + } +} diff --git a/api/v1beta1/utils.go b/api/v1beta1/utils.go new file mode 100644 index 000000000..2219c1e42 --- /dev/null +++ b/api/v1beta1/utils.go @@ -0,0 +1,13 @@ +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func namespacedName(obj client.Object) string { + return types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.GetName(), + }.String() +} diff --git a/api/v1beta1/zz_generated.deepcopy.go 
b/api/v1beta1/zz_generated.deepcopy.go index ba73d23bd..1930b2fcf 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ package v1beta1 import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -62,6 +62,234 @@ func (in *AerospikeAccessControlSpec) DeepCopy() *AerospikeAccessControlSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AerospikeBackup) DeepCopyInto(out *AerospikeBackup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AerospikeBackup. +func (in *AerospikeBackup) DeepCopy() *AerospikeBackup { + if in == nil { + return nil + } + out := new(AerospikeBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AerospikeBackup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AerospikeBackupList) DeepCopyInto(out *AerospikeBackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AerospikeBackup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AerospikeBackupList. 
+func (in *AerospikeBackupList) DeepCopy() *AerospikeBackupList { + if in == nil { + return nil + } + out := new(AerospikeBackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AerospikeBackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AerospikeBackupService) DeepCopyInto(out *AerospikeBackupService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AerospikeBackupService. +func (in *AerospikeBackupService) DeepCopy() *AerospikeBackupService { + if in == nil { + return nil + } + out := new(AerospikeBackupService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AerospikeBackupService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AerospikeBackupServiceList) DeepCopyInto(out *AerospikeBackupServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AerospikeBackupService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AerospikeBackupServiceList. 
+func (in *AerospikeBackupServiceList) DeepCopy() *AerospikeBackupServiceList { + if in == nil { + return nil + } + out := new(AerospikeBackupServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AerospikeBackupServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AerospikeBackupServiceSpec) DeepCopyInto(out *AerospikeBackupServiceSpec) { + *out = *in + in.Config.DeepCopyInto(&out.Config) + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.SecretMounts != nil { + in, out := &in.SecretMounts, &out.SecretMounts + *out = make([]SecretMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(Service) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AerospikeBackupServiceSpec. +func (in *AerospikeBackupServiceSpec) DeepCopy() *AerospikeBackupServiceSpec { + if in == nil { + return nil + } + out := new(AerospikeBackupServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AerospikeBackupServiceStatus) DeepCopyInto(out *AerospikeBackupServiceStatus) { + *out = *in + in.Config.DeepCopyInto(&out.Config) + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.SecretMounts != nil { + in, out := &in.SecretMounts, &out.SecretMounts + *out = make([]SecretMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(Service) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AerospikeBackupServiceStatus. +func (in *AerospikeBackupServiceStatus) DeepCopy() *AerospikeBackupServiceStatus { + if in == nil { + return nil + } + out := new(AerospikeBackupServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AerospikeBackupSpec) DeepCopyInto(out *AerospikeBackupSpec) { + *out = *in + out.BackupService = in.BackupService + in.Config.DeepCopyInto(&out.Config) + if in.OnDemandBackups != nil { + in, out := &in.OnDemandBackups, &out.OnDemandBackups + *out = make([]OnDemandBackupSpec, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AerospikeBackupSpec. +func (in *AerospikeBackupSpec) DeepCopy() *AerospikeBackupSpec { + if in == nil { + return nil + } + out := new(AerospikeBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AerospikeBackupStatus) DeepCopyInto(out *AerospikeBackupStatus) { + *out = *in + out.BackupService = in.BackupService + in.Config.DeepCopyInto(&out.Config) + if in.OnDemandBackups != nil { + in, out := &in.OnDemandBackups, &out.OnDemandBackups + *out = make([]OnDemandBackupSpec, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AerospikeBackupStatus. +func (in *AerospikeBackupStatus) DeepCopy() *AerospikeBackupStatus { + if in == nil { + return nil + } + out := new(AerospikeBackupStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AerospikeCertPathInOperatorSource) DeepCopyInto(out *AerospikeCertPathInOperatorSource) { *out = *in @@ -591,6 +819,104 @@ func (in *AerospikePodStatus) DeepCopy() *AerospikePodStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AerospikeRestore) DeepCopyInto(out *AerospikeRestore) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AerospikeRestore. +func (in *AerospikeRestore) DeepCopy() *AerospikeRestore { + if in == nil { + return nil + } + out := new(AerospikeRestore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AerospikeRestore) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AerospikeRestoreList) DeepCopyInto(out *AerospikeRestoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AerospikeRestore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AerospikeRestoreList. +func (in *AerospikeRestoreList) DeepCopy() *AerospikeRestoreList { + if in == nil { + return nil + } + out := new(AerospikeRestoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AerospikeRestoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AerospikeRestoreSpec) DeepCopyInto(out *AerospikeRestoreSpec) { + *out = *in + out.BackupService = in.BackupService + in.Config.DeepCopyInto(&out.Config) + out.PollingPeriod = in.PollingPeriod +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AerospikeRestoreSpec. +func (in *AerospikeRestoreSpec) DeepCopy() *AerospikeRestoreSpec { + if in == nil { + return nil + } + out := new(AerospikeRestoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AerospikeRestoreStatus) DeepCopyInto(out *AerospikeRestoreStatus) { + *out = *in + if in.JobID != nil { + in, out := &in.JobID, &out.JobID + *out = new(int64) + **out = **in + } + in.RestoreResult.DeepCopyInto(&out.RestoreResult) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AerospikeRestoreStatus. 
+func (in *AerospikeRestoreStatus) DeepCopy() *AerospikeRestoreStatus { + if in == nil { + return nil + } + out := new(AerospikeRestoreStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AerospikeRoleSpec) DeepCopyInto(out *AerospikeRoleSpec) { *out = *in @@ -712,6 +1038,21 @@ func (in *AttachmentOptions) DeepCopy() *AttachmentOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupService) DeepCopyInto(out *BackupService) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupService. +func (in *BackupService) DeepCopy() *BackupService { + if in == nil { + return nil + } + out := new(BackupService) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CaCertsSource) DeepCopyInto(out *CaCertsSource) { *out = *in @@ -774,6 +1115,22 @@ func (in *MountOptions) DeepCopy() *MountOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnDemandBackupSpec) DeepCopyInto(out *OnDemandBackupSpec) { + *out = *in + out.Delay = in.Delay +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnDemandBackupSpec. +func (in *OnDemandBackupSpec) DeepCopy() *OnDemandBackupSpec { + if in == nil { + return nil + } + out := new(OnDemandBackupSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { *out = *in @@ -915,6 +1272,22 @@ func (in *SchedulingPolicy) DeepCopy() *SchedulingPolicy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretMount) DeepCopyInto(out *SecretMount) { + *out = *in + in.VolumeMount.DeepCopyInto(&out.VolumeMount) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretMount. +func (in *SecretMount) DeepCopy() *SecretMount { + if in == nil { + return nil + } + out := new(SecretMount) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SeedsFinderServices) DeepCopyInto(out *SeedsFinderServices) { *out = *in @@ -935,6 +1308,21 @@ func (in *SeedsFinderServices) DeepCopy() *SeedsFinderServices { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Service) DeepCopyInto(out *Service) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { + if in == nil { + return nil + } + out := new(Service) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ValidationPolicySpec) DeepCopyInto(out *ValidationPolicySpec) { *out = *in diff --git a/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml b/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml new file mode 100644 index 000000000..4bb228475 --- /dev/null +++ b/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml @@ -0,0 +1,147 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: aerospikebackups.asdb.aerospike.com +spec: + group: asdb.aerospike.com + names: + kind: AerospikeBackup + listKind: AerospikeBackupList + plural: aerospikebackups + singular: aerospikebackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.backupService.name + name: Backup Service Name + type: string + - jsonPath: .spec.backupService.namespace + name: Backup Service Namespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: AerospikeBackup is the Schema for the aerospikebackup API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AerospikeBackupSpec defines the desired state of AerospikeBackup + for a given AerospikeCluster + properties: + backupService: + description: BackupService is the backup service reference i.e. name + and namespace. It is used to communicate to the backup service to + trigger backups. This field is immutable + properties: + name: + description: Backup service name + type: string + namespace: + description: Backup service namespace + type: string + required: + - name + - namespace + type: object + config: + description: 'Config is the free form configuration for the backup + in YAML format. This config is used to trigger backups. It includes: + aerospike-cluster, backup-routines.' + type: object + x-kubernetes-preserve-unknown-fields: true + onDemandBackups: + description: OnDemandBackups is the configuration for on-demand backups. + items: + properties: + delay: + description: Delay is the interval before starting the on-demand + backup. + type: string + id: + description: ID is the unique identifier for the on-demand backup. + minLength: 1 + type: string + routineName: + description: RoutineName is the routine name used to trigger + on-demand backup. + type: string + required: + - id + - routineName + type: object + maxItems: 1 + type: array + required: + - backupService + - config + type: object + status: + description: AerospikeBackupStatus defines the observed state of AerospikeBackup + properties: + backupService: + description: BackupService is the backup service reference i.e. name + and namespace. + properties: + name: + description: Backup service name + type: string + namespace: + description: Backup service namespace + type: string + required: + - name + - namespace + type: object + config: + description: 'Config is the configuration for the backup in YAML format. 
+ This config is used to trigger backups. It includes: aerospike-cluster, + backup-routines.' + type: object + x-kubernetes-preserve-unknown-fields: true + onDemandBackups: + description: OnDemandBackups is the configuration for on-demand backups. + items: + properties: + delay: + description: Delay is the interval before starting the on-demand + backup. + type: string + id: + description: ID is the unique identifier for the on-demand backup. + minLength: 1 + type: string + routineName: + description: RoutineName is the routine name used to trigger + on-demand backup. + type: string + required: + - id + - routineName + type: object + type: array + required: + - backupService + - config + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml b/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml new file mode 100644 index 000000000..75dc3df7d --- /dev/null +++ b/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml @@ -0,0 +1,324 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: aerospikebackupservices.asdb.aerospike.com +spec: + group: asdb.aerospike.com + names: + kind: AerospikeBackupService + listKind: AerospikeBackupServiceList + plural: aerospikebackupservices + singular: aerospikebackupservice + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.image + name: Image + type: string + - jsonPath: .spec.service.type + name: Service Type + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: AerospikeBackupService is the Schema for the aerospikebackupservices + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of 
this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AerospikeBackupServiceSpec defines the desired state of AerospikeBackupService + properties: + config: + description: 'Config is the free form configuration for the backup + service in YAML format. This config is used to start the backup + service. The config is passed as a file to the backup service. It + includes: service, backup-policies, storage, secret-agent.' + type: object + x-kubernetes-preserve-unknown-fields: true + image: + description: Image is the image for the backup service. + type: string + resources: + description: Resources defines the requests and limits for the backup + service container. Resources.Limits should be more than Resources.Requests. + properties: + claims: + description: "Claims lists the names of resources, defined in + spec.resourceClaims, that are used by this container. \n This + is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be set + for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. It makes that resource + available inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + secrets: + description: SecretMounts is the list of secret to be mounted in the + backup service. + items: + description: SecretMount specifies the secret and its corresponding + volume mount options. + properties: + secretName: + description: SecretName is the name of the secret to be mounted. + type: string + volumeMount: + description: VolumeMount is the volume mount options for the + secret. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. 
+ type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + required: + - secretName + - volumeMount + type: object + type: array + service: + description: Service defines the Kubernetes service configuration + for the backup service. It is used to expose the backup service + deployment. By default, the service type is ClusterIP. + properties: + type: + description: Type is the Kubernetes service type. + type: string + required: + - type + type: object + required: + - config + - image + type: object + status: + description: AerospikeBackupServiceStatus defines the observed state of + AerospikeBackupService + properties: + config: + description: 'Config is the free form configuration for the backup + service in YAML format. This config is used to start the backup + service. The config is passed as a file to the backup service. It + includes: service, backup-policies, storage, secret-agent.' + type: object + x-kubernetes-preserve-unknown-fields: true + contextPath: + description: ContextPath is the backup service API context path + type: string + image: + description: Image is the image for the backup service. 
+ type: string + phase: + description: Phase denotes Backup service phase + enum: + - InProgress + - Completed + - Error + type: string + port: + description: Port is the listening port of backup service + format: int32 + type: integer + resources: + description: Resources defines the requests and limits for the backup + service container. Resources.Limits should be more than Resources.Requests. + properties: + claims: + description: "Claims lists the names of resources, defined in + spec.resourceClaims, that are used by this container. \n This + is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be set + for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. It makes that resource + available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. 
If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + secrets: + description: SecretMounts is the list of secret to be mounted in the + backup service. + items: + description: SecretMount specifies the secret and its corresponding + volume mount options. + properties: + secretName: + description: SecretName is the name of the secret to be mounted. + type: string + volumeMount: + description: VolumeMount is the volume mount options for the + secret. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. 
+ type: string + required: + - mountPath + - name + type: object + required: + - secretName + - volumeMount + type: object + type: array + service: + description: Service defines the Kubernetes service configuration + for the backup service. It is used to expose the backup service + deployment. By default, the service type is ClusterIP. + properties: + type: + description: Type is the Kubernetes service type. + type: string + required: + - type + type: object + required: + - phase + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml b/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml new file mode 100644 index 000000000..759a11933 --- /dev/null +++ b/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml @@ -0,0 +1,116 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: aerospikerestores.asdb.aerospike.com +spec: + group: asdb.aerospike.com + names: + kind: AerospikeRestore + listKind: AerospikeRestoreList + plural: aerospikerestores + singular: aerospikerestore + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.backupService.name + name: Backup Service Name + type: string + - jsonPath: .spec.backupService.namespace + name: Backup Service Namespace + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: AerospikeRestore is the Schema for the aerospikerestores API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AerospikeRestoreSpec defines the desired state of AerospikeRestore + properties: + backupService: + description: BackupService is the backup service reference i.e. name + and namespace. It is used to communicate to the backup service to + trigger restores. This field is immutable + properties: + name: + description: Backup service name + type: string + namespace: + description: Backup service namespace + type: string + required: + - name + - namespace + type: object + config: + description: 'Config is the free form configuration for the restore + in YAML format. This config is used to trigger restores. It includes: + destination, policy, source, secret-agent, time and routine.' + type: object + x-kubernetes-preserve-unknown-fields: true + pollingPeriod: + description: PollingPeriod is the polling period for restore operation + status. It is used to poll the restore service to fetch restore + operation status. Default is 60 seconds. + type: string + type: + description: Type is the type of restore. It can be of type Full, Incremental, + and Timestamp. Based on the restore type, the relevant restore config + should be given. + enum: + - Full + - Incremental + - Timestamp + type: string + required: + - backupService + - config + - type + type: object + status: + description: AerospikeRestoreStatus defines the observed state of AerospikeRestore + properties: + job-id: + description: JobID is the restore operation job id. 
+ format: int64 + type: integer + phase: + description: Phase denotes the current phase of Aerospike restore + operation. + enum: + - InProgress + - Completed + - Failed + type: string + restoreResult: + description: RestoreResult is the result of the restore operation. + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - phase + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 6389f6c3a..0d0bffda7 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -3,17 +3,26 @@ # It should be run by config/default resources: - bases/asdb.aerospike.com_aerospikeclusters.yaml +- bases/asdb.aerospike.com_aerospikebackups.yaml +- bases/asdb.aerospike.com_aerospikerestores.yaml +- bases/asdb.aerospike.com_aerospikebackupservices.yaml # +kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD #- patches/webhook_in_aerospikeclusters.yaml +#- patches/webhook_in_aerospikebackups.yaml +#- patches/webhook_in_aerospikerestores.yaml +#- patches/webhook_in_aerospikebackupservices.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. # patches here are for enabling the CA injection for each CRD #- patches/cainjection_in_aerospikeclusters.yaml +#- patches/cainjection_in_aerospikebackups.yaml +#- patches/cainjection_in_aerospikerestores.yaml +#- patches/cainjection_in_aerospikebackupservices.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
diff --git a/config/crd/patches/cainjection_in_aerospikebackups.yaml b/config/crd/patches/cainjection_in_aerospikebackups.yaml new file mode 100644 index 000000000..53bb0340e --- /dev/null +++ b/config/crd/patches/cainjection_in_aerospikebackups.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: aerospikebackups.asdb.aerospike.com diff --git a/config/crd/patches/cainjection_in_aerospikebackupservices.yaml b/config/crd/patches/cainjection_in_aerospikebackupservices.yaml new file mode 100644 index 000000000..ceb89cd7b --- /dev/null +++ b/config/crd/patches/cainjection_in_aerospikebackupservices.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: aerospikebackupservices.asdb.aerospike.com diff --git a/config/crd/patches/cainjection_in_aerospikerestores.yaml b/config/crd/patches/cainjection_in_aerospikerestores.yaml new file mode 100644 index 000000000..a56539aaf --- /dev/null +++ b/config/crd/patches/cainjection_in_aerospikerestores.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: aerospikerestores.asdb.aerospike.com diff --git a/config/crd/patches/webhook_in_aerospikebackups.yaml b/config/crd/patches/webhook_in_aerospikebackups.yaml new file mode 100644 index 000000000..664157781 --- /dev/null +++ b/config/crd/patches/webhook_in_aerospikebackups.yaml @@ -0,0 +1,16 @@ +# The 
following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: aerospikebackups.asdb.aerospike.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_aerospikebackupservices.yaml b/config/crd/patches/webhook_in_aerospikebackupservices.yaml new file mode 100644 index 000000000..c68b4942b --- /dev/null +++ b/config/crd/patches/webhook_in_aerospikebackupservices.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: aerospikebackupservices.asdb.aerospike.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_aerospikerestores.yaml b/config/crd/patches/webhook_in_aerospikerestores.yaml new file mode 100644 index 000000000..1f0d569eb --- /dev/null +++ b/config/crd/patches/webhook_in_aerospikerestores.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: aerospikerestores.asdb.aerospike.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/manifests/bases/aerospike-kubernetes-operator.clusterserviceversion.yaml b/config/manifests/bases/aerospike-kubernetes-operator.clusterserviceversion.yaml index e88283472..967ecbf88 100644 --- a/config/manifests/bases/aerospike-kubernetes-operator.clusterserviceversion.yaml +++ 
b/config/manifests/bases/aerospike-kubernetes-operator.clusterserviceversion.yaml @@ -20,6 +20,54 @@ spec: apiservicedefinitions: {} customresourcedefinitions: owned: + - description: AerospikeBackup is the Schema for the aerospikebackup API + displayName: Aerospike Backup + kind: AerospikeBackup + name: aerospikebackups.asdb.aerospike.com + specDescriptors: + - description: BackupService is the backup service reference i.e. name and namespace. + It is used to communicate to the backup service to trigger backups. This + field is immutable + displayName: Backup Service + path: backupService + - description: 'Config is the free form configuration for the backup in YAML + format. This config is used to trigger backups. It includes: aerospike-cluster, + backup-routines.' + displayName: Backup Config + path: config + - description: OnDemandBackups is the configuration for on-demand backups. + displayName: On Demand Backups + path: onDemandBackups + version: v1beta1 + - description: AerospikeBackupService is the Schema for the aerospikebackupservices + API + displayName: Aerospike Backup Service + kind: AerospikeBackupService + name: aerospikebackupservices.asdb.aerospike.com + specDescriptors: + - description: 'Config is the free form configuration for the backup service + in YAML format. This config is used to start the backup service. The config + is passed as a file to the backup service. It includes: service, backup-policies, + storage, secret-agent.' + displayName: Backup Service Config + path: config + - description: Image is the image for the backup service. + displayName: Backup Service Image + path: image + - description: Resources defines the requests and limits for the backup service + container. Resources.Limits should be more than Resources.Requests. + displayName: Resources + path: resources + - description: SecretMounts is the list of secret to be mounted in the backup + service. 
+ displayName: Backup Service Volume + path: secrets + - description: Service defines the Kubernetes service configuration for the + backup service. It is used to expose the backup service deployment. By default, + the service type is ClusterIP. + displayName: Backup Service + path: service + version: v1beta1 - description: AerospikeCluster is the schema for the AerospikeCluster API displayName: Aerospike Cluster kind: AerospikeCluster @@ -166,6 +214,31 @@ spec: displayName: Cluster Size path: size version: v1beta1 + - description: AerospikeRestore is the Schema for the aerospikerestores API + displayName: Aerospike Restore + kind: AerospikeRestore + name: aerospikerestores.asdb.aerospike.com + specDescriptors: + - description: BackupService is the backup service reference i.e. name and namespace. + It is used to communicate to the backup service to trigger restores. This + field is immutable + displayName: Backup Service + path: backupService + - description: 'Config is the free form configuration for the restore in YAML + format. This config is used to trigger restores. It includes: destination, + policy, source, secret-agent, time and routine.' + displayName: Restore Config + path: config + - description: PollingPeriod is the polling period for restore operation status. + It is used to poll the restore service to fetch restore operation status. + Default is 60 seconds. + displayName: Restore Service Polling Period + path: pollingPeriod + - description: Type is the type of restore. It can be of type Full, Incremental, + and Timestamp. Based on the restore type, relevant restore config is given. + displayName: Restore Type + path: type + version: v1beta1 description: | The Aerospike Kubernetes Operator automates the deployment and management of Aerospike enterprise clusters on Kubernetes. 
The operator allows you to deploy multi-node Aerospike clusters, recover automatically from node failures, scale up or down automatically as load changes, ensure nodes are evenly split across racks or zones, automatically update to new versions of Aerospike and manage configuration changes in your clusters. diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index a7d8fb265..f25ee024e 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -4,6 +4,17 @@ kind: ClusterRole metadata: name: manager-role rules: +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - patch + - update + - watch - apiGroups: - apps resources: @@ -16,6 +27,58 @@ rules: - patch - update - watch +- apiGroups: + - asdb.aerospike.com + resources: + - aerospikebackups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - asdb.aerospike.com + resources: + - aerospikebackups/finalizers + verbs: + - update +- apiGroups: + - asdb.aerospike.com + resources: + - aerospikebackups/status + verbs: + - get + - patch + - update +- apiGroups: + - asdb.aerospike.com + resources: + - aerospikebackupservices + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - asdb.aerospike.com + resources: + - aerospikebackupservices/finalizers + verbs: + - update +- apiGroups: + - asdb.aerospike.com + resources: + - aerospikebackupservices/status + verbs: + - get + - patch + - update - apiGroups: - asdb.aerospike.com resources: @@ -42,6 +105,32 @@ rules: - get - patch - update +- apiGroups: + - asdb.aerospike.com + resources: + - aerospikerestores + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - asdb.aerospike.com + resources: + - aerospikerestores/finalizers + verbs: + - update +- apiGroups: + - asdb.aerospike.com + resources: + - aerospikerestores/status + verbs: + - get + - patch + - update - apiGroups: - "" resources: diff --git 
a/config/samples/aerospikebackup.yaml b/config/samples/aerospikebackup.yaml new file mode 100644 index 000000000..2fa069139 --- /dev/null +++ b/config/samples/aerospikebackup.yaml @@ -0,0 +1,36 @@ +apiVersion: asdb.aerospike.com/v1beta1 +kind: AerospikeBackup +metadata: + name: aerospikebackup + namespace: aerospike +spec: + backupService: + name: aerospikebackupservice-sample + namespace: aerospike +# onDemandBackups: +# - id: first-ad-hoc-backup +# routineName: aerospike-aerospikebackup-test-routine + config: + aerospike-cluster: + aerospike-aerospikebackup-test-cluster: # Name format: -- + credentials: + password: admin123 + user: admin + seed-nodes: + - host-name: aerocluster.aerospike.svc.cluster.local + port: 3000 + backup-routines: + aerospike-aerospikebackup-test-routine: # Name format: -- + backup-policy: test-policy + interval-cron: "@daily" + incr-interval-cron: "@hourly" + namespaces: ["test"] + source-cluster: aerospike-aerospikebackup-test-cluster + storage: local + aerospike-aerospikebackup-test-routine1: + backup-policy: test-policy1 + interval-cron: "@daily" + incr-interval-cron: "@hourly" + namespaces: [ "test" ] + source-cluster: aerospike-aerospikebackup-test-cluster + storage: s3Storage diff --git a/config/samples/aerospikebackupservice.yaml b/config/samples/aerospikebackupservice.yaml new file mode 100644 index 000000000..e34f1d395 --- /dev/null +++ b/config/samples/aerospikebackupservice.yaml @@ -0,0 +1,40 @@ +apiVersion: asdb.aerospike.com/v1beta1 +kind: AerospikeBackupService +metadata: + name: aerospikebackupservice-sample + namespace: aerospike +spec: + image: aerospike/aerospike-backup-service:1.0.0 + config: + service: + http: + port: 8081 + backup-policies: + test-policy: + parallel: 3 + remove-files: KeepAll + type: 1 + test-policy1: + parallel: 3 + remove-files: KeepAll + type: 1 + storage: + local: + path: /localStorage + type: local + s3Storage: + type: aws-s3 + path: "s3://aerospike-kubernetes-operator-test" + s3-region: us-east-1 
+ s3-profile: default + + secrets: + - secretName: aws-secret + volumeMount: + name: aws-secret + mountPath: /root/.aws/credentials + subPath: credentials + + service: + type: LoadBalancer + diff --git a/config/samples/aerospikerestore.yaml b/config/samples/aerospikerestore.yaml new file mode 100644 index 000000000..c25daded4 --- /dev/null +++ b/config/samples/aerospikerestore.yaml @@ -0,0 +1,26 @@ +apiVersion: asdb.aerospike.com/v1beta1 +kind: AerospikeRestore +metadata: + name: aerospikerestore-sample + namespace: aerospike +spec: + backupService: + name: aerospikebackupservice-sample + namespace: aerospike + type: Full + config: + destination: + label: destinationCluster + credentials: + password: admin123 + user: admin + seed-nodes: + - host-name: aerocluster.test.svc.cluster.local + port: 3000 + policy: + parallel: 3 + no-generation: true + no-indexes: true + source: + "path": "/localStorage/aerospike-aerospikebackup-test-routine/backup/1722326391329/data/test" + "type": local diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index df57bb206..253954a06 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -24,6 +24,26 @@ webhooks: resources: - aerospikeclusters sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-asdb-aerospike-com-v1beta1-aerospikerestore + failurePolicy: Fail + name: maerospikerestore.kb.io + rules: + - apiGroups: + - asdb.aerospike.com + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - aerospikerestores + sideEffects: None --- apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration @@ -50,3 +70,63 @@ webhooks: resources: - aerospikeclusters sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-asdb-aerospike-com-v1beta1-aerospikebackup + failurePolicy: Fail + 
name: vaerospikebackup.kb.io + rules: + - apiGroups: + - asdb.aerospike.com + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - aerospikebackups + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-asdb-aerospike-com-v1beta1-aerospikebackupservice + failurePolicy: Fail + name: vaerospikebackupservice.kb.io + rules: + - apiGroups: + - asdb.aerospike.com + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - aerospikebackupservices + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-asdb-aerospike-com-v1beta1-aerospikerestore + failurePolicy: Fail + name: vaerospikerestore.kb.io + rules: + - apiGroups: + - asdb.aerospike.com + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - aerospikerestores + sideEffects: None diff --git a/controllers/aero_info_calls.go b/controllers/aero_info_calls.go index f7fbb4446..3aeaa1b27 100644 --- a/controllers/aero_info_calls.go +++ b/controllers/aero_info_calls.go @@ -23,6 +23,7 @@ import ( as "github.com/aerospike/aerospike-client-go/v7" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" "github.com/aerospike/aerospike-kubernetes-operator/pkg/jsonpatch" "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" "github.com/aerospike/aerospike-management-lib/asconfig" @@ -40,20 +41,20 @@ import ( // 2. 
given in ignorePodList by the user and are safe to ignore in stability checks func (r *SingleClusterReconciler) waitForMultipleNodesSafeStopReady( pods []*corev1.Pod, ignorablePodNames sets.Set[string], -) reconcileResult { +) common.ReconcileResult { if len(pods) == 0 { - return reconcileSuccess() + return common.ReconcileSuccess() } // Remove a node only if the cluster is stable if err := r.waitForAllSTSToBeReady(ignorablePodNames); err != nil { - return reconcileError(fmt.Errorf("failed to wait for cluster to be ready: %v", err)) + return common.ReconcileError(fmt.Errorf("failed to wait for cluster to be ready: %v", err)) } // This doesn't make actual connection, only objects having connection info are created allHostConns, err := r.newAllHostConnWithOption(ignorablePodNames) if err != nil { - return reconcileError(fmt.Errorf("failed to get hostConn for aerospike cluster nodes: %v", err)) + return common.ReconcileError(fmt.Errorf("failed to get hostConn for aerospike cluster nodes: %v", err)) } policy := r.getClientPolicy() @@ -64,21 +65,21 @@ func (r *SingleClusterReconciler) waitForMultipleNodesSafeStopReady( ) // Check for cluster stability - if res := r.waitForClusterStability(policy, allHostConns); !res.isSuccess { + if res := r.waitForClusterStability(policy, allHostConns); !res.IsSuccess { return res } // Setup roster after migration. 
if err = r.getAndSetRoster(policy, r.aeroCluster.Spec.RosterNodeBlockList, ignorablePodNames); err != nil { r.Log.Error(err, "Failed to set roster for cluster") - return reconcileRequeueAfter(1) + return common.ReconcileRequeueAfter(1) } if err := r.quiescePods(policy, allHostConns, pods, ignorablePodNames); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } - return reconcileSuccess() + return common.ReconcileSuccess() } func (r *SingleClusterReconciler) quiescePods( @@ -106,7 +107,7 @@ func (r *SingleClusterReconciler) quiescePods( // TODO: Check only for migration func (r *SingleClusterReconciler) waitForClusterStability( policy *as.ClientPolicy, allHostConns []*deployment.HostConn, -) reconcileResult { +) common.ReconcileResult { const ( maxRetry = 6 retryInterval = time.Second * 10 @@ -129,7 +130,7 @@ func (r *SingleClusterReconciler) waitForClusterStability( r.Log, policy, allHostConns, ) if err != nil { - return reconcileError(err) + return common.ReconcileError(err) } if isStable { @@ -139,10 +140,10 @@ func (r *SingleClusterReconciler) waitForClusterStability( } if !isStable { - return reconcileRequeueAfter(60) + return common.ReconcileRequeueAfter(60) } - return reconcileSuccess() + return common.ReconcileSuccess() } func (r *SingleClusterReconciler) tipClearHostname( @@ -258,10 +259,10 @@ func hostID(hostName string, hostPort int) string { func (r *SingleClusterReconciler) setMigrateFillDelay( policy *as.ClientPolicy, asConfig *asdbv1.AerospikeConfigSpec, setToZero bool, ignorablePodNames sets.Set[string], -) reconcileResult { +) common.ReconcileResult { migrateFillDelay, err := asdbv1.GetMigrateFillDelay(asConfig) if err != nil { - reconcileError(err) + common.ReconcileError(err) } var oldMigrateFillDelay int @@ -269,13 +270,13 @@ func (r *SingleClusterReconciler) setMigrateFillDelay( if len(r.aeroCluster.Status.RackConfig.Racks) > 0 { oldMigrateFillDelay, err = 
asdbv1.GetMigrateFillDelay(&r.aeroCluster.Status.RackConfig.Racks[0].AerospikeConfig) if err != nil { - reconcileError(err) + common.ReconcileError(err) } } if migrateFillDelay == 0 && oldMigrateFillDelay == 0 { r.Log.Info("migrate-fill-delay config not present or 0, skipping it") - return reconcileSuccess() + return common.ReconcileSuccess() } // Set migrate-fill-delay to 0 if setToZero flag is set @@ -286,7 +287,7 @@ func (r *SingleClusterReconciler) setMigrateFillDelay( // This doesn't make actual connection, only objects having connection info are created allHostConns, err := r.newAllHostConnWithOption(ignorablePodNames) if err != nil { - return reconcileError( + return common.ReconcileError( fmt.Errorf( "failed to get hostConn for aerospike cluster nodes: %v", err, ), @@ -294,19 +295,19 @@ func (r *SingleClusterReconciler) setMigrateFillDelay( } if err := deployment.SetMigrateFillDelay(r.Log, policy, allHostConns, migrateFillDelay); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } - return reconcileSuccess() + return common.ReconcileSuccess() } func (r *SingleClusterReconciler) setDynamicConfig( dynamicConfDiffPerPod map[string]asconfig.DynamicConfigMap, pods []*corev1.Pod, ignorablePodNames sets.Set[string], -) reconcileResult { +) common.ReconcileResult { // This doesn't make actual connection, only objects having connection info are created allHostConns, err := r.newAllHostConnWithOption(ignorablePodNames) if err != nil { - return reconcileError( + return common.ReconcileError( fmt.Errorf( "failed to get hostConn for aerospike cluster nodes: %v", err, ), @@ -323,7 +324,7 @@ func (r *SingleClusterReconciler) setDynamicConfig( selectedHostConns, err := r.newPodsHostConnWithOption(podList, ignorablePodNames) if err != nil { - return reconcileError( + return common.ReconcileError( fmt.Errorf( "failed to get hostConn for aerospike cluster nodes: %v", err, ), @@ -333,7 +334,7 @@ func (r *SingleClusterReconciler) setDynamicConfig( if 
len(selectedHostConns) == 0 { r.Log.Info("No pods selected for dynamic config change") - return reconcileSuccess() + return common.ReconcileSuccess() } for _, host := range selectedHostConns { @@ -343,7 +344,7 @@ func (r *SingleClusterReconciler) setDynamicConfig( if err != nil { // Assuming error returned here will not be a server error. - return reconcileError(err) + return common.ReconcileError(err) } r.Log.Info("Generated dynamic config commands", "commands", fmt.Sprintf("%v", asConfCmds), "pod", podName) @@ -369,16 +370,17 @@ func (r *SingleClusterReconciler) setDynamicConfig( if patchErr := r.patchPodStatus( context.TODO(), patches, ); patchErr != nil { - return reconcileError(fmt.Errorf("error updating status: %v, dynamic config command error: %v", patchErr, err)) + return common.ReconcileError( + fmt.Errorf("error updating status: %v, dynamic config command error: %v", patchErr, err)) } - return reconcileError(err) + return common.ReconcileError(err) } if err := r.updateAerospikeConfInPod(podName); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } } - return reconcileSuccess() + return common.ReconcileSuccess() } diff --git a/controllers/aerospikecluster_controller.go b/controllers/aerospikecluster_controller.go index 110d3032e..8f961c355 100644 --- a/controllers/aerospikecluster_controller.go +++ b/controllers/aerospikecluster_controller.go @@ -2,7 +2,6 @@ package controllers import ( "context" - "runtime" "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" @@ -20,23 +19,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" ) const patchFieldOwner = "aerospike-kuberneter-operator" const finalizerName = "asdb.aerospike.com/storage-finalizer" -// Number of Reconcile threads to run Reconcile operations -var maxConcurrentReconciles = runtime.NumCPU() * 2 - -var ( - updateOption = 
&client.UpdateOptions{ - FieldManager: "aerospike-operator", - } - createOption = &client.CreateOptions{ - FieldManager: "aerospike-operator", - } -) - // AerospikeClusterReconciler reconciles AerospikeClusters type AerospikeClusterReconciler struct { client.Client @@ -65,7 +53,7 @@ func (r *AerospikeClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { ). WithOptions( controller.Options{ - MaxConcurrentReconciles: maxConcurrentReconciles, + MaxConcurrentReconciles: common.MaxConcurrentReconciles, }, ). WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{})). @@ -108,7 +96,7 @@ func (r *AerospikeClusterReconciler) Reconcile( return reconcile.Result{}, nil } // Error reading the object - requeue the request. - return reconcile.Result{Requeue: true}, err + return reconcile.Result{}, err } cr := SingleClusterReconciler{ diff --git a/controllers/backup-service/aerospikebackupservice_controller.go b/controllers/backup-service/aerospikebackupservice_controller.go new file mode 100644 index 000000000..966d1517f --- /dev/null +++ b/controllers/backup-service/aerospikebackupservice_controller.go @@ -0,0 +1,87 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package backupservice + +import ( + "context" + + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" + k8sruntime "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" +) + +// AerospikeBackupServiceReconciler reconciles a AerospikeBackupService object +type AerospikeBackupServiceReconciler struct { + Scheme *k8sruntime.Scheme + client.Client + Log logr.Logger +} + +//nolint:lll // for readability +//+kubebuilder:rbac:groups=asdb.aerospike.com,resources=aerospikebackupservices,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=asdb.aerospike.com,resources=aerospikebackupservices/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=asdb.aerospike.com,resources=aerospikebackupservices/finalizers,verbs=update +//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;watch;create;update;patch;delete + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +func (r *AerospikeBackupServiceReconciler) Reconcile(_ context.Context, request ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("aerospikebackupservice", request.NamespacedName) + + log.Info("Reconciling AerospikeBackupService") + + // Fetch the AerospikeBackupService instance + aeroBackupService := &asdbv1beta1.AerospikeBackupService{} + if err := r.Client.Get(context.TODO(), request.NamespacedName, aeroBackupService); err != nil { + if errors.IsNotFound(err) { + // Request object not found, could have been deleted after Reconcile request. 
+ return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. + return reconcile.Result{}, err + } + + cr := SingleBackupServiceReconciler{ + aeroBackupService: aeroBackupService, + Client: r.Client, + Log: log, + Scheme: r.Scheme, + } + + return cr.Reconcile() +} + +// SetupWithManager sets up the controller with the Manager. +func (r *AerospikeBackupServiceReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&asdbv1beta1.AerospikeBackupService{}). + WithOptions( + controller.Options{ + MaxConcurrentReconciles: common.MaxConcurrentReconciles, + }, + ). + WithEventFilter(predicate.GenerationChangedPredicate{}). + Complete(r) +} diff --git a/controllers/backup-service/reconciler.go b/controllers/backup-service/reconciler.go new file mode 100644 index 000000000..714c664b6 --- /dev/null +++ b/controllers/backup-service/reconciler.go @@ -0,0 +1,682 @@ +package backupservice + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + app "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + k8sRuntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" + + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" + "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" +) + +type serviceConfig struct { + portInfo map[string]int32 + contextPath string +} + +var defaultServiceConfig = serviceConfig{ + portInfo: map[string]int32{ + 
common.HTTPKey: 8080, + }, + contextPath: "/", +} + +// SingleBackupServiceReconciler reconciles a single AerospikeBackupService +type SingleBackupServiceReconciler struct { + client.Client + Recorder record.EventRecorder + aeroBackupService *asdbv1beta1.AerospikeBackupService + KubeConfig *rest.Config + Scheme *k8sRuntime.Scheme + Log logr.Logger +} + +func (r *SingleBackupServiceReconciler) Reconcile() (result ctrl.Result, recErr error) { + // Set the status phase to Error if the recErr is not nil + // recErr is only set when reconcile failure should result in Error phase of the Backup service operation + defer func() { + if recErr != nil { + r.Log.Error(recErr, "Reconcile failed") + + if err := r.setStatusPhase(asdbv1beta1.AerospikeBackupServiceError); err != nil { + recErr = err + } + } + }() + + if !r.aeroBackupService.ObjectMeta.DeletionTimestamp.IsZero() { + // Stop reconciliation as the Aerospike Backup service is being deleted + return reconcile.Result{}, nil + } + + // Set the status to AerospikeClusterInProgress before starting any operations + if err := r.setStatusPhase(asdbv1beta1.AerospikeBackupServiceInProgress); err != nil { + return reconcile.Result{}, err + } + + if err := r.reconcileConfigMap(); err != nil { + recErr = err + return ctrl.Result{}, err + } + + if err := r.reconcileDeployment(); err != nil { + recErr = err + return ctrl.Result{}, err + } + + if err := r.reconcileService(); err != nil { + recErr = err + return ctrl.Result{}, err + } + + if err := r.updateStatus(); err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +func (r *SingleBackupServiceReconciler) reconcileConfigMap() error { + cm := &corev1.ConfigMap{} + + if err := r.Client.Get(context.TODO(), + types.NamespacedName{ + Namespace: r.aeroBackupService.Namespace, + Name: r.aeroBackupService.Name, + }, cm, + ); err != nil { + if !errors.IsNotFound(err) { + return err + } + + r.Log.Info("Create Backup Service ConfigMap", + "name", 
getBackupServiceName(r.aeroBackupService)) + + cm = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: r.aeroBackupService.Name, + Namespace: r.aeroBackupService.Namespace, + Labels: utils.LabelsForAerospikeBackupService(r.aeroBackupService.Name), + }, + Data: r.getConfigMapData(), + } + + // Set AerospikeBackupService instance as the owner and controller + err = controllerutil.SetControllerReference( + r.aeroBackupService, cm, r.Scheme, + ) + if err != nil { + return err + } + + if err = r.Client.Create( + context.TODO(), cm, common.CreateOption, + ); err != nil { + return fmt.Errorf( + "failed to create ConfigMap: %v", + err, + ) + } + + r.Log.Info("Created new Backup Service ConfigMap", + "name", getBackupServiceName(r.aeroBackupService)) + + return nil + } + + r.Log.Info( + "ConfigMap already exist. Updating existing ConfigMap if required", + "name", getBackupServiceName(r.aeroBackupService), + ) + + desiredDataMap := make(map[string]interface{}) + currentDataMap := make(map[string]interface{}) + + if err := yaml.Unmarshal(r.aeroBackupService.Spec.Config.Raw, &desiredDataMap); err != nil { + return err + } + + data := cm.Data[common.BackupServiceConfigYAML] + + if err := yaml.Unmarshal([]byte(data), ¤tDataMap); err != nil { + return err + } + + currentDataMap[common.ServiceKey] = desiredDataMap[common.ServiceKey] + currentDataMap[common.BackupPoliciesKey] = desiredDataMap[common.BackupPoliciesKey] + currentDataMap[common.StorageKey] = desiredDataMap[common.StorageKey] + currentDataMap[common.SecretAgentsKey] = desiredDataMap[common.SecretAgentsKey] + + updatedConfig, err := yaml.Marshal(currentDataMap) + if err != nil { + return err + } + + cm.Data[common.BackupServiceConfigYAML] = string(updatedConfig) + + if err = r.Client.Update( + context.TODO(), cm, common.UpdateOption, + ); err != nil { + return fmt.Errorf( + "failed to update Backup Service ConfigMap: %v", + err, + ) + } + + r.Log.Info("Updated Backup Service ConfigMap", + "name", 
getBackupServiceName(r.aeroBackupService)) + + return nil +} + +func (r *SingleBackupServiceReconciler) getConfigMapData() map[string]string { + data := make(map[string]string) + data[common.BackupServiceConfigYAML] = string(r.aeroBackupService.Spec.Config.Raw) + + return data +} + +func (r *SingleBackupServiceReconciler) reconcileDeployment() error { + var deploy app.Deployment + + if err := r.Client.Get(context.TODO(), + types.NamespacedName{ + Namespace: r.aeroBackupService.Namespace, + Name: r.aeroBackupService.Name, + }, &deploy, + ); err != nil { + if !errors.IsNotFound(err) { + return err + } + + r.Log.Info("Create Backup Service deployment", + "name", getBackupServiceName(r.aeroBackupService)) + + deployment, err := r.getDeploymentObject() + if err != nil { + return err + } + + // Set AerospikeBackupService instance as the owner and controller + err = controllerutil.SetControllerReference( + r.aeroBackupService, deployment, r.Scheme, + ) + if err != nil { + return err + } + + err = r.Client.Create(context.TODO(), deployment, common.CreateOption) + if err != nil { + return fmt.Errorf("failed to deploy Backup service deployment: %v", err) + } + + return r.waitForDeploymentToBeReady() + } + + r.Log.Info( + "Backup Service deployment already exist. 
Updating existing deployment if required", + "name", getBackupServiceName(r.aeroBackupService), + ) + + oldResourceVersion := deploy.ResourceVersion + + desiredDeployObj, err := r.getDeploymentObject() + if err != nil { + return err + } + + deploy.Spec = desiredDeployObj.Spec + + if err = r.Client.Update(context.TODO(), &deploy, common.UpdateOption); err != nil { + return fmt.Errorf("failed to update Backup service deployment: %v", err) + } + + if oldResourceVersion != deploy.ResourceVersion { + r.Log.Info("Deployment spec is updated, will result in rolling restart") + return r.waitForDeploymentToBeReady() + } + + // If status is empty then no need for config Hash comparison + if len(r.aeroBackupService.Status.Config.Raw) == 0 { + return r.waitForDeploymentToBeReady() + } + + desiredHash, err := utils.GetHash(string(r.aeroBackupService.Spec.Config.Raw)) + if err != nil { + return err + } + + currentHash, err := utils.GetHash(string(r.aeroBackupService.Status.Config.Raw)) + if err != nil { + return err + } + + // If there is a change in config hash, then restart the deployment pod + if desiredHash != currentHash { + r.Log.Info("BackupService config is updated, will result in rolling restart") + + podList, err := r.getBackupServicePodList() + if err != nil { + return err + } + + for idx := range podList.Items { + pod := &podList.Items[idx] + + err = r.Client.Delete(context.TODO(), pod) + if err != nil { + return err + } + } + } + + return r.waitForDeploymentToBeReady() +} + +func getBackupServiceName(aeroBackupService *asdbv1beta1.AerospikeBackupService) types.NamespacedName { + return types.NamespacedName{Name: aeroBackupService.Name, Namespace: aeroBackupService.Namespace} +} + +func (r *SingleBackupServiceReconciler) getBackupServicePodList() (*corev1.PodList, error) { + var podList corev1.PodList + + labelSelector := labels.SelectorFromSet(utils.LabelsForAerospikeBackupService(r.aeroBackupService.Name)) + listOps := &client.ListOptions{ + Namespace: 
r.aeroBackupService.Namespace, LabelSelector: labelSelector, + } + + if err := r.Client.List(context.TODO(), &podList, listOps); err != nil { + return nil, err + } + + return &podList, nil +} + +func (r *SingleBackupServiceReconciler) getDeploymentObject() (*app.Deployment, error) { + svcLabels := utils.LabelsForAerospikeBackupService(r.aeroBackupService.Name) + volumeMounts, volumes := r.getVolumeAndMounts() + + resources := corev1.ResourceRequirements{} + + if r.aeroBackupService.Spec.Resources != nil { + resources = *r.aeroBackupService.Spec.Resources + } + + svcConf, err := r.getBackupServiceConfig() + if err != nil { + return nil, err + } + + containerPorts := make([]corev1.ContainerPort, 0, len(svcConf.portInfo)) + + for name, port := range svcConf.portInfo { + containerPorts = append(containerPorts, corev1.ContainerPort{ + Name: name, + ContainerPort: port, + }) + } + + deploy := &app.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: r.aeroBackupService.Name, + Namespace: r.aeroBackupService.Namespace, + Labels: svcLabels, + }, + Spec: app.DeploymentSpec{ + Replicas: func(replica int32) *int32 { return &replica }(1), + Selector: &metav1.LabelSelector{ + MatchLabels: svcLabels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: svcLabels, + }, + Spec: corev1.PodSpec{ + // TODO: Finalise on this. Who should create this SA? + ServiceAccountName: common.AerospikeBackupService, + Containers: []corev1.Container{ + { + Name: common.AerospikeBackupService, + Image: r.aeroBackupService.Spec.Image, + ImagePullPolicy: corev1.PullIfNotPresent, + VolumeMounts: volumeMounts, + Resources: resources, + Ports: containerPorts, + }, + }, + // Init-container is used to copy configMap data to work-dir(emptyDir). 
+ // There is a limitation of read-only file-system for mounted configMap volumes + // Remove this init-container when backup-service start supporting hot reload + InitContainers: []corev1.Container{ + { + Name: "init-backup-service", + Image: "busybox", + Command: []string{ + "sh", + "-c", + "cp /etc/aerospike-backup-service/aerospike-backup-service.yml /work-dir/aerospike-backup-service.yml", + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "backup-service-config-configmap", + MountPath: "/etc/aerospike-backup-service/", + }, + { + Name: "backup-service-config", + MountPath: "/work-dir", + }, + }, + }, + }, + Volumes: volumes, + }, + }, + }, + } + + return deploy, nil +} + +func (r *SingleBackupServiceReconciler) getVolumeAndMounts() ([]corev1.VolumeMount, []corev1.Volume) { + volumes := make([]corev1.Volume, 0, len(r.aeroBackupService.Spec.SecretMounts)) + volumeMounts := make([]corev1.VolumeMount, 0, len(r.aeroBackupService.Spec.SecretMounts)) + + for idx := range r.aeroBackupService.Spec.SecretMounts { + secretMount := r.aeroBackupService.Spec.SecretMounts[idx] + volumeMounts = append(volumeMounts, secretMount.VolumeMount) + + volumes = append(volumes, corev1.Volume{ + Name: secretMount.VolumeMount.Name, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretMount.SecretName, + }, + }, + }) + } + + // Backup service configMap mountPath + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: "backup-service-config", + MountPath: fmt.Sprintf("/etc/aerospike-backup-service/%s", common.BackupServiceConfigYAML), + SubPath: common.BackupServiceConfigYAML, + }) + + // Backup service configMap + volumes = append(volumes, corev1.Volume{ + Name: "backup-service-config-configmap", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: r.aeroBackupService.Name, + }, + }, + }, + }) + + // EmptyDir for init-container to copy 
configMap data to work-dir + // Remove this volume when backup-service starts supporting hot reload + volumes = append(volumes, corev1.Volume{ + Name: "backup-service-config", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }) + + return volumeMounts, volumes +} + +func (r *SingleBackupServiceReconciler) reconcileService() error { + var service corev1.Service + + if err := r.Client.Get(context.TODO(), + types.NamespacedName{ + Namespace: r.aeroBackupService.Namespace, + Name: r.aeroBackupService.Name, + }, &service, + ); err != nil { + if !errors.IsNotFound(err) { + return err + } + + r.Log.Info("Create Backup Service", + "name", getBackupServiceName(r.aeroBackupService)) + + svc, err := r.getServiceObject() + if err != nil { + return err + } + + // Set AerospikeBackupService instance as the owner and controller + err = controllerutil.SetControllerReference( + r.aeroBackupService, svc, r.Scheme, + ) + if err != nil { + return err + } + + err = r.Client.Create(context.TODO(), svc, common.CreateOption) + if err != nil { + return fmt.Errorf("failed to create Backup Service: %v", err) + } + + return nil + } + + r.Log.Info( + "Backup Service already exist. 
Updating existing service if required", + "name", getBackupServiceName(r.aeroBackupService), + ) + + svc, err := r.getServiceObject() + if err != nil { + return err + } + + service.Spec = svc.Spec + + if err = r.Client.Update(context.TODO(), &service, common.UpdateOption); err != nil { + return fmt.Errorf("failed to update Backup service: %v", err) + } + + r.Log.Info("Updated Backup Service", "name", getBackupServiceName(r.aeroBackupService)) + + return nil +} + +func (r *SingleBackupServiceReconciler) getServiceObject() (*corev1.Service, error) { + svcConfig, err := r.getBackupServiceConfig() + if err != nil { + return nil, err + } + + servicePort := make([]corev1.ServicePort, 0, len(svcConfig.portInfo)) + + for name, port := range svcConfig.portInfo { + servicePort = append(servicePort, corev1.ServicePort{ + Name: name, + Port: port, + }) + } + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: r.aeroBackupService.Name, + Namespace: r.aeroBackupService.Namespace, + Labels: utils.LabelsForAerospikeBackupService(r.aeroBackupService.Name), + }, + Spec: corev1.ServiceSpec{ + Selector: utils.LabelsForAerospikeBackupService(r.aeroBackupService.Name), + Ports: servicePort, + }, + } + + if r.aeroBackupService.Spec.Service != nil { + svc.Spec.Type = r.aeroBackupService.Spec.Service.Type + } + + return svc, nil +} + +func (r *SingleBackupServiceReconciler) getBackupServiceConfig() (*serviceConfig, error) { + config := make(map[string]interface{}) + + if err := yaml.Unmarshal(r.aeroBackupService.Spec.Config.Raw, &config); err != nil { + return nil, err + } + + if _, ok := config[common.ServiceKey]; !ok { + r.Log.Info("Service config not found") + return &defaultServiceConfig, nil + } + + svc, ok := config[common.ServiceKey].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("service config is not in correct format") + } + + if _, ok = svc[common.HTTPKey]; !ok { + r.Log.Info("HTTP config not found") + return &defaultServiceConfig, nil + } + + 
httpConf, ok := svc[common.HTTPKey].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("http config is not in correct format") + } + + var svcConfig serviceConfig + + port, ok := httpConf["port"] + if !ok { + svcConfig.portInfo = defaultServiceConfig.portInfo + } else { + svcConfig.portInfo = map[string]int32{common.HTTPKey: int32(port.(float64))} + } + + ctxPath, ok := httpConf["context-path"] + if !ok { + svcConfig.contextPath = defaultServiceConfig.contextPath + } else { + svcConfig.contextPath = ctxPath.(string) + } + + return &svcConfig, nil +} + +func (r *SingleBackupServiceReconciler) waitForDeploymentToBeReady() error { + const ( + podStatusTimeout = 2 * time.Minute + podStatusRetryInterval = 5 * time.Second + ) + + r.Log.Info( + "Waiting for deployment to be ready", "WaitTimePerPod", podStatusTimeout, + ) + + if err := wait.PollUntilContextTimeout(context.TODO(), + podStatusRetryInterval, podStatusTimeout, true, func(ctx context.Context) (done bool, err error) { + podList, err := r.getBackupServicePodList() + if err != nil { + return false, err + } + + if len(podList.Items) == 0 { + r.Log.Info("No pod found for deployment") + return false, nil + } + + for idx := range podList.Items { + pod := &podList.Items[idx] + + if err := utils.CheckPodFailed(pod); err != nil { + return false, fmt.Errorf("pod %s failed: %v", pod.Name, err) + } + + if !utils.IsPodRunningAndReady(pod) { + r.Log.Info("Pod is not ready", "pod", pod.Name) + return false, nil + } + } + + var deploy app.Deployment + if err := r.Client.Get( + ctx, + types.NamespacedName{Name: r.aeroBackupService.Name, Namespace: r.aeroBackupService.Namespace}, + &deploy, + ); err != nil { + return false, err + } + + if deploy.Status.Replicas != *deploy.Spec.Replicas { + return false, nil + } + + return true, nil + }, + ); err != nil { + return err + } + + r.Log.Info("Deployment is ready") + + return nil +} + +func (r *SingleBackupServiceReconciler) setStatusPhase(phase 
asdbv1beta1.AerospikeBackupServicePhase) error { + if r.aeroBackupService.Status.Phase != phase { + r.aeroBackupService.Status.Phase = phase + + if err := r.Client.Status().Update(context.Background(), r.aeroBackupService); err != nil { + r.Log.Error(err, fmt.Sprintf("Failed to set backup service status to %s", phase)) + return err + } + } + + return nil +} + +func (r *SingleBackupServiceReconciler) updateStatus() error { + svcConfig, err := r.getBackupServiceConfig() + if err != nil { + return err + } + + status := r.CopySpecToStatus() + status.ContextPath = svcConfig.contextPath + status.Port = svcConfig.portInfo[common.HTTPKey] + status.Phase = asdbv1beta1.AerospikeBackupServiceCompleted + + r.aeroBackupService.Status = *status + + return r.Client.Status().Update(context.Background(), r.aeroBackupService) +} + +func (r *SingleBackupServiceReconciler) CopySpecToStatus() *asdbv1beta1.AerospikeBackupServiceStatus { + status := asdbv1beta1.AerospikeBackupServiceStatus{} + status.Image = r.aeroBackupService.Spec.Image + status.Config = r.aeroBackupService.Spec.Config + status.Resources = r.aeroBackupService.Spec.Resources + status.SecretMounts = r.aeroBackupService.Spec.SecretMounts + status.Service = r.aeroBackupService.Spec.Service + + return &status +} diff --git a/controllers/backup/aerospikebackup_controller.go b/controllers/backup/aerospikebackup_controller.go new file mode 100644 index 000000000..e3cb49c9c --- /dev/null +++ b/controllers/backup/aerospikebackup_controller.go @@ -0,0 +1,87 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" + k8sruntime "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" +) + +const finalizerName = "asdb.aerospike.com/backup-finalizer" + +// AerospikeBackupReconciler reconciles a AerospikeBackup object +type AerospikeBackupReconciler struct { + client.Client + Scheme *k8sruntime.Scheme + Log logr.Logger +} + +//+kubebuilder:rbac:groups=asdb.aerospike.com,resources=aerospikebackups,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=asdb.aerospike.com,resources=aerospikebackups/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=asdb.aerospike.com,resources=aerospikebackups/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +func (r *AerospikeBackupReconciler) Reconcile(_ context.Context, request ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("aerospikebackup", request.NamespacedName) + + log.Info("Reconciling AerospikeBackup") + + // Fetch the AerospikeBackup instance + aeroBackup := &asdbv1beta1.AerospikeBackup{} + if err := r.Client.Get(context.TODO(), request.NamespacedName, aeroBackup); err != nil { + if errors.IsNotFound(err) { + // Request object not found, could have been deleted after Reconcile request. + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. 
+ return reconcile.Result{}, err + } + + cr := SingleBackupReconciler{ + aeroBackup: aeroBackup, + Client: r.Client, + Log: log, + Scheme: r.Scheme, + } + + return cr.Reconcile() +} + +// SetupWithManager sets up the controller with the Manager. +func (r *AerospikeBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&asdbv1beta1.AerospikeBackup{}). + WithOptions( + controller.Options{ + MaxConcurrentReconciles: common.MaxConcurrentReconciles, + }, + ). + WithEventFilter(predicate.GenerationChangedPredicate{}). + Complete(r) +} diff --git a/controllers/backup/reconciler.go b/controllers/backup/reconciler.go new file mode 100644 index 000000000..ac4c3bb30 --- /dev/null +++ b/controllers/backup/reconciler.go @@ -0,0 +1,581 @@ +package backup + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + k8sRuntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/yaml" + + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" + backup_service "github.com/aerospike/aerospike-kubernetes-operator/pkg/backup-service" + "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" +) + +// SingleBackupReconciler reconciles a single AerospikeBackup object +type SingleBackupReconciler struct { + client.Client + Recorder record.EventRecorder + aeroBackup *asdbv1beta1.AerospikeBackup + KubeConfig *rest.Config + Scheme *k8sRuntime.Scheme + Log logr.Logger +} + +func (r *SingleBackupReconciler) Reconcile() (result ctrl.Result, recErr error) { + // Check DeletionTimestamp to see if the backup is being deleted + 
if !r.aeroBackup.ObjectMeta.DeletionTimestamp.IsZero() { + if err := r.removeFinalizer(finalizerName); err != nil { + r.Log.Error(err, "Failed to remove finalizer") + return reconcile.Result{}, err + } + + // Stop reconciliation as the backup is being deleted + return reconcile.Result{}, nil + } + + // The backup is not being deleted, add finalizer if not added already + if err := r.addFinalizer(finalizerName); err != nil { + r.Log.Error(err, "Failed to add finalizer") + return reconcile.Result{}, err + } + + if err := r.reconcileConfigMap(); err != nil { + r.Log.Error(err, "Failed to reconcile config map") + return reconcile.Result{}, err + } + + if err := r.reconcileBackup(); err != nil { + r.Log.Error(err, "Failed to reconcile backup") + return reconcile.Result{}, err + } + + if err := r.updateStatus(); err != nil { + r.Log.Error(err, "Failed to update status") + return reconcile.Result{}, err + } + + return ctrl.Result{}, nil +} + +func (r *SingleBackupReconciler) addFinalizer(finalizerName string) error { + // The object is not being deleted, so if it does not have our finalizer, + // then lets add the finalizer and update the object. This is equivalent + // registering our finalizer. 
+ if !utils.ContainsString( + r.aeroBackup.ObjectMeta.Finalizers, finalizerName, + ) { + r.aeroBackup.ObjectMeta.Finalizers = append( + r.aeroBackup.ObjectMeta.Finalizers, finalizerName, + ) + + if err := r.Client.Update(context.TODO(), r.aeroBackup); err != nil { + return err + } + } + + return nil +} + +func (r *SingleBackupReconciler) removeFinalizer(finalizerName string) error { + if utils.ContainsString(r.aeroBackup.ObjectMeta.Finalizers, finalizerName) { + if err := r.removeBackupInfoFromConfigMap(); err != nil { + return err + } + + if err := r.unregisterBackup(); err != nil { + return err + } + + r.Log.Info("Removing finalizer") + // Remove finalizer from the list + r.aeroBackup.ObjectMeta.Finalizers = utils.RemoveString( + r.aeroBackup.ObjectMeta.Finalizers, finalizerName, + ) + + if err := r.Client.Update(context.TODO(), r.aeroBackup); err != nil { + return err + } + } + + return nil +} + +func (r *SingleBackupReconciler) reconcileConfigMap() error { + cm, err := r.getBackupSvcConfigMap() + if err != nil { + return fmt.Errorf("failed to fetch Backup Service configMap, name: %s, error %v", + r.aeroBackup.Spec.BackupService.String(), err.Error()) + } + + r.Log.Info("Updating existing ConfigMap for Backup", + "name", r.aeroBackup.Spec.BackupService.String(), + ) + + specBackupConfig, err := r.getBackupConfigInMap() + if err != nil { + return err + } + + backupSvcConfig := make(map[string]interface{}) + + data := cm.Data[common.BackupServiceConfigYAML] + + err = yaml.Unmarshal([]byte(data), &backupSvcConfig) + if err != nil { + return err + } + + clusterMap, err := common.GetConfigSection(backupSvcConfig, common.AerospikeClustersKey) + if err != nil { + return err + } + + cluster := specBackupConfig[common.AerospikeClusterKey].(map[string]interface{}) + + var clusterName string + + // There will always be only one cluster in the backup config. + // Cluster name in the CR will always be unique. + // Uniqueness is maintained by having a prefix with format --. 
+ // It is enforced by the webhook. + for name, clusterInfo := range cluster { + clusterName = name + clusterMap[name] = clusterInfo + } + + backupSvcConfig[common.AerospikeClustersKey] = clusterMap + + routineMap, err := common.GetConfigSection(backupSvcConfig, common.BackupRoutinesKey) + if err != nil { + return err + } + + routines := specBackupConfig[common.BackupRoutinesKey].(map[string]interface{}) + + // Remove the routines which are not in spec + routinesToBeDeleted := r.routinesToDelete(routines, routineMap, clusterName) + + for idx := range routinesToBeDeleted { + delete(routineMap, routinesToBeDeleted[idx]) + } + + // Add/update spec routines + for name, routine := range routines { + routineMap[name] = routine + } + + backupSvcConfig[common.BackupRoutinesKey] = routineMap + + updatedConfig, err := yaml.Marshal(backupSvcConfig) + if err != nil { + return err + } + + cm.Data[common.BackupServiceConfigYAML] = string(updatedConfig) + + if err := r.Client.Update( + context.TODO(), cm, common.UpdateOption, + ); err != nil { + return fmt.Errorf( + "failed to update Backup Service ConfigMap, name: %s, error %v", + r.aeroBackup.Spec.BackupService.String(), err, + ) + } + + r.Log.Info("Updated Backup Service ConfigMap for Backup", + "name", r.aeroBackup.Spec.BackupService.String(), + ) + + return nil +} + +func (r *SingleBackupReconciler) removeBackupInfoFromConfigMap() error { + cm, err := r.getBackupSvcConfigMap() + if err != nil { + if errors.IsNotFound(err) { + r.Log.Info("Backup Service ConfigMap not found, skip updating", + "name", r.aeroBackup.Spec.BackupService.String()) + return nil + } + + return err + } + + r.Log.Info("Removing Backup info from existing ConfigMap", + "name", r.aeroBackup.Spec.BackupService.String(), + ) + + specBackupConfig, err := r.getBackupConfigInMap() + if err != nil { + return err + } + + backupSvcConfig := make(map[string]interface{}) + + data := cm.Data[common.BackupServiceConfigYAML] + + err = yaml.Unmarshal([]byte(data), 
&backupSvcConfig) + if err != nil { + return err + } + + var clusterName string + + if clusterIface, ok := backupSvcConfig[common.AerospikeClustersKey]; ok { + if clusterMap, ok := clusterIface.(map[string]interface{}); ok { + currentCluster := specBackupConfig[common.AerospikeClusterKey].(map[string]interface{}) + for name := range currentCluster { + clusterName = name + delete(clusterMap, name) + } + + backupSvcConfig[common.AerospikeClustersKey] = clusterMap + } + } + + if routineIface, ok := backupSvcConfig[common.BackupRoutinesKey]; ok { + if routineMap, ok := routineIface.(map[string]interface{}); ok { + routinesToBeDelete := r.routinesToDelete(nil, routineMap, clusterName) + + for idx := range routinesToBeDelete { + delete(routineMap, routinesToBeDelete[idx]) + } + + backupSvcConfig[common.BackupRoutinesKey] = routineMap + } + } + + updatedConfig, err := yaml.Marshal(backupSvcConfig) + if err != nil { + return err + } + + cm.Data[common.BackupServiceConfigYAML] = string(updatedConfig) + + if err := r.Client.Update( + context.TODO(), cm, common.UpdateOption, + ); err != nil { + return fmt.Errorf( + "failed to update Backup Service ConfigMap, name: %s, error %v", + r.aeroBackup.Spec.BackupService.String(), err, + ) + } + + r.Log.Info("Removed Backup info from existing ConfigMap", + "name", r.aeroBackup.Spec.BackupService.String(), + ) + + return nil +} + +func (r *SingleBackupReconciler) scheduleOnDemandBackup() error { + r.Log.Info("Reconciling on-demand backup") + + // There can be only one on-demand backup allowed right now. 
+ if len(r.aeroBackup.Status.OnDemandBackups) > 0 && + r.aeroBackup.Spec.OnDemandBackups[0].ID == r.aeroBackup.Status.OnDemandBackups[0].ID { + r.Log.Info("On-demand backup already scheduled for the same ID", + "ID", r.aeroBackup.Status.OnDemandBackups[0].ID) + return nil + } + + r.Log.Info("Schedule on-demand backup", + "ID", r.aeroBackup.Spec.OnDemandBackups[0].ID, "routine", r.aeroBackup.Spec.OnDemandBackups[0].RoutineName) + + backupServiceClient, err := backup_service.GetBackupServiceClient(r.Client, &r.aeroBackup.Spec.BackupService) + if err != nil { + return err + } + + if err = backupServiceClient.ScheduleBackup(r.aeroBackup.Spec.OnDemandBackups[0].RoutineName, + r.aeroBackup.Spec.OnDemandBackups[0].Delay); err != nil { + r.Log.Error(err, "Failed to schedule on-demand backup") + return err + } + + r.Log.Info("Scheduled on-demand backup", "ID", r.aeroBackup.Spec.OnDemandBackups[0].ID, + "routine", r.aeroBackup.Spec.OnDemandBackups[0].RoutineName) + + r.Log.Info("Reconciled scheduled backup") + + return nil +} + +func (r *SingleBackupReconciler) reconcileBackup() error { + if err := r.reconcileScheduledBackup(); err != nil { + return err + } + + return r.reconcileOnDemandBackup() +} + +func (r *SingleBackupReconciler) reconcileScheduledBackup() error { + r.Log.Info("Reconciling scheduled backup") + + serviceClient, err := backup_service.GetBackupServiceClient(r.Client, &r.aeroBackup.Spec.BackupService) + if err != nil { + return err + } + + backupSvcConfig, err := serviceClient.GetBackupServiceConfig() + if err != nil { + return err + } + + r.Log.Info("Fetched backup service config", "config", backupSvcConfig) + + specBackupConfig, err := r.getBackupConfigInMap() + if err != nil { + return err + } + + if specBackupConfig[common.AerospikeClusterKey] != nil { + cluster := specBackupConfig[common.AerospikeClusterKey].(map[string]interface{}) + + currentClusters, gErr := common.GetConfigSection(backupSvcConfig, common.AerospikeClustersKey) + if gErr != nil { + 
return gErr + } + + // TODO: Remove these API calls when hot reload is implemented + for name, clusterConfig := range cluster { + if _, ok := currentClusters[name]; ok { + // Only update if there is any change + if !reflect.DeepEqual(currentClusters[name], clusterConfig) { + r.Log.Info("Cluster config has been changed, updating it", "cluster", name) + + err = serviceClient.PutCluster(name, clusterConfig) + if err != nil { + return err + } + } + } else { + r.Log.Info("Adding new cluster", "cluster", name) + + err = serviceClient.AddCluster(name, clusterConfig) + if err != nil { + return err + } + + r.Log.Info("Added new cluster", "cluster", name) + } + } + } + + if specBackupConfig[common.BackupRoutinesKey] != nil { + routines := specBackupConfig[common.BackupRoutinesKey].(map[string]interface{}) + + currentRoutines, gErr := common.GetConfigSection(backupSvcConfig, common.BackupRoutinesKey) + if gErr != nil { + return gErr + } + + // TODO: Remove these API calls when hot reload is implemented + for name, routine := range routines { + if _, ok := currentRoutines[name]; ok { + // Only update if there is any change + if !reflect.DeepEqual(currentRoutines[name], routine) { + r.Log.Info("Routine config has been changed, updating it", "routine", name) + + err = serviceClient.PutBackupRoutine(name, routine) + if err != nil { + return err + } + } + } else { + r.Log.Info("Adding new backup routine", "routine", name) + + err = serviceClient.AddBackupRoutine(name, routine) + if err != nil { + return err + } + + r.Log.Info("Added new backup routine", "routine", name) + } + } + } + + // If there are routines that are removed, unregister them + err = r.deregisterBackupRoutines(serviceClient, backupSvcConfig, specBackupConfig) + if err != nil { + return err + } + + // Apply the updated configuration for the changes to take effect + err = serviceClient.ApplyConfig() + if err != nil { + return err + } + + r.Log.Info("Reconciled scheduled backup") + + return nil +} + +func (r 
*SingleBackupReconciler) reconcileOnDemandBackup() error { + // Schedule on-demand backup if given + if len(r.aeroBackup.Spec.OnDemandBackups) > 0 { + if err := r.scheduleOnDemandBackup(); err != nil { + r.Log.Error(err, "Failed to schedule backup") + return err + } + } + + return nil +} + +func (r *SingleBackupReconciler) unregisterBackup() error { + serviceClient, err := backup_service.GetBackupServiceClient(r.Client, &r.aeroBackup.Spec.BackupService) + if err != nil { + return err + } + + backupSvcConfig, err := serviceClient.GetBackupServiceConfig() + if err != nil { + return err + } + + specBackupConfig, err := r.getBackupConfigInMap() + if err != nil { + return err + } + + err = r.deregisterBackupRoutines(serviceClient, backupSvcConfig, specBackupConfig) + if err != nil { + return err + } + + if specBackupConfig[common.AerospikeClusterKey] != nil { + cluster := specBackupConfig[common.AerospikeClusterKey].(map[string]interface{}) + + currentClusters, gErr := common.GetConfigSection(backupSvcConfig, common.AerospikeClustersKey) + if gErr != nil { + return gErr + } + + for name := range cluster { + if _, ok := currentClusters[name]; ok { + err = serviceClient.DeleteCluster(name) + if err != nil { + return err + } + } + } + } + + // Apply the updated configuration for the changes to take effect + err = serviceClient.ApplyConfig() + if err != nil { + return err + } + + return nil +} + +func (r *SingleBackupReconciler) deregisterBackupRoutines( + serviceClient *backup_service.Client, + backupSvcConfig, + specBackupConfig map[string]interface{}, +) error { + allRoutines, err := common.GetConfigSection(backupSvcConfig, common.BackupRoutinesKey) + if err != nil { + return err + } + + cluster := specBackupConfig[common.AerospikeClusterKey].(map[string]interface{}) + + var clusterName string + + // There will always be only one cluster in the backup config + for name := range cluster { + clusterName = name + } + + specRoutines := make(map[string]interface{}) + + // 
Ignore routines from the spec if the backup is being deleted + if r.aeroBackup.DeletionTimestamp.IsZero() { + specRoutines = specBackupConfig[common.BackupRoutinesKey].(map[string]interface{}) + } + + routinesToBeDelete := r.routinesToDelete(specRoutines, allRoutines, clusterName) + + for idx := range routinesToBeDelete { + r.Log.Info("Unregistering backup routine", "routine", routinesToBeDelete[idx]) + + if err := serviceClient.DeleteBackupRoutine(routinesToBeDelete[idx]); err != nil { + return err + } + + r.Log.Info("Unregistered backup routine", "routine", routinesToBeDelete[idx]) + } + + return nil +} + +func (r *SingleBackupReconciler) updateStatus() error { + r.aeroBackup.Status.BackupService = r.aeroBackup.Spec.BackupService + r.aeroBackup.Status.Config = r.aeroBackup.Spec.Config + r.aeroBackup.Status.OnDemandBackups = r.aeroBackup.Spec.OnDemandBackups + + return r.Client.Status().Update(context.Background(), r.aeroBackup) +} + +func (r *SingleBackupReconciler) getBackupSvcConfigMap() (*corev1.ConfigMap, error) { + cm := &corev1.ConfigMap{} + + if err := r.Client.Get(context.TODO(), + types.NamespacedName{ + Namespace: r.aeroBackup.Spec.BackupService.Namespace, + Name: r.aeroBackup.Spec.BackupService.Name, + }, cm, + ); err != nil { + return nil, err + } + + return cm, nil +} + +func (r *SingleBackupReconciler) routinesToDelete( + specRoutines, allRoutines map[string]interface{}, clusterName string, +) []string { + var routinesTobeDeleted []string + + for name := range allRoutines { + if _, ok := specRoutines[name]; ok { + continue + } + + // Delete any dangling backup-routines related to this cluster + // Strict prefix check might fail for cases where the prefix is same. 
+ if strings.HasPrefix(name, r.aeroBackup.NamePrefix()) && + allRoutines[name].(map[string]interface{})[common.SourceClusterKey].(string) == clusterName { + routinesTobeDeleted = append(routinesTobeDeleted, name) + } + } + + return routinesTobeDeleted +} + +func (r *SingleBackupReconciler) getBackupConfigInMap() (map[string]interface{}, error) { + backupConfig := make(map[string]interface{}) + + if err := yaml.Unmarshal(r.aeroBackup.Spec.Config.Raw, &backupConfig); err != nil { + return backupConfig, err + } + + return backupConfig, nil +} diff --git a/controllers/common/backup_config_util.go b/controllers/common/backup_config_util.go new file mode 100644 index 000000000..6248f08d9 --- /dev/null +++ b/controllers/common/backup_config_util.go @@ -0,0 +1,18 @@ +package common + +import "fmt" + +// GetConfigSection returns the section of the config with the given name. +func GetConfigSection(config map[string]interface{}, section string) (map[string]interface{}, error) { + sectionIface, ok := config[section] + if !ok { + return map[string]interface{}{}, nil + } + + sectionMap, ok := sectionIface.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("%s is not a map", section) + } + + return sectionMap, nil +} diff --git a/controllers/common/constant.go b/controllers/common/constant.go new file mode 100644 index 000000000..e863bf3b9 --- /dev/null +++ b/controllers/common/constant.go @@ -0,0 +1,26 @@ +package common + +// Backup Config relate keys +const ( + ServiceKey = "service" + AerospikeClustersKey = "aerospike-clusters" + AerospikeClusterKey = "aerospike-cluster" + StorageKey = "storage" + BackupRoutinesKey = "backup-routines" + BackupPoliciesKey = "backup-policies" + SecretAgentsKey = "secret-agent" + SourceClusterKey = "source-cluster" + BackupServiceConfigYAML = "aerospike-backup-service.yml" +) + +// Restore config fields +const ( + RoutineKey = "routine" + TimeKey = "time" + SourceKey = "source" +) + +const ( + HTTPKey = "http" + AerospikeBackupService 
= "aerospike-backup-service" +) diff --git a/controllers/common/result.go b/controllers/common/result.go new file mode 100644 index 000000000..1d45e4b30 --- /dev/null +++ b/controllers/common/result.go @@ -0,0 +1,35 @@ +package common + +import ( + "time" + + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type ReconcileResult struct { + Err error + Result reconcile.Result + IsSuccess bool +} + +func (r ReconcileResult) GetResult() (reconcile.Result, error) { + return r.Result, r.Err +} + +func ReconcileSuccess() ReconcileResult { + return ReconcileResult{IsSuccess: true, Result: reconcile.Result{}} +} + +func ReconcileRequeueAfter(secs int) ReconcileResult { + t := time.Duration(secs) * time.Second + + return ReconcileResult{ + Result: reconcile.Result{ + Requeue: true, RequeueAfter: t, + }, + } +} + +func ReconcileError(e error) ReconcileResult { + return ReconcileResult{Result: reconcile.Result{}, Err: e} +} diff --git a/controllers/common/variable.go b/controllers/common/variable.go new file mode 100644 index 000000000..6d3ecf230 --- /dev/null +++ b/controllers/common/variable.go @@ -0,0 +1,19 @@ +package common + +import ( + "runtime" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// MaxConcurrentReconciles is the Number of Reconcile threads to run Reconcile operations +var MaxConcurrentReconciles = runtime.NumCPU() * 2 + +var ( + UpdateOption = &client.UpdateOptions{ + FieldManager: "aerospike-operator", + } + CreateOption = &client.CreateOptions{ + FieldManager: "aerospike-operator", + } +) diff --git a/controllers/pod.go b/controllers/pod.go index 05451e987..25204a597 100644 --- a/controllers/pod.go +++ b/controllers/pod.go @@ -20,6 +20,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" "github.com/aerospike/aerospike-kubernetes-operator/pkg/jsonpatch" 
"github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" lib "github.com/aerospike/aerospike-management-lib" @@ -239,14 +240,14 @@ func (r *SingleClusterReconciler) getRollingRestartTypePod( func (r *SingleClusterReconciler) rollingRestartPods( rackState *RackState, podsToRestart []*corev1.Pod, ignorablePodNames sets.Set[string], restartTypeMap map[string]RestartType, -) reconcileResult { +) common.ReconcileResult { failedPods, activePods := getFailedAndActivePods(podsToRestart) // If already dead node (failed pod) then no need to check node safety, migration if len(failedPods) != 0 { r.Log.Info("Restart failed pods", "pods", getPodNames(failedPods)) - if res := r.restartPods(rackState, failedPods, restartTypeMap); !res.isSuccess { + if res := r.restartPods(rackState, failedPods, restartTypeMap); !res.IsSuccess { return res } } @@ -254,16 +255,16 @@ func (r *SingleClusterReconciler) rollingRestartPods( if len(activePods) != 0 { r.Log.Info("Restart active pods", "pods", getPodNames(activePods)) - if res := r.waitForMultipleNodesSafeStopReady(activePods, ignorablePodNames); !res.isSuccess { + if res := r.waitForMultipleNodesSafeStopReady(activePods, ignorablePodNames); !res.IsSuccess { return res } - if res := r.restartPods(rackState, activePods, restartTypeMap); !res.isSuccess { + if res := r.restartPods(rackState, activePods, restartTypeMap); !res.IsSuccess { return res } } - return reconcileSuccess() + return common.ReconcileSuccess() } func (r *SingleClusterReconciler) restartASDOrUpdateAerospikeConf(podName string, @@ -362,11 +363,11 @@ func (r *SingleClusterReconciler) restartASDOrUpdateAerospikeConf(podName string func (r *SingleClusterReconciler) restartPods( rackState *RackState, podsToRestart []*corev1.Pod, restartTypeMap map[string]RestartType, -) reconcileResult { +) common.ReconcileResult { // For each block volume removed from a namespace, pod status dirtyVolumes is appended with that volume name. 
// For each file removed from a namespace, it is deleted right away. if err := r.handleNSOrDeviceRemoval(rackState, podsToRestart); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } restartedPods := make([]*corev1.Pod, 0, len(podsToRestart)) @@ -383,7 +384,7 @@ func (r *SingleClusterReconciler) restartPods( // We assume that the pod server image supports pod warm restart. if err := r.restartASDOrUpdateAerospikeConf(pod.Name, quickRestart); err != nil { r.Log.Error(err, "Failed to warm restart pod", "podName", pod.Name) - return reconcileError(err) + return common.ReconcileError(err) } restartedASDPodNames = append(restartedASDPodNames, pod.Name) @@ -393,13 +394,13 @@ func (r *SingleClusterReconciler) restartPods( "podName", pod.Name) if err := r.deleteLocalPVCs(rackState, pod); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } } if err := r.Client.Delete(context.TODO(), pod); err != nil { r.Log.Error(err, "Failed to delete pod") - return reconcileError(err) + return common.ReconcileError(err) } restartedPods = append(restartedPods, pod) @@ -410,16 +411,16 @@ func (r *SingleClusterReconciler) restartPods( } if err := r.updateOperationStatus(restartedASDPodNames, restartedPodNames); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } if len(restartedPods) > 0 { - if result := r.ensurePodsRunningAndReady(restartedPods); !result.isSuccess { + if result := r.ensurePodsRunningAndReady(restartedPods); !result.IsSuccess { return result } } - return reconcileSuccess() + return common.ReconcileSuccess() } func (r *SingleClusterReconciler) updateAerospikeConfInPod(podName string) error { @@ -434,7 +435,7 @@ func (r *SingleClusterReconciler) updateAerospikeConfInPod(podName string) error return nil } -func (r *SingleClusterReconciler) ensurePodsRunningAndReady(podsToCheck []*corev1.Pod) reconcileResult { +func (r *SingleClusterReconciler) ensurePodsRunningAndReady(podsToCheck []*corev1.Pod) 
common.ReconcileResult { podNames := getPodNames(podsToCheck) readyPods := map[string]bool{} @@ -461,11 +462,11 @@ func (r *SingleClusterReconciler) ensurePodsRunningAndReady(podsToCheck []*corev podName := types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace} if err := r.Client.Get(context.TODO(), podName, updatedPod); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } if err := utils.CheckPodFailed(updatedPod); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } if !utils.IsPodRunningAndReady(updatedPod) { @@ -487,7 +488,7 @@ func (r *SingleClusterReconciler) ensurePodsRunningAndReady(podsToCheck []*corev podNames, ) - return reconcileSuccess() + return common.ReconcileSuccess() } time.Sleep(retryInterval) @@ -498,7 +499,7 @@ func (r *SingleClusterReconciler) ensurePodsRunningAndReady(podsToCheck []*corev podNames, ) - return reconcileRequeueAfter(10) + return common.ReconcileRequeueAfter(10) } func getFailedAndActivePods(pods []*corev1.Pod) (failedPods, activePods []*corev1.Pod) { @@ -534,14 +535,14 @@ func getNonIgnorablePods(pods []*corev1.Pod, ignorablePodNames sets.Set[string], func (r *SingleClusterReconciler) safelyDeletePodsAndEnsureImageUpdated( rackState *RackState, podsToUpdate []*corev1.Pod, ignorablePodNames sets.Set[string], -) reconcileResult { +) common.ReconcileResult { failedPods, activePods := getFailedAndActivePods(podsToUpdate) // If already dead node (failed pod) then no need to check node safety, migration if len(failedPods) != 0 { r.Log.Info("Restart failed pods with updated container image", "pods", getPodNames(failedPods)) - if res := r.deletePodAndEnsureImageUpdated(rackState, failedPods); !res.isSuccess { + if res := r.deletePodAndEnsureImageUpdated(rackState, failedPods); !res.IsSuccess { return res } } @@ -549,25 +550,25 @@ func (r *SingleClusterReconciler) safelyDeletePodsAndEnsureImageUpdated( if len(activePods) != 0 { r.Log.Info("Restart active pods with updated 
container image", "pods", getPodNames(activePods)) - if res := r.waitForMultipleNodesSafeStopReady(activePods, ignorablePodNames); !res.isSuccess { + if res := r.waitForMultipleNodesSafeStopReady(activePods, ignorablePodNames); !res.IsSuccess { return res } - if res := r.deletePodAndEnsureImageUpdated(rackState, activePods); !res.isSuccess { + if res := r.deletePodAndEnsureImageUpdated(rackState, activePods); !res.IsSuccess { return res } } - return reconcileSuccess() + return common.ReconcileSuccess() } func (r *SingleClusterReconciler) deletePodAndEnsureImageUpdated( rackState *RackState, podsToUpdate []*corev1.Pod, -) reconcileResult { +) common.ReconcileResult { // For each block volume removed from a namespace, pod status dirtyVolumes is appended with that volume name. // For each file removed from a namespace, it is deleted right away. if err := r.handleNSOrDeviceRemoval(rackState, podsToUpdate); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } blockedK8sNodes := sets.NewString(r.aeroCluster.Spec.K8sNodeBlockList...) 
@@ -579,12 +580,12 @@ func (r *SingleClusterReconciler) deletePodAndEnsureImageUpdated( "podName", pod.Name) if err := r.deleteLocalPVCs(rackState, pod); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } } if err := r.Client.Delete(context.TODO(), pod); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } r.Log.V(1).Info("Pod deleted", "podName", pod.Name) @@ -597,7 +598,7 @@ func (r *SingleClusterReconciler) deletePodAndEnsureImageUpdated( return r.ensurePodsImageUpdated(podsToUpdate) } -func (r *SingleClusterReconciler) ensurePodsImageUpdated(podsToCheck []*corev1.Pod) reconcileResult { +func (r *SingleClusterReconciler) ensurePodsImageUpdated(podsToCheck []*corev1.Pod) common.ReconcileResult { podNames := getPodNames(podsToCheck) updatedPods := sets.Set[string]{} @@ -624,11 +625,11 @@ func (r *SingleClusterReconciler) ensurePodsImageUpdated(podsToCheck []*corev1.P podName := types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace} if err := r.Client.Get(context.TODO(), podName, updatedPod); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } if err := utils.CheckPodFailed(updatedPod); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } if !r.isPodUpgraded(updatedPod) { @@ -642,7 +643,7 @@ func (r *SingleClusterReconciler) ensurePodsImageUpdated(podsToCheck []*corev1.P if len(updatedPods) == len(podsToCheck) { r.Log.Info("Pods are upgraded/downgraded", "pod", podNames) - return reconcileSuccess() + return common.ReconcileSuccess() } time.Sleep(retryInterval) @@ -653,7 +654,7 @@ func (r *SingleClusterReconciler) ensurePodsImageUpdated(podsToCheck []*corev1.P podNames, ) - return reconcileRequeueAfter(10) + return common.ReconcileRequeueAfter(10) } // cleanupPods checks pods and status before scale-up to detect and fix any diff --git a/controllers/poddistruptionbudget.go b/controllers/poddistruptionbudget.go index f0d70a662..9899a2d68 100644 --- 
a/controllers/poddistruptionbudget.go +++ b/controllers/poddistruptionbudget.go @@ -11,6 +11,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" ) @@ -109,7 +110,7 @@ func (r *SingleClusterReconciler) createOrUpdatePDB() error { } if err = r.Client.Create( - context.TODO(), pdb, createOption, + context.TODO(), pdb, common.CreateOption, ); err != nil { return fmt.Errorf( "failed to create PodDisruptionBudget: %v", @@ -146,7 +147,7 @@ func (r *SingleClusterReconciler) createOrUpdatePDB() error { pdb.Spec.MaxUnavailable = r.aeroCluster.Spec.MaxUnavailable if err := r.Client.Update( - context.TODO(), pdb, updateOption, + context.TODO(), pdb, common.UpdateOption, ); err != nil { return fmt.Errorf( "failed to update PodDisruptionBudget: %v", diff --git a/controllers/rack.go b/controllers/rack.go index d15ae8917..1fec331f3 100644 --- a/controllers/rack.go +++ b/controllers/rack.go @@ -16,6 +16,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" lib "github.com/aerospike/aerospike-management-lib" "github.com/aerospike/aerospike-management-lib/asconfig" @@ -26,19 +27,19 @@ type scaledDownRack struct { rackState *RackState } -func (r *SingleClusterReconciler) reconcileRacks() reconcileResult { +func (r *SingleClusterReconciler) reconcileRacks() common.ReconcileResult { r.Log.Info("Reconciling rack for AerospikeCluster") var ( scaledDownRackList []scaledDownRack - res reconcileResult + res common.ReconcileResult ) rackStateList := getConfiguredRackStateList(r.aeroCluster) racksToDelete, err := r.getRacksToDelete(rackStateList) if err != nil { - 
return reconcileError(err) + return common.ReconcileError(err) } rackIDsToDelete := make([]int, 0, len(racksToDelete)) @@ -48,7 +49,7 @@ func (r *SingleClusterReconciler) reconcileRacks() reconcileResult { ignorablePodNames, err := r.getIgnorablePods(racksToDelete, rackStateList) if err != nil { - return reconcileError(err) + return common.ReconcileError(err) } r.Log.Info( @@ -66,7 +67,7 @@ func (r *SingleClusterReconciler) reconcileRacks() reconcileResult { if err = r.Client.Get(context.TODO(), stsName, found); err != nil { if !errors.IsNotFound(err) { - return reconcileError(err) + return common.ReconcileError(err) } continue @@ -75,7 +76,7 @@ func (r *SingleClusterReconciler) reconcileRacks() reconcileResult { // 1. Fetch the pods for the rack and if there are failed pods then reconcile rack podList, err = r.getOrderedRackPodList(state.Rack.ID) if err != nil { - return reconcileError( + return common.ReconcileError( fmt.Errorf( "failed to list pods: %v", err, ), @@ -92,7 +93,7 @@ func (r *SingleClusterReconciler) reconcileRacks() reconcileResult { if res = r.reconcileRack( found, state, ignorablePodNames, failedPods, - ); !res.isSuccess { + ); !res.IsSuccess { return res } @@ -107,7 +108,7 @@ func (r *SingleClusterReconciler) reconcileRacks() reconcileResult { // e.g. configuring unschedulable resources in CR podSpec and reverting them to old value. 
podList, err = r.getOrderedRackPodList(state.Rack.ID) if err != nil { - return reconcileError( + return common.ReconcileError( fmt.Errorf( "failed to list pods: %v", err, ), @@ -125,7 +126,7 @@ func (r *SingleClusterReconciler) reconcileRacks() reconcileResult { if _, res = r.rollingRestartRack( found, state, ignorablePodNames, nil, failedPods, - ); !res.isSuccess { + ); !res.IsSuccess { return res } @@ -133,7 +134,7 @@ func (r *SingleClusterReconciler) reconcileRacks() reconcileResult { "Restarted the failed pods in the Rack", "rackID", state.Rack.ID, "failedPods", getPodNames(failedPods), ) // Requeue after 1 second to fetch latest CR object with updated pod status - return reconcileRequeueAfter(1) + return common.ReconcileRequeueAfter(1) } } @@ -144,14 +145,14 @@ func (r *SingleClusterReconciler) reconcileRacks() reconcileResult { if err = r.Client.Get(context.TODO(), stsName, found); err != nil { if !errors.IsNotFound(err) { - return reconcileError(err) + return common.ReconcileError(err) } // Create statefulset with 0 size rack and then scaleUp later in Reconcile zeroSizedRack := &RackState{Rack: state.Rack, Size: 0} found, res = r.createEmptyRack(zeroSizedRack) - if !res.isSuccess { + if !res.IsSuccess { return res } } @@ -163,7 +164,7 @@ func (r *SingleClusterReconciler) reconcileRacks() reconcileResult { // Reconcile other statefulset if res = r.reconcileRack( found, state, ignorablePodNames, nil, - ); !res.isSuccess { + ); !res.IsSuccess { return res } } @@ -174,18 +175,18 @@ func (r *SingleClusterReconciler) reconcileRacks() reconcileResult { state := scaledDownRackList[idx].rackState sts := scaledDownRackList[idx].rackSTS - if res = r.reconcileRack(sts, state, ignorablePodNames, nil); !res.isSuccess { + if res = r.reconcileRack(sts, state, ignorablePodNames, nil); !res.IsSuccess { return res } } if len(r.aeroCluster.Status.RackConfig.Racks) != 0 { // Remove removed racks - if res = r.deleteRacks(racksToDelete, ignorablePodNames); !res.isSuccess { - if 
res.err != nil { + if res = r.deleteRacks(racksToDelete, ignorablePodNames); !res.IsSuccess { + if res.Err != nil { r.Log.Error( err, "Failed to remove statefulset for removed racks", - "err", res.err, + "err", res.Err, ) } @@ -204,14 +205,14 @@ func (r *SingleClusterReconciler) reconcileRacks() reconcileResult { if err := r.Client.Get(context.TODO(), stsName, found); err != nil { if !errors.IsNotFound(err) { - return reconcileError(err) + return common.ReconcileError(err) } // Create statefulset with 0 size rack and then scaleUp later in Reconcile zeroSizedRack := &RackState{Rack: state.Rack, Size: 0} found, res = r.createEmptyRack(zeroSizedRack) - if !res.isSuccess { + if !res.IsSuccess { return res } } @@ -230,15 +231,15 @@ func (r *SingleClusterReconciler) reconcileRacks() reconcileResult { "STS", stsName, ) - return reconcileRequeueAfter(1) + return common.ReconcileRequeueAfter(1) } } - return reconcileSuccess() + return common.ReconcileSuccess() } func (r *SingleClusterReconciler) createEmptyRack(rackState *RackState) ( - *appsv1.StatefulSet, reconcileResult, + *appsv1.StatefulSet, common.ReconcileResult, ) { r.Log.Info("Create new Aerospike cluster if needed") @@ -249,7 +250,7 @@ func (r *SingleClusterReconciler) createEmptyRack(rackState *RackState) ( cmName := utils.GetNamespacedNameForSTSOrConfigMap(r.aeroCluster, rackState.Rack.ID) if err := r.buildSTSConfigMap(cmName, rackState.Rack); err != nil { r.Log.Error(err, "Failed to create configMap from AerospikeConfig") - return nil, reconcileError(err) + return nil, common.ReconcileError(err) } stsName := utils.GetNamespacedNameForSTSOrConfigMap(r.aeroCluster, rackState.Rack.ID) @@ -264,7 +265,7 @@ func (r *SingleClusterReconciler) createEmptyRack(rackState *RackState) ( // Delete statefulset and everything related so that it can be properly created and updated in next run _ = r.deleteSTS(found) - return nil, reconcileError(err) + return nil, common.ReconcileError(err) } r.Recorder.Eventf( @@ -272,7 +273,7 
@@ func (r *SingleClusterReconciler) createEmptyRack(rackState *RackState) ( "[rack-%d] Created Rack", rackState.Rack.ID, ) - return found, reconcileSuccess() + return found, common.ReconcileSuccess() } func (r *SingleClusterReconciler) getRacksToDelete(rackStateList []RackState) ( @@ -305,7 +306,7 @@ func (r *SingleClusterReconciler) getRacksToDelete(rackStateList []RackState) ( func (r *SingleClusterReconciler) deleteRacks( racksToDelete []asdbv1.Rack, ignorablePodNames sets.Set[string], -) reconcileResult { +) common.ReconcileResult { for idx := range racksToDelete { rack := &racksToDelete[idx] found := &appsv1.StatefulSet{} @@ -318,14 +319,14 @@ func (r *SingleClusterReconciler) deleteRacks( continue } - return reconcileError(err) + return common.ReconcileError(err) } // TODO: Add option for quick delete of rack. DefaultRackID should always be removed gracefully rackState := &RackState{Size: 0, Rack: rack} found, res := r.scaleDownRack(found, rackState, ignorablePodNames) - if !res.isSuccess { + if !res.IsSuccess { return res } @@ -337,19 +338,19 @@ func (r *SingleClusterReconciler) deleteRacks( found.Namespace, found.Name, ) - return reconcileError(err) + return common.ReconcileError(err) } // Delete configMap cmName := utils.GetNamespacedNameForSTSOrConfigMap(r.aeroCluster, rack.ID) if err = r.deleteRackConfigMap(cmName); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } // Rack cleanup is done. Take time and cleanup dangling nodes and related resources that may not have been // cleaned up previously due to errors. 
if err = r.cleanupDanglingPodsRack(found, rackState); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } r.Recorder.Eventf( @@ -358,14 +359,14 @@ func (r *SingleClusterReconciler) deleteRacks( ) } - return reconcileSuccess() + return common.ReconcileSuccess() } func (r *SingleClusterReconciler) upgradeOrRollingRestartRack( found *appsv1.StatefulSet, rackState *RackState, ignorablePodNames sets.Set[string], failedPods []*corev1.Pod, -) (*appsv1.StatefulSet, reconcileResult) { - var res reconcileResult +) (*appsv1.StatefulSet, common.ReconcileResult) { + var res common.ReconcileResult // Always update configMap. We won't be able to find if a rack's config, and it's pod config is in sync or not // Checking rack.spec, rack.status will not work. // We may change config, let some pods restart with new config and then change config back to original value. @@ -382,7 +383,7 @@ func (r *SingleClusterReconciler) upgradeOrRollingRestartRack( found.Name, ) - return found, reconcileError(err) + return found, common.ReconcileError(err) } // Handle enable security just after updating configMap. 
@@ -390,21 +391,21 @@ func (r *SingleClusterReconciler) upgradeOrRollingRestartRack( // Update for security is verified by checking the config hash of the pod with the // config hash present in config map if err := r.handleEnableSecurity(rackState, ignorablePodNames); err != nil { - return found, reconcileError(err) + return found, common.ReconcileError(err) } // Upgrade upgradeNeeded, err := r.isRackUpgradeNeeded(rackState.Rack.ID, ignorablePodNames) if err != nil { - return found, reconcileError(err) + return found, common.ReconcileError(err) } if upgradeNeeded { found, res = r.upgradeRack(found, rackState, ignorablePodNames, failedPods) - if !res.isSuccess { - if res.err != nil { + if !res.IsSuccess { + if res.Err != nil { r.Log.Error( - res.err, "Failed to update StatefulSet image", "stsName", + res.Err, "Failed to update StatefulSet image", "stsName", found.Name, ) @@ -421,17 +422,17 @@ func (r *SingleClusterReconciler) upgradeOrRollingRestartRack( } else { var rollingRestartInfo, nErr = r.getRollingRestartInfo(rackState, ignorablePodNames) if nErr != nil { - return found, reconcileError(nErr) + return found, common.ReconcileError(nErr) } if rollingRestartInfo.needRestart { found, res = r.rollingRestartRack( found, rackState, ignorablePodNames, rollingRestartInfo.restartTypeMap, failedPods, ) - if !res.isSuccess { - if res.err != nil { + if !res.IsSuccess { + if res.Err != nil { r.Log.Error( - res.err, "Failed to do rolling restart", "stsName", + res.Err, "Failed to do rolling restart", "stsName", found.Name, ) @@ -452,10 +453,10 @@ func (r *SingleClusterReconciler) upgradeOrRollingRestartRack( rackState, ignorablePodNames, rollingRestartInfo.restartTypeMap, rollingRestartInfo.dynamicConfDiffPerPod, ) - if !res.isSuccess { - if res.err != nil { + if !res.IsSuccess { + if res.Err != nil { r.Log.Error( - res.err, "Failed to do dynamic update", "stsName", + res.Err, "Failed to do dynamic update", "stsName", found.Name, ) @@ -473,7 +474,7 @@ func (r 
*SingleClusterReconciler) upgradeOrRollingRestartRack( } if r.aeroCluster.Spec.RackConfig.MaxIgnorablePods != nil { - if res = r.handleNSOrDeviceRemovalForIgnorablePods(rackState, ignorablePodNames); !res.isSuccess { + if res = r.handleNSOrDeviceRemovalForIgnorablePods(rackState, ignorablePodNames); !res.IsSuccess { return found, res } } @@ -481,19 +482,19 @@ func (r *SingleClusterReconciler) upgradeOrRollingRestartRack( // handle k8sNodeBlockList pods only if it is changed if !reflect.DeepEqual(r.aeroCluster.Spec.K8sNodeBlockList, r.aeroCluster.Status.K8sNodeBlockList) { found, res = r.handleK8sNodeBlockListPods(found, rackState, ignorablePodNames, failedPods) - if !res.isSuccess { + if !res.IsSuccess { return found, res } } - return found, reconcileSuccess() + return found, common.ReconcileSuccess() } func (r *SingleClusterReconciler) updateDynamicConfig( rackState *RackState, ignorablePodNames sets.Set[string], restartTypeMap map[string]RestartType, dynamicConfDiffPerPod map[string]asconfig.DynamicConfigMap, -) reconcileResult { +) common.ReconcileResult { r.Log.Info("Update dynamic config in Aerospike pods") r.Recorder.Eventf( @@ -509,7 +510,7 @@ func (r *SingleClusterReconciler) updateDynamicConfig( // List the pods for this aeroCluster's statefulset podList, err = r.getOrderedRackPodList(rackState.Rack.ID) if err != nil { - return reconcileError(fmt.Errorf("failed to list pods: %v", err)) + return common.ReconcileError(fmt.Errorf("failed to list pods: %v", err)) } // Find pods which needs restart @@ -527,7 +528,7 @@ func (r *SingleClusterReconciler) updateDynamicConfig( podsToUpdate = append(podsToUpdate, pod) } - if res := r.setDynamicConfig(dynamicConfDiffPerPod, podsToUpdate, ignorablePodNames); !res.isSuccess { + if res := r.setDynamicConfig(dynamicConfDiffPerPod, podsToUpdate, ignorablePodNames); !res.IsSuccess { return res } @@ -536,15 +537,15 @@ func (r *SingleClusterReconciler) updateDynamicConfig( "[rack-%d] Finished Dynamic config update", 
rackState.Rack.ID, ) - return reconcileSuccess() + return common.ReconcileSuccess() } func (r *SingleClusterReconciler) handleNSOrDeviceRemovalForIgnorablePods( rackState *RackState, ignorablePodNames sets.Set[string], -) reconcileResult { +) common.ReconcileResult { podList, err := r.getOrderedRackPodList(rackState.Rack.ID) if err != nil { - return reconcileError(fmt.Errorf("failed to list pods: %v", err)) + return common.ReconcileError(fmt.Errorf("failed to list pods: %v", err)) } // Filter ignoredPods to update their dirtyVolumes in the status. // IgnoredPods are skipped from upgrade/rolling restart, and as a result in case of device removal, dirtyVolumes @@ -564,22 +565,22 @@ func (r *SingleClusterReconciler) handleNSOrDeviceRemovalForIgnorablePods( if len(ignoredPod) > 0 { if err := r.handleNSOrDeviceRemoval(rackState, ignoredPod); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } } - return reconcileSuccess() + return common.ReconcileSuccess() } func (r *SingleClusterReconciler) reconcileRack( found *appsv1.StatefulSet, rackState *RackState, ignorablePodNames sets.Set[string], failedPods []*corev1.Pod, -) reconcileResult { +) common.ReconcileResult { r.Log.Info( "Reconcile existing Aerospike cluster statefulset", "stsName", found.Name, ) - var res reconcileResult + var res common.ReconcileResult r.Log.Info( "Ensure rack StatefulSet size is the same as the spec", "stsName", @@ -592,10 +593,10 @@ func (r *SingleClusterReconciler) reconcileRack( // Scale down if currentSize > desiredSize { found, res = r.scaleDownRack(found, rackState, ignorablePodNames) - if !res.isSuccess { - if res.err != nil { + if !res.IsSuccess { + if res.Err != nil { r.Log.Error( - res.err, "Failed to scaleDown StatefulSet pods", "stsName", + res.Err, "Failed to scaleDown StatefulSet pods", "stsName", found.Name, ) @@ -604,7 +605,7 @@ func (r *SingleClusterReconciler) reconcileRack( "RackScaleDownFailed", "[rack-%d] Failed to scale-down {STS %s/%s, 
currentSize: %d desiredSize: %d}: %s", rackState.Rack.ID, found.Namespace, found.Name, currentSize, - desiredSize, res.err, + desiredSize, res.Err, ) } @@ -622,8 +623,8 @@ func (r *SingleClusterReconciler) reconcileRack( if res = r.setMigrateFillDelay( r.getClientPolicy(), &rackState.Rack.AerospikeConfig, false, nil, - ); !res.isSuccess { - r.Log.Error(res.err, "Failed to revert migrate-fill-delay after scale down") + ); !res.IsSuccess { + r.Log.Error(res.Err, "Failed to revert migrate-fill-delay after scale down") return res } } @@ -635,11 +636,11 @@ func (r *SingleClusterReconciler) reconcileRack( found.Name, ) - return reconcileError(err) + return common.ReconcileError(err) } found, res = r.upgradeOrRollingRestartRack(found, rackState, ignorablePodNames, failedPods) - if !res.isSuccess { + if !res.IsSuccess { return res } @@ -647,9 +648,9 @@ func (r *SingleClusterReconciler) reconcileRack( currentSize = *found.Spec.Replicas if currentSize < desiredSize { found, res = r.scaleUpRack(found, rackState, ignorablePodNames) - if !res.isSuccess { + if !res.IsSuccess { r.Log.Error( - res.err, "Failed to scaleUp StatefulSet pods", "stsName", + res.Err, "Failed to scaleUp StatefulSet pods", "stsName", found.Name, ) @@ -657,7 +658,7 @@ func (r *SingleClusterReconciler) reconcileRack( r.aeroCluster, corev1.EventTypeWarning, "RackScaleUpFailed", "[rack-%d] Failed to scale-up {STS %s/%s, currentSize: %d desiredSize: %d}: %s", rackState.Rack.ID, found.Namespace, found.Name, currentSize, - desiredSize, res.err, + desiredSize, res.Err, ) return res @@ -667,7 +668,7 @@ func (r *SingleClusterReconciler) reconcileRack( // All regular operations are complete. Take time and cleanup dangling nodes that have not been cleaned up // previously due to errors. 
if err := r.cleanupDanglingPodsRack(found, rackState); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } // Safe check to delete all dangling pod services which are no longer required @@ -675,17 +676,17 @@ func (r *SingleClusterReconciler) reconcileRack( if asdbv1.GetBool(r.aeroCluster.Spec.PodSpec.MultiPodPerHost) && !podServiceNeeded(r.aeroCluster.Spec.PodSpec.MultiPodPerHost, &r.aeroCluster.Spec.AerospikeNetworkPolicy) { if err := r.cleanupDanglingPodServices(rackState); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } } - return reconcileSuccess() + return common.ReconcileSuccess() } func (r *SingleClusterReconciler) scaleUpRack( found *appsv1.StatefulSet, rackState *RackState, ignorablePodNames sets.Set[string], ) ( - *appsv1.StatefulSet, reconcileResult, + *appsv1.StatefulSet, common.ReconcileResult, ) { desiredSize := int32(rackState.Size) @@ -702,11 +703,11 @@ func (r *SingleClusterReconciler) scaleUpRack( // with bad node. podList, err := r.getOrderedRackPodList(rackState.Rack.ID) if err != nil { - return found, reconcileError(fmt.Errorf("failed to list pods: %v", err)) + return found, common.ReconcileError(fmt.Errorf("failed to list pods: %v", err)) } if r.isAnyPodInImageFailedState(podList, ignorablePodNames) { - return found, reconcileError(fmt.Errorf("cannot scale up AerospikeCluster. A pod is already in failed state")) + return found, common.ReconcileError(fmt.Errorf("cannot scale up AerospikeCluster. 
A pod is already in failed state")) } var newPodNames []string @@ -718,7 +719,7 @@ func (r *SingleClusterReconciler) scaleUpRack( for _, newPodName := range newPodNames { for idx := range podList { if podList[idx].Name == newPodName { - return found, reconcileError( + return found, common.ReconcileError( fmt.Errorf( "pod %s yet to be launched is still present", newPodName, @@ -729,7 +730,7 @@ func (r *SingleClusterReconciler) scaleUpRack( } if err = r.cleanupDanglingPodsRack(found, rackState); err != nil { - return found, reconcileError( + return found, common.ReconcileError( fmt.Errorf( "failed scale up pre-check: %v", err, ), @@ -738,15 +739,15 @@ func (r *SingleClusterReconciler) scaleUpRack( // Create pod service for the scaled up pod when node network is used in network policy if err = r.createOrUpdatePodServiceIfNeeded(newPodNames); err != nil { - return nil, reconcileError(err) + return nil, common.ReconcileError(err) } // update replicas here to avoid new replicas count comparison while cleaning up dangling pods of rack found.Spec.Replicas = &desiredSize // Scale up the statefulset - if err = r.Client.Update(context.TODO(), found, updateOption); err != nil { - return found, reconcileError( + if err = r.Client.Update(context.TODO(), found, common.UpdateOption); err != nil { + return found, common.ReconcileError( fmt.Errorf( "failed to update StatefulSet pods: %v", err, ), @@ -756,7 +757,7 @@ func (r *SingleClusterReconciler) scaleUpRack( // return a fresh copy found, err = r.getSTS(rackState) if err != nil { - return found, reconcileError(err) + return found, common.ReconcileError(err) } r.Recorder.Eventf( @@ -766,13 +767,13 @@ func (r *SingleClusterReconciler) scaleUpRack( desiredSize, ) - return found, reconcileSuccess() + return found, common.ReconcileSuccess() } func (r *SingleClusterReconciler) upgradeRack( statefulSet *appsv1.StatefulSet, rackState *RackState, ignorablePodNames sets.Set[string], failedPods []*corev1.Pod, -) (*appsv1.StatefulSet, 
reconcileResult) { +) (*appsv1.StatefulSet, common.ReconcileResult) { var ( err error podList []*corev1.Pod @@ -784,7 +785,7 @@ func (r *SingleClusterReconciler) upgradeRack( // List the pods for this aeroCluster's statefulset podList, err = r.getOrderedRackPodList(rackState.Rack.ID) if err != nil { - return statefulSet, reconcileError( + return statefulSet, common.ReconcileError( fmt.Errorf( "failed to list pods: %v", err, ), @@ -802,7 +803,7 @@ func (r *SingleClusterReconciler) upgradeRack( // Repeat the above process. err = r.updateSTS(statefulSet, rackState) if err != nil { - return statefulSet, reconcileError( + return statefulSet, common.ReconcileError( fmt.Errorf("upgrade rack : %v", err), ) } @@ -856,7 +857,7 @@ func (r *SingleClusterReconciler) upgradeRack( podNames := getPodNames(podsBatch) if err = r.createOrUpdatePodServiceIfNeeded(podNames); err != nil { - return nil, reconcileError(err) + return nil, common.ReconcileError(err) } r.Recorder.Eventf( @@ -865,7 +866,7 @@ func (r *SingleClusterReconciler) upgradeRack( ) res := r.safelyDeletePodsAndEnsureImageUpdated(rackState, podsBatch, ignorablePodNames) - if !res.isSuccess { + if !res.IsSuccess { return statefulSet, res } @@ -876,14 +877,14 @@ func (r *SingleClusterReconciler) upgradeRack( // Handle the next batch in subsequent Reconcile. 
if len(podsBatchList) > 1 { - return statefulSet, reconcileRequeueAfter(1) + return statefulSet, common.ReconcileRequeueAfter(1) } } // If it was last batch then go ahead return a fresh copy statefulSet, err = r.getSTS(rackState) if err != nil { - return statefulSet, reconcileError(err) + return statefulSet, common.ReconcileError(err) } r.Recorder.Eventf( @@ -891,17 +892,17 @@ func (r *SingleClusterReconciler) upgradeRack( "[rack-%d] Image Updated {STS: %s/%s}", rackState.Rack.ID, statefulSet.Namespace, statefulSet.Name, ) - return statefulSet, reconcileSuccess() + return statefulSet, common.ReconcileSuccess() } func (r *SingleClusterReconciler) scaleDownRack( found *appsv1.StatefulSet, rackState *RackState, ignorablePodNames sets.Set[string], -) (*appsv1.StatefulSet, reconcileResult) { +) (*appsv1.StatefulSet, common.ReconcileResult) { desiredSize := int32(rackState.Size) // Continue if scaleDown is not needed if *found.Spec.Replicas <= desiredSize { - return found, reconcileSuccess() + return found, common.ReconcileSuccess() } r.Log.Info( @@ -917,11 +918,12 @@ func (r *SingleClusterReconciler) scaleDownRack( oldPodList, err := r.getOrderedRackPodList(rackState.Rack.ID) if err != nil { - return found, reconcileError(fmt.Errorf("failed to list pods: %v", err)) + return found, common.ReconcileError(fmt.Errorf("failed to list pods: %v", err)) } if r.isAnyPodInImageFailedState(oldPodList, ignorablePodNames) { - return found, reconcileError(fmt.Errorf("cannot scale down AerospikeCluster. A pod is already in failed state")) + return found, common.ReconcileError( + fmt.Errorf("cannot scale down AerospikeCluster. A pod is already in failed state")) } // Code flow will reach this stage only when found.Spec.Replicas > desiredSize @@ -963,7 +965,7 @@ func (r *SingleClusterReconciler) scaleDownRack( // Ignore safe stop check if all pods in the batch are not running. // Ignore migrate-fill-delay if pod is not running. Deleting this pod will not lead to any migration. 
if isAnyPodRunningAndReady { - if res := r.waitForMultipleNodesSafeStopReady(runningPods, ignorablePodNames); !res.isSuccess { + if res := r.waitForMultipleNodesSafeStopReady(runningPods, ignorablePodNames); !res.IsSuccess { // The pod is running and is unsafe to terminate. return found, res } @@ -974,7 +976,7 @@ func (r *SingleClusterReconciler) scaleDownRack( // setting migrate-fill-delay will fail if there are any failed pod if res := r.setMigrateFillDelay( policy, &rackState.Rack.AerospikeConfig, true, ignorablePodNames, - ); !res.isSuccess { + ); !res.IsSuccess { return found, res } } @@ -984,9 +986,9 @@ func (r *SingleClusterReconciler) scaleDownRack( found.Spec.Replicas = &newSize if err = r.Client.Update( - context.TODO(), found, updateOption, + context.TODO(), found, common.UpdateOption, ); err != nil { - return found, reconcileError( + return found, common.ReconcileError( fmt.Errorf( "failed to update pod size %d StatefulSet pods: %v", newSize, err, @@ -1001,7 +1003,7 @@ func (r *SingleClusterReconciler) scaleDownRack( // Wait for pods to get terminated if err = r.waitForSTSToBeReady(found, ignorablePodNames); err != nil { r.Log.Error(err, "Failed to wait for statefulset to be ready") - return found, reconcileRequeueAfter(1) + return found, common.ReconcileRequeueAfter(1) } // This check is added only in scale down but not in rolling restart. 
@@ -1020,9 +1022,9 @@ func (r *SingleClusterReconciler) scaleDownRack( ) if err = r.Client.Update( - context.TODO(), found, updateOption, + context.TODO(), found, common.UpdateOption, ); err != nil { - return found, reconcileError( + return found, common.ReconcileError( fmt.Errorf( "failed to update pod size %d StatefulSet pods: %v", newSize, err, @@ -1034,14 +1036,14 @@ func (r *SingleClusterReconciler) scaleDownRack( r.Log.Error(err, "Failed to wait for statefulset to be ready") } - return found, reconcileRequeueAfter(1) + return found, common.ReconcileRequeueAfter(1) } } // Fetch new object nFound, err := r.getSTS(rackState) if err != nil { - return found, reconcileError( + return found, common.ReconcileError( fmt.Errorf( "failed to get StatefulSet pods: %v", err, ), @@ -1053,7 +1055,7 @@ func (r *SingleClusterReconciler) scaleDownRack( podNames := getPodNames(podsBatch) if err := r.cleanupPods(podNames, rackState); err != nil { - return nFound, reconcileError( + return nFound, common.ReconcileError( fmt.Errorf( "failed to cleanup pod %s: %v", podNames, err, ), @@ -1073,14 +1075,14 @@ func (r *SingleClusterReconciler) scaleDownRack( desiredSize, ) - return found, reconcileRequeueAfter(1) + return found, common.ReconcileRequeueAfter(1) } func (r *SingleClusterReconciler) rollingRestartRack( found *appsv1.StatefulSet, rackState *RackState, ignorablePodNames sets.Set[string], restartTypeMap map[string]RestartType, failedPods []*corev1.Pod, -) (*appsv1.StatefulSet, reconcileResult) { +) (*appsv1.StatefulSet, common.ReconcileResult) { r.Log.Info("Rolling restart AerospikeCluster statefulset pods") r.Recorder.Eventf( @@ -1104,12 +1106,12 @@ func (r *SingleClusterReconciler) rollingRestartRack( // List the pods for this aeroCluster's statefulset podList, err = r.getOrderedRackPodList(rackState.Rack.ID) if err != nil { - return found, reconcileError(fmt.Errorf("failed to list pods: %v", err)) + return found, common.ReconcileError(fmt.Errorf("failed to list pods: %v", 
err)) } } if len(failedPods) != 0 && r.isAnyPodInImageFailedState(podList, ignorablePodNames) { - return found, reconcileError( + return found, common.ReconcileError( fmt.Errorf( "cannot Rolling restart AerospikeCluster. " + "A pod is already in failed state due to image related issues", @@ -1119,7 +1121,7 @@ func (r *SingleClusterReconciler) rollingRestartRack( err = r.updateSTS(found, rackState) if err != nil { - return found, reconcileError( + return found, common.ReconcileError( fmt.Errorf("rolling restart failed: %v", err), ) } @@ -1178,16 +1180,16 @@ func (r *SingleClusterReconciler) rollingRestartRack( podNames := getPodNames(podsBatch) if err = r.createOrUpdatePodServiceIfNeeded(podNames); err != nil { - return nil, reconcileError(err) + return nil, common.ReconcileError(err) } - if res := r.rollingRestartPods(rackState, podsBatch, ignorablePodNames, restartTypeMap); !res.isSuccess { + if res := r.rollingRestartPods(rackState, podsBatch, ignorablePodNames, restartTypeMap); !res.IsSuccess { return found, res } // Handle next batch in subsequent Reconcile. 
if len(podsBatchList) > 1 { - return found, reconcileRequeueAfter(1) + return found, common.ReconcileRequeueAfter(1) } } // It's last batch, go ahead @@ -1195,7 +1197,7 @@ func (r *SingleClusterReconciler) rollingRestartRack( // Return a fresh copy found, err = r.getSTS(rackState) if err != nil { - return found, reconcileError(err) + return found, common.ReconcileError(err) } r.Recorder.Eventf( @@ -1203,15 +1205,15 @@ func (r *SingleClusterReconciler) rollingRestartRack( "[rack-%d] Finished Rolling restart", rackState.Rack.ID, ) - return found, reconcileSuccess() + return found, common.ReconcileSuccess() } func (r *SingleClusterReconciler) handleK8sNodeBlockListPods( statefulSet *appsv1.StatefulSet, rackState *RackState, ignorablePodNames sets.Set[string], failedPods []*corev1.Pod, -) (*appsv1.StatefulSet, reconcileResult) { +) (*appsv1.StatefulSet, common.ReconcileResult) { if err := r.updateSTS(statefulSet, rackState); err != nil { - return statefulSet, reconcileError( + return statefulSet, common.ReconcileError( fmt.Errorf("k8s node block list processing failed: %v", err), ) } @@ -1227,7 +1229,7 @@ func (r *SingleClusterReconciler) handleK8sNodeBlockListPods( // List the pods for this aeroCluster's statefulset podList, err = r.getOrderedRackPodList(rackState.Rack.ID) if err != nil { - return statefulSet, reconcileError(fmt.Errorf("failed to list pods: %v", err)) + return statefulSet, common.ReconcileError(fmt.Errorf("failed to list pods: %v", err)) } } @@ -1269,17 +1271,17 @@ func (r *SingleClusterReconciler) handleK8sNodeBlockListPods( "rollingUpdateBatchSize", r.aeroCluster.Spec.RackConfig.RollingUpdateBatchSize, ) - if res := r.rollingRestartPods(rackState, podsBatch, ignorablePodNames, restartTypeMap); !res.isSuccess { + if res := r.rollingRestartPods(rackState, podsBatch, ignorablePodNames, restartTypeMap); !res.IsSuccess { return statefulSet, res } // Handle next batch in subsequent Reconcile. 
if len(podsBatchList) > 1 { - return statefulSet, reconcileRequeueAfter(1) + return statefulSet, common.ReconcileRequeueAfter(1) } } - return statefulSet, reconcileSuccess() + return statefulSet, common.ReconcileSuccess() } type rollingRestartInfo struct { diff --git a/controllers/reconciler.go b/controllers/reconciler.go index 41e91dc8a..3d7866475 100644 --- a/controllers/reconciler.go +++ b/controllers/reconciler.go @@ -22,6 +22,7 @@ import ( as "github.com/aerospike/aerospike-client-go/v7" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" "github.com/aerospike/aerospike-kubernetes-operator/pkg/jsonpatch" "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" lib "github.com/aerospike/aerospike-management-lib" @@ -127,18 +128,18 @@ func (r *SingleClusterReconciler) Reconcile() (result ctrl.Result, recErr error) } // Reconcile all racks - if res := r.reconcileRacks(); !res.isSuccess { - if res.err != nil { + if res := r.reconcileRacks(); !res.IsSuccess { + if res.Err != nil { r.Recorder.Eventf( r.aeroCluster, corev1.EventTypeWarning, "UpdateFailed", "Failed to reconcile Racks for cluster %s/%s", r.aeroCluster.Namespace, r.aeroCluster.Name, ) - recErr = res.err + recErr = res.Err } - return res.result, recErr + return res.Result, recErr } if err := r.reconcilePDB(); err != nil { @@ -222,20 +223,20 @@ func (r *SingleClusterReconciler) Reconcile() (result ctrl.Result, recErr error) if res := r.setMigrateFillDelay( policy, &r.aeroCluster.Spec.RackConfig.Racks[0].AerospikeConfig, false, ignorablePodNames, - ); !res.isSuccess { - r.Log.Error(res.err, "Failed to revert migrate-fill-delay") + ); !res.IsSuccess { + r.Log.Error(res.Err, "Failed to revert migrate-fill-delay") - recErr = res.err + recErr = res.Err return reconcile.Result{}, recErr } if asdbv1.IsClusterSCEnabled(r.aeroCluster) { if !r.IsStatusEmpty() { - if res := r.waitForClusterStability(policy, allHostConns); 
!res.isSuccess { - recErr = res.err + if res := r.waitForClusterStability(policy, allHostConns); !res.IsSuccess { + recErr = res.Err - return res.result, recErr + return res.Result, recErr } } @@ -262,8 +263,8 @@ func (r *SingleClusterReconciler) Reconcile() (result ctrl.Result, recErr error) // Try to recover pods only when MaxIgnorablePods is set if r.aeroCluster.Spec.RackConfig.MaxIgnorablePods != nil { - if res := r.recoverIgnorablePods(); !res.isSuccess { - return res.getResult() + if res := r.recoverIgnorablePods(); !res.IsSuccess { + return res.GetResult() } } @@ -272,11 +273,11 @@ func (r *SingleClusterReconciler) Reconcile() (result ctrl.Result, recErr error) return reconcile.Result{}, nil } -func (r *SingleClusterReconciler) recoverIgnorablePods() reconcileResult { +func (r *SingleClusterReconciler) recoverIgnorablePods() common.ReconcileResult { podList, gErr := r.getClusterPodList() if gErr != nil { r.Log.Error(gErr, "Failed to get cluster pod list") - return reconcileError(gErr) + return common.ReconcileError(gErr) } r.Log.Info("Try to recover failed/pending pods if any") @@ -288,12 +289,12 @@ func (r *SingleClusterReconciler) recoverIgnorablePods() reconcileResult { anyPodFailed = true if err := r.createOrUpdatePodServiceIfNeeded([]string{podList.Items[idx].Name}); err != nil { - return reconcileError(err) + return common.ReconcileError(err) } if err := r.Client.Delete(context.TODO(), &podList.Items[idx]); err != nil { r.Log.Error(err, "Failed to delete pod", "pod", podList.Items[idx].Name) - return reconcileError(err) + return common.ReconcileError(err) } r.Log.Info("Deleted pod", "pod", podList.Items[idx].Name) @@ -302,10 +303,10 @@ func (r *SingleClusterReconciler) recoverIgnorablePods() reconcileResult { if anyPodFailed { r.Log.Info("Found failed/pending pod(s), requeuing") - return reconcileRequeueAfter(0) + return common.ReconcileRequeueAfter(0) } - return reconcileSuccess() + return common.ReconcileSuccess() } func (r *SingleClusterReconciler) 
validateAndReconcileAccessControl( @@ -1020,5 +1021,5 @@ func (r *SingleClusterReconciler) AddAPIVersionLabel(ctx context.Context) error aeroCluster.Labels[asdbv1.AerospikeAPIVersionLabel] = asdbv1.AerospikeAPIVersion - return r.Client.Update(ctx, aeroCluster, updateOption) + return r.Client.Update(ctx, aeroCluster, common.UpdateOption) } diff --git a/controllers/restore/aerospikerestore_controller.go b/controllers/restore/aerospikerestore_controller.go new file mode 100644 index 000000000..6270b377e --- /dev/null +++ b/controllers/restore/aerospikerestore_controller.go @@ -0,0 +1,86 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package restore + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/errors" + k8sRuntime "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// AerospikeRestoreReconciler reconciles a AerospikeRestore object +type AerospikeRestoreReconciler struct { + client.Client + Scheme *k8sRuntime.Scheme + Log logr.Logger +} + +//nolint:lll // for readability +//+kubebuilder:rbac:groups=asdb.aerospike.com,resources=aerospikerestores,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=asdb.aerospike.com,resources=aerospikerestores/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=asdb.aerospike.com,resources=aerospikerestores/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +func (r *AerospikeRestoreReconciler) Reconcile(_ context.Context, request ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("aerospikerestore", request.NamespacedName) + + log.Info("Reconciling AerospikeRestore") + + // Fetch the AerospikeRestore instance + aeroRestore := &asdbv1beta1.AerospikeRestore{} + if err := r.Client.Get(context.TODO(), request.NamespacedName, aeroRestore); err != nil { + if errors.IsNotFound(err) { + // Request object not found, could have been deleted after Reconcile request. + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. 
+ return reconcile.Result{}, err + } + + cr := SingleRestoreReconciler{ + aeroRestore: aeroRestore, + Client: r.Client, + Log: log, + Scheme: r.Scheme, + } + + return cr.Reconcile() +} + +// SetupWithManager sets up the controller with the Manager. +func (r *AerospikeRestoreReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&asdbv1beta1.AerospikeRestore{}). + WithOptions( + controller.Options{ + MaxConcurrentReconciles: common.MaxConcurrentReconciles, + }, + ). + WithEventFilter(predicate.GenerationChangedPredicate{}). + Complete(r) +} diff --git a/controllers/restore/reconciler.go b/controllers/restore/reconciler.go new file mode 100644 index 000000000..597a8f1a1 --- /dev/null +++ b/controllers/restore/reconciler.go @@ -0,0 +1,180 @@ +package restore + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "github.com/go-logr/logr" + k8sRuntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" + backup_service "github.com/aerospike/aerospike-kubernetes-operator/pkg/backup-service" +) + +// SingleRestoreReconciler reconciles a single AerospikeRestore +type SingleRestoreReconciler struct { + client.Client + Recorder record.EventRecorder + aeroRestore *asdbv1beta1.AerospikeRestore + KubeConfig *rest.Config + Scheme *k8sRuntime.Scheme + Log logr.Logger +} + +func (r *SingleRestoreReconciler) Reconcile() (result ctrl.Result, recErr error) { + if !r.aeroRestore.ObjectMeta.DeletionTimestamp.IsZero() { + // Stop reconciliation as the Aerospike restore is being deleted + return reconcile.Result{}, nil + } + + if err := r.setStatusPhase(asdbv1beta1.AerospikeRestoreInProgress); err != 
nil { + return ctrl.Result{}, err + } + + if res := r.reconcileRestore(); !res.IsSuccess { + if res.Err != nil { + return res.Result, res.Err + } + + return res.Result, nil + } + + if err := r.checkRestoreStatus(); err != nil { + return ctrl.Result{}, err + } + + if r.aeroRestore.Status.Phase == asdbv1beta1.AerospikeRestoreInProgress { + return ctrl.Result{RequeueAfter: r.aeroRestore.Spec.PollingPeriod.Duration}, nil + } + + return ctrl.Result{}, nil +} + +func (r *SingleRestoreReconciler) reconcileRestore() common.ReconcileResult { + if r.aeroRestore.Status.JobID != nil { + r.Log.Info("Restore already running, checking the restore status") + return common.ReconcileSuccess() + } + + serviceClient, err := backup_service.GetBackupServiceClient(r.Client, &r.aeroRestore.Spec.BackupService) + if err != nil { + return common.ReconcileError(err) + } + + var ( + jobID *int64 + statusCode *int + ) + + switch r.aeroRestore.Spec.Type { + case asdbv1beta1.Full: + jobID, statusCode, err = serviceClient.TriggerRestoreWithType(r.Log, string(asdbv1beta1.Full), + r.aeroRestore.Spec.Config.Raw) + + case asdbv1beta1.Incremental: + jobID, statusCode, err = serviceClient.TriggerRestoreWithType(r.Log, string(asdbv1beta1.Incremental), + r.aeroRestore.Spec.Config.Raw) + + case asdbv1beta1.Timestamp: + jobID, statusCode, err = serviceClient.TriggerRestoreWithType(r.Log, string(asdbv1beta1.Timestamp), + r.aeroRestore.Spec.Config.Raw) + + default: + return common.ReconcileError(fmt.Errorf("unsupported restore type")) + } + + if err != nil { + if statusCode != nil && *statusCode == http.StatusBadRequest { + r.Log.Error(err, fmt.Sprintf("Failed to trigger restore with status code %d", *statusCode)) + + r.aeroRestore.Status.Phase = asdbv1beta1.AerospikeRestoreFailed + + if err = r.Client.Status().Update(context.Background(), r.aeroRestore); err != nil { + r.Log.Error(err, fmt.Sprintf("Failed to update restore status to %+v", err)) + return common.ReconcileError(err) + } + + // Don't requeue if 
the error is due to bad request. + return common.ReconcileError(reconcile.TerminalError(err)) + } + + return common.ReconcileError(err) + } + + r.aeroRestore.Status.JobID = jobID + + if err = r.Client.Status().Update(context.Background(), r.aeroRestore); err != nil { + r.Log.Error(err, fmt.Sprintf("Failed to update restore status to %+v", err)) + return common.ReconcileError(err) + } + + return common.ReconcileRequeueAfter(1) +} + +func (r *SingleRestoreReconciler) checkRestoreStatus() error { + serviceClient, err := backup_service.GetBackupServiceClient(r.Client, &r.aeroRestore.Spec.BackupService) + if err != nil { + return err + } + + restoreStatus, err := serviceClient.CheckRestoreStatus(r.aeroRestore.Status.JobID) + if err != nil { + return err + } + + r.Log.Info(fmt.Sprintf("Restore status: %+v", restoreStatus)) + + if status, ok := restoreStatus["status"]; ok { + r.aeroRestore.Status.Phase = statusToPhase(status.(string)) + } + + statusBytes, err := json.Marshal(restoreStatus) + if err != nil { + return err + } + + r.aeroRestore.Status.RestoreResult.Raw = statusBytes + + if err = r.Client.Status().Update(context.Background(), r.aeroRestore); err != nil { + r.Log.Error(err, fmt.Sprintf("Failed to update restore status to %+v", err)) + return err + } + + return nil +} + +func (r *SingleRestoreReconciler) setStatusPhase(phase asdbv1beta1.AerospikeRestorePhase) error { + if r.aeroRestore.Status.Phase != phase { + r.aeroRestore.Status.Phase = phase + + if err := r.Client.Status().Update(context.Background(), r.aeroRestore); err != nil { + r.Log.Error(err, fmt.Sprintf("Failed to set restore status to %s", phase)) + return err + } + } + + return nil +} + +func statusToPhase(status string) asdbv1beta1.AerospikeRestorePhase { + switch status { + case "Done": + return asdbv1beta1.AerospikeRestoreCompleted + + case "Running": + return asdbv1beta1.AerospikeRestoreInProgress + + case "Failed": + return asdbv1beta1.AerospikeRestoreFailed + } + + return "" +} diff --git 
a/controllers/result.go b/controllers/result.go deleted file mode 100644 index d3ae7be88..000000000 --- a/controllers/result.go +++ /dev/null @@ -1,35 +0,0 @@ -package controllers - -import ( - "time" - - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -type reconcileResult struct { - err error - result reconcile.Result - isSuccess bool -} - -func (r reconcileResult) getResult() (reconcile.Result, error) { - return r.result, r.err -} - -func reconcileSuccess() reconcileResult { - return reconcileResult{isSuccess: true, result: reconcile.Result{}} -} - -func reconcileRequeueAfter(secs int) reconcileResult { - t := time.Duration(secs) * time.Second - - return reconcileResult{ - result: reconcile.Result{ - Requeue: true, RequeueAfter: t, - }, - } -} - -func reconcileError(e error) reconcileResult { - return reconcileResult{result: reconcile.Result{}, err: e} -} diff --git a/controllers/service.go b/controllers/service.go index 9d12e583e..3c5f86b07 100644 --- a/controllers/service.go +++ b/controllers/service.go @@ -14,6 +14,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" ) @@ -69,7 +70,7 @@ func (r *SingleClusterReconciler) createOrUpdateSTSHeadlessSvc() error { } if err = r.Client.Create( - context.TODO(), service, createOption, + context.TODO(), service, common.CreateOption, ); err != nil { return fmt.Errorf( "failed to create headless service for statefulset: %v", @@ -141,7 +142,7 @@ func (r *SingleClusterReconciler) createOrUpdateSTSLoadBalancerSvc() error { } if nErr := r.Client.Create( - context.TODO(), service, createOption, + context.TODO(), service, common.CreateOption, ); nErr != nil { return nErr } @@ -191,7 +192,7 @@ func (r *SingleClusterReconciler) updateLBService(service *corev1.Service, servi if updateLBService { if err 
:= r.Client.Update( - context.TODO(), service, updateOption, + context.TODO(), service, common.UpdateOption, ); err != nil { return fmt.Errorf( "failed to update service %s: %v", service.Name, err, @@ -250,7 +251,7 @@ func (r *SingleClusterReconciler) createOrUpdatePodService(pName, pNamespace str } if err := r.Client.Create( - context.TODO(), service, createOption, + context.TODO(), service, common.CreateOption, ); err != nil { return fmt.Errorf( "failed to create new service for pod %s: %v", pName, err, @@ -315,7 +316,7 @@ func (r *SingleClusterReconciler) updateServicePorts(service *corev1.Service) er service.Spec.Ports = servicePorts if err := r.Client.Update( - context.TODO(), service, updateOption, + context.TODO(), service, common.UpdateOption, ); err != nil { return fmt.Errorf( "failed to update service %s: %v", service.Name, err, diff --git a/controllers/statefulset.go b/controllers/statefulset.go index 1528f725b..f213b6d74 100644 --- a/controllers/statefulset.go +++ b/controllers/statefulset.go @@ -22,6 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" lib "github.com/aerospike/aerospike-management-lib" ) @@ -185,7 +186,7 @@ func (r *SingleClusterReconciler) createSTS( return nil, err } - if err := r.Client.Create(context.TODO(), st, createOption); err != nil { + if err := r.Client.Create(context.TODO(), st, common.CreateOption); err != nil { return nil, fmt.Errorf("failed to create new StatefulSet: %v", err) } @@ -432,7 +433,7 @@ func (r *SingleClusterReconciler) buildSTSConfigMap( } if err = r.Client.Create( - context.TODO(), confMap, createOption, + context.TODO(), confMap, common.CreateOption, ); err != nil { return fmt.Errorf( "failed to create new confMap for StatefulSet: %v", err, @@ -474,7 +475,7 @@ func (r 
*SingleClusterReconciler) buildSTSConfigMap( confMap.Data = configMapData if err := r.Client.Update( - context.TODO(), confMap, updateOption, + context.TODO(), confMap, common.UpdateOption, ); err != nil { return fmt.Errorf("failed to update ConfigMap for StatefulSet: %v", err) } @@ -504,7 +505,7 @@ func (r *SingleClusterReconciler) updateSTSConfigMap( } if err := r.Client.Update( - context.TODO(), confMap, updateOption, + context.TODO(), confMap, common.UpdateOption, ); err != nil { return fmt.Errorf("failed to update confMap for StatefulSet: %v", err) } @@ -660,7 +661,7 @@ func (r *SingleClusterReconciler) updateSTS( // Save the updated stateful set. found.Spec = statefulSet.Spec - return r.Client.Update(context.TODO(), found, updateOption) + return r.Client.Update(context.TODO(), found, common.UpdateOption) }); err != nil { return fmt.Errorf( "failed to update StatefulSet %s: %v", @@ -1197,7 +1198,7 @@ func (r *SingleClusterReconciler) updateAerospikeInitContainerImage(statefulSet statefulSet.Spec.Template.Spec.InitContainers[idx].Image = desiredImage - if err := r.Client.Update(context.TODO(), statefulSet, updateOption); err != nil { + if err := r.Client.Update(context.TODO(), statefulSet, common.UpdateOption); err != nil { return fmt.Errorf( "failed to update StatefulSet %s: %v", statefulSet.Name, diff --git a/go.mod b/go.mod index 8ec75a195..e8752f31a 100644 --- a/go.mod +++ b/go.mod @@ -22,6 +22,7 @@ require ( ) require ( + github.com/abhishekdwivedi3060/aerospike-backup-service v0.0.0-20240709182036-38038c5c38c7 github.com/aerospike/aerospike-client-go/v7 v7.4.0 github.com/deckarep/golang-set/v2 v2.3.1 github.com/sirupsen/logrus v1.9.0 @@ -29,9 +30,11 @@ require ( golang.org/x/net v0.24.0 gomodules.xyz/jsonpatch/v2 v2.3.0 k8s.io/utils v0.0.0-20230726121419-3b25d923346b + sigs.k8s.io/yaml v1.3.0 ) require ( + github.com/aws/smithy-go v1.20.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect 
github.com/davecgh/go-spew v1.1.1 // indirect @@ -56,7 +59,6 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -64,10 +66,11 @@ require ( github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/client_golang v1.19.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.48.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/reugn/go-quartz v0.11.2 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect @@ -94,5 +97,4 @@ require ( k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index 595aeb2b0..1c1019664 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +github.com/abhishekdwivedi3060/aerospike-backup-service v0.0.0-20240709182036-38038c5c38c7 h1:BP+LMehEXEsUE2HGT/Wn1ib0ZarBLHjjzg+x6XGIJ1s= +github.com/abhishekdwivedi3060/aerospike-backup-service v0.0.0-20240709182036-38038c5c38c7/go.mod h1:CMA+bHRLvL/Kj/aLlbu95iNnlPnvP67q62X81b5e2G4= 
github.com/aerospike/aerospike-client-go/v7 v7.4.0 h1:g8/7v8RHhQhTArhW3C7Au7o+u8j8x5eySZL6MXfpHKU= github.com/aerospike/aerospike-client-go/v7 v7.4.0/go.mod h1:pPKnWiS8VDJcH4IeB1b8SA2TWnkjcVLHwAAJ+BHfGK8= github.com/aerospike/aerospike-management-lib v1.4.0 h1:wT0l3kwzXv5DV5Cd+hD0BQq3hjSIyaPX1HaUb1304TI= @@ -6,6 +8,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= +github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -44,7 +48,6 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= @@ -84,8 +87,6 @@ github.com/kr/text v0.2.0 
h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -106,14 +107,16 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/client_golang v1.19.0 
h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/reugn/go-quartz v0.11.2 h1:+jc54Ji06n/D/endEPmc+CuG/Jc8466nda1oxtFRrks= +github.com/reugn/go-quartz v0.11.2/go.mod h1:no4ktgYbAAuY0E1SchR8cTx1LF4jYIzdgaQhzRPSkpk= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= @@ -180,7 +183,6 @@ golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git 
a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml new file mode 100644 index 000000000..4bb228475 --- /dev/null +++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml @@ -0,0 +1,147 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: aerospikebackups.asdb.aerospike.com +spec: + group: asdb.aerospike.com + names: + kind: AerospikeBackup + listKind: AerospikeBackupList + plural: aerospikebackups + singular: aerospikebackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.backupService.name + name: Backup Service Name + type: string + - jsonPath: .spec.backupService.namespace + name: Backup Service Namespace + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: AerospikeBackup is the Schema for the aerospikebackup API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AerospikeBackupSpec defines the desired state of AerospikeBackup + for a given AerospikeCluster + properties: + backupService: + description: BackupService is the backup service reference i.e. name + and namespace. It is used to communicate to the backup service to + trigger backups. This field is immutable + properties: + name: + description: Backup service name + type: string + namespace: + description: Backup service namespace + type: string + required: + - name + - namespace + type: object + config: + description: 'Config is the free form configuration for the backup + in YAML format. This config is used to trigger backups. It includes: + aerospike-cluster, backup-routines.' + type: object + x-kubernetes-preserve-unknown-fields: true + onDemandBackups: + description: OnDemandBackups is the configuration for on-demand backups. + items: + properties: + delay: + description: Delay is the interval before starting the on-demand + backup. + type: string + id: + description: ID is the unique identifier for the on-demand backup. + minLength: 1 + type: string + routineName: + description: RoutineName is the routine name used to trigger + on-demand backup. + type: string + required: + - id + - routineName + type: object + maxItems: 1 + type: array + required: + - backupService + - config + type: object + status: + description: AerospikeBackupStatus defines the observed state of AerospikeBackup + properties: + backupService: + description: BackupService is the backup service reference i.e. name + and namespace. + properties: + name: + description: Backup service name + type: string + namespace: + description: Backup service namespace + type: string + required: + - name + - namespace + type: object + config: + description: 'Config is the configuration for the backup in YAML format. 
+ This config is used to trigger backups. It includes: aerospike-cluster, + backup-routines.' + type: object + x-kubernetes-preserve-unknown-fields: true + onDemandBackups: + description: OnDemandBackups is the configuration for on-demand backups. + items: + properties: + delay: + description: Delay is the interval before starting the on-demand + backup. + type: string + id: + description: ID is the unique identifier for the on-demand backup. + minLength: 1 + type: string + routineName: + description: RoutineName is the routine name used to trigger + on-demand backup. + type: string + required: + - id + - routineName + type: object + type: array + required: + - backupService + - config + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml new file mode 100644 index 000000000..75dc3df7d --- /dev/null +++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml @@ -0,0 +1,324 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: aerospikebackupservices.asdb.aerospike.com +spec: + group: asdb.aerospike.com + names: + kind: AerospikeBackupService + listKind: AerospikeBackupServiceList + plural: aerospikebackupservices + singular: aerospikebackupservice + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.image + name: Image + type: string + - jsonPath: .spec.service.type + name: Service Type + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: 
AerospikeBackupService is the Schema for the aerospikebackupservices + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AerospikeBackupServiceSpec defines the desired state of AerospikeBackupService + properties: + config: + description: 'Config is the free form configuration for the backup + service in YAML format. This config is used to start the backup + service. The config is passed as a file to the backup service. It + includes: service, backup-policies, storage, secret-agent.' + type: object + x-kubernetes-preserve-unknown-fields: true + image: + description: Image is the image for the backup service. + type: string + resources: + description: Resources defines the requests and limits for the backup + service container. Resources.Limits should be more than Resources.Requests. + properties: + claims: + description: "Claims lists the names of resources, defined in + spec.resourceClaims, that are used by this container. \n This + is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be set + for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
+ properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. It makes that resource + available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + secrets: + description: SecretMounts is the list of secret to be mounted in the + backup service. + items: + description: SecretMount specifies the secret and its corresponding + volume mount options. + properties: + secretName: + description: SecretName is the name of the secret to be mounted. + type: string + volumeMount: + description: VolumeMount is the volume mount options for the + secret. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. 
+ type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + required: + - secretName + - volumeMount + type: object + type: array + service: + description: Service defines the Kubernetes service configuration + for the backup service. It is used to expose the backup service + deployment. By default, the service type is ClusterIP. + properties: + type: + description: Type is the Kubernetes service type. + type: string + required: + - type + type: object + required: + - config + - image + type: object + status: + description: AerospikeBackupServiceStatus defines the observed state of + AerospikeBackupService + properties: + config: + description: 'Config is the free form configuration for the backup + service in YAML format. This config is used to start the backup + service. The config is passed as a file to the backup service. It + includes: service, backup-policies, storage, secret-agent.' 
+ type: object + x-kubernetes-preserve-unknown-fields: true + contextPath: + description: ContextPath is the backup service API context path + type: string + image: + description: Image is the image for the backup service. + type: string + phase: + description: Phase denotes Backup service phase + enum: + - InProgress + - Completed + - Error + type: string + port: + description: Port is the listening port of backup service + format: int32 + type: integer + resources: + description: Resources defines the requests and limits for the backup + service container. Resources.Limits should be more than Resources.Requests. + properties: + claims: + description: "Claims lists the names of resources, defined in + spec.resourceClaims, that are used by this container. \n This + is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be set + for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. It makes that resource + available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + secrets: + description: SecretMounts is the list of secret to be mounted in the + backup service. + items: + description: SecretMount specifies the secret and its corresponding + volume mount options. + properties: + secretName: + description: SecretName is the name of the secret to be mounted. + type: string + volumeMount: + description: VolumeMount is the volume mount options for the + secret. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. 
Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + required: + - secretName + - volumeMount + type: object + type: array + service: + description: Service defines the Kubernetes service configuration + for the backup service. It is used to expose the backup service + deployment. By default, the service type is ClusterIP. + properties: + type: + description: Type is the Kubernetes service type. + type: string + required: + - type + type: object + required: + - phase + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml new file mode 100644 index 000000000..759a11933 --- /dev/null +++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml @@ -0,0 +1,116 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.1 + name: aerospikerestores.asdb.aerospike.com +spec: + group: asdb.aerospike.com + names: + kind: AerospikeRestore + listKind: AerospikeRestoreList + plural: aerospikerestores + singular: aerospikerestore + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.backupService.name + name: Backup Service Name + type: string + - jsonPath: .spec.backupService.namespace + name: Backup Service Namespace + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + 
description: AerospikeRestore is the Schema for the aerospikerestores API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AerospikeRestoreSpec defines the desired state of AerospikeRestore + properties: + backupService: + description: BackupService is the backup service reference i.e. name + and namespace. It is used to communicate to the backup service to + trigger restores. This field is immutable + properties: + name: + description: Backup service name + type: string + namespace: + description: Backup service namespace + type: string + required: + - name + - namespace + type: object + config: + description: 'Config is the free form configuration for the restore + in YAML format. This config is used to trigger restores. It includes: + destination, policy, source, secret-agent, time and routine.' + type: object + x-kubernetes-preserve-unknown-fields: true + pollingPeriod: + description: PollingPeriod is the polling period for restore operation + status. It is used to poll the restore service to fetch restore + operation status. Default is 60 seconds. + type: string + type: + description: Type is the type of restore. It can of type Full, Incremental, + and Timestamp. Based on the restore type, the relevant restore config + should be given. 
+ enum: + - Full + - Incremental + - Timestamp + type: string + required: + - backupService + - config + - type + type: object + status: + description: AerospikeRestoreStatus defines the observed state of AerospikeRestore + properties: + job-id: + description: JobID is the restore operation job id. + format: int64 + type: integer + phase: + description: Phase denotes the current phase of Aerospike restore + operation. + enum: + - InProgress + - Completed + - Failed + type: string + restoreResult: + description: RestoreResult is the result of the restore operation. + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - phase + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/main.go b/main.go index 83f4a9c3f..af7d3f1f4 100644 --- a/main.go +++ b/main.go @@ -20,14 +20,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" - // +kubebuilder:scaffold:imports - // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 
+ "github.com/aerospike/aerospike-management-lib/asconfig" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + // +kubebuilder:scaffold:imports asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" aerospikecluster "github.com/aerospike/aerospike-kubernetes-operator/controllers" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/backup" + backupservice "github.com/aerospike/aerospike-kubernetes-operator/controllers/backup-service" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/restore" "github.com/aerospike/aerospike-kubernetes-operator/pkg/configschema" - "github.com/aerospike/aerospike-management-lib/asconfig" ) var ( @@ -93,13 +95,14 @@ func main() { options.Cache.Namespaces = []string{watchNs} } - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), options) + kubeConfig := ctrl.GetConfigOrDie() + + mgr, err := ctrl.NewManager(kubeConfig, options) if err != nil { setupLog.Error(err, "unable to start manager") os.Exit(1) } - kubeConfig := ctrl.GetConfigOrDie() kubeClient := kubernetes.NewForConfigOrDie(kubeConfig) // This client will read/write directly from api-server @@ -155,6 +158,49 @@ func main() { setupLog.Error(err, "unable to create webhook", "v1-webhook", "AerospikeCluster") os.Exit(1) } + + if err = (&backupservice.AerospikeBackupServiceReconciler{ + Client: client, + Scheme: mgr.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName("AerospikeBackupService"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AerospikeBackupService") + os.Exit(1) + } + + if err = (&asdbv1beta1.AerospikeBackupService{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AerospikeBackupService") + os.Exit(1) + } + + if err = (&backup.AerospikeBackupReconciler{ + Client: client, + Scheme: mgr.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName("AerospikeBackup"), + 
}).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AerospikeBackup") + os.Exit(1) + } + + if err = (&asdbv1beta1.AerospikeBackup{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AerospikeBackup") + os.Exit(1) + } + + if err = (&restore.AerospikeRestoreReconciler{ + Client: client, + Scheme: mgr.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName("AerospikeRestore"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AerospikeRestore") + os.Exit(1) + } + + if err = (&asdbv1beta1.AerospikeRestore{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AerospikeRestore") + os.Exit(1) + } + // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("health", healthz.Ping); err != nil { diff --git a/pkg/backup-service/client.go b/pkg/backup-service/client.go new file mode 100644 index 000000000..d8373edeb --- /dev/null +++ b/pkg/backup-service/client.go @@ -0,0 +1,725 @@ +//nolint:gosec // to ignore potential HTTP request made with variable url (gosec) +package backupservice + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + url2 "net/url" + "strings" + + "github.com/go-logr/logr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" +) + +const restAPIVersion = "v1" +const defaultContextPath = "/" + +type Client struct { + // The address to listen on. + Address string `json:"address,omitempty"` + + // ContextPath customizes path for the API endpoints. + ContextPath string `json:"context-path,omitempty"` + + // The port to listen on. 
+ Port int32 `json:"port,omitempty"` +} + +func GetBackupServiceClient(k8sClient client.Client, svc *v1beta1.BackupService) (*Client, error) { + backupSvc := &v1beta1.AerospikeBackupService{} + + if err := k8sClient.Get(context.TODO(), + types.NamespacedName{ + Namespace: svc.Namespace, + Name: svc.Name, + }, backupSvc, + ); err != nil { + return nil, err + } + + return &Client{ + Address: fmt.Sprintf("%s.%s.svc", backupSvc.Name, backupSvc.Namespace), + Port: backupSvc.Status.Port, + ContextPath: backupSvc.Status.ContextPath, + }, nil +} + +func (c *Client) getAddress() string { + return c.Address +} + +func (c *Client) getPort() int32 { + return c.Port +} + +func (c *Client) getContextPath() string { + if c.ContextPath != "" { + return c.ContextPath + } + + return defaultContextPath +} + +func (c *Client) CheckBackupServiceHealth() error { + url := c.API("/health") + + resp, err := http.Get(url) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("backup service is not healthy") + } + + return nil +} + +func (c *Client) GetBackupServiceConfig() (map[string]interface{}, error) { + url := c.API("/config") + + resp, err := http.Get(url) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to get backup service config") + } + + conf := make(map[string]interface{}) + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(body, &conf); err != nil { + return nil, err + } + + return conf, nil +} + +func (c *Client) ApplyConfig() error { + url := c.API("/config/apply") + + resp, err := http.Get(url) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + return fmt.Errorf("failed to apply latest config, error: %s", string(body)) + } + + 
return nil +} + +func (c *Client) GetClusters() (map[string]interface{}, error) { + url := c.API("/config/clusters") + + resp, err := http.Get(url) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to get aerospike clusters") + } + + aerospikeClusters := make(map[string]interface{}) + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(body, &aerospikeClusters); err != nil { + return nil, err + } + + return aerospikeClusters, nil +} + +func (c *Client) PutCluster(name, cluster interface{}) error { + url := c.API(fmt.Sprintf("/config/clusters/%s", name)) + + jsonBody, err := json.Marshal(cluster) + if err != nil { + return err + } + + bodyReader := bytes.NewReader(jsonBody) + + req, err := http.NewRequest(http.MethodPut, url, bodyReader) + if err != nil { + return err + } + + cl := &http.Client{} + + resp, err := cl.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + return fmt.Errorf("failed to put aerospike cluster, error: %s", string(body)) + } + + return nil +} + +func (c *Client) DeleteCluster(name string) error { + url := c.API(fmt.Sprintf("/config/clusters/%s", name)) + + req, err := http.NewRequest(http.MethodDelete, url, http.NoBody) + if err != nil { + return err + } + + cl := &http.Client{} + + resp, err := cl.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusNoContent { + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + return fmt.Errorf("failed to delete aerospike cluster, error: %s", string(body)) + } + + return nil +} + +func (c *Client) AddCluster(name, cluster interface{}) error { + url := c.API(fmt.Sprintf("/config/clusters/%s", name)) + + jsonBody, err := json.Marshal(cluster) + if err 
!= nil { + return err + } + + bodyReader := bytes.NewReader(jsonBody) + + resp, err := http.Post(url, "application/json", bodyReader) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusCreated { + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + return fmt.Errorf("failed to update aerospike cluster, error: %s", string(body)) + } + + return nil +} + +func (c *Client) GetBackupPolicies() (map[string]interface{}, error) { + url := c.API("/config/policies") + + resp, err := http.Get(url) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to get backup policies") + } + + policies := make(map[string]interface{}) + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(body, &policies); err != nil { + return nil, err + } + + return policies, nil +} + +func (c *Client) PutBackupPolicy(name string, policy interface{}) error { + url := c.API(fmt.Sprintf("/config/policies/%s", name)) + + jsonBody, err := json.Marshal(policy) + if err != nil { + return err + } + + bodyReader := bytes.NewReader(jsonBody) + + req, err := http.NewRequest(http.MethodPut, url, bodyReader) + if err != nil { + return err + } + + cl := &http.Client{} + + resp, err := cl.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + return fmt.Errorf("failed to put backup policy, error: %s", string(body)) + } + + return nil +} + +func (c *Client) AddBackupPolicy(name string, policy interface{}) error { + url := c.API(fmt.Sprintf("/config/policies/%s", name)) + + jsonBody, err := json.Marshal(policy) + if err != nil { + return err + } + + bodyReader := bytes.NewReader(jsonBody) + + resp, err := http.Post(url, "application/json", bodyReader) + if err != nil { + return 
err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusCreated { + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + return fmt.Errorf("failed to update backup policy, error: %s", string(body)) + } + + return nil +} + +func (c *Client) GetBackupRoutines() {} + +func (c *Client) PutBackupRoutine(name string, routine interface{}) error { + url := c.API(fmt.Sprintf("/config/routines/%s", name)) + + jsonBody, err := json.Marshal(routine) + if err != nil { + return err + } + + bodyReader := bytes.NewReader(jsonBody) + + req, err := http.NewRequest(http.MethodPut, url, bodyReader) + if err != nil { + return err + } + + cl := &http.Client{} + + resp, err := cl.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + return fmt.Errorf("failed to put backup routine, error: %s", string(body)) + } + + return nil +} + +func (c *Client) AddBackupRoutine(name string, routine interface{}) error { + url := c.API(fmt.Sprintf("/config/routines/%s", name)) + + jsonBody, err := json.Marshal(routine) + if err != nil { + return err + } + + bodyReader := bytes.NewReader(jsonBody) + + resp, err := http.Post(url, "application/json", bodyReader) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusCreated { + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + return fmt.Errorf("failed to update backup routine, error: %s", string(body)) + } + + return nil +} + +func (c *Client) DeleteBackupRoutine(name string) error { + url := c.API(fmt.Sprintf("/config/routines/%s", name)) + + req, err := http.NewRequest(http.MethodDelete, url, http.NoBody) + if err != nil { + return err + } + + cl := &http.Client{} + + resp, err := cl.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusNoContent { + body, err := 
io.ReadAll(resp.Body) + if err != nil { + return err + } + + return fmt.Errorf("failed to delete backup routine, error: %s", string(body)) + } + + return nil +} + +func (c *Client) GetStorage() (map[string]interface{}, error) { + url := c.API("/config/storage") + + resp, err := http.Get(url) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to get backup storage") + } + + storage := make(map[string]interface{}) + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(body, &storage); err != nil { + return nil, err + } + + return storage, nil +} + +func (c *Client) PutStorage(name string, storage interface{}) error { + url := c.API(fmt.Sprintf("/config/storage/%s", name)) + + jsonBody, err := json.Marshal(storage) + if err != nil { + return err + } + + bodyReader := bytes.NewReader(jsonBody) + + req, err := http.NewRequest(http.MethodPut, url, bodyReader) + if err != nil { + return err + } + + cl := &http.Client{} + + resp, err := cl.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + return fmt.Errorf("failed to put backup storage, error: %s", string(body)) + } + + return nil +} + +func (c *Client) AddStorage(name string, storage interface{}) error { + url := c.API(fmt.Sprintf("/config/storage/%s", name)) + + jsonBody, err := json.Marshal(storage) + if err != nil { + return err + } + + bodyReader := bytes.NewReader(jsonBody) + + resp, err := http.Post(url, "application/json", bodyReader) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusCreated { + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + return fmt.Errorf("failed to update backup storage, error: %s", string(body)) + } + + return nil +} + +func (c *Client) 
GetFullBackups() (map[string][]interface{}, error) { + url := c.API("/backups/full") + + resp, err := http.Get(url) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to get backups") + } + + backups := make(map[string][]interface{}) + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(body, &backups); err != nil { + return nil, err + } + + return backups, nil +} + +func (c *Client) GetFullBackupsForRoutine(routineName string) ([]interface{}, error) { + url := c.API(fmt.Sprintf("/backups/full/%s", routineName)) + + resp, err := http.Get(url) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to get backups") + } + + var backups []interface{} + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(body, &backups); err != nil { + return nil, err + } + + return backups, nil +} + +func (c *Client) ScheduleBackup(routineName string, delay metav1.Duration) error { + url, err := url2.Parse(c.API(fmt.Sprintf("/backups/schedule/%s", routineName))) + if err != nil { + return err + } + + if delay.Duration.Milliseconds() > 0 { + query := url.Query() + query.Add("delay", fmt.Sprintf("%d", delay.Duration.Milliseconds())) + url.RawQuery = query.Encode() + } + + resp, err := http.Post(url.String(), "application/json", nil) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusAccepted { + return fmt.Errorf("failed to schedule backup") + } + + return nil +} + +func (c *Client) TriggerRestoreWithType(log logr.Logger, restoreType string, + request []byte) (jobID *int64, statusCode *int, err error) { + log.Info(fmt.Sprintf("Triggering %s restore", restoreType)) + + var url string + + switch restoreType { + case "Full": + url = c.API("/restore/full") + + 
case "Incremental": + url = c.API("/restore/incremental") + + case "Timestamp": + url = c.API("/restore/timestamp") + + default: + return nil, nil, fmt.Errorf("unsupported restore type") + } + + jsonBody, err := yaml.YAMLToJSON(request) + if err != nil { + return nil, nil, err + } + + bodyReader := bytes.NewReader(jsonBody) + + resp, err := http.Post(url, "application/json", bodyReader) + if err != nil { + return nil, nil, err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusAccepted { + log.Info("Response", "status-code", resp.StatusCode) + + body, rErr := io.ReadAll(resp.Body) + if rErr != nil { + return nil, &resp.StatusCode, rErr + } + + return nil, &resp.StatusCode, + fmt.Errorf("failed to trigger %s restore, error: %s", restoreType, string(body)) + } + + jobID = new(int64) + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, &resp.StatusCode, err + } + + if err := json.Unmarshal(body, jobID); err != nil { + return nil, &resp.StatusCode, err + } + + log.Info(fmt.Sprintf("Triggered %s restore", restoreType)) + + return jobID, &resp.StatusCode, nil +} + +func (c *Client) CheckRestoreStatus(jobID *int64) (map[string]interface{}, error) { + url := c.API(fmt.Sprintf("/restore/status/%d", *jobID)) + + resp, err := http.Get(url) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to check restore restoreStatus") + } + + restoreStatus := make(map[string]interface{}) + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(body, &restoreStatus); err != nil { + return nil, err + } + + return restoreStatus, nil +} + +func (c *Client) API(pattern string) string { + contextPath := c.getContextPath() + + if !strings.HasSuffix(contextPath, "/") { + contextPath += "/" + } + + address := fmt.Sprintf("%s:%d", c.getAddress(), c.getPort()) + + return fmt.Sprintf("http://%s%s%s%s", address, contextPath, 
restAPIVersion, pattern) +} diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 730361f22..bb5d46bb5 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -13,6 +13,7 @@ import ( "k8s.io/apimachinery/pkg/types" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" ) const ( @@ -142,6 +143,15 @@ func LabelsForPodAntiAffinity(clName string) map[string]string { return labels } +// LabelsForAerospikeBackupService returns the labels for selecting the resources +// belonging to the given AerospikeBackupService CR name. +func LabelsForAerospikeBackupService(clName string) map[string]string { + return map[string]string{ + asdbv1.AerospikeAppLabel: common.AerospikeBackupService, + asdbv1.AerospikeCustomResourceLabel: clName, + } +} + // MergeLabels merges operator an user defined labels func MergeLabels(operatorLabels, userLabels map[string]string) map[string]string { mergedMap := make(map[string]string, len(operatorLabels)+len(userLabels)) diff --git a/test/backup/backup_suite_test.go b/test/backup/backup_suite_test.go new file mode 100644 index 000000000..e53be0e8f --- /dev/null +++ b/test/backup/backup_suite_test.go @@ -0,0 +1,99 @@ +package backup + +import ( + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8Runtime "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" + "github.com/aerospike/aerospike-kubernetes-operator/test" + backupservice "github.com/aerospike/aerospike-kubernetes-operator/test/backup_service" + "github.com/aerospike/aerospike-kubernetes-operator/test/cluster" +) + +var testEnv *envtest.Environment + +var k8sClient client.Client + +var scheme = k8Runtime.NewScheme() + +func TestBackup(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Backup Suite") +} + +var _ = BeforeSuite( + func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("Bootstrapping test environment") + var err error + + testEnv, _, k8sClient, _, err = test.BootStrapTestEnv(scheme) + Expect(err).NotTo(HaveOccurred()) + + By("Deploy Backup Service") + backupService, err := backupservice.NewBackupService() + Expect(err).ToNot(HaveOccurred()) + + backupService.Spec.Service = &asdbv1beta1.Service{ + Type: corev1.ServiceTypeLoadBalancer, + } + + backupServiceName = backupService.Name + backupServiceNamespace = backupService.Namespace + + err = backupservice.DeployBackupService(k8sClient, backupService) + Expect(err).ToNot(HaveOccurred()) + + By("Deploy Aerospike Cluster") + cascadeDeleteTrue := true + aeroCluster := cluster.CreateDummyAerospikeCluster(aerospikeNsNm, 2) + aeroCluster.Spec.Storage.BlockVolumePolicy.InputCascadeDelete = &cascadeDeleteTrue + aeroCluster.Spec.Storage.FileSystemVolumePolicy.InputCascadeDelete = &cascadeDeleteTrue + + err = cluster.DeployCluster(k8sClient, testCtx, 
aeroCluster) + Expect(err).ToNot(HaveOccurred()) + }) + +var _ = AfterSuite( + func() { + By("Delete Aerospike Cluster") + aeroCluster := asdbv1.AerospikeCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: aerospikeNsNm.Name, + Namespace: aerospikeNsNm.Namespace, + }, + } + + err := cluster.DeleteCluster(k8sClient, testCtx, &aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Delete Backup Service") + backupService := asdbv1beta1.AerospikeBackupService{ + ObjectMeta: metav1.ObjectMeta{ + Name: backupServiceName, + Namespace: backupServiceNamespace, + }, + } + + err = backupservice.DeleteBackupService(k8sClient, &backupService) + Expect(err).ToNot(HaveOccurred()) + + By("tearing down the test environment") + gexec.KillAndWait(5 * time.Second) + err = testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) + }, +) diff --git a/test/backup/backup_test.go b/test/backup/backup_test.go new file mode 100644 index 000000000..0d1d80ae8 --- /dev/null +++ b/test/backup/backup_test.go @@ -0,0 +1,528 @@ +package backup + +import ( + "encoding/json" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/yaml" + + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" +) + +var _ = Describe( + "Backup Service Test", func() { + + var ( + backup *asdbv1beta1.AerospikeBackup + err error + backupNsNm = types.NamespacedName{ + Namespace: namespace, + Name: "sample-backup", + } + ) + + AfterEach(func() { + Expect(DeleteBackup(k8sClient, backup)).ToNot(HaveOccurred()) + }) + + Context( + "When doing Invalid operations", func() { + It("Should fail when wrong format backup config is given", func() { + backup, err = NewBackup(backupNsNm) + Expect(err).ToNot(HaveOccurred()) + + badConfig, gErr := getWrongBackupConfBytes(namePrefix(backupNsNm)) + Expect(gErr).ToNot(HaveOccurred()) + backup.Spec.Config.Raw = badConfig + + err = CreateBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when improper format name is used in config", func() { + config := getBackupConfigInMap("wrong-prefix") + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(backupNsNm, configBytes) + err = CreateBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + ContainSubstring("name should start with %s", namePrefix(backupNsNm))) + }) + + It("Should fail when un-supported field is given in backup config", func() { + config := getBackupConfigInMap(namePrefix(backupNsNm)) + routines := config[common.BackupRoutinesKey].(map[string]interface{}) + routines[namePrefix(backupNsNm)+"-"+"test-routine"].(map[string]interface{})["unknown"] = "unknown" + config[common.BackupRoutinesKey] = routines + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(backupNsNm, configBytes) + err = CreateBackup(k8sClient, backup) + 
Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("unknown field")) + }) + + It("Should fail when more than 1 cluster is given in backup config", func() { + config := getBackupConfigInMap(namePrefix(backupNsNm)) + aeroCluster := config[common.AerospikeClusterKey].(map[string]interface{}) + aeroCluster["cluster-two"] = aeroCluster["test-cluster"] + config[common.AerospikeClusterKey] = aeroCluster + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(backupNsNm, configBytes) + err = CreateBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + ContainSubstring("only one aerospike cluster is allowed in backup config")) + }) + + It("Should fail when on-demand backup is given at the time of creation", func() { + backup, err = NewBackup(backupNsNm) + Expect(err).ToNot(HaveOccurred()) + + backup.Spec.OnDemandBackups = []asdbv1beta1.OnDemandBackupSpec{ + { + ID: "on-demand", + RoutineName: "test-routine", + }, + } + + err = CreateBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + ContainSubstring("onDemand backups config cannot be specified while creating backup")) + }) + + It("Should fail when non-existing routine is given in on-demand backup", func() { + backup, err = NewBackup(backupNsNm) + Expect(err).ToNot(HaveOccurred()) + + err = CreateBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + backup, err = getBackupObj(k8sClient, backup.Name, backup.Namespace) + Expect(err).ToNot(HaveOccurred()) + + backup.Spec.OnDemandBackups = []asdbv1beta1.OnDemandBackupSpec{ + { + ID: "on-demand", + RoutineName: "non-existing-routine", + }, + } + + err = updateBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + ContainSubstring("invalid onDemand config, backup routine non-existing-routine not found")) + }) + + It("Should fail when backup service is not present", func() { + backup, err = 
NewBackup(backupNsNm) + Expect(err).ToNot(HaveOccurred()) + + backup.Spec.BackupService.Name = "wrong-backup-service" + + err = CreateBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + ContainSubstring("not found")) + }) + + It("Should fail when backup service reference is updated", func() { + backup, err = NewBackup(backupNsNm) + Expect(err).ToNot(HaveOccurred()) + + err = CreateBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + backup, err = getBackupObj(k8sClient, backup.Name, backup.Namespace) + Expect(err).ToNot(HaveOccurred()) + + backup.Spec.BackupService.Name = "updated-backup-service" + + err = updateBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + ContainSubstring("backup service cannot be updated")) + }) + + It("Should fail when non-existing policy is referred in Backup routine", func() { + config := getBackupConfigInMap(namePrefix(backupNsNm)) + routines := config[common.BackupRoutinesKey].(map[string]interface{}) + routines[namePrefix(backupNsNm)+"-"+"test-routine"].(map[string]interface{})["backup-policy"] = + "non-existing-policy" + config[common.BackupRoutinesKey] = routines + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(backupNsNm, configBytes) + err = CreateBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when non-existing cluster is referred in Backup routine", func() { + config := getBackupConfigInMap(namePrefix(backupNsNm)) + routines := config[common.BackupRoutinesKey].(map[string]interface{}) + routines[namePrefix(backupNsNm)+"-"+"test-routine"].(map[string]interface{})[common.SourceClusterKey] = + "non-existing-cluster" + config[common.BackupRoutinesKey] = routines + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(backupNsNm, configBytes) + err = CreateBackup(k8sClient, backup) + 
Expect(err).To(HaveOccurred()) + }) + + It("Should fail when non-existing storage is referred in Backup routine", func() { + config := getBackupConfigInMap(namePrefix(backupNsNm)) + routines := config[common.BackupRoutinesKey].(map[string]interface{}) + routines[namePrefix(backupNsNm)+"-"+"test-routine"].(map[string]interface{})["storage"] = + "non-existing-storage" + config[common.BackupRoutinesKey] = routines + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(backupNsNm, configBytes) + err = CreateBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when empty backup config is given", func() { + backup = newBackupWithEmptyConfig(backupNsNm) + backup.Spec.Config.Raw = []byte("{}") + err = CreateBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when service field is given in backup config", func() { + config := getBackupConfigInMap(namePrefix(backupNsNm)) + config[common.ServiceKey] = map[string]interface{}{ + "http": map[string]interface{}{ + "port": 8081, + }, + } + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(backupNsNm, configBytes) + err = CreateBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + ContainSubstring("service field cannot be specified in backup config")) + }) + + It("Should fail when backup-policies field is given in backup config", func() { + config := getBackupConfigInMap(namePrefix(backupNsNm)) + config[common.BackupPoliciesKey] = map[string]interface{}{ + "test-policy": map[string]interface{}{ + "parallel": 3, + "remove-files": "KeepAll", + "type": 1, + }, + } + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(backupNsNm, configBytes) + err = CreateBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + 
ContainSubstring("backup-policies field cannot be specified in backup config")) + }) + + It("Should fail when storage field is given in backup config", func() { + config := getBackupConfigInMap(namePrefix(backupNsNm)) + config[common.StorageKey] = map[string]interface{}{ + "local": map[string]interface{}{ + "path": "/localStorage", + "type": "local", + }, + } + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(backupNsNm, configBytes) + err = CreateBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + ContainSubstring("storage field cannot be specified in backup config")) + }) + + It("Should fail when secret-agent is given in backup config", func() { + config := getBackupConfigInMap(namePrefix(backupNsNm)) + config[common.SecretAgentsKey] = map[string]interface{}{ + "test-agent": map[string]interface{}{ + "address": "localhost", + "port": 4000, + }, + } + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(backupNsNm, configBytes) + err = CreateBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + ContainSubstring("secret-agent field cannot be specified in backup config")) + }) + + It("Should fail when aerospike-cluster name is updated", func() { + backup, err = NewBackup(backupNsNm) + Expect(err).ToNot(HaveOccurred()) + + err = CreateBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + err = validateTriggeredBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + backup, err = getBackupObj(k8sClient, backup.Name, backup.Namespace) + Expect(err).ToNot(HaveOccurred()) + + // Change prefix to generate new names + prefix := namePrefix(backupNsNm) + "-1" + config := getBackupConfigInMap(prefix) + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup.Spec.Config.Raw = configBytes + + err = updateBackup(k8sClient, backup) 
+ Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + ContainSubstring("aerospike-cluster name cannot be updated")) + }) + + It("Should fail when on-demand backup is added along with backup-config update", func() { + backup, err = NewBackup(backupNsNm) + Expect(err).ToNot(HaveOccurred()) + + err = CreateBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + err = validateTriggeredBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + backup, err = getBackupObj(k8sClient, backup.Name, backup.Namespace) + Expect(err).ToNot(HaveOccurred()) + + backup.Spec.OnDemandBackups = []asdbv1beta1.OnDemandBackupSpec{ + { + ID: "on-demand", + RoutineName: namePrefix(backupNsNm) + "-" + "test-routine", + }, + } + + // change storage to change overall backup config + config := getBackupConfigInMap(namePrefix(backupNsNm)) + backupRoutines := config[common.BackupRoutinesKey].(map[string]interface{}) + backupRoutines[namePrefix(backupNsNm)+"-"+"test-routine"].(map[string]interface{})[common.StorageKey] = + "s3Storage" + + config[common.BackupRoutinesKey] = backupRoutines + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup.Spec.Config.Raw = configBytes + + err = updateBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring( + "can not add/update onDemand backup along with backup config change")) + }) + }, + ) + + Context("When doing Valid operations", func() { + It("Should trigger backup when correct backup config with local storage is given", func() { + backup, err = NewBackup(backupNsNm) + Expect(err).ToNot(HaveOccurred()) + err = CreateBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + err = validateTriggeredBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + }) + + It("Should trigger backup when correct backup config with s3 storage is given", func() { + config := getBackupConfigInMap(namePrefix(backupNsNm)) + backupRoutines := 
config[common.BackupRoutinesKey].(map[string]interface{}) + backupRoutines[namePrefix(backupNsNm)+"-"+"test-routine"].(map[string]interface{})[common.StorageKey] = + "s3Storage" + + config[common.BackupRoutinesKey] = backupRoutines + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(backupNsNm, configBytes) + + err = CreateBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + err = validateTriggeredBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + }) + + It("Should delete dangling routines from the Backup service configMap", func() { + backup, err = NewBackup(backupNsNm) + Expect(err).ToNot(HaveOccurred()) + + err = CreateBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + err = validateTriggeredBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + By("Get Backup service configmap to update new dangling backup routine") + var cm corev1.ConfigMap + + err = k8sClient.Get(testCtx, + types.NamespacedName{Name: backupServiceName, Namespace: backupServiceNamespace}, + &cm) + Expect(err).ToNot(HaveOccurred()) + + // Add a routine to the configMap + data := cm.Data[common.BackupServiceConfigYAML] + backupSvcConfig := make(map[string]interface{}) + + err = yaml.Unmarshal([]byte(data), &backupSvcConfig) + Expect(err).ToNot(HaveOccurred()) + + backupRoutines := backupSvcConfig[common.BackupRoutinesKey].(map[string]interface{}) + // Add a new routine with a different name + newRoutineName := namePrefix(backupNsNm) + "-" + "test-routine1" + backupRoutines[newRoutineName] = + backupRoutines[namePrefix(backupNsNm)+"-"+"test-routine"] + + backupSvcConfig[common.BackupRoutinesKey] = backupRoutines + + newData, mErr := yaml.Marshal(backupSvcConfig) + Expect(mErr).ToNot(HaveOccurred()) + + cm.Data[common.BackupServiceConfigYAML] = string(newData) + + err = k8sClient.Update(testCtx, &cm) + Expect(err).ToNot(HaveOccurred()) + + By("Update backup CR to add on-demand 
backup") + backup, err = getBackupObj(k8sClient, backup.Name, backup.Namespace) + Expect(err).ToNot(HaveOccurred()) + + backup.Spec.OnDemandBackups = []asdbv1beta1.OnDemandBackupSpec{ + { + ID: "on-demand", + RoutineName: namePrefix(backupNsNm) + "-" + "test-routine", + }, + } + + err = updateBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + By("Validate the routine is removed from the Backup service configMap") + err = k8sClient.Get(testCtx, + types.NamespacedName{Name: backupServiceName, Namespace: backupServiceNamespace}, + &cm) + Expect(err).ToNot(HaveOccurred()) + + data = cm.Data[common.BackupServiceConfigYAML] + backupSvcConfig = make(map[string]interface{}) + + err = yaml.Unmarshal([]byte(data), &backupSvcConfig) + Expect(err).ToNot(HaveOccurred()) + + backupRoutines = backupSvcConfig[common.BackupRoutinesKey].(map[string]interface{}) + _, ok := backupRoutines[namePrefix(backupNsNm)+"-"+"test-routine1"] + Expect(ok).To(BeFalse()) + }) + + It("Should trigger on-demand backup when given", func() { + backup, err = NewBackup(backupNsNm) + Expect(err).ToNot(HaveOccurred()) + err = CreateBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + backup, err = getBackupObj(k8sClient, backup.Name, backup.Namespace) + Expect(err).ToNot(HaveOccurred()) + + backup.Spec.OnDemandBackups = []asdbv1beta1.OnDemandBackupSpec{ + { + ID: "on-demand", + RoutineName: namePrefix(backupNsNm) + "-" + "test-routine", + }, + } + + err = updateBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + err = validateTriggeredBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + }) + + It("Should unregister backup-routines when removed from backup CR", func() { + backupConfig := getBackupConfigInMap(namePrefix(backupNsNm)) + backupRoutines := backupConfig[common.BackupRoutinesKey].(map[string]interface{}) + backupRoutines[namePrefix(backupNsNm)+"-"+"test-routine1"] = map[string]interface{}{ + "backup-policy": "test-policy1", + "interval-cron": 
"@daily", + "incr-interval-cron": "@hourly", + "namespaces": []string{"test"}, + "source-cluster": namePrefix(backupNsNm) + "-" + "test-cluster", + "storage": "local", + } + + backupConfig[common.BackupRoutinesKey] = backupRoutines + + configBytes, err := json.Marshal(backupConfig) + Expect(err).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(backupNsNm, configBytes) + err = CreateBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + err = validateTriggeredBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + backup, err = getBackupObj(k8sClient, backup.Name, backup.Namespace) + Expect(err).ToNot(HaveOccurred()) + + By("Removing 1 backup-routine from backup CR") + backupConfig = getBackupConfigInMap(namePrefix(backupNsNm)) + + configBytes, err = json.Marshal(backupConfig) + Expect(err).ToNot(HaveOccurred()) + + backup.Spec.Config.Raw = configBytes + + err = updateBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + err = validateTriggeredBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + }) + + }) + }, +) diff --git a/test/backup/test_utils.go b/test/backup/test_utils.go new file mode 100644 index 000000000..79eead9cd --- /dev/null +++ b/test/backup/test_utils.go @@ -0,0 +1,392 @@ +package backup + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "time" + + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/abhishekdwivedi3060/aerospike-backup-service/pkg/model" + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" + backup_service 
"github.com/aerospike/aerospike-kubernetes-operator/pkg/backup-service" +) + +const ( + timeout = 2 * time.Minute + interval = 2 * time.Second + namespace = "test" +) + +var testCtx = context.TODO() + +var backupServiceName, backupServiceNamespace string + +var pkgLog = ctrl.Log.WithName("backup") + +var aerospikeNsNm = types.NamespacedName{ + Name: "aerocluster", + Namespace: namespace, +} + +func NewBackup(backupNsNm types.NamespacedName) (*asdbv1beta1.AerospikeBackup, error) { + configBytes, err := getBackupConfBytes(namePrefix(backupNsNm)) + if err != nil { + return nil, err + } + + backup := newBackupWithEmptyConfig(backupNsNm) + + backup.Spec.Config = runtime.RawExtension{ + Raw: configBytes, + } + + return backup, nil +} + +func newBackupWithConfig(backupNsNm types.NamespacedName, conf []byte) *asdbv1beta1.AerospikeBackup { + backup := newBackupWithEmptyConfig(backupNsNm) + + backup.Spec.Config = runtime.RawExtension{ + Raw: conf, + } + + return backup +} + +func newBackupWithEmptyConfig(backupNsNm types.NamespacedName) *asdbv1beta1.AerospikeBackup { + return &asdbv1beta1.AerospikeBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: backupNsNm.Name, + Namespace: backupNsNm.Namespace, + }, + Spec: asdbv1beta1.AerospikeBackupSpec{ + BackupService: asdbv1beta1.BackupService{ + Name: backupServiceName, + Namespace: backupServiceNamespace, + }, + }, + } +} + +func getBackupConfBytes(prefix string) ([]byte, error) { + backupConfig := getBackupConfigInMap(prefix) + + configBytes, err := json.Marshal(backupConfig) + if err != nil { + return nil, err + } + + pkgLog.Info(string(configBytes)) + + return configBytes, nil +} + +func getBackupConfigInMap(prefix string) map[string]interface{} { + return map[string]interface{}{ + common.AerospikeClusterKey: map[string]interface{}{ + fmt.Sprintf("%s-%s", prefix, "test-cluster"): map[string]interface{}{ + "credentials": map[string]interface{}{ + "password": "admin123", + "user": "admin", + }, + "seed-nodes": 
[]map[string]interface{}{ + { + "host-name": fmt.Sprintf("%s.%s.svc.cluster.local", + aerospikeNsNm.Name, aerospikeNsNm.Namespace, + ), + "port": 3000, + }, + }, + }, + }, + common.BackupRoutinesKey: map[string]interface{}{ + fmt.Sprintf("%s-%s", prefix, "test-routine"): map[string]interface{}{ + "backup-policy": "test-policy", + "interval-cron": "@daily", + "incr-interval-cron": "@hourly", + "namespaces": []string{"test"}, + "source-cluster": fmt.Sprintf("%s-%s", prefix, "test-cluster"), + "storage": "local", + }, + }, + } +} + +func getWrongBackupConfBytes(prefix string) ([]byte, error) { + backupConfig := getBackupConfigInMap(prefix) + + // change the format from map to list + backupConfig[common.BackupRoutinesKey] = []interface{}{ + backupConfig[common.BackupRoutinesKey], + } + + configBytes, err := json.Marshal(backupConfig) + if err != nil { + return nil, err + } + + pkgLog.Info(string(configBytes)) + + return configBytes, nil +} + +func getBackupObj(cl client.Client, name, namespace string) (*asdbv1beta1.AerospikeBackup, error) { + var backup asdbv1beta1.AerospikeBackup + + if err := cl.Get(testCtx, types.NamespacedName{Name: name, Namespace: namespace}, &backup); err != nil { + return nil, err + } + + return &backup, nil +} + +func CreateBackup(cl client.Client, backup *asdbv1beta1.AerospikeBackup) error { + if err := cl.Create(testCtx, backup); err != nil { + return err + } + + return waitForBackup(cl, backup, timeout) +} + +func updateBackup(cl client.Client, backup *asdbv1beta1.AerospikeBackup) error { + if err := cl.Update(testCtx, backup); err != nil { + return err + } + + return waitForBackup(cl, backup, timeout) +} + +func DeleteBackup(cl client.Client, backup *asdbv1beta1.AerospikeBackup) error { + if err := cl.Delete(testCtx, backup); err != nil && !k8serrors.IsNotFound(err) { + return err + } + + // Wait for the finalizer to be removed + for { + _, err := getBackupObj(cl, backup.Name, backup.Namespace) + + if err != nil { + if 
k8serrors.IsNotFound(err) { + break + } + + return err + } + + time.Sleep(1 * time.Second) + } + + return nil +} + +func waitForBackup(cl client.Client, backup *asdbv1beta1.AerospikeBackup, + timeout time.Duration) error { + namespaceName := types.NamespacedName{ + Name: backup.Name, Namespace: backup.Namespace, + } + + return wait.PollUntilContextTimeout( + testCtx, 1*time.Second, + timeout, true, func(ctx context.Context) (bool, error) { + if err := cl.Get(ctx, namespaceName, backup); err != nil { + return false, nil + } + + status := asdbv1beta1.AerospikeBackupStatus{} + status.BackupService = backup.Spec.BackupService + status.Config = backup.Spec.Config + status.OnDemandBackups = backup.Spec.OnDemandBackups + + if !reflect.DeepEqual(status, backup.Status) { + pkgLog.Info("Backup status not updated yet") + return false, nil + } + return true, nil + }) +} + +// validateTriggeredBackup validates if the backup is triggered by checking the current config of backup-service +func validateTriggeredBackup(k8sClient client.Client, backup *asdbv1beta1.AerospikeBackup) error { + var backupK8sService corev1.Service + + validateNewEntries := func(currentConfigInMap map[string]interface{}, desiredConfigInMap map[string]interface{}, + fieldPath string) error { + newCluster := desiredConfigInMap[common.AerospikeClusterKey].(map[string]interface{}) + + for clusterName := range newCluster { + if _, ok := currentConfigInMap[common.AerospikeClustersKey].(map[string]interface{})[clusterName]; !ok { + return fmt.Errorf("cluster %s not found in %s backup config", clusterName, fieldPath) + } + } + + pkgLog.Info(fmt.Sprintf("Cluster info is found in %s backup config", fieldPath)) + + routines := desiredConfigInMap[common.BackupRoutinesKey].(map[string]interface{}) + + for routineName := range routines { + if _, ok := currentConfigInMap[common.BackupRoutinesKey].(map[string]interface{})[routineName]; !ok { + return fmt.Errorf("routine %s not found in %s backup config", routineName, 
fieldPath) + } + } + + if len(routines) != len(currentConfigInMap[common.BackupRoutinesKey].(map[string]interface{})) { + return fmt.Errorf("backup routine count mismatch in %s backup config", fieldPath) + } + + pkgLog.Info(fmt.Sprintf("Backup routines info is found in %s backup config", fieldPath)) + + return nil + } + + // Validate from backup service configmap + var configmap corev1.ConfigMap + if err := k8sClient.Get(testCtx, + types.NamespacedName{ + Name: backup.Spec.BackupService.Name, + Namespace: backup.Spec.BackupService.Namespace, + }, &configmap, + ); err != nil { + return err + } + + backupSvcConfig := make(map[string]interface{}) + + if err := yaml.Unmarshal([]byte(configmap.Data[common.BackupServiceConfigYAML]), &backupSvcConfig); err != nil { + return err + } + + desiredConfigInMap := make(map[string]interface{}) + + if err := yaml.Unmarshal(backup.Spec.Config.Raw, &desiredConfigInMap); err != nil { + return err + } + + if err := validateNewEntries(backupSvcConfig, desiredConfigInMap, "configMap"); err != nil { + return err + } + + // Wait for Service LB IP to be populated + if err := wait.PollUntilContextTimeout(testCtx, interval, timeout, true, + func(ctx context.Context) (bool, error) { + if err := k8sClient.Get(testCtx, + types.NamespacedName{ + Name: backup.Spec.BackupService.Name, + Namespace: backup.Spec.BackupService.Namespace, + }, + &backupK8sService); err != nil { + return false, err + } + + if backupK8sService.Status.LoadBalancer.Ingress == nil { + return false, nil + } + + return true, nil + }); err != nil { + return err + } + + serviceClient := backup_service.Client{ + Address: backupK8sService.Status.LoadBalancer.Ingress[0].IP, + Port: 8081, + } + + // Wait for Backup service to be ready + if err := wait.PollUntilContextTimeout(testCtx, interval, timeout, true, + func(ctx context.Context) (bool, error) { + config, err := serviceClient.GetBackupServiceConfig() + if err != nil { + pkgLog.Error(err, "Failed to get backup service config") 
+ return false, nil + } + + backupSvcConfig = config + return true, nil + }); err != nil { + return err + } + + return validateNewEntries(backupSvcConfig, desiredConfigInMap, "backup-service API") +} + +func namePrefix(nsNm types.NamespacedName) string { + return nsNm.Namespace + "-" + nsNm.Name +} + +func GetBackupDataPaths(k8sClient client.Client, backup *asdbv1beta1.AerospikeBackup) ([]string, error) { + var backupK8sService corev1.Service + + // Wait for Service LB IP to be populated + if err := wait.PollUntilContextTimeout(testCtx, interval, timeout, true, + func(ctx context.Context) (bool, error) { + if err := k8sClient.Get(testCtx, + types.NamespacedName{ + Name: backup.Spec.BackupService.Name, + Namespace: backup.Spec.BackupService.Namespace, + }, + &backupK8sService, + ); err != nil { + return false, err + } + + if backupK8sService.Status.LoadBalancer.Ingress == nil { + return false, nil + } + + return true, nil + }); err != nil { + return nil, err + } + + var ( + config model.Config + backupDataPaths []string + ) + + if err := yaml.Unmarshal(backup.Spec.Config.Raw, &config); err != nil { + return backupDataPaths, err + } + + serviceClient := backup_service.Client{ + Address: backupK8sService.Status.LoadBalancer.Ingress[0].IP, + Port: 8081, + } + + if err := wait.PollUntilContextTimeout(testCtx, interval, timeout, true, + func(ctx context.Context) (bool, error) { + for routineName := range config.BackupRoutines { + backups, err := serviceClient.GetFullBackupsForRoutine(routineName) + if err != nil { + return false, nil + } + + if len(backups) == 0 { + pkgLog.Info("No backups found for routine", "name", routineName) + return false, nil + } + + for idx := range backups { + backupMeta := backups[idx].(map[string]interface{}) + backupDataPaths = append(backupDataPaths, backupMeta["key"].(string)) + } + } + + return true, nil + }); err != nil { + return backupDataPaths, err + } + + return backupDataPaths, nil +} diff --git 
a/test/backup_service/backup_service_suite_test.go b/test/backup_service/backup_service_suite_test.go new file mode 100644 index 000000000..3d8ff23c4 --- /dev/null +++ b/test/backup_service/backup_service_suite_test.go @@ -0,0 +1,49 @@ +package backupservice + +import ( + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + k8Runtime "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + "github.com/aerospike/aerospike-kubernetes-operator/test" +) + +var testEnv *envtest.Environment + +var k8sClient client.Client + +var scheme = k8Runtime.NewScheme() + +func TestBackupService(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "BackupService Suite") +} + +var _ = BeforeSuite( + func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("Bootstrapping test environment") + + var err error + testEnv, _, k8sClient, _, err = test.BootStrapTestEnv(scheme) + Expect(err).NotTo(HaveOccurred()) + }) + +var _ = AfterSuite( + func() { + + By("tearing down the test environment") + gexec.KillAndWait(5 * time.Second) + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) + }, +) diff --git a/test/backup_service/backup_service_test.go b/test/backup_service/backup_service_test.go new file mode 100644 index 000000000..59623a625 --- /dev/null +++ b/test/backup_service/backup_service_test.go @@ -0,0 +1,235 @@ +package backupservice + +import ( + "encoding/json" + "net/http" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" +) + +var _ = Describe( + "Backup Service Test", func() { + var ( + backupService *asdbv1beta1.AerospikeBackupService + err error + ) + + AfterEach(func() { + Expect(DeleteBackupService(k8sClient, backupService)).ToNot(HaveOccurred()) + }) + + Context( + "When doing Invalid operations", func() { + It("Should fail when wrong format backup service config is given", func() { + badConfig, gErr := getWrongBackupServiceConfBytes() + Expect(gErr).ToNot(HaveOccurred()) + backupService = newBackupServiceWithConfig(badConfig) + + err = DeployBackupService(k8sClient, backupService) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when un-supported field is given in backup service config", func() { + configMap := getBackupServiceConfMap() + configMap["unknown"] = "unknown" + + configBytes, mErr := json.Marshal(configMap) + Expect(mErr).ToNot(HaveOccurred()) + + backupService = newBackupServiceWithConfig(configBytes) + + err = DeployBackupService(k8sClient, backupService) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("unknown field")) + }) + + It("Should fail when wrong image is given", func() { + backupService, err = NewBackupService() + Expect(err).ToNot(HaveOccurred()) + + backupService.Spec.Image = "wrong-image" + + err = deployBackupServiceWithTO(k8sClient, backupService, 1*time.Minute) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when aerospike-clusters field is given", func() { + configMap := getBackupServiceConfMap() + configMap[common.AerospikeClustersKey] = map[string]interface{}{ + "test-cluster": map[string]interface{}{ + "credentials": map[string]interface{}{ + "password": "admin123", + "user": "admin", + }, + "seed-nodes": []map[string]interface{}{ + { + "host-name": 
"aerocluster.aerospike.svc.cluster.local", + "port": 3000, + }, + }, + }, + } + + configBytes, mErr := json.Marshal(configMap) + Expect(mErr).ToNot(HaveOccurred()) + + backupService = newBackupServiceWithConfig(configBytes) + + err = deployBackupServiceWithTO(k8sClient, backupService, 1*time.Minute) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring( + "aerospike-clusters field cannot be specified in backup service config")) + }) + + It("Should fail when backup-routines field is given", func() { + configMap := getBackupServiceConfMap() + configMap[common.BackupRoutinesKey] = map[string]interface{}{ + "test-routine": map[string]interface{}{ + "backup-policy": "test-policy", + "interval-cron": "@daily", + "incr-interval-cron": "@hourly", + "namespaces": []string{"test"}, + "source-cluster": "test-cluster", + "storage": "local", + }, + } + + configBytes, mErr := json.Marshal(configMap) + Expect(mErr).ToNot(HaveOccurred()) + + backupService = newBackupServiceWithConfig(configBytes) + + err = deployBackupServiceWithTO(k8sClient, backupService, 1*time.Minute) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + ContainSubstring("backup-routines field cannot be specified in backup service config")) + }) + }, + ) + + Context("When doing Valid operations", func() { + It("Should deploy backup service components when correct backup config is given", func() { + backupService, err = NewBackupService() + Expect(err).ToNot(HaveOccurred()) + err = DeployBackupService(k8sClient, backupService) + Expect(err).ToNot(HaveOccurred()) + }) + + It("Should restart backup service deployment pod when config is changed", func() { + backupService, err = NewBackupService() + Expect(err).ToNot(HaveOccurred()) + err = DeployBackupService(k8sClient, backupService) + Expect(err).ToNot(HaveOccurred()) + + podList, gErr := getBackupServicePodList(k8sClient, backupService) + Expect(gErr).ToNot(HaveOccurred()) + Expect(len(podList.Items)).To(Equal(1)) + + PodUID := 
podList.Items[0].ObjectMeta.UID + + // Get backup service object + backupService, err = getBackupServiceObj(k8sClient, name, namespace) + Expect(err).ToNot(HaveOccurred()) + + // Change config + backupService.Spec.Config.Raw = []byte(`{"service":{"http":{"port":8080}}}`) + err = updateBackupService(k8sClient, backupService) + Expect(err).ToNot(HaveOccurred()) + + podList, err = getBackupServicePodList(k8sClient, backupService) + Expect(err).ToNot(HaveOccurred()) + Expect(len(podList.Items)).To(Equal(1)) + + Expect(podList.Items[0].ObjectMeta.UID).ToNot(Equal(PodUID)) + }) + + It("Should restart backup service deployment pod when pod spec is changed", func() { + backupService, err = NewBackupService() + Expect(err).ToNot(HaveOccurred()) + err = DeployBackupService(k8sClient, backupService) + Expect(err).ToNot(HaveOccurred()) + + podList, gErr := getBackupServicePodList(k8sClient, backupService) + Expect(gErr).ToNot(HaveOccurred()) + Expect(len(podList.Items)).To(Equal(1)) + + PodUID := podList.Items[0].ObjectMeta.UID + + // Get backup service object + backupService, err = getBackupServiceObj(k8sClient, name, namespace) + Expect(err).ToNot(HaveOccurred()) + + // Change Pod spec + backupService.Spec.Resources = &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0.5"), + }, + } + + err = updateBackupService(k8sClient, backupService) + Expect(err).ToNot(HaveOccurred()) + + podList, err = getBackupServicePodList(k8sClient, backupService) + Expect(err).ToNot(HaveOccurred()) + Expect(len(podList.Items)).To(Equal(1)) + + Expect(podList.Items[0].ObjectMeta.UID).ToNot(Equal(PodUID)) + }) + + It("Should change K8s service type when service type is changed in CR", func() { + backupService, err = NewBackupService() + Expect(err).ToNot(HaveOccurred()) + err := DeployBackupService(k8sClient, backupService) + Expect(err).ToNot(HaveOccurred()) + + svc, err := getBackupK8sServiceObj(k8sClient, name, namespace) + 
Expect(err).ToNot(HaveOccurred()) + Expect(svc.Spec.Type).To(Equal(corev1.ServiceTypeClusterIP)) + + // Get backup service object + backupService, err = getBackupServiceObj(k8sClient, name, namespace) + Expect(err).ToNot(HaveOccurred()) + + // Change service type + backupService.Spec.Service = &asdbv1beta1.Service{Type: corev1.ServiceTypeLoadBalancer} + + err = updateBackupService(k8sClient, backupService) + Expect(err).ToNot(HaveOccurred()) + + svc, err = getBackupK8sServiceObj(k8sClient, name, namespace) + Expect(err).ToNot(HaveOccurred()) + Expect(svc.Spec.Type).To(Equal(corev1.ServiceTypeLoadBalancer)) + + Eventually(func() bool { + svc, err = getBackupK8sServiceObj(k8sClient, name, namespace) + if err != nil { + return false + } + return svc.Status.LoadBalancer.Ingress != nil + }, timeout, interval).Should(BeTrue()) + + // Check backup service health using LB IP + Eventually(func() bool { + resp, err := http.Get("http://" + svc.Status.LoadBalancer.Ingress[0].IP + ":8081/health") + if err != nil { + pkgLog.Error(err, "Failed to get health") + return false + } + + defer resp.Body.Close() + + return resp.StatusCode == http.StatusOK + }, timeout, interval).Should(BeTrue()) + + }) + + }) + }, +) diff --git a/test/backup_service/test_utils.go b/test/backup_service/test_utils.go new file mode 100644 index 000000000..b0da52ee1 --- /dev/null +++ b/test/backup_service/test_utils.go @@ -0,0 +1,299 @@ +package backupservice + +import ( + "context" + "encoding/json" + "fmt" + "time" + + app "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" + 
"github.com/aerospike/aerospike-kubernetes-operator/controllers/common" + "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" + "github.com/aerospike/aerospike-kubernetes-operator/test" +) + +const BackupServiceImage = "aerospike/aerospike-backup-service:1.0.0" + +const ( + timeout = 2 * time.Minute + interval = 2 * time.Second + name = "backup-service" + namespace = "test" +) + +var testCtx = context.TODO() + +var pkgLog = ctrl.Log.WithName("backupservice") + +func NewBackupService() (*asdbv1beta1.AerospikeBackupService, error) { + configBytes, err := getBackupServiceConfBytes() + if err != nil { + return nil, err + } + + backupService := newBackupServiceWithEmptyConfig() + backupService.Spec.Config = runtime.RawExtension{ + Raw: configBytes, + } + + return backupService, nil +} + +func newBackupServiceWithConfig(config []byte) *asdbv1beta1.AerospikeBackupService { + backupService := newBackupServiceWithEmptyConfig() + backupService.Spec.Config = runtime.RawExtension{ + Raw: config, + } + + return backupService +} + +func newBackupServiceWithEmptyConfig() *asdbv1beta1.AerospikeBackupService { + return &asdbv1beta1.AerospikeBackupService{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: asdbv1beta1.AerospikeBackupServiceSpec{ + Image: BackupServiceImage, + SecretMounts: []asdbv1beta1.SecretMount{ + { + SecretName: test.AWSSecretName, + VolumeMount: corev1.VolumeMount{ + Name: test.AWSSecretName, + MountPath: "/root/.aws/credentials", + SubPath: "credentials", + }, + }, + }, + }, + } +} + +func getBackupServiceObj(cl client.Client, name, namespace string) (*asdbv1beta1.AerospikeBackupService, + error) { + var backupService asdbv1beta1.AerospikeBackupService + + if err := cl.Get(testCtx, types.NamespacedName{Name: name, Namespace: namespace}, &backupService); err != nil { + return nil, err + } + + return &backupService, nil +} + +func getBackupK8sServiceObj(cl client.Client, name, namespace string) (*corev1.Service, 
error) { + var svc corev1.Service + + if err := cl.Get(testCtx, types.NamespacedName{Name: name, Namespace: namespace}, &svc); err != nil { + return nil, err + } + + return &svc, nil +} +func DeployBackupService(cl client.Client, backupService *asdbv1beta1.AerospikeBackupService) error { + if err := cl.Create(testCtx, backupService); err != nil { + return err + } + + return waitForBackupService(cl, backupService, timeout) +} + +func deployBackupServiceWithTO(cl client.Client, backupService *asdbv1beta1.AerospikeBackupService, + timeout time.Duration) error { + if err := cl.Create(testCtx, backupService); err != nil { + return err + } + + return waitForBackupService(cl, backupService, timeout) +} + +func updateBackupService(cl client.Client, backupService *asdbv1beta1.AerospikeBackupService) error { + if err := cl.Update(testCtx, backupService); err != nil { + return err + } + + return waitForBackupService(cl, backupService, timeout) +} + +func waitForBackupService(cl client.Client, backupService *asdbv1beta1.AerospikeBackupService, + timeout time.Duration) error { + namespaceName := types.NamespacedName{ + Name: backupService.Name, Namespace: backupService.Namespace, + } + + if err := wait.PollUntilContextTimeout( + testCtx, 1*time.Second, + timeout, true, func(ctx context.Context) (bool, error) { + if err := cl.Get(ctx, namespaceName, backupService); err != nil { + return false, nil + } + + if backupService.Status.Phase != asdbv1beta1.AerospikeBackupServiceCompleted { + pkgLog.Info(fmt.Sprintf("BackupService is in %s phase", backupService.Status.Phase)) + return false, nil + } + + podList, err := getBackupServicePodList(cl, backupService) + if err != nil { + return false, nil + } + + if len(podList.Items) != 1 { + return false, nil + } + + return true, nil + }); err != nil { + return err + } + + var cm corev1.ConfigMap + + if err := cl.Get(testCtx, namespaceName, &cm); err != nil { + return err + } + + pkgLog.Info("ConfigMap is present") + + var deploy 
app.Deployment + + if err := cl.Get(testCtx, namespaceName, &deploy); err != nil { + return err + } + + pkgLog.Info("Deployment is present") + + var svc corev1.Service + + if err := cl.Get(testCtx, namespaceName, &svc); err != nil { + return err + } + + pkgLog.Info("Service is present") + + return nil +} + +func getBackupServiceConfBytes() ([]byte, error) { + config := getBackupServiceConfMap() + + configBytes, err := json.Marshal(config) + if err != nil { + return nil, err + } + + pkgLog.Info(string(configBytes)) + + return configBytes, nil +} + +func getWrongBackupServiceConfBytes() ([]byte, error) { + config := getBackupServiceConfMap() + + tempList := make([]interface{}, 0, len(config[common.BackupPoliciesKey].(map[string]interface{}))) + + for _, policy := range config[common.BackupPoliciesKey].(map[string]interface{}) { + tempList = append(tempList, policy) + } + + // change the format from map to list + config[common.BackupPoliciesKey] = tempList + + configBytes, err := json.Marshal(config) + if err != nil { + return nil, err + } + + pkgLog.Info(string(configBytes)) + + return configBytes, nil +} + +func getBackupServiceConfMap() map[string]interface{} { + return map[string]interface{}{ + common.ServiceKey: map[string]interface{}{ + "http": map[string]interface{}{ + "port": 8081, + }, + }, + common.BackupPoliciesKey: map[string]interface{}{ + "test-policy": map[string]interface{}{ + "parallel": 3, + "remove-files": "KeepAll", + }, + "test-policy1": map[string]interface{}{ + "parallel": 3, + "remove-files": "KeepAll", + }, + }, + common.StorageKey: map[string]interface{}{ + "local": map[string]interface{}{ + "path": "/localStorage", + "type": "local", + }, + "s3Storage": map[string]interface{}{ + "type": "aws-s3", + "path": "s3://aerospike-kubernetes-operator-test", + "s3-region": "us-east-1", + "s3-profile": "default", + }, + }, + } +} + +func getBackupServicePodList(cl client.Client, backupService *asdbv1beta1.AerospikeBackupService) (*corev1.PodList, + 
error) { + var podList corev1.PodList + + labelSelector := labels.SelectorFromSet(utils.LabelsForAerospikeBackupService(backupService.Name)) + listOps := &client.ListOptions{ + Namespace: backupService.Namespace, LabelSelector: labelSelector, + } + + if err := cl.List(context.TODO(), &podList, listOps); err != nil { + return nil, err + } + + return &podList, nil +} + +func DeleteBackupService( + k8sClient client.Client, + backService *asdbv1beta1.AerospikeBackupService, +) error { + deletePolicy := metav1.DeletePropagationForeground + + // Add Delete propagation policy to delete the dependent resources first + if err := k8sClient.Delete(testCtx, backService, + &client.DeleteOptions{PropagationPolicy: &deletePolicy}); err != nil && !k8serrors.IsNotFound(err) { + return err + } + + // Wait for all the dependent resources to be garbage collected by k8s + for { + _, err := getBackupServiceObj(k8sClient, backService.Name, backService.Namespace) + + if err != nil { + if k8serrors.IsNotFound(err) { + break + } + + return err + } + + time.Sleep(1 * time.Second) + } + + return nil +} diff --git a/test/cleanup-test-namespace.sh b/test/cleanup-test-namespace.sh index b7c8324d1..ac964992c 100755 --- a/test/cleanup-test-namespace.sh +++ b/test/cleanup-test-namespace.sh @@ -7,6 +7,9 @@ # ################################################ +echo "Cleaning up s3 bucket contents s3://aerospike-kubernetes-operator-test" +aws s3 rm s3://aerospike-kubernetes-operator-test --recursive + namespaces="test test1 test2 aerospike" for namespace in $namespaces; do @@ -14,6 +17,15 @@ for namespace in $namespaces; do echo "Removing Aerospike clusters from namespace: $namespace" kubectl -n "$namespace" delete aerospikecluster --all + echo "Removing Aerospike restore from namespace: $namespace" + kubectl -n "$namespace" delete aerospikerestore --all + + echo "Removing Aerospike backup from namespace: $namespace" + kubectl -n "$namespace" delete aerospikebackup --all + + echo "Removing Aerospike 
backup service from namespace: $namespace" + kubectl -n "$namespace" delete aerospikebackupservice --all + # Force delete pods kubectl -n "$namespace" delete pod --selector 'app=aerospike-cluster' --grace-period=0 --force --ignore-not-found @@ -27,6 +39,7 @@ for namespace in $namespaces; do echo "Removing serviceaccount from namespace: $namespace" kubectl -n "$namespace" delete serviceaccount aerospike-operator-controller-manager --ignore-not-found + kubectl -n "$namespace" delete serviceaccount aerospike-backup-service --ignore-not-found done @@ -43,6 +56,9 @@ kubectl delete clusterserviceversion -n $OPERATOR_NS $(kubectl get clusterservic kubectl delete job $(kubectl get job -o=jsonpath='{.items[?(@.status.succeeded==1)].metadata.name}' -n $OPERATOR_NS) -n $OPERATOR_NS --ignore-not-found kubectl delete CatalogSource $(kubectl get CatalogSource -n $OPERATOR_NS | grep aerospike-kubernetes-operator | cut -f 1 -d ' ') --ignore-not-found kubectl delete crd aerospikeclusters.asdb.aerospike.com --ignore-not-found +kubectl delete crd aerospikerestores.asdb.aerospike.com --ignore-not-found +kubectl delete crd aerospikebackups.asdb.aerospike.com --ignore-not-found +kubectl delete crd aerospikebackupservices.asdb.aerospike.com --ignore-not-found # Delete webhook configurations. Web hooks from older versions linger around and intercept requests. 
kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io $(kubectl get mutatingwebhookconfigurations.admissionregistration.k8s.io | grep aerospike | cut -f 1 -d " ") diff --git a/test/access_control_test.go b/test/cluster/access_control_test.go similarity index 98% rename from test/access_control_test.go rename to test/cluster/access_control_test.go index 449c838f1..64ebee231 100644 --- a/test/access_control_test.go +++ b/test/cluster/access_control_test.go @@ -1,6 +1,6 @@ //go:build !noac -package test +package cluster import ( goctx "context" @@ -21,6 +21,7 @@ import ( as "github.com/aerospike/aerospike-client-go/v7" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" aerospikecluster "github.com/aerospike/aerospike-kubernetes-operator/controllers" + "github.com/aerospike/aerospike-kubernetes-operator/test" ) const ( @@ -1244,7 +1245,7 @@ var _ = Describe( { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -1367,7 +1368,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ // Missing required user admin role. "sys-admin", @@ -1376,7 +1377,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", "sys-admin", @@ -1385,7 +1386,7 @@ var _ = Describe( { Name: "userToDrop", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", }, @@ -1453,7 +1454,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ // Missing required user admin role. 
"sys-admin", @@ -1462,7 +1463,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", "sys-admin", @@ -1471,7 +1472,7 @@ var _ = Describe( { Name: "userToDrop", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", }, @@ -1673,7 +1674,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretNameForUpdate, + SecretName: test.AuthSecretNameForUpdate, Roles: []string{ "sys-admin", "user-admin", @@ -1682,7 +1683,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretNameForUpdate, + SecretName: test.AuthSecretNameForUpdate, Roles: []string{ "data-admin", "read-write-udf", @@ -1759,7 +1760,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -1768,7 +1769,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", "sys-admin", @@ -1777,7 +1778,7 @@ var _ = Describe( { Name: "userToDrop", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", }, @@ -1825,7 +1826,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretNameForUpdate, + SecretName: test.AuthSecretNameForUpdate, Roles: []string{ "sys-admin", "user-admin", @@ -1834,7 +1835,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretNameForUpdate, + SecretName: test.AuthSecretNameForUpdate, Roles: []string{ "data-admin", "read-write-udf", @@ -1921,7 +1922,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -1930,7 +1931,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretName, + 
SecretName: test.AuthSecretName, Roles: []string{ "profiler", "sys-admin", @@ -1939,7 +1940,7 @@ var _ = Describe( { Name: "userToDrop", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", }, @@ -1997,7 +1998,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -2006,7 +2007,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", "sys-admin", @@ -2015,7 +2016,7 @@ var _ = Describe( { Name: "userToDrop", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", }, @@ -2075,7 +2076,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -2084,7 +2085,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", "sys-admin", @@ -2093,7 +2094,7 @@ var _ = Describe( { Name: "userToDrop", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", }, @@ -2205,7 +2206,7 @@ var _ = Describe( // Get default password from secret. secretNamespcedName := types.NamespacedName{ - Name: aerospikeSecretName, + Name: test.AerospikeSecretName, Namespace: aeroCluster.Namespace, } passFileName := "password.conf" @@ -2238,7 +2239,7 @@ var _ = Describe( Expect(err).ToNot(HaveOccurred()) // Set correct secret name for admin user credentials. 
- aeroCluster.Spec.AerospikeAccessControl.Users[0].SecretName = authSecretName + aeroCluster.Spec.AerospikeAccessControl.Users[0].SecretName = test.AuthSecretName err = updateCluster(k8sClient, ctx, aeroCluster) Expect(err).ToNot(HaveOccurred()) diff --git a/test/aero_info.go b/test/cluster/aero_info.go similarity index 62% rename from test/aero_info.go rename to test/cluster/aero_info.go index 9269eb1bd..a0bf81b75 100644 --- a/test/aero_info.go +++ b/test/cluster/aero_info.go @@ -1,4 +1,4 @@ -package test +package cluster // Aerospike client and info testing utilities. // @@ -6,50 +6,20 @@ package test import ( goctx "context" "fmt" - "strings" "time" "github.com/go-logr/logr" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" as "github.com/aerospike/aerospike-client-go/v7" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" - "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" lib "github.com/aerospike/aerospike-management-lib" "github.com/aerospike/aerospike-management-lib/deployment" "github.com/aerospike/aerospike-management-lib/info" ) -type CloudProvider int - -const ( - CloudProviderUnknown CloudProvider = iota - CloudProviderAWS - CloudProviderGCP -) - -func getServiceForPod( - pod *corev1.Pod, k8sClient client.Client, -) (*corev1.Service, error) { - service := &corev1.Service{} - err := k8sClient.Get( - goctx.TODO(), - types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, service, - ) - - if err != nil { - return nil, fmt.Errorf( - "failed to get service for pod %s: %v", pod.Name, err, - ) - } - - return service, nil -} - func newAsConn( _ logr.Logger, aeroCluster *asdbv1.AerospikeCluster, pod *corev1.Pod, k8sClient client.Client, @@ -245,133 +215,6 @@ func newHostConn( return deployment.NewHostConn(log, host, asConn), nil } -func getPodList( - aeroCluster *asdbv1.AerospikeCluster, k8sClient 
client.Client, -) (*corev1.PodList, error) { - podList := &corev1.PodList{} - labelSelector := labels.SelectorFromSet(utils.LabelsForAerospikeCluster(aeroCluster.Name)) - listOps := &client.ListOptions{ - Namespace: aeroCluster.Namespace, LabelSelector: labelSelector, - } - - if err := k8sClient.List(goctx.TODO(), podList, listOps); err != nil { - return nil, err - } - - return podList, nil -} - -func getSTSList( - aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client, -) (*appsv1.StatefulSetList, error) { - stsList := &appsv1.StatefulSetList{} - labelSelector := labels.SelectorFromSet(utils.LabelsForAerospikeCluster(aeroCluster.Name)) - listOps := &client.ListOptions{ - Namespace: aeroCluster.Namespace, LabelSelector: labelSelector, - } - - if err := k8sClient.List(goctx.TODO(), stsList, listOps); err != nil { - return nil, err - } - - return stsList, nil -} - -func getNodeList(ctx goctx.Context, k8sClient client.Client) ( - *corev1.NodeList, error, -) { - nodeList := &corev1.NodeList{} - if err := k8sClient.List(ctx, nodeList); err != nil { - return nil, err - } - - return nodeList, nil -} - -func getZones(ctx goctx.Context, k8sClient client.Client) ([]string, error) { - unqZones := map[string]int{} - - nodes, err := getNodeList(ctx, k8sClient) - if err != nil { - return nil, err - } - - for idx := range nodes.Items { - unqZones[nodes.Items[idx].Labels[zoneKey]] = 1 - } - - zones := make([]string, 0, len(unqZones)) - - for zone := range unqZones { - zones = append(zones, zone) - } - - return zones, nil -} - -func getRegion(ctx goctx.Context, k8sClient client.Client) (string, error) { - nodes, err := getNodeList(ctx, k8sClient) - if err != nil { - return "", err - } - - if len(nodes.Items) == 0 { - return "", fmt.Errorf("node list empty: %v", nodes.Items) - } - - return nodes.Items[0].Labels[regionKey], nil -} - -func getCloudProvider( - ctx goctx.Context, k8sClient client.Client, -) (CloudProvider, error) { - labelKeys := map[string]struct{}{} - - nodes, 
err := getNodeList(ctx, k8sClient) - if err != nil { - return CloudProviderUnknown, err - } - - for idx := range nodes.Items { - for labelKey := range nodes.Items[idx].Labels { - if strings.Contains(labelKey, "cloud.google.com") { - return CloudProviderGCP, nil - } - - if strings.Contains(labelKey, "eks.amazonaws.com") { - return CloudProviderAWS, nil - } - - labelKeys[labelKey] = struct{}{} - } - - provider := determineByProviderID(&nodes.Items[idx]) - if provider != CloudProviderUnknown { - return provider, nil - } - } - - labelKeysSlice := make([]string, 0, len(labelKeys)) - - for labelKey := range labelKeys { - labelKeysSlice = append(labelKeysSlice, labelKey) - } - - return CloudProviderUnknown, fmt.Errorf( - "can't determin cloud platform by node's labels: %v", labelKeysSlice, - ) -} - -func determineByProviderID(node *corev1.Node) CloudProvider { - if strings.Contains(node.Spec.ProviderID, "gce") { - return CloudProviderGCP - } else if strings.Contains(node.Spec.ProviderID, "aws") { - return CloudProviderAWS - } - // TODO add cloud provider detection for Azure - return CloudProviderUnknown -} - func newAllHostConn( log logr.Logger, aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client, @@ -399,23 +242,6 @@ func newAllHostConn( return hostConns, nil } -func getAeroClusterPVCList( - aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client, -) ([]corev1.PersistentVolumeClaim, error) { - // List the pvc for this aeroCluster's statefulset - pvcList := &corev1.PersistentVolumeClaimList{} - labelSelector := labels.SelectorFromSet(utils.LabelsForAerospikeCluster(aeroCluster.Name)) - listOps := &client.ListOptions{ - Namespace: aeroCluster.Namespace, LabelSelector: labelSelector, - } - - if err := k8sClient.List(goctx.TODO(), pvcList, listOps); err != nil { - return nil, err - } - - return pvcList.Items, nil -} - func getAsConfig(asinfo *info.AsInfo, cmd string) (lib.Stats, error) { var ( confs lib.Stats diff --git a/test/batch_restart_pods_test.go 
b/test/cluster/batch_restart_pods_test.go similarity index 99% rename from test/batch_restart_pods_test.go rename to test/cluster/batch_restart_pods_test.go index 6e05d9cab..e5c1dc87e 100644 --- a/test/batch_restart_pods_test.go +++ b/test/cluster/batch_restart_pods_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/batch_scaledown_pods_test.go b/test/cluster/batch_scaledown_pods_test.go similarity index 99% rename from test/batch_scaledown_pods_test.go rename to test/cluster/batch_scaledown_pods_test.go index c216ef48a..13a33c870 100644 --- a/test/batch_scaledown_pods_test.go +++ b/test/cluster/batch_scaledown_pods_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/cluster_helper.go b/test/cluster/cluster_helper.go similarity index 95% rename from test/cluster_helper.go rename to test/cluster/cluster_helper.go index 22a9a64c0..8911c0752 100644 --- a/test/cluster_helper.go +++ b/test/cluster/cluster_helper.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -20,11 +20,13 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" internalerrors "github.com/aerospike/aerospike-kubernetes-operator/errors" "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" + "github.com/aerospike/aerospike-kubernetes-operator/test" lib "github.com/aerospike/aerospike-management-lib" "github.com/aerospike/aerospike-management-lib/info" ) @@ -42,6 +44,19 @@ const ( latestSchemaVersion = "7.1.0" ) +var ( + storageClass = "ssd" + namespace = "test" + pkgLog = ctrl.Log.WithName("cluster") +) + +const aerospikeConfigSecret string = "aerospike-config-secret" //nolint:gosec // for testing + +const serviceTLSPort = 4333 +const serviceNonTLSPort = 3000 + +var 
aerospikeVolumeInitMethodDeleteFiles = asdbv1.AerospikeVolumeMethodDeleteFiles + var ( retryInterval = time.Second * 5 cascadeDeleteFalse = false @@ -70,7 +85,7 @@ func rollingRestartClusterByEnablingTLS( aeroCluster.Spec.OperatorClientCertSpec = &asdbv1.AerospikeOperatorClientCertSpec{ AerospikeOperatorCertSource: asdbv1.AerospikeOperatorCertSource{ SecretCertSource: &asdbv1.AerospikeSecretCertSource{ - SecretName: aerospikeSecretName, + SecretName: test.AerospikeSecretName, CaCertsFilename: "cacert.pem", ClientCertFilename: "svc_cluster_chain.pem", ClientKeyFilename: "svc_key.pem", @@ -722,7 +737,6 @@ func deleteCluster( // Wait for all removed PVCs to be terminated. for { newPVCList, err := getAeroClusterPVCList(aeroCluster, k8sClient) - if err != nil { return fmt.Errorf("error getting PVCs: %v", err) } @@ -746,6 +760,15 @@ func deleteCluster( return nil } +// DeleteCluster is the public variant of deleteCluster +// Remove this when deleteCluster will be made public +func DeleteCluster( + k8sClient client.Client, ctx goctx.Context, + aeroCluster *asdbv1.AerospikeCluster, +) error { + return deleteCluster(k8sClient, ctx, aeroCluster) +} + func deployCluster( k8sClient client.Client, ctx goctx.Context, aeroCluster *asdbv1.AerospikeCluster, @@ -756,6 +779,15 @@ func deployCluster( ) } +// DeployCluster is the public variant of deployCluster +// Remove this when deployCluster will be made public +func DeployCluster( + k8sClient client.Client, ctx goctx.Context, + aeroCluster *asdbv1.AerospikeCluster, +) error { + return deployCluster(k8sClient, ctx, aeroCluster) +} + func deployClusterWithTO( k8sClient client.Client, ctx goctx.Context, aeroCluster *asdbv1.AerospikeCluster, @@ -854,7 +886,7 @@ func createAerospikeClusterPost460( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -869,7 +901,7 @@ func createAerospikeClusterPost460( OperatorClientCertSpec: 
&asdbv1.AerospikeOperatorClientCertSpec{ AerospikeOperatorCertSource: asdbv1.AerospikeOperatorCertSource{ SecretCertSource: &asdbv1.AerospikeSecretCertSource{ - SecretName: aerospikeSecretName, + SecretName: test.AerospikeSecretName, CaCertsFilename: "cacert.pem", ClientCertFilename: "svc_cluster_chain.pem", ClientKeyFilename: "svc_key.pem", @@ -917,7 +949,7 @@ func createAerospikeClusterPost560( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -932,7 +964,7 @@ func createAerospikeClusterPost560( OperatorClientCertSpec: &asdbv1.AerospikeOperatorClientCertSpec{ AerospikeOperatorCertSource: asdbv1.AerospikeOperatorCertSource{ SecretCertSource: &asdbv1.AerospikeSecretCertSource{ - SecretName: aerospikeSecretName, + SecretName: test.AerospikeSecretName, CaCertsFilename: "cacert.pem", ClientCertFilename: "svc_cluster_chain.pem", ClientKeyFilename: "svc_key.pem", @@ -1040,7 +1072,7 @@ func createDummyAerospikeClusterWithRFAndStorage( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -1108,7 +1140,7 @@ func createDummyAerospikeCluster( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -1144,6 +1176,14 @@ func createDummyAerospikeCluster( return aeroCluster } +// CreateDummyAerospikeCluster func is a public variant of createDummyAerospikeCluster +// Remove this when createDummyAerospikeCluster will be made public +func CreateDummyAerospikeCluster( + clusterNamespacedName types.NamespacedName, size int32, +) *asdbv1.AerospikeCluster { + return createDummyAerospikeCluster(clusterNamespacedName, size) +} + func UpdateClusterImage( aerocluster *asdbv1.AerospikeCluster, image string, ) error { @@ -1301,7 +1341,7 @@ func 
createBasicTLSCluster( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -1317,7 +1357,7 @@ func createBasicTLSCluster( OperatorClientCertSpec: &asdbv1.AerospikeOperatorClientCertSpec{ AerospikeOperatorCertSource: asdbv1.AerospikeOperatorCertSource{ SecretCertSource: &asdbv1.AerospikeSecretCertSource{ - SecretName: aerospikeSecretName, + SecretName: test.AerospikeSecretName, CaCertsFilename: "cacert.pem", ClientCertFilename: "svc_cluster_chain.pem", ClientKeyFilename: "svc_key.pem", @@ -1540,7 +1580,7 @@ func getStorageVolumeForSecret() asdbv1.VolumeSpec { Name: aerospikeConfigSecret, Source: asdbv1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: aerospikeSecretName, + SecretName: test.AerospikeSecretName, }, }, Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ @@ -1639,3 +1679,20 @@ func getNonRootPodSpec() asdbv1.AerospikePodSpec { }, } } + +func getAeroClusterPVCList( + aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client, +) ([]corev1.PersistentVolumeClaim, error) { + // List the pvc for this aeroCluster's statefulset + pvcList := &corev1.PersistentVolumeClaimList{} + labelSelector := labels.SelectorFromSet(utils.LabelsForAerospikeCluster(aeroCluster.Name)) + listOps := &client.ListOptions{ + Namespace: aeroCluster.Namespace, LabelSelector: labelSelector, + } + + if err := k8sClient.List(goctx.TODO(), pvcList, listOps); err != nil { + return nil, err + } + + return pvcList.Items, nil +} diff --git a/test/cluster_resource_test.go b/test/cluster/cluster_resource_test.go similarity index 99% rename from test/cluster_resource_test.go rename to test/cluster/cluster_resource_test.go index 9b4ab8699..32cbad0ee 100644 --- a/test/cluster_resource_test.go +++ b/test/cluster/cluster_resource_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/cluster_storage_cleanup_test.go 
b/test/cluster/cluster_storage_cleanup_test.go similarity index 99% rename from test/cluster_storage_cleanup_test.go rename to test/cluster/cluster_storage_cleanup_test.go index 480d4eb33..23e5ee5d7 100644 --- a/test/cluster_storage_cleanup_test.go +++ b/test/cluster/cluster_storage_cleanup_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/cluster_test.go b/test/cluster/cluster_test.go similarity index 99% rename from test/cluster_test.go rename to test/cluster/cluster_test.go index fb88bd186..dfc16fcad 100644 --- a/test/cluster_test.go +++ b/test/cluster/cluster_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -15,6 +15,7 @@ import ( asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" + "github.com/aerospike/aerospike-kubernetes-operator/test" ) var _ = Describe( @@ -892,10 +893,10 @@ func UpdateTLSClusterTest(ctx goctx.Context) { network["tls"] = tlsList aeroCluster.Spec.AerospikeConfig.Value["network"] = network secretVolume := asdbv1.VolumeSpec{ - Name: tlsCacertSecretName, + Name: test.TLSCacertSecretName, Source: asdbv1.VolumeSource{ Secret: &v1.SecretVolumeSource{ - SecretName: tlsCacertSecretName, + SecretName: test.TLSCacertSecretName, }, }, Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ @@ -906,7 +907,7 @@ func UpdateTLSClusterTest(ctx goctx.Context) { operatorClientCertSpec := getOperatorCert() operatorClientCertSpec.AerospikeOperatorCertSource.SecretCertSource.CaCertsFilename = "" cacertPath := &asdbv1.CaCertsSource{ - SecretName: tlsCacertSecretName, + SecretName: test.TLSCacertSecretName, } operatorClientCertSpec.AerospikeOperatorCertSource.SecretCertSource.CaCertsSource = cacertPath aeroCluster.Spec.OperatorClientCertSpec = operatorClientCertSpec @@ -1362,7 +1363,7 @@ func UpdateClusterTest(ctx goctx.Context) { aeroCluster.Spec.OperatorClientCertSpec = 
&asdbv1.AerospikeOperatorClientCertSpec{ AerospikeOperatorCertSource: asdbv1.AerospikeOperatorCertSource{ SecretCertSource: &asdbv1.AerospikeSecretCertSource{ - SecretName: aerospikeSecretName, + SecretName: test.AerospikeSecretName, CaCertsFilename: "cacert.pem", ClientCertFilename: "svc_cluster_chain.pem", ClientKeyFilename: "svc_key.pem", diff --git a/test/dynamic_config_test.go b/test/cluster/dynamic_config_test.go similarity index 99% rename from test/dynamic_config_test.go rename to test/cluster/dynamic_config_test.go index becbf0756..e318c9fb2 100644 --- a/test/dynamic_config_test.go +++ b/test/cluster/dynamic_config_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -20,6 +20,7 @@ import ( asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" "github.com/aerospike/aerospike-kubernetes-operator/pkg/configschema" "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" + "github.com/aerospike/aerospike-kubernetes-operator/test" lib "github.com/aerospike/aerospike-management-lib" "github.com/aerospike/aerospike-management-lib/asconfig" "github.com/aerospike/aerospike-management-lib/info" @@ -492,7 +493,7 @@ var _ = Describe( admin2 := asdbv1.AerospikeUserSpec{ Name: "admin2", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -643,7 +644,7 @@ func getPodIDs(ctx context.Context, aeroCluster *asdbv1.AerospikeCluster) (map[s } stdout, _, execErr := utils.Exec( - utils.GetNamespacedName(pod), asdbv1.AerospikeServerContainerName, cmd, k8sClientset, + utils.GetNamespacedName(pod), asdbv1.AerospikeServerContainerName, cmd, k8sClientSet, cfg, ) diff --git a/test/host_network_test.go b/test/cluster/host_network_test.go similarity index 98% rename from test/host_network_test.go rename to test/cluster/host_network_test.go index 136c74a1c..40b9b5721 100644 --- a/test/host_network_test.go +++ b/test/cluster/host_network_test.go @@ -1,4 +1,4 @@ -package test 
+package cluster import ( "bufio" @@ -97,7 +97,7 @@ func checkAdvertisedAddress( // intraClusterAdvertisesNodeIp indicates if the pod advertises k8s node IP. func intraClusterAdvertisesNodeIP(ctx goctx.Context, pod *corev1.Pod) bool { podNodeIP := pod.Status.HostIP - logs := getPodLogs(k8sClientset, ctx, pod) + logs := getPodLogs(k8sClientSet, ctx, pod) scanner := bufio.NewScanner(strings.NewReader(logs)) hbAdvertisesNodeID := false fabricAdvertisesNodeID := false diff --git a/test/k8snode_block_list_test.go b/test/cluster/k8snode_block_list_test.go similarity index 99% rename from test/k8snode_block_list_test.go rename to test/cluster/k8snode_block_list_test.go index a298944e8..5c3975d6e 100644 --- a/test/k8snode_block_list_test.go +++ b/test/cluster/k8snode_block_list_test.go @@ -1,16 +1,15 @@ -package test +package cluster import ( "context" "fmt" - "k8s.io/utils/ptr" - . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" ) diff --git a/test/large_reconcile_test.go b/test/cluster/large_reconcile_test.go similarity index 99% rename from test/large_reconcile_test.go rename to test/cluster/large_reconcile_test.go index 470b41a55..a3e834b24 100644 --- a/test/large_reconcile_test.go +++ b/test/cluster/large_reconcile_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -7,7 +7,6 @@ import ( "strconv" "time" - as "github.com/aerospike/aerospike-client-go/v7" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -15,6 +14,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" + as "github.com/aerospike/aerospike-client-go/v7" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" ) diff --git a/test/ldap_auth_test.go b/test/cluster/ldap_auth_test.go similarity index 97% rename from test/ldap_auth_test.go rename to test/cluster/ldap_auth_test.go index fbeacc410..5f49adfc5 100644 --- a/test/ldap_auth_test.go +++ b/test/cluster/ldap_auth_test.go @@ -2,7 +2,7 @@ // Tests Aerospike ldap external authentication. -package test +package cluster import ( goctx "context" @@ -16,6 +16,7 @@ import ( as "github.com/aerospike/aerospike-client-go/v7" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/test" ) var _ = Describe( @@ -112,7 +113,7 @@ func getAerospikeClusterSpecWithLDAP( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", diff --git a/test/multicluster_test.go b/test/cluster/multicluster_test.go similarity index 96% rename from test/multicluster_test.go rename to test/cluster/multicluster_test.go index 8728f837a..d3ea3ca54 100644 --- a/test/multicluster_test.go +++ b/test/cluster/multicluster_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -6,6 +6,8 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/types" + + "github.com/aerospike/aerospike-kubernetes-operator/test" ) var _ = Describe( @@ -17,13 +19,13 @@ var _ = Describe( // 1st cluster clusterName1 := "multicluster" clusterNamespacedName1 := getNamespacedName( - clusterName1, multiClusterNs1, + clusterName1, test.MultiClusterNs1, ) // 2nd cluster clusterName2 := "multicluster" clusterNamespacedName2 := getNamespacedName( - clusterName2, multiClusterNs2, + clusterName2, test.MultiClusterNs2, ) Context( @@ -48,13 +50,13 @@ var _ = Describe( // 1st cluster clusterName1 := "multicluster1" clusterNamespacedName1 := getNamespacedName( - clusterName1, multiClusterNs1, + clusterName1, test.MultiClusterNs1, ) // 2nd cluster clusterName2 := "multicluster2" clusterNamespacedName2 := getNamespacedName( - clusterName2, multiClusterNs1, + clusterName2, test.MultiClusterNs1, ) Context( diff --git a/test/network_policy_test.go b/test/cluster/network_policy_test.go similarity index 99% rename from test/network_policy_test.go rename to test/cluster/network_policy_test.go index 3a1ba41bc..fd72ba5b7 100644 --- a/test/network_policy_test.go +++ b/test/cluster/network_policy_test.go @@ -2,7 +2,7 @@ // Tests Aerospike network policy settings. 
-package test +package cluster import ( goctx "context" @@ -23,6 +23,7 @@ import ( asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" aerospikecluster "github.com/aerospike/aerospike-kubernetes-operator/controllers" + "github.com/aerospike/aerospike-kubernetes-operator/test" "github.com/aerospike/aerospike-management-lib/deployment" ) @@ -274,7 +275,7 @@ func negativeDeployNetworkPolicyTest(ctx goctx.Context, multiPodPerHost, enableT Context( "Negative cases for configuredIP", func() { - clusterNamespacedName := getNamespacedName("np-configured-ip", multiClusterNs1) + clusterNamespacedName := getNamespacedName("np-configured-ip", test.MultiClusterNs1) BeforeEach( func() { @@ -649,7 +650,7 @@ func doTestNetworkPolicy( It( "DefaultNetworkPolicy", func() { clusterNamespacedName := getNamespacedName( - "np-default", multiClusterNs1, + "np-default", test.MultiClusterNs1, ) // Ensures that default network policy is applied. @@ -670,7 +671,7 @@ func doTestNetworkPolicy( It( "PodAndExternal", func() { clusterNamespacedName := getNamespacedName( - "np-pod-external", multiClusterNs1, + "np-pod-external", test.MultiClusterNs1, ) // Ensures that default network policy is applied. 
@@ -697,7 +698,7 @@ func doTestNetworkPolicy( if multiPodPerHost { It("OnlyPodNetwork: should create cluster without nodePort service", func() { clusterNamespacedName := getNamespacedName( - "pod-network-cluster", multiClusterNs1) + "pod-network-cluster", test.MultiClusterNs1) networkPolicy := asdbv1.AerospikeNetworkPolicy{ AccessType: asdbv1.AerospikeNetworkTypePod, @@ -767,7 +768,7 @@ func doTestNetworkPolicy( Context( "When using configuredIP", func() { - clusterNamespacedName := getNamespacedName("np-configured-ip", multiClusterNs1) + clusterNamespacedName := getNamespacedName("np-configured-ip", test.MultiClusterNs1) BeforeEach( func() { err := deleteNodeLabels(ctx, []string{labelAccessAddress, labelAlternateAccessAddress}) @@ -858,7 +859,7 @@ func doTestNetworkPolicy( // Test cases with NetworkAttachmentDefinition of different namespaces can't be tested with current mocking. Context("customInterface", func() { clusterNamespacedName := getNamespacedName( - "np-custom-interface", multiClusterNs1, + "np-custom-interface", test.MultiClusterNs1, ) // Skip this test when multiPodPerHost is true and enabledTLS true because Network Policy contains all @@ -1338,7 +1339,7 @@ func getAerospikeClusterSpecWithNetworkPolicy( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", diff --git a/test/on_demand_operations_test.go b/test/cluster/on_demand_operations_test.go similarity index 99% rename from test/on_demand_operations_test.go rename to test/cluster/on_demand_operations_test.go index b3a5644f4..10619d7a9 100644 --- a/test/on_demand_operations_test.go +++ b/test/cluster/on_demand_operations_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/poddisruptionbudget_test.go b/test/cluster/poddisruptionbudget_test.go similarity index 99% rename from test/poddisruptionbudget_test.go rename to 
test/cluster/poddisruptionbudget_test.go index c6c16e009..788c9acba 100644 --- a/test/poddisruptionbudget_test.go +++ b/test/cluster/poddisruptionbudget_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( "context" diff --git a/test/podspec_test.go b/test/cluster/podspec_test.go similarity index 99% rename from test/podspec_test.go rename to test/cluster/podspec_test.go index 90a019e28..10a687d15 100644 --- a/test/podspec_test.go +++ b/test/cluster/podspec_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/rack_enabled_cluster_test.go b/test/cluster/rack_enabled_cluster_test.go similarity index 99% rename from test/rack_enabled_cluster_test.go rename to test/cluster/rack_enabled_cluster_test.go index bf3cb656d..13bdd4bdd 100644 --- a/test/rack_enabled_cluster_test.go +++ b/test/cluster/rack_enabled_cluster_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/rack_management_test.go b/test/cluster/rack_management_test.go similarity index 99% rename from test/rack_management_test.go rename to test/cluster/rack_management_test.go index 5d36c0dfc..add28302e 100644 --- a/test/rack_management_test.go +++ b/test/cluster/rack_management_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/rack_utils.go b/test/cluster/rack_utils.go similarity index 98% rename from test/rack_utils.go rename to test/cluster/rack_utils.go index 098dde03f..21f3bce9c 100644 --- a/test/rack_utils.go +++ b/test/cluster/rack_utils.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -428,6 +428,15 @@ func getConfiguredRackStateList(aeroCluster *asdbv1.AerospikeCluster) []RackStat return rackStateList } +func getRackID(pod *corev1.Pod) (int, error) { + rack, ok := pod.ObjectMeta.Labels["aerospike.com/rack-id"] + if !ok { + return 0, nil + } + + return strconv.Atoi(rack) +} + // TODO: Update this func splitRacks(nodeCount, 
rackCount int) []int { nodesPerRack, extraNodes := nodeCount/rackCount, nodeCount%rackCount @@ -446,13 +455,6 @@ func splitRacks(nodeCount, rackCount int) []int { return topology } -func getNamespacedName(name, namespace string) types.NamespacedName { - return types.NamespacedName{ - Name: name, - Namespace: namespace, - } -} - func getRackPodList( k8sClient client.Client, ctx goctx.Context, found *appsv1.StatefulSet, ) (*corev1.PodList, error) { diff --git a/test/sample_files_test.go b/test/cluster/sample_files_test.go similarity index 86% rename from test/sample_files_test.go rename to test/cluster/sample_files_test.go index d7d7827d9..6e7183404 100644 --- a/test/sample_files_test.go +++ b/test/cluster/sample_files_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( "context" @@ -12,6 +12,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/yaml" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" @@ -117,7 +118,7 @@ func getSamplesFiles() ([]string, error) { err error ) - // getGitRepoRootPath is called here explicitly to get projectRoot at this point + // getGitRepoRootPath is called here explicitly to get projectRoot at this point // This may be empty if getSamplesFiles is called during var initialization phase if projectRoot == "" { projectRoot, err = getGitRepoRootPath() @@ -128,6 +129,20 @@ func getSamplesFiles() ([]string, error) { absolutePath := filepath.Join(projectRoot, fileDir) + // Files/Dirs ignored are: + // 1. PMEM sample file as hardware is not available + // 2. XDR related files as they are separately tested + // 3. All files which are not CR samples + // 4.
BackupService, Backup and Restore related files + ignoreFiles := sets.New[string]( + "pmem_cluster_cr.yaml", + "xdr_dst_cluster_cr.yaml", + "xdr_src_cluster_cr.yaml", + "aerospikebackup.yaml", + "aerospikebackupservice.yaml", + "aerospikerestore.yaml", + ) + if err := filepath.Walk(absolutePath, func(path string, info fs.FileInfo, err error) error { if err != nil { return err @@ -138,12 +153,10 @@ func getSamplesFiles() ([]string, error) { return nil } - // Files/Dirs ignored are: - // 1. PMEM sample file as hardware is not available - // 2. XDR related files as they are separately tested - // 3. All files which are not CR samples - if strings.Contains(path, "pmem_cluster_cr.yaml") || strings.Contains(path, "xdr_") || - !strings.HasSuffix(path, "_cr.yaml") { + parts := strings.Split(path, "/") + file := parts[len(parts)-1] + + if ignoreFiles.Has(file) || !strings.HasSuffix(path, "_cr.yaml") { return nil } diff --git a/test/security_context_test.go b/test/cluster/security_context_test.go similarity index 99% rename from test/security_context_test.go rename to test/cluster/security_context_test.go index 9944e6de9..66471bb75 100644 --- a/test/security_context_test.go +++ b/test/cluster/security_context_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/services_test.go b/test/cluster/services_test.go similarity index 99% rename from test/services_test.go rename to test/cluster/services_test.go index 8c80d1329..01f0faecb 100644 --- a/test/services_test.go +++ b/test/cluster/services_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/statefulset_storage_test.go b/test/cluster/statefulset_storage_test.go similarity index 99% rename from test/statefulset_storage_test.go rename to test/cluster/statefulset_storage_test.go index d5f8bcf7f..ff39890dc 100644 --- a/test/statefulset_storage_test.go +++ b/test/cluster/statefulset_storage_test.go @@ -1,4 +1,4 @@ -package test +package 
cluster import ( goctx "context" @@ -395,7 +395,7 @@ func getSTSFromRackID(aeroCluster *asdbv1.AerospikeCluster, rackID int) ( found := &appsv1.StatefulSet{} err := k8sClient.Get( goctx.TODO(), - getNamespacedNameForSTS(aeroCluster, rackID), + GetNamespacedNameForSTS(aeroCluster, rackID), found, ) @@ -435,7 +435,7 @@ func validateExternalVolumeInContainer(sts *appsv1.StatefulSet, index int, isIni return false, nil } -func getNamespacedNameForSTS( +func GetNamespacedNameForSTS( aeroCluster *asdbv1.AerospikeCluster, rackID int, ) types.NamespacedName { return types.NamespacedName{ diff --git a/test/storage_init_test.go b/test/cluster/storage_init_test.go similarity index 99% rename from test/storage_init_test.go rename to test/cluster/storage_init_test.go index fbff805ac..77a391c8f 100644 --- a/test/storage_init_test.go +++ b/test/cluster/storage_init_test.go @@ -1,4 +1,4 @@ -package test +package cluster // Tests storage initialization works as expected. // If specified devices should be initialized only on first use. 
@@ -21,6 +21,7 @@ import ( asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" "github.com/aerospike/aerospike-kubernetes-operator/pkg/jsonpatch" "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" + "github.com/aerospike/aerospike-kubernetes-operator/test" lib "github.com/aerospike/aerospike-management-lib" ) @@ -709,7 +710,7 @@ func writeDataToVolumeBlock( magicBytes, path, ), } - _, _, err := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientset, cfg) + _, _, err := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientSet, cfg) if err != nil { return fmt.Errorf("error creating file %v", err) @@ -726,7 +727,7 @@ func writeDataToVolumeFileSystem( cmd := []string{ "bash", "-c", fmt.Sprintf("echo %s > %s/magic.txt", magicBytes, path), } - _, _, err := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientset, cfg) + _, _, err := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientSet, cfg) if err != nil { return fmt.Errorf("error creating file %v", err) @@ -741,7 +742,7 @@ func hasDataBlock(pod *corev1.Pod, volume *asdbv1.VolumeSpec) bool { cmd := []string{ "bash", "-c", fmt.Sprintf("dd if=%s count=1 status=none", path), } - stdout, _, _ := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientset, cfg) + stdout, _, _ := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientSet, cfg) return strings.HasPrefix(stdout, magicBytes) } @@ -750,7 +751,7 @@ func hasDataFilesystem(pod *corev1.Pod, volume *asdbv1.VolumeSpec) bool { cName, path := getContainerNameAndPath(volume) cmd := []string{"bash", "-c", fmt.Sprintf("cat %s/magic.txt", path)} - stdout, _, _ := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientset, cfg) + stdout, _, _ := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientSet, cfg) return strings.HasPrefix(stdout, magicBytes) } @@ -780,7 +781,7 @@ func getStorageInitAerospikeCluster( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: 
authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", diff --git a/test/storage_test.go b/test/cluster/storage_test.go similarity index 99% rename from test/storage_test.go rename to test/cluster/storage_test.go index 26baae72e..e268d8d74 100644 --- a/test/storage_test.go +++ b/test/cluster/storage_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -10,6 +10,7 @@ import ( "k8s.io/apimachinery/pkg/types" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/test" ) // * Test @@ -560,7 +561,7 @@ var _ = Describe( volumes := aeroCluster.Spec.Storage.Volumes aeroCluster.Spec.Storage.Volumes[len(volumes)-1].Source = asdbv1.VolumeSource{ Secret: &v1.SecretVolumeSource{ - SecretName: authSecretName, + SecretName: test.AuthSecretName, }, } diff --git a/test/storage_wipe_test.go b/test/cluster/storage_wipe_test.go similarity index 99% rename from test/storage_wipe_test.go rename to test/cluster/storage_wipe_test.go index 9067a904d..7aae50c43 100644 --- a/test/storage_wipe_test.go +++ b/test/cluster/storage_wipe_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -15,6 +15,7 @@ import ( as "github.com/aerospike/aerospike-client-go/v7" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/test" ) const ( @@ -580,7 +581,7 @@ func getStorageWipeAerospikeCluster( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", diff --git a/test/strong_consistency_test.go b/test/cluster/strong_consistency_test.go similarity index 99% rename from test/strong_consistency_test.go rename to test/cluster/strong_consistency_test.go index f88373360..020431fa4 100644 --- a/test/strong_consistency_test.go +++ b/test/cluster/strong_consistency_test.go @@ -1,4 
+1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/cluster/suite_test.go b/test/cluster/suite_test.go new file mode 100644 index 000000000..d119bc6bb --- /dev/null +++ b/test/cluster/suite_test.go @@ -0,0 +1,122 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + goctx "context" + "fmt" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + k8Runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + // +kubebuilder:scaffold:imports + + asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/test" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var testEnv *envtest.Environment + +var k8sClient client.Client + +var cfg *rest.Config + +var k8sClientSet *kubernetes.Clientset + +var projectRoot string + +var scheme = k8Runtime.NewScheme() + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Cluster Suite") +} + +var _ = BeforeEach(func() { + By("Cleaning up all Aerospike clusters.") + + for idx := range test.Namespaces { + deleteAllClusters(test.Namespaces[idx]) + Expect(cleanupPVC(k8sClient, test.Namespaces[idx])).NotTo(HaveOccurred()) + } +}) + +func deleteAllClusters(namespace string) { + ctx := goctx.TODO() + list := &asdbv1.AerospikeClusterList{} + listOps := &client.ListOptions{Namespace: namespace} + + err := k8sClient.List(ctx, list, listOps) + Expect(err).NotTo(HaveOccurred()) + + for clusterIndex := range list.Items { + By(fmt.Sprintf("Deleting cluster \"%s/%s\".", list.Items[clusterIndex].Namespace, list.Items[clusterIndex].Name)) + err := deleteCluster(k8sClient, ctx, &list.Items[clusterIndex]) + Expect(err).NotTo(HaveOccurred()) + } +} + +// This is used when running tests on existing cluster +// user has to install its own operator then run cleanup and then start this + +var _ = BeforeSuite( + func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("Bootstrapping test environment") + pkgLog.Info(fmt.Sprintf("Client will connect through '%s' network to Aerospike Clusters.", + *defaultNetworkType)) + + var err error + testEnv, cfg, k8sClient, k8sClientSet, err = test.BootStrapTestEnv(scheme) + Expect(err).NotTo(HaveOccurred()) + + projectRoot, err = getGitRepoRootPath() + Expect(err).NotTo(HaveOccurred()) + + cloudProvider, err = getCloudProvider(goctx.TODO(), k8sClient) + Expect(err).ToNot(HaveOccurred()) + }) + +var _ = AfterSuite( + func() { + By("Cleaning up all pvcs") + + for idx := range test.Namespaces { + _ = cleanupPVC(k8sClient, test.Namespaces[idx]) + } + + By("tearing down the test environment") + gexec.KillAndWait(5 * 
time.Second) + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) + }, +) diff --git a/test/test_client.go b/test/cluster/test_client.go similarity index 99% rename from test/test_client.go rename to test/cluster/test_client.go index 295b07159..b02adf90c 100644 --- a/test/test_client.go +++ b/test/cluster/test_client.go @@ -1,4 +1,4 @@ -package test +package cluster // Aerospike client and info testing utilities. // diff --git a/test/tls_authenticate_client_test.go b/test/cluster/tls_authenticate_client_test.go similarity index 98% rename from test/tls_authenticate_client_test.go rename to test/cluster/tls_authenticate_client_test.go index ed4b0ba38..d412d966c 100644 --- a/test/tls_authenticate_client_test.go +++ b/test/cluster/tls_authenticate_client_test.go @@ -2,7 +2,7 @@ // Tests Aerospike TLS authenticate client settings. -package test +package cluster import ( goctx "context" @@ -17,6 +17,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/test" ) const tlsClusterName = "tls-auth-client" @@ -177,7 +178,7 @@ func getAerospikeConfig( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretNameForUpdate, + SecretName: test.AuthSecretNameForUpdate, Roles: []string{ "sys-admin", "user-admin", @@ -292,7 +293,7 @@ func doTestTLSAuthenticateClientAnyWithCapath(ctx goctx.Context) { operatorClientCertSpec.AerospikeOperatorCertSource.SecretCertSource.ClientCertFilename = "server-cert.pem" operatorClientCertSpec.AerospikeOperatorCertSource.SecretCertSource.ClientKeyFilename = "server_key.pem" cacertPath := &asdbv1.CaCertsSource{ - SecretName: tlsCacertSecretName, + SecretName: test.TLSCacertSecretName, } operatorClientCertSpec.AerospikeOperatorCertSource.SecretCertSource.CaCertsSource = cacertPath @@ -300,10 +301,10 @@ func doTestTLSAuthenticateClientAnyWithCapath(ctx goctx.Context) { networkConf, 
operatorClientCertSpec, ) secretVolume := asdbv1.VolumeSpec{ - Name: tlsCacertSecretName, + Name: test.TLSCacertSecretName, Source: asdbv1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: tlsCacertSecretName, + SecretName: test.TLSCacertSecretName, }, }, Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ diff --git a/test/cluster/utils.go b/test/cluster/utils.go new file mode 100644 index 000000000..a1af81c8b --- /dev/null +++ b/test/cluster/utils.go @@ -0,0 +1,879 @@ +package cluster + +import ( + "bytes" + goctx "context" + "encoding/json" + "flag" + "fmt" + "io" + "os/exec" + "reflect" + "strings" + "time" + + set "github.com/deckarep/golang-set/v2" + "github.com/go-logr/logr" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + + as "github.com/aerospike/aerospike-client-go/v7" + asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + operatorUtils "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" + lib "github.com/aerospike/aerospike-management-lib" + "github.com/aerospike/aerospike-management-lib/info" +) + +var defaultNetworkType = flag.String("connect-through-network-type", "hostExternal", + "Network type is used to determine an appropriate access type. Can be 'pod',"+ + " 'hostInternal' or 'hostExternal'. AS client in the test will choose access type"+ + " which matches expected network type. 
See details in"+ + " https://docs.aerospike.com/docs/cloud/kubernetes/operator/Cluster-configuration-settings.html#network-policy") + +type CloudProvider int + +const ( + CloudProviderUnknown CloudProvider = iota + CloudProviderAWS + CloudProviderGCP +) + +const zoneKey = "topology.kubernetes.io/zone" +const regionKey = "topology.kubernetes.io/region" + +var cloudProvider CloudProvider + +func waitForAerospikeCluster( + k8sClient client.Client, ctx goctx.Context, + aeroCluster *asdbv1.AerospikeCluster, replicas int, + retryInterval, timeout time.Duration, expectedPhases []asdbv1.AerospikeClusterPhase, +) error { + var isValid bool + + err := wait.PollUntilContextTimeout(ctx, + retryInterval, timeout, true, func(ctx goctx.Context) (done bool, err error) { + // Fetch the AerospikeCluster instance + newCluster := &asdbv1.AerospikeCluster{} + err = k8sClient.Get( + ctx, types.NamespacedName{ + Name: aeroCluster.Name, Namespace: aeroCluster.Namespace, + }, newCluster, + ) + if err != nil { + if errors.IsNotFound(err) { + pkgLog.Info( + "Waiting for availability of %s AerospikeCluster\n", + "name", aeroCluster.Name, + ) + return false, nil + } + return false, err + } + + isValid = isClusterStateValid(aeroCluster, newCluster, replicas, expectedPhases) + return isValid, nil + }, + ) + + if err != nil { + return err + } + + pkgLog.Info("AerospikeCluster available\n") + + // make info call + return nil +} + +func isClusterStateValid( + aeroCluster *asdbv1.AerospikeCluster, + newCluster *asdbv1.AerospikeCluster, replicas int, expectedPhases []asdbv1.AerospikeClusterPhase, +) bool { + if int(newCluster.Status.Size) != replicas { + pkgLog.Info("Cluster size is not correct") + return false + } + + // Do not compare status with spec if cluster reconciliation is paused + // `paused` flag only exists in the spec and not in the status. 
+ if !asdbv1.GetBool(aeroCluster.Spec.Paused) { + // Validate status + statusToSpec, err := asdbv1.CopyStatusToSpec(&newCluster.Status.AerospikeClusterStatusSpec) + if err != nil { + pkgLog.Error(err, "Failed to copy spec in status", "err", err) + return false + } + + if !reflect.DeepEqual(statusToSpec, &newCluster.Spec) { + pkgLog.Info("Cluster status is not matching the spec") + return false + } + } + + // TODO: This is not valid for tests where maxUnavailablePods flag is used. + // We can take the param in func to skip this check + // // Validate pods + // if len(newCluster.Status.Pods) != replicas { + // pkgLog.Info("Cluster status doesn't have pod status for all nodes. Cluster status may not have fully updated") + // return false + // } + + for podName := range newCluster.Status.Pods { + if newCluster.Status.Pods[podName].Aerospike.NodeID == "" { + pkgLog.Info("Cluster pod's nodeID is empty") + return false + } + + if operatorUtils.IsImageEqual(newCluster.Status.Pods[podName].Image, aeroCluster.Spec.Image) { + break + } + + pkgLog.Info( + fmt.Sprintf("Cluster pod's image %s not same as spec %s", newCluster.Status.Pods[podName].Image, + aeroCluster.Spec.Image, + ), + ) + + return false + } + + if newCluster.Labels[asdbv1.AerospikeAPIVersionLabel] != asdbv1.AerospikeAPIVersion { + pkgLog.Info("Cluster API version label is not correct") + return false + } + + // Validate phase + phaseSet := set.NewSet(expectedPhases...) 
+ if !phaseSet.Contains(newCluster.Status.Phase) { + pkgLog.Info("Cluster phase is not correct") + return false + } + + pkgLog.Info("Cluster state is validated successfully") + + return true +} + +func getTimeout(nodes int32) time.Duration { + return 3 * time.Minute * time.Duration(nodes) +} + +func getPodLogs( + k8sClientset *kubernetes.Clientset, ctx goctx.Context, pod *corev1.Pod, +) string { + podLogOpts := corev1.PodLogOptions{} + req := k8sClientset.CoreV1().Pods(pod.Namespace).GetLogs( + pod.Name, &podLogOpts, + ) + + podLogs, err := req.Stream(ctx) + if err != nil { + return "error in opening stream" + } + + defer func(podLogs io.ReadCloser) { + _ = podLogs.Close() + }(podLogs) + + buf := new(bytes.Buffer) + + _, err = io.Copy(buf, podLogs) + if err != nil { + return "error in copy information from podLogs to buf" + } + + str := buf.String() + + return str +} + +// Copy makes a deep copy from src into dst. +func Copy(dst, src interface{}) error { + if dst == nil { + return fmt.Errorf("dst cannot be nil") + } + + if src == nil { + return fmt.Errorf("src cannot be nil") + } + + jsonBytes, err := json.Marshal(src) + if err != nil { + return fmt.Errorf("unable to marshal src: %s", err) + } + + err = json.Unmarshal(jsonBytes, dst) + if err != nil { + return fmt.Errorf("unable to unmarshal into dst: %s", err) + } + + return nil +} + +type AerospikeConfSpec struct { + version string + network map[string]interface{} + service map[string]interface{} + security map[string]interface{} + namespaces []interface{} +} + +func (acs *AerospikeConfSpec) getVersion() string { + return acs.version +} + +func (acs *AerospikeConfSpec) setEnableSecurity(enableSecurity bool) error { + cmpVal, err := lib.CompareVersions(acs.version, "5.7.0") + if err != nil { + return err + } + + if cmpVal >= 0 { + if enableSecurity { + security := map[string]interface{}{} + acs.security = security + } + + return nil + } + + acs.security = map[string]interface{}{} + acs.security["enable-security"] 
= enableSecurity + + return nil +} + +func (acs *AerospikeConfSpec) setEnableQuotas(enableQuotas bool) error { + cmpVal, err := lib.CompareVersions(acs.version, "5.6.0") + if err != nil { + return err + } + + if cmpVal >= 0 { + if acs.security == nil { + acs.security = map[string]interface{}{} + } + + acs.security["enable-quotas"] = enableQuotas + } + + return nil +} + +func (acs *AerospikeConfSpec) getSpec() map[string]interface{} { + spec := map[string]interface{}{ + "service": acs.service, + "network": acs.network, + "namespaces": acs.namespaces, + } + if acs.security != nil { + spec["security"] = acs.security + } + + return spec +} + +func getOperatorCert() *asdbv1.AerospikeOperatorClientCertSpec { + return &asdbv1.AerospikeOperatorClientCertSpec{ + TLSClientName: "aerospike-a-0.test-runner", + AerospikeOperatorCertSource: asdbv1.AerospikeOperatorCertSource{ + SecretCertSource: &asdbv1.AerospikeSecretCertSource{ + SecretName: "aerospike-secret", + CaCertsFilename: "cacert.pem", + ClientCertFilename: "svc_cluster_chain.pem", + ClientKeyFilename: "svc_key.pem", + }, + }, + } +} + +func getNetworkTLSConfig() map[string]interface{} { + return map[string]interface{}{ + "service": map[string]interface{}{ + "tls-name": "aerospike-a-0.test-runner", + "tls-port": serviceTLSPort, + "port": serviceNonTLSPort, + }, + "fabric": map[string]interface{}{ + "tls-name": "aerospike-a-0.test-runner", + "tls-port": 3011, + "port": 3001, + }, + "heartbeat": map[string]interface{}{ + "tls-name": "aerospike-a-0.test-runner", + "tls-port": 3012, + "port": 3002, + }, + + "tls": []interface{}{ + map[string]interface{}{ + "name": "aerospike-a-0.test-runner", + "cert-file": "/etc/aerospike/secret/svc_cluster_chain.pem", + "key-file": "/etc/aerospike/secret/svc_key.pem", + "ca-file": "/etc/aerospike/secret/cacert.pem", + }, + }, + } +} + +func getNetworkConfig() map[string]interface{} { + return map[string]interface{}{ + "service": map[string]interface{}{ + "port": serviceNonTLSPort, + }, + 
"fabric": map[string]interface{}{ + "port": 3001, + }, + "heartbeat": map[string]interface{}{ + "port": 3002, + }, + } +} + +func NewAerospikeConfSpec(image string) (*AerospikeConfSpec, error) { + ver, err := asdbv1.GetImageVersion(image) + if err != nil { + return nil, err + } + + service := map[string]interface{}{ + "feature-key-file": "/etc/aerospike/secret/features.conf", + } + network := getNetworkConfig() + namespaces := []interface{}{ + map[string]interface{}{ + "name": "test", + "replication-factor": 1, + "storage-engine": map[string]interface{}{ + "type": "memory", + "data-size": 1073741824, + }, + }, + } + + return &AerospikeConfSpec{ + version: ver, + service: service, + network: network, + namespaces: namespaces, + security: nil, + }, nil +} + +func ValidateAttributes( + actual []map[string]string, expected map[string]string, +) bool { + for key, val := range expected { + for i := 0; i < len(actual); i++ { + m := actual[i] + + v, ok := m[key] + if ok && v == val { + return true + } + } + } + + return false +} + +func getAeroClusterConfig( + namespace types.NamespacedName, image string, +) (*asdbv1.AerospikeCluster, error) { + version, err := asdbv1.GetImageVersion(image) + if err != nil { + return nil, err + } + + cmpVal1, err := lib.CompareVersions(version, "5.7.0") + if err != nil { + return nil, err + } + + cmpVal2, err := lib.CompareVersions(version, "7.0.0") + if err != nil { + return nil, err + } + + switch { + case cmpVal2 >= 0: + return createAerospikeClusterPost640( + namespace, 2, image, + ), nil + + case cmpVal1 >= 0: + return createAerospikeClusterPost560( + namespace, 2, image, + ), nil + + case cmpVal1 < 0: + return createAerospikeClusterPost460( + namespace, 2, image, + ), nil + + default: + return nil, fmt.Errorf("invalid image version %s", version) + } +} + +func getAerospikeStorageConfig( + containerName string, inputCascadeDelete bool, + storageSize string, + cloudProvider CloudProvider, +) *asdbv1.AerospikeStorageSpec { + // Create 
pods and storage devices write data to the devices. + // - deletes cluster without cascade delete of volumes. + // - recreate and check if volumes are reinitialized correctly. + fileDeleteInitMethod := asdbv1.AerospikeVolumeMethodDeleteFiles + ddInitMethod := asdbv1.AerospikeVolumeMethodDD + blkDiscardInitMethod := asdbv1.AerospikeVolumeMethodBlkdiscard + blkDiscardWipeMethod := asdbv1.AerospikeVolumeMethodBlkdiscard + + if cloudProvider == CloudProviderAWS { + // Blkdiscard method is not supported in AWS, so it is initialized as DD Method + blkDiscardInitMethod = asdbv1.AerospikeVolumeMethodDD + blkDiscardWipeMethod = asdbv1.AerospikeVolumeMethodDD + } + + return &asdbv1.AerospikeStorageSpec{ + BlockVolumePolicy: asdbv1.AerospikePersistentVolumePolicySpec{ + InputCascadeDelete: &inputCascadeDelete, + }, + FileSystemVolumePolicy: asdbv1.AerospikePersistentVolumePolicySpec{ + InputCascadeDelete: &inputCascadeDelete, + }, + Volumes: []asdbv1.VolumeSpec{ + { + Name: "file-noinit", + Source: asdbv1.VolumeSource{ + PersistentVolume: &asdbv1.PersistentVolumeSpec{ + Size: resource.MustParse(storageSize), + StorageClass: storageClass, + VolumeMode: corev1.PersistentVolumeFilesystem, + }, + }, + Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ + Path: "/opt/aerospike/filesystem-noinit", + }, + }, + { + Name: "file-init", + AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ + InputInitMethod: &fileDeleteInitMethod, + }, + Source: asdbv1.VolumeSource{ + PersistentVolume: &asdbv1.PersistentVolumeSpec{ + Size: resource.MustParse(storageSize), + StorageClass: storageClass, + VolumeMode: corev1.PersistentVolumeFilesystem, + }, + }, + Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ + Path: "/opt/aerospike/filesystem-init", + }, + }, + { + Name: "device-noinit", + Source: asdbv1.VolumeSource{ + PersistentVolume: &asdbv1.PersistentVolumeSpec{ + Size: resource.MustParse(storageSize), + StorageClass: storageClass, + VolumeMode: 
corev1.PersistentVolumeBlock, + }, + }, + Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ + Path: "/opt/aerospike/blockdevice-noinit", + }, + }, + { + Name: "device-dd", + AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ + InputInitMethod: &ddInitMethod, + }, + Source: asdbv1.VolumeSource{ + PersistentVolume: &asdbv1.PersistentVolumeSpec{ + Size: resource.MustParse(storageSize), + StorageClass: storageClass, + VolumeMode: corev1.PersistentVolumeBlock, + }, + }, + Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ + Path: "/opt/aerospike/blockdevice-init-dd", + }, + }, + { + Name: "device-blkdiscard", + AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ + InputInitMethod: &blkDiscardInitMethod, + InputWipeMethod: &blkDiscardWipeMethod, + }, + Source: asdbv1.VolumeSource{ + PersistentVolume: &asdbv1.PersistentVolumeSpec{ + Size: resource.MustParse(storageSize), + StorageClass: storageClass, + VolumeMode: corev1.PersistentVolumeBlock, + }, + }, + Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ + Path: "/opt/aerospike/blockdevice-init-blkdiscard", + }, + }, + { + Name: "file-noinit-1", + Source: asdbv1.VolumeSource{ + PersistentVolume: &asdbv1.PersistentVolumeSpec{ + Size: resource.MustParse(storageSize), + StorageClass: storageClass, + VolumeMode: corev1.PersistentVolumeFilesystem, + }, + }, + Sidecars: []asdbv1.VolumeAttachment{ + { + ContainerName: containerName, + Path: "/opt/aerospike/filesystem-noinit", + }, + }, + }, + { + Name: "device-dd-1", + AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ + InputInitMethod: &ddInitMethod, + }, + Source: asdbv1.VolumeSource{ + PersistentVolume: &asdbv1.PersistentVolumeSpec{ + Size: resource.MustParse(storageSize), + StorageClass: storageClass, + VolumeMode: corev1.PersistentVolumeBlock, + }, + }, + Sidecars: []asdbv1.VolumeAttachment{ + { + ContainerName: containerName, + Path: "/opt/aerospike/blockdevice-init-dd", + }, + 
}, + }, + getStorageVolumeForSecret(), + }, + } +} + +//nolint:unparam // generic function +func contains(elems []string, v string) bool { + for _, s := range elems { + if v == s { + return true + } + } + + return false +} + +func getAerospikeConfigFromNode(log logr.Logger, k8sClient client.Client, ctx goctx.Context, + clusterNamespacedName types.NamespacedName, configContext string, pod *asdbv1.AerospikePodStatus) (lib.Stats, error) { + aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) + if err != nil { + return nil, err + } + + host, err := createHost(pod) + if err != nil { + return nil, err + } + + asinfo := info.NewAsInfo( + log, host, getClientPolicy(aeroCluster, k8sClient), + ) + + confs, err := getAsConfig(asinfo, configContext) + if err != nil { + return nil, err + } + + return confs[configContext].(lib.Stats), nil +} + +func getPasswordFromSecret(k8sClient client.Client, + secretNamespcedName types.NamespacedName, passFileName string, +) (string, error) { + secret := &corev1.Secret{} + + err := k8sClient.Get(goctx.TODO(), secretNamespcedName, secret) + if err != nil { + return "", fmt.Errorf("failed to get secret %s: %v", secretNamespcedName, err) + } + + passBytes, ok := secret.Data[passFileName] + if !ok { + return "", fmt.Errorf( + "failed to get password file in secret %s, fileName %s", + secretNamespcedName, passFileName, + ) + } + + return string(passBytes), nil +} + +func getAerospikeClient(aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client) (*as.Client, error) { + policy := getClientPolicy(aeroCluster, k8sClient) + policy.FailIfNotConnected = false + policy.Timeout = time.Minute * 2 + policy.UseServicesAlternate = true + policy.ConnectionQueueSize = 100 + policy.LimitConnectionsToQueueSize = true + + hostList := make([]*as.Host, 0, len(aeroCluster.Status.Pods)) + + for podName := range aeroCluster.Status.Pods { + pod := aeroCluster.Status.Pods[podName] + + host, err := createHost(&pod) + if err != nil { + return nil, 
err + } + + hostList = append(hostList, host) + } + + asClient, err := as.NewClientWithPolicyAndHost(policy, hostList...) + if asClient == nil { + return nil, fmt.Errorf( + "failed to create aerospike cluster asClient: %v", err, + ) + } + + _, _ = asClient.WarmUp(-1) + + // Wait for 5 minutes for cluster to connect + for j := 0; j < 150; j++ { + if isConnected := asClient.IsConnected(); isConnected { + break + } + + time.Sleep(time.Second * 2) + } + + return asClient, nil +} + +func getPodList( + aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client, +) (*corev1.PodList, error) { + podList := &corev1.PodList{} + labelSelector := labels.SelectorFromSet(operatorUtils.LabelsForAerospikeCluster(aeroCluster.Name)) + listOps := &client.ListOptions{ + Namespace: aeroCluster.Namespace, LabelSelector: labelSelector, + } + + if err := k8sClient.List(goctx.TODO(), podList, listOps); err != nil { + return nil, err + } + + return podList, nil +} + +func deletePVC(k8sClient client.Client, pvcNamespacedName types.NamespacedName) error { + pvc := &corev1.PersistentVolumeClaim{} + if err := k8sClient.Get(goctx.TODO(), pvcNamespacedName, pvc); err != nil { + if errors.IsNotFound(err) { + return nil + } + + return err + } + + if operatorUtils.IsPVCTerminating(pvc) { + return nil + } + + if err := k8sClient.Delete(goctx.TODO(), pvc); err != nil { + return fmt.Errorf("could not delete pvc %s: %w", pvc.Name, err) + } + + return nil +} + +func cleanupPVC(k8sClient client.Client, ns string) error { + // List the pvc for this aeroCluster's statefulset + pvcList := &corev1.PersistentVolumeClaimList{} + clLabels := map[string]string{"app": "aerospike-cluster"} + labelSelector := labels.SelectorFromSet(clLabels) + listOps := &client.ListOptions{Namespace: ns, LabelSelector: labelSelector} + + if err := k8sClient.List(goctx.TODO(), pvcList, listOps); err != nil { + return err + } + + for pvcIndex := range pvcList.Items { + pkgLog.Info("Found pvc, deleting it", "pvcName", + 
pvcList.Items[pvcIndex].Name, "namespace", pvcList.Items[pvcIndex].Namespace) + + if operatorUtils.IsPVCTerminating(&pvcList.Items[pvcIndex]) { + continue + } + // if utils.ContainsString(pvc.Finalizers, "kubernetes.io/pvc-protection") { + // pvc.Finalizers = utils.RemoveString(pvc.Finalizers, "kubernetes.io/pvc-protection") + // if err := k8sClient.Patch(goctx.TODO(), &pvc, client.Merge); err != nil { + // return fmt.Errorf("could not patch %s finalizer from following pvc: %s: %w", + // "kubernetes.io/pvc-protection", pvc.Name, err) + // } + //} + if err := k8sClient.Delete(goctx.TODO(), &pvcList.Items[pvcIndex]); err != nil { + return fmt.Errorf("could not delete pvc %s: %w", pvcList.Items[pvcIndex].Name, err) + } + } + + return nil +} + +func getSTSList( + aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client, +) (*appsv1.StatefulSetList, error) { + stsList := &appsv1.StatefulSetList{} + labelSelector := labels.SelectorFromSet(operatorUtils.LabelsForAerospikeCluster(aeroCluster.Name)) + listOps := &client.ListOptions{ + Namespace: aeroCluster.Namespace, LabelSelector: labelSelector, + } + + if err := k8sClient.List(goctx.TODO(), stsList, listOps); err != nil { + return nil, err + } + + return stsList, nil +} + +func getServiceForPod( + pod *corev1.Pod, k8sClient client.Client, +) (*corev1.Service, error) { + service := &corev1.Service{} + err := k8sClient.Get( + goctx.TODO(), + types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, service, + ) + + if err != nil { + return nil, fmt.Errorf( + "failed to get service for pod %s: %v", pod.Name, err, + ) + } + + return service, nil +} + +func getCloudProvider( + ctx goctx.Context, k8sClient client.Client, +) (CloudProvider, error) { + labelKeys := map[string]struct{}{} + + nodes, err := getNodeList(ctx, k8sClient) + if err != nil { + return CloudProviderUnknown, err + } + + for idx := range nodes.Items { + for labelKey := range nodes.Items[idx].Labels { + if strings.Contains(labelKey, 
"cloud.google.com") { + return CloudProviderGCP, nil + } + + if strings.Contains(labelKey, "eks.amazonaws.com") { + return CloudProviderAWS, nil + } + + labelKeys[labelKey] = struct{}{} + } + + provider := determineByProviderID(&nodes.Items[idx]) + if provider != CloudProviderUnknown { + return provider, nil + } + } + + labelKeysSlice := make([]string, 0, len(labelKeys)) + + for labelKey := range labelKeys { + labelKeysSlice = append(labelKeysSlice, labelKey) + } + + return CloudProviderUnknown, fmt.Errorf( + "can't determin cloud platform by node's labels: %v", labelKeysSlice, + ) +} + +func determineByProviderID(node *corev1.Node) CloudProvider { + if strings.Contains(node.Spec.ProviderID, "gce") { + return CloudProviderGCP + } else if strings.Contains(node.Spec.ProviderID, "aws") { + return CloudProviderAWS + } + // TODO add cloud provider detection for Azure + return CloudProviderUnknown +} + +func getZones(ctx goctx.Context, k8sClient client.Client) ([]string, error) { + unqZones := map[string]int{} + + nodes, err := getNodeList(ctx, k8sClient) + if err != nil { + return nil, err + } + + for idx := range nodes.Items { + unqZones[nodes.Items[idx].Labels[zoneKey]] = 1 + } + + zones := make([]string, 0, len(unqZones)) + + for zone := range unqZones { + zones = append(zones, zone) + } + + return zones, nil +} + +func getNodeList(ctx goctx.Context, k8sClient client.Client) ( + *corev1.NodeList, error, +) { + nodeList := &corev1.NodeList{} + if err := k8sClient.List(ctx, nodeList); err != nil { + return nil, err + } + + return nodeList, nil +} + +func getRegion(ctx goctx.Context, k8sClient client.Client) (string, error) { + nodes, err := getNodeList(ctx, k8sClient) + if err != nil { + return "", err + } + + if len(nodes.Items) == 0 { + return "", fmt.Errorf("node list empty: %v", nodes.Items) + } + + return nodes.Items[0].Labels[regionKey], nil +} + +func getGitRepoRootPath() (string, error) { + path, err := exec.Command("git", "rev-parse", 
"--show-toplevel").Output() + if err != nil { + return "", err + } + + return strings.TrimSpace(string(path)), nil +} + +func getNamespacedName(name, namespace string) types.NamespacedName { + return types.NamespacedName{ + Name: name, + Namespace: namespace, + } +} diff --git a/test/warm_restart_test.go b/test/cluster/warm_restart_test.go similarity index 98% rename from test/warm_restart_test.go rename to test/cluster/warm_restart_test.go index 99ed226fc..df24b8273 100644 --- a/test/warm_restart_test.go +++ b/test/cluster/warm_restart_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goCtx "context" @@ -103,7 +103,7 @@ func createMarkerFile( } _, _, err := utils.Exec( - utils.GetNamespacedName(pod), asdbv1.AerospikeServerContainerName, cmd, k8sClientset, + utils.GetNamespacedName(pod), asdbv1.AerospikeServerContainerName, cmd, k8sClientSet, cfg, ) @@ -137,7 +137,7 @@ func isMarkerPresent( } _, _, err := utils.Exec( - utils.GetNamespacedName(pod), asdbv1.AerospikeServerContainerName, cmd, k8sClientset, + utils.GetNamespacedName(pod), asdbv1.AerospikeServerContainerName, cmd, k8sClientSet, cfg, ) diff --git a/test/cluster_prereq.go b/test/cluster_prereq.go index 4f3b6ad22..d3a7d6226 100644 --- a/test/cluster_prereq.go +++ b/test/cluster_prereq.go @@ -2,12 +2,17 @@ package test import ( goctx "context" + "fmt" + "os" + "path/filepath" corev1 "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" ) const ( @@ -18,13 +23,38 @@ const ( aeroClusterCR string = "aerospike-cluster" ) +var secrets map[string][]byte +var cacertSecrets map[string][]byte + +const secretDir = "../config/samples/secrets" //nolint:gosec // for testing +const cacertSecretDir = "../config/samples/secrets/cacerts" //nolint:gosec // for testing +const awsCredentialPath = "$HOME/.aws/credentials" 
//nolint:gosec // for testing + +const AerospikeSecretName = "aerospike-secret" +const TLSCacertSecretName = "aerospike-cacert-secret" //nolint:gosec // for testing +const AuthSecretName = "auth-secret" +const AuthSecretNameForUpdate = "auth-update" +const AWSSecretName = "aws-secret" + +const MultiClusterNs1 string = "test1" +const MultiClusterNs2 string = "test2" +const AerospikeNs string = "aerospike" +const namespace = "test" + +// Namespaces is the list of all the namespaces used in test-suite +var Namespaces = []string{namespace, MultiClusterNs1, MultiClusterNs2, AerospikeNs} + +func getLabels() map[string]string { + return map[string]string{"app": "aerospike-cluster"} +} + func createClusterRBAC(k8sClient client.Client, ctx goctx.Context) error { - subjects := make([]rbac.Subject, 0, len(testNamespaces)) + subjects := make([]rbac.Subject, 0, len(Namespaces)) - for idx := range testNamespaces { + for idx := range Namespaces { // Create service account for getting access in cluster specific namespaces if err := createServiceAccount( - k8sClient, ctx, aeroClusterServiceAccountName, testNamespaces[idx], + k8sClient, ctx, aeroClusterServiceAccountName, Namespaces[idx], ); err != nil { return err } @@ -33,7 +63,7 @@ func createClusterRBAC(k8sClient client.Client, ctx goctx.Context) error { subjects = append(subjects, rbac.Subject{ Kind: "ServiceAccount", Name: aeroClusterServiceAccountName, - Namespace: testNamespaces[idx], + Namespace: Namespaces[idx], }) } @@ -90,3 +120,201 @@ func createServiceAccount( return nil } + +func initConfigSecret(secretDirectory string) (map[string][]byte, error) { + initSecrets := make(map[string][]byte) + + fileInfo, err := os.ReadDir(secretDirectory) + if err != nil { + return nil, err + } + + if len(fileInfo) == 0 { + return nil, fmt.Errorf("no secret file available in %s", secretDirectory) + } + + for _, file := range fileInfo { + if file.IsDir() { + // no need to check recursively + continue + } + + secret, err := 
os.ReadFile(filepath.Join(secretDirectory, file.Name())) + if err != nil { + return nil, fmt.Errorf("wrong secret file %s: %v", file.Name(), err) + } + + initSecrets[file.Name()] = secret + } + + return initSecrets, nil +} + +func setupByUser(k8sClient client.Client, ctx goctx.Context) error { + var err error + // Create configSecret + if secrets, err = initConfigSecret(secretDir); err != nil { + return fmt.Errorf("failed to init secrets: %v", err) + } + + // Create cacertSecret + if cacertSecrets, err = initConfigSecret(cacertSecretDir); err != nil { + return fmt.Errorf("failed to init secrets: %v", err) + } + + // Create preReq for namespaces used for testing + for idx := range Namespaces { + if err := createClusterPreReq(k8sClient, ctx, Namespaces[idx]); err != nil { + return err + } + } + + // Create another authSecret. Used in access-control tests + passUpdate := "admin321" + labels := getLabels() + + if err := createAuthSecret( + k8sClient, ctx, namespace, labels, AuthSecretNameForUpdate, passUpdate, + ); err != nil { + return err + } + + return createClusterRBAC(k8sClient, ctx) +} + +func createClusterPreReq( + k8sClient client.Client, ctx goctx.Context, namespace string, +) error { + labels := getLabels() + + if err := createNamespace(k8sClient, ctx, namespace); err != nil { + return err + } + + if err := createConfigSecret( + k8sClient, ctx, namespace, labels, + ); err != nil { + return err + } + + if err := createCacertSecret( + k8sClient, ctx, namespace, labels, + ); err != nil { + return err + } + + // Create authSecret + pass := "admin123" + + return createAuthSecret( + k8sClient, ctx, namespace, labels, AuthSecretName, pass, + ) +} + +func createCacertSecret( + k8sClient client.Client, ctx goctx.Context, namespace string, + labels map[string]string, +) error { + // Create configSecret + s := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: TLSCacertSecretName, + Namespace: namespace, + Labels: labels, + }, + Type: corev1.SecretTypeOpaque, + 
Data: cacertSecrets, + } + + // Remove old object + _ = k8sClient.Delete(ctx, s) + + // use test context's create helper to create the object and add a cleanup + // function for the new object + err := k8sClient.Create(ctx, s) + if err != nil { + return err + } + + return nil +} + +func createConfigSecret( + k8sClient client.Client, ctx goctx.Context, namespace string, + labels map[string]string, +) error { + // Create configSecret + s := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: AerospikeSecretName, + Namespace: namespace, + Labels: labels, + }, + Type: corev1.SecretTypeOpaque, + Data: secrets, + } + + // Remove old object + _ = k8sClient.Delete(ctx, s) + + // use test context's create helper to create the object and add a cleanup + // function for the new object + return k8sClient.Create(ctx, s) +} + +func createAuthSecret( + k8sClient client.Client, ctx goctx.Context, namespace string, + labels map[string]string, secretName, pass string, +) error { + // Create authSecret + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: namespace, + Labels: labels, + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "password": []byte(pass), + }, + } + // use test context's create helper to create the object and add a cleanup function for the new object + err := k8sClient.Create(ctx, secret) + if !errors.IsAlreadyExists(err) { + return err + } + + return nil +} + +func setupBackupServicePreReq(k8sClient client.Client, ctx goctx.Context, namespace string) error { + // Create SA for aerospike backup service + if err := createServiceAccount(k8sClient, goctx.TODO(), common.AerospikeBackupService, namespace); err != nil { + return err + } + + awsSecret := make(map[string][]byte) + + resolvePath := os.ExpandEnv(awsCredentialPath) + + data, err := os.ReadFile(resolvePath) + if err != nil { + return err + } + + awsSecret["credentials"] = data + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
AWSSecretName, + Namespace: namespace, + }, + Type: corev1.SecretTypeOpaque, + Data: awsSecret, + } + + // Remove old object + _ = k8sClient.Delete(ctx, secret) + + return k8sClient.Create(ctx, secret) +} diff --git a/test/restore/restore_suite_test.go b/test/restore/restore_suite_test.go new file mode 100644 index 000000000..e6b52ccad --- /dev/null +++ b/test/restore/restore_suite_test.go @@ -0,0 +1,151 @@ +package restore + +import ( + "fmt" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8Runtime "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" + "github.com/aerospike/aerospike-kubernetes-operator/test" + "github.com/aerospike/aerospike-kubernetes-operator/test/backup" + backupservice "github.com/aerospike/aerospike-kubernetes-operator/test/backup_service" + "github.com/aerospike/aerospike-kubernetes-operator/test/cluster" +) + +var testEnv *envtest.Environment + +var k8sClient client.Client + +var scheme = k8Runtime.NewScheme() + +func TestRestore(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Restore Suite") +} + +var _ = BeforeSuite( + func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("Bootstrapping test environment") + var err error + + testEnv, _, k8sClient, _, err = test.BootStrapTestEnv(scheme) + Expect(err).NotTo(HaveOccurred()) + + By("Deploy Backup Service") + backupService, err := backupservice.NewBackupService() + Expect(err).ToNot(HaveOccurred()) + + backupService.Spec.Service = &asdbv1beta1.Service{ + Type: 
corev1.ServiceTypeLoadBalancer, + } + + backupServiceName = backupService.Name + backupServiceNamespace = backupService.Namespace + + err = backupservice.DeployBackupService(k8sClient, backupService) + Expect(err).ToNot(HaveOccurred()) + + cascadeDeleteTrue := true + + By(fmt.Sprintf("Deploy source Aerospike Cluster: %s", sourceAerospikeClusterNsNm.String())) + aeroCluster := cluster.CreateDummyAerospikeCluster(sourceAerospikeClusterNsNm, 2) + aeroCluster.Spec.Storage.BlockVolumePolicy.InputCascadeDelete = &cascadeDeleteTrue + aeroCluster.Spec.Storage.FileSystemVolumePolicy.InputCascadeDelete = &cascadeDeleteTrue + + err = cluster.DeployCluster(k8sClient, testCtx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + backupObj, err := backup.NewBackup(backupNsNm) + Expect(err).ToNot(HaveOccurred()) + + // Point to current suite's backup service + backupObj.Spec.BackupService.Name = backupServiceName + backupObj.Spec.BackupService.Namespace = backupServiceNamespace + + err = backup.CreateBackup(k8sClient, backupObj) + Expect(err).ToNot(HaveOccurred()) + + backupDataPaths, err := backup.GetBackupDataPaths(k8sClient, backupObj) + Expect(err).ToNot(HaveOccurred()) + + pkgLog.Info(fmt.Sprintf("BackupDataPaths: %v", backupDataPaths)) + Expect(backupDataPaths).ToNot(BeEmpty()) + + // Example backupDataPath = "/localStorage/test-sample-backup-test-routine/backup/1722353745635/data/test" + backupDataPath = backupDataPaths[0] + + By(fmt.Sprintf("Deploy destination Aerospike Cluster: %s", destinationAerospikeClusterNsNm.String())) + aeroCluster = cluster.CreateDummyAerospikeCluster(destinationAerospikeClusterNsNm, 2) + aeroCluster.Spec.Storage.BlockVolumePolicy.InputCascadeDelete = &cascadeDeleteTrue + aeroCluster.Spec.Storage.FileSystemVolumePolicy.InputCascadeDelete = &cascadeDeleteTrue + + err = cluster.DeployCluster(k8sClient, testCtx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + }) + +var _ = AfterSuite( + func() { + By("Delete Aerospike Cluster") + aeroClusters := 
[]asdbv1.AerospikeCluster{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: sourceAerospikeClusterNsNm.Name, + Namespace: sourceAerospikeClusterNsNm.Namespace, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: destinationAerospikeClusterNsNm.Name, + Namespace: destinationAerospikeClusterNsNm.Namespace, + }, + }, + } + + for idx := range aeroClusters { + aeroCluster := aeroClusters[idx] + err := cluster.DeleteCluster(k8sClient, testCtx, &aeroCluster) + Expect(err).ToNot(HaveOccurred()) + } + + By("Delete Backup") + backupObj := asdbv1beta1.AerospikeBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: backupNsNm.Name, + Namespace: backupNsNm.Namespace, + }, + } + + err := backup.DeleteBackup(k8sClient, &backupObj) + Expect(err).ToNot(HaveOccurred()) + + By("Delete Backup Service") + backupService := asdbv1beta1.AerospikeBackupService{ + ObjectMeta: metav1.ObjectMeta{ + Name: backupServiceName, + Namespace: backupServiceNamespace, + }, + } + + err = backupservice.DeleteBackupService(k8sClient, &backupService) + Expect(err).ToNot(HaveOccurred()) + + By("tearing down the test environment") + gexec.KillAndWait(5 * time.Second) + err = testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) + }, +) diff --git a/test/restore/restore_test.go b/test/restore/restore_test.go new file mode 100644 index 000000000..0229c2a49 --- /dev/null +++ b/test/restore/restore_test.go @@ -0,0 +1,191 @@ +package restore + +import ( + "encoding/json" + "strconv" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" + + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" +) + +var _ = Describe( + "Restore Test", func() { + + var ( + restore *asdbv1beta1.AerospikeRestore + err error + restoreNsNm = types.NamespacedName{ + Namespace: namespace, + Name: "sample-restore", + } + ) + + AfterEach(func() { + Expect(deleteRestore(k8sClient, restore)).ToNot(HaveOccurred()) + }) + + Context( + "When doing Invalid operations", func() { + It("Should fail when wrong format restore config is given", func() { + config := getRestoreConfigInMap(backupDataPath) + + // change the format from a single element to slice + config["destination"] = []interface{}{config["destination"]} + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + restore = newRestoreWithConfig(restoreNsNm, asdbv1beta1.Full, configBytes) + err = createRestore(k8sClient, restore) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when un-supported field is given in restore config", func() { + config := getRestoreConfigInMap(backupDataPath) + config["unknown"] = "unknown" + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + restore = newRestoreWithConfig(restoreNsNm, asdbv1beta1.Full, configBytes) + err = createRestore(k8sClient, restore) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("unknown field")) + }) + + It("Should fail when spec is updated", func() { + restore, err = newRestore(restoreNsNm, asdbv1beta1.Full) + Expect(err).ToNot(HaveOccurred()) + + err = createRestore(k8sClient, restore) + Expect(err).ToNot(HaveOccurred()) + + restore, err = getRestoreObj(k8sClient, restoreNsNm) + Expect(err).ToNot(HaveOccurred()) + + restore.Spec.Type = asdbv1beta1.Incremental + + err = k8sClient.Update(testCtx, restore) + Expect(err).To(HaveOccurred()) + 
Expect(err.Error()).To(ContainSubstring("aerospikeRestore Spec is immutable")) + }) + + It("Should fail restore when wrong backup path is given", func() { + config := getRestoreConfigInMap("wrong-backup-path") + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + restore = newRestoreWithConfig(restoreNsNm, asdbv1beta1.Full, configBytes) + + err = createRestoreWithTO(k8sClient, restore, 30*time.Second) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when routine/time is not given for Timestamp restore type", func() { + // getRestoreConfigInMap returns restore config without a routine, time and with source type + restoreConfig := getRestoreConfigInMap(backupDataPath) + delete(restoreConfig, common.SourceKey) + + configBytes, mErr := json.Marshal(restoreConfig) + Expect(mErr).ToNot(HaveOccurred()) + + restore = newRestoreWithConfig(restoreNsNm, asdbv1beta1.Timestamp, configBytes) + + err = createRestore(k8sClient, restore) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("restore point in time should be positive")) + }) + + It("Should fail when source field is given for Timestamp restore type", func() { + restore, err = newRestore(restoreNsNm, asdbv1beta1.Timestamp) + Expect(err).ToNot(HaveOccurred()) + + err = createRestore(k8sClient, restore) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("source field is not allowed in restore config")) + }) + + It("Should fail when routine field is given for Full/Incremental restore type", func() { + restoreConfig := getRestoreConfigInMap(backupDataPath) + restoreConfig[common.RoutineKey] = "test-routine" + + configBytes, mErr := json.Marshal(restoreConfig) + Expect(mErr).ToNot(HaveOccurred()) + + restore = newRestoreWithConfig(restoreNsNm, asdbv1beta1.Full, configBytes) + + err = createRestore(k8sClient, restore) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("routine field is not allowed in restore 
config")) + }) + + It("Should fail when time field is given for Full/Incremental restore type", func() { + restoreConfig := getRestoreConfigInMap(backupDataPath) + restoreConfig[common.TimeKey] = 1722408895094 + + configBytes, mErr := json.Marshal(restoreConfig) + Expect(mErr).ToNot(HaveOccurred()) + + restore = newRestoreWithConfig(restoreNsNm, asdbv1beta1.Full, configBytes) + + err = createRestore(k8sClient, restore) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("time field is not allowed in restore config")) + }) + }) + + Context( + "When doing valid operations", func() { + It( + "Should complete restore for Full restore type", func() { + restore, err = newRestore(restoreNsNm, asdbv1beta1.Full) + Expect(err).ToNot(HaveOccurred()) + + err = createRestore(k8sClient, restore) + Expect(err).ToNot(HaveOccurred()) + }, + ) + + It( + "Should complete restore for Incremental restore type", func() { + restore, err = newRestore(restoreNsNm, asdbv1beta1.Incremental) + Expect(err).ToNot(HaveOccurred()) + + err = createRestore(k8sClient, restore) + Expect(err).ToNot(HaveOccurred()) + }, + ) + + It( + "Should complete restore for Timestamp restore type", func() { + restoreConfig := getRestoreConfigInMap(backupDataPath) + delete(restoreConfig, common.SourceKey) + + parts := strings.Split(backupDataPath, "/") + + time := parts[len(parts)-3] + timeInt, err := strconv.Atoi(time) + Expect(err).ToNot(HaveOccurred()) + + // increase time by 1 millisecond to consider the latest backup under time bound + restoreConfig[common.TimeKey] = int64(timeInt) + 1 + restoreConfig[common.RoutineKey] = parts[len(parts)-5] + + configBytes, err := json.Marshal(restoreConfig) + Expect(err).ToNot(HaveOccurred()) + + restore = newRestoreWithConfig(restoreNsNm, asdbv1beta1.Timestamp, configBytes) + + err = createRestore(k8sClient, restore) + Expect(err).ToNot(HaveOccurred()) + }, + ) + }) + }) diff --git a/test/restore/test_utils.go b/test/restore/test_utils.go new file 
mode 100644 index 000000000..0fe94ed5e --- /dev/null +++ b/test/restore/test_utils.go @@ -0,0 +1,236 @@ +package restore + +import ( + "context" + "encoding/json" + "fmt" + "time" + + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/abhishekdwivedi3060/aerospike-backup-service/pkg/model" + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" +) + +const ( + timeout = 2 * time.Minute + interval = 2 * time.Second + namespace = "test" +) + +var testCtx = context.TODO() + +var backupServiceName, backupServiceNamespace string + +var backupDataPath string + +var pkgLog = ctrl.Log.WithName("restore") + +var backupNsNm = types.NamespacedName{ + Name: "sample-backup", + Namespace: namespace, +} + +var sourceAerospikeClusterNsNm = types.NamespacedName{ + Name: "aerocluster", + Namespace: namespace, +} + +var destinationAerospikeClusterNsNm = types.NamespacedName{ + Name: "destination-aerocluster", + Namespace: namespace, +} + +func newRestore(restoreNsNm types.NamespacedName, restoreType asdbv1beta1.RestoreType, +) (*asdbv1beta1.AerospikeRestore, error) { + configBytes, err := getRestoreConfBytes(backupDataPath) + if err != nil { + return nil, err + } + + restore := newRestoreWithEmptyConfig(restoreNsNm, restoreType) + + restore.Spec.Config = runtime.RawExtension{ + Raw: configBytes, + } + + return restore, nil +} + +func newRestoreWithConfig(restoreNsNm types.NamespacedName, restoreType asdbv1beta1.RestoreType, configBytes []byte, +) *asdbv1beta1.AerospikeRestore { + restore := newRestoreWithEmptyConfig(restoreNsNm, restoreType) + + restore.Spec.Config = runtime.RawExtension{ + Raw: configBytes, + } + + return restore +} + +func newRestoreWithEmptyConfig(restoreNsNm types.NamespacedName, restoreType 
asdbv1beta1.RestoreType, +) *asdbv1beta1.AerospikeRestore { + return &asdbv1beta1.AerospikeRestore{ + ObjectMeta: metav1.ObjectMeta{ + Name: restoreNsNm.Name, + Namespace: restoreNsNm.Namespace, + }, + Spec: asdbv1beta1.AerospikeRestoreSpec{ + BackupService: asdbv1beta1.BackupService{ + Name: backupServiceName, + Namespace: backupServiceNamespace, + }, + Type: restoreType, + }, + } +} + +func getRestoreObj(cl client.Client, restoreNsNm types.NamespacedName) (*asdbv1beta1.AerospikeRestore, error) { + var restore asdbv1beta1.AerospikeRestore + + if err := cl.Get(testCtx, restoreNsNm, &restore); err != nil { + return nil, err + } + + return &restore, nil +} + +func createRestore(cl client.Client, restore *asdbv1beta1.AerospikeRestore) error { + if err := cl.Create(testCtx, restore); err != nil { + return err + } + + return waitForRestore(cl, restore, timeout) +} + +func createRestoreWithTO(cl client.Client, restore *asdbv1beta1.AerospikeRestore, timeout time.Duration) error { + if err := cl.Create(testCtx, restore); err != nil { + return err + } + + return waitForRestore(cl, restore, timeout) +} + +func deleteRestore(cl client.Client, restore *asdbv1beta1.AerospikeRestore) error { + if err := cl.Delete(testCtx, restore); err != nil && !k8serrors.IsNotFound(err) { + return err + } + + for { + _, err := getRestoreObj(cl, types.NamespacedName{ + Namespace: restore.Namespace, + Name: restore.Name, + }) + + if err != nil { + if k8serrors.IsNotFound(err) { + break + } + + return err + } + + time.Sleep(1 * time.Second) + } + + return nil +} + +func waitForRestore(cl client.Client, restore *asdbv1beta1.AerospikeRestore, + timeout time.Duration) error { + namespaceName := types.NamespacedName{ + Name: restore.Name, Namespace: restore.Namespace, + } + + if err := wait.PollUntilContextTimeout( + testCtx, 1*time.Second, + timeout, true, func(ctx context.Context) (bool, error) { + if err := cl.Get(ctx, namespaceName, restore); err != nil { + return false, nil + } + + if 
restore.Status.Phase != asdbv1beta1.AerospikeRestoreCompleted { + pkgLog.Info(fmt.Sprintf("Restore is in %s phase", restore.Status.Phase)) + return false, nil + } + + return true, nil + }, + ); err != nil { + return err + } + + pkgLog.Info(fmt.Sprintf("Restore is in %s phase", restore.Status.Phase)) + + if restore.Status.JobID == nil { + return fmt.Errorf("restore job id is not set") + } + + if restore.Status.RestoreResult.Raw == nil { + return fmt.Errorf("restore result is not set") + } + + var restoreResult model.RestoreJobStatus + + if err := json.Unmarshal(restore.Status.RestoreResult.Raw, &restoreResult); err != nil { + return err + } + + if restoreResult.Status != model.JobStatusDone { + return fmt.Errorf("restore job status is not done") + } + + if restoreResult.Error != "" { + return fmt.Errorf("restore job failed with error: %s", restoreResult.Error) + } + + return nil +} + +func getRestoreConfBytes(backupPath string) ([]byte, error) { + restoreConfig := getRestoreConfigInMap(backupPath) + + configBytes, err := json.Marshal(restoreConfig) + if err != nil { + return nil, err + } + + pkgLog.Info(string(configBytes)) + + return configBytes, nil +} + +func getRestoreConfigInMap(backupPath string) map[string]interface{} { + return map[string]interface{}{ + "destination": map[string]interface{}{ + "label": "destinationCluster", + "credentials": map[string]interface{}{ + "password": "admin123", + "user": "admin", + }, + "seed-nodes": []map[string]interface{}{ + { + "host-name": fmt.Sprintf("%s.%s.svc.cluster.local", + destinationAerospikeClusterNsNm.Name, destinationAerospikeClusterNsNm.Namespace, + ), + "port": 3000, + }, + }, + }, + "policy": map[string]interface{}{ + "parallel": 3, + "no-generation": true, + "no-indexes": true, + }, + "source": map[string]interface{}{ + "path": backupPath, + "type": "local", + }, + } +} diff --git a/test/setup_test.go b/test/setup_test.go new file mode 100644 index 000000000..d818c9d62 --- /dev/null +++ b/test/setup_test.go @@ 
-0,0 +1,46 @@ +package test + +import ( + goctx "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe( + "Backup Service Test", func() { + + It("Should setup user RBAC", func() { + // Setup by user function + // test creating resource + // IN operator namespace + // Create aerospike-secret + // Create auth-secret (admin) + // Create auth-update (admin123) + + // For test1 + // Create aerospike-secret + // Create auth-secret (admin) + + // For test2 + // Create aerospike-secret + // Create auth-secret (admin) + + // For aerospike + // Create aerospike-secret + // Create auth-secret (admin) + + // For common + // Create namespace test1, test2, aerospike + // ServiceAccount: aerospike-cluster (operatorNs, test1, test2, aerospike) + // ClusterRole: aerospike-cluster + // ClusterRoleBinding: aerospike-cluster + + err := setupByUser(k8sClient, goctx.TODO()) + Expect(err).ToNot(HaveOccurred()) + + // Set up AerospikeBackupService RBAC and AWS secret + err = setupBackupServicePreReq(k8sClient, goctx.TODO(), namespace) + Expect(err).ToNot(HaveOccurred()) + }) + }) diff --git a/test/suite_test.go b/test/suite_test.go index b3c3ff2d3..58bc5b648 100644 --- a/test/suite_test.go +++ b/test/suite_test.go @@ -17,9 +17,6 @@ limitations under the License. package test import ( - goctx "context" - "flag" - "fmt" "testing" "time" @@ -27,12 +24,7 @@ import ( . 
"github.com/onsi/gomega" "github.com/onsi/gomega/gexec" admissionv1 "k8s.io/api/admission/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" k8Runtime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/client-go/rest" @@ -44,118 +36,25 @@ import ( // +kubebuilder:scaffold:imports asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" - "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. -var cfg *rest.Config - var testEnv *envtest.Environment -var k8sClient client.Client - -var k8sClientset *kubernetes.Clientset - -var cloudProvider CloudProvider - -var projectRoot string +var scheme = k8Runtime.NewScheme() -var ( - scheme = k8Runtime.NewScheme() -) +var cfg *rest.Config -var defaultNetworkType = flag.String("connect-through-network-type", "hostExternal", - "Network type is used to determine an appropriate access type. Can be 'pod',"+ - " 'hostInternal' or 'hostExternal'. AS client in the test will choose access type"+ - " which matches expected network type. 
See details in"+ - " https://docs.aerospike.com/docs/cloud/kubernetes/operator/Cluster-configuration-settings.html#network-policy") +var k8sClient client.Client func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Controller Suite") -} - -var _ = BeforeEach(func() { - By("Cleaning up all Aerospike clusters.") - - for idx := range testNamespaces { - deleteAllClusters(testNamespaces[idx]) - Expect(cleanupPVC(k8sClient, testNamespaces[idx])).NotTo(HaveOccurred()) - } -}) - -func deleteAllClusters(namespace string) { - ctx := goctx.TODO() - list := &asdbv1.AerospikeClusterList{} - listOps := &client.ListOptions{Namespace: namespace} - - err := k8sClient.List(ctx, list, listOps) - Expect(err).NotTo(HaveOccurred()) - - for clusterIndex := range list.Items { - By(fmt.Sprintf("Deleting cluster \"%s/%s\".", list.Items[clusterIndex].Namespace, list.Items[clusterIndex].Name)) - err := deleteCluster(k8sClient, ctx, &list.Items[clusterIndex]) - Expect(err).NotTo(HaveOccurred()) - } -} - -func cleanupPVC(k8sClient client.Client, ns string) error { - // List the pvc for this aeroCluster's statefulset - pvcList := &corev1.PersistentVolumeClaimList{} - clLabels := map[string]string{"app": "aerospike-cluster"} - labelSelector := labels.SelectorFromSet(clLabels) - listOps := &client.ListOptions{Namespace: ns, LabelSelector: labelSelector} - - if err := k8sClient.List(goctx.TODO(), pvcList, listOps); err != nil { - return err - } - - for pvcIndex := range pvcList.Items { - pkgLog.Info("Found pvc, deleting it", "pvcName", - pvcList.Items[pvcIndex].Name, "namespace", pvcList.Items[pvcIndex].Namespace) - - if utils.IsPVCTerminating(&pvcList.Items[pvcIndex]) { - continue - } - // if utils.ContainsString(pvc.Finalizers, "kubernetes.io/pvc-protection") { - // pvc.Finalizers = utils.RemoveString(pvc.Finalizers, "kubernetes.io/pvc-protection") - // if err := k8sClient.Patch(goctx.TODO(), &pvc, client.Merge); err != nil { - // return fmt.Errorf("could not patch %s finalizer 
from following pvc: %s: %w", - // "kubernetes.io/pvc-protection", pvc.Name, err) - // } - //} - if err := k8sClient.Delete(goctx.TODO(), &pvcList.Items[pvcIndex]); err != nil { - return fmt.Errorf("could not delete pvc %s: %w", pvcList.Items[pvcIndex].Name, err) - } - } - - return nil + RunSpecs(t, "Setup Suite") } -func deletePVC(k8sClient client.Client, pvcNamespacedName types.NamespacedName) error { - pvc := &corev1.PersistentVolumeClaim{} - if err := k8sClient.Get(goctx.TODO(), pvcNamespacedName, pvc); err != nil { - if errors.IsNotFound(err) { - return nil - } - - return err - } - - if utils.IsPVCTerminating(pvc) { - return nil - } - - if err := k8sClient.Delete(goctx.TODO(), pvc); err != nil { - return fmt.Errorf("could not delete pvc %s: %w", pvc.Name, err) - } - - return nil -} - -// This is used when running tests on existing cluster +// This is used when running tests on an existing cluster // user has to install its own operator then run cleanup and then start this var _ = BeforeSuite( @@ -163,7 +62,7 @@ var _ = BeforeSuite( logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) By("Bootstrapping test environment") - pkgLog.Info(fmt.Sprintf("Client will connect through '%s' network to Aerospike Clusters.", *defaultNetworkType)) + t := true testEnv = &envtest.Environment{ UseExistingCluster: &t, @@ -190,55 +89,10 @@ var _ = BeforeSuite( ) Expect(err).NotTo(HaveOccurred()) Expect(k8sClient).NotTo(BeNil()) - - k8sClientset = kubernetes.NewForConfigOrDie(cfg) - Expect(k8sClient).NotTo(BeNil()) - - projectRoot, err = getGitRepoRootPath() - Expect(err).NotTo(HaveOccurred()) - - ctx := goctx.TODO() - - // Setup by user function - // test creating resource - // IN operator namespace - // Create aerospike-secret - // Create auth-secret (admin) - // Create auth-update (admin123) - - // For test1 - // Create aerospike-secret - // Create auth-secret (admin) - - // For test2 - // Create aerospike-secret - // Create auth-secret (admin) - - // For 
aerospike - // Create aerospike-secret - // Create auth-secret (admin) - - // For common - // Create namespace test1, test2, aerospike - // ServiceAccount: aerospike-cluster (operatorNs, test1, test2, aerospike) - // ClusterRole: aerospike-cluster - // ClusterRoleBinding: aerospike-cluster - - // Need to create storageClass if not created already - err = setupByUser(k8sClient, ctx) - Expect(err).ToNot(HaveOccurred()) - cloudProvider, err = getCloudProvider(ctx, k8sClient) - Expect(err).ToNot(HaveOccurred()) }) var _ = AfterSuite( func() { - By("Cleaning up all pvcs") - - for idx := range testNamespaces { - _ = cleanupPVC(k8sClient, testNamespaces[idx]) - } - By("tearing down the test environment") gexec.KillAndWait(5 * time.Second) err := testEnv.Stop() diff --git a/test/test.sh b/test/test.sh index 8b93e24f2..9f437577d 100755 --- a/test/test.sh +++ b/test/test.sh @@ -62,4 +62,4 @@ echo "---------------------" export CUSTOM_INIT_REGISTRY="$REGISTRY" export IMAGE_PULL_SECRET_NAME="$IMAGE_PULL_SECRET" -make test FOCUS="$FOCUS" ARGS="$ARGS" +make all-test FOCUS="$FOCUS" ARGS="$ARGS" diff --git a/test/utils.go b/test/utils.go index 302cbc049..8db02085e 100644 --- a/test/utils.go +++ b/test/utils.go @@ -1,867 +1,65 @@ package test import ( - "bytes" - goctx "context" - "encoding/json" "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "reflect" - "strconv" - "strings" - "time" - set "github.com/deckarep/golang-set/v2" - "github.com/go-logr/logr" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" + admissionv1 "k8s.io/api/admission/v1" + "k8s.io/apimachinery/pkg/runtime" + utilRuntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes" - ctrl "sigs.k8s.io/controller-runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" 
"sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" - as "github.com/aerospike/aerospike-client-go/v7" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" - operatorUtils "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" - lib "github.com/aerospike/aerospike-management-lib" - "github.com/aerospike/aerospike-management-lib/info" + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" ) -var ( - namespace = "test" - storageClass = "ssd" - pkgLog = ctrl.Log.WithName("test") -) - -var secrets map[string][]byte -var cacertSecrets map[string][]byte - -const secretDir = "../config/samples/secrets" //nolint:gosec // for testing -const cacertSecretDir = "../config/samples/secrets/cacerts" //nolint:gosec // for testing - -const aerospikeSecretName = "aerospike-secret" -const tlsCacertSecretName = "aerospike-cacert-secret" //nolint:gosec // for testing -const authSecretName = "auth-secret" -const authSecretNameForUpdate = "auth-update" - -const multiClusterNs1 string = "test1" -const multiClusterNs2 string = "test2" -const aerospikeNs string = "aerospike" - -const zoneKey = "topology.kubernetes.io/zone" -const regionKey = "topology.kubernetes.io/region" - -const serviceTLSPort = 4333 -const serviceNonTLSPort = 3000 - -// list of all the namespaces used in test-suite -var testNamespaces = []string{namespace, multiClusterNs1, multiClusterNs2, aerospikeNs} - -const aerospikeConfigSecret string = "aerospike-config-secret" //nolint:gosec // for testing - -var aerospikeVolumeInitMethodDeleteFiles = asdbv1.AerospikeVolumeMethodDeleteFiles - -func initConfigSecret(secretDirectory string) (map[string][]byte, error) { - initSecrets := make(map[string][]byte) - - fileInfo, err := os.ReadDir(secretDirectory) - if err != nil { - return nil, err - } - - if len(fileInfo) == 0 { - return nil, fmt.Errorf("no secret file available in %s", secretDirectory) - } - - for _, file := range fileInfo { - if 
file.IsDir() { - // no need to check recursively - continue - } - - secret, err := os.ReadFile(filepath.Join(secretDirectory, file.Name())) - if err != nil { - return nil, fmt.Errorf("wrong secret file %s: %v", file.Name(), err) - } - - initSecrets[file.Name()] = secret - } - - return initSecrets, nil -} - -func setupByUser(k8sClient client.Client, ctx goctx.Context) error { - var err error - // Create configSecret - if secrets, err = initConfigSecret(secretDir); err != nil { - return fmt.Errorf("failed to init secrets: %v", err) - } - - // Create cacertSecret - if cacertSecrets, err = initConfigSecret(cacertSecretDir); err != nil { - return fmt.Errorf("failed to init secrets: %v", err) - } - - // Create preReq for namespaces used for testing - for idx := range testNamespaces { - if err := createClusterPreReq(k8sClient, ctx, testNamespaces[idx]); err != nil { - return err - } - } - - // Create another authSecret. Used in access-control tests - passUpdate := "admin321" - labels := getLabels() - - if err := createAuthSecret( - k8sClient, ctx, namespace, labels, authSecretNameForUpdate, passUpdate, - ); err != nil { - return err - } - - return createClusterRBAC(k8sClient, ctx) -} - -func createClusterPreReq( - k8sClient client.Client, ctx goctx.Context, namespace string, -) error { - labels := getLabels() - - if err := createNamespace(k8sClient, ctx, namespace); err != nil { - return err - } - - if err := createConfigSecret( - k8sClient, ctx, namespace, labels, - ); err != nil { - return err - } - - if err := createCacertSecret( - k8sClient, ctx, namespace, labels, - ); err != nil { - return err - } - - // Create authSecret - pass := "admin123" - - return createAuthSecret( - k8sClient, ctx, namespace, labels, authSecretName, pass, - ) -} - -func createCacertSecret( - k8sClient client.Client, ctx goctx.Context, namespace string, - labels map[string]string, -) error { - // Create configSecret - s := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
tlsCacertSecretName, - Namespace: namespace, - Labels: labels, - }, - Type: corev1.SecretTypeOpaque, - Data: cacertSecrets, - } - - // Remove old object - _ = k8sClient.Delete(ctx, s) - - // use test context's create helper to create the object and add a cleanup - // function for the new object - err := k8sClient.Create(ctx, s) - if err != nil { - return err - } - - return nil -} - -func createConfigSecret( - k8sClient client.Client, ctx goctx.Context, namespace string, - labels map[string]string, -) error { - // Create configSecret - s := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: aerospikeSecretName, - Namespace: namespace, - Labels: labels, - }, - Type: corev1.SecretTypeOpaque, - Data: secrets, - } - - // Remove old object - _ = k8sClient.Delete(ctx, s) - - // use test context's create helper to create the object and add a cleanup - // function for the new object - return k8sClient.Create(ctx, s) -} - -func createAuthSecret( - k8sClient client.Client, ctx goctx.Context, namespace string, - labels map[string]string, secretName, pass string, -) error { - // Create authSecret - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: namespace, - Labels: labels, - }, - Type: corev1.SecretTypeOpaque, - Data: map[string][]byte{ - "password": []byte(pass), - }, - } - // use test context's create helper to create the object and add a cleanup function for the new object - err := k8sClient.Create(ctx, secret) - if !errors.IsAlreadyExists(err) { - return err - } - - return nil -} - -func getLabels() map[string]string { - return map[string]string{"app": "aerospike-cluster"} -} - -func waitForAerospikeCluster( - k8sClient client.Client, ctx goctx.Context, - aeroCluster *asdbv1.AerospikeCluster, replicas int, - retryInterval, timeout time.Duration, expectedPhases []asdbv1.AerospikeClusterPhase, -) error { - var isValid bool - - err := wait.PollUntilContextTimeout(ctx, - retryInterval, timeout, true, func(ctx goctx.Context) 
(done bool, err error) { - // Fetch the AerospikeCluster instance - newCluster := &asdbv1.AerospikeCluster{} - err = k8sClient.Get( - ctx, types.NamespacedName{ - Name: aeroCluster.Name, Namespace: aeroCluster.Namespace, - }, newCluster, - ) - if err != nil { - if errors.IsNotFound(err) { - pkgLog.Info( - "Waiting for availability of %s AerospikeCluster\n", - "name", aeroCluster.Name, - ) - return false, nil - } - return false, err - } - - isValid = isClusterStateValid(aeroCluster, newCluster, replicas, expectedPhases) - return isValid, nil - }, - ) - - if err != nil { - return err - } - - pkgLog.Info("AerospikeCluster available\n") - - // make info call - return nil -} - -func isClusterStateValid( - aeroCluster *asdbv1.AerospikeCluster, - newCluster *asdbv1.AerospikeCluster, replicas int, expectedPhases []asdbv1.AerospikeClusterPhase, -) bool { - if int(newCluster.Status.Size) != replicas { - pkgLog.Info("Cluster size is not correct") - return false - } - - // Do not compare status with spec if cluster reconciliation is paused - // `paused` flag only exists in the spec and not in the status. - if !asdbv1.GetBool(aeroCluster.Spec.Paused) { - // Validate status - statusToSpec, err := asdbv1.CopyStatusToSpec(&newCluster.Status.AerospikeClusterStatusSpec) - if err != nil { - pkgLog.Error(err, "Failed to copy spec in status", "err", err) - return false - } - - if !reflect.DeepEqual(statusToSpec, &newCluster.Spec) { - pkgLog.Info("Cluster status is not matching the spec") - return false - } - } - - // TODO: This is not valid for tests where maxUnavailablePods flag is used. - // We can take the param in func to skip this check - // // Validate pods - // if len(newCluster.Status.Pods) != replicas { - // pkgLog.Info("Cluster status doesn't have pod status for all nodes. 
Cluster status may not have fully updated") - // return false - // } - - for podName := range newCluster.Status.Pods { - if newCluster.Status.Pods[podName].Aerospike.NodeID == "" { - pkgLog.Info("Cluster pod's nodeID is empty") - return false - } - - if operatorUtils.IsImageEqual(newCluster.Status.Pods[podName].Image, aeroCluster.Spec.Image) { - break - } - - pkgLog.Info( - fmt.Sprintf("Cluster pod's image %s not same as spec %s", newCluster.Status.Pods[podName].Image, - aeroCluster.Spec.Image, - ), - ) - - return false - } - - if newCluster.Labels[asdbv1.AerospikeAPIVersionLabel] != asdbv1.AerospikeAPIVersion { - pkgLog.Info("Cluster API version label is not correct") - return false - } - - // Validate phase - phaseSet := set.NewSet(expectedPhases...) - if !phaseSet.Contains(newCluster.Status.Phase) { - pkgLog.Info("Cluster phase is not correct") - return false - } - - pkgLog.Info("Cluster state is validated successfully") - - return true -} - -func getTimeout(nodes int32) time.Duration { - return 3 * time.Minute * time.Duration(nodes) -} - -func getPodLogs( - k8sClientset *kubernetes.Clientset, ctx goctx.Context, pod *corev1.Pod, -) string { - podLogOpts := corev1.PodLogOptions{} - req := k8sClientset.CoreV1().Pods(pod.Namespace).GetLogs( - pod.Name, &podLogOpts, - ) - - podLogs, err := req.Stream(ctx) - if err != nil { - return "error in opening stream" - } - - defer func(podLogs io.ReadCloser) { - _ = podLogs.Close() - }(podLogs) - - buf := new(bytes.Buffer) - - _, err = io.Copy(buf, podLogs) - if err != nil { - return "error in copy information from podLogs to buf" - } - - str := buf.String() - - return str -} - -func getRackID(pod *corev1.Pod) (int, error) { - rack, ok := pod.ObjectMeta.Labels["aerospike.com/rack-id"] - if !ok { - return 0, nil - } - - return strconv.Atoi(rack) -} - -// Copy makes a deep copy from src into dst. 
-func Copy(dst, src interface{}) error { - if dst == nil { - return fmt.Errorf("dst cannot be nil") - } - - if src == nil { - return fmt.Errorf("src cannot be nil") - } - - jsonBytes, err := json.Marshal(src) - if err != nil { - return fmt.Errorf("unable to marshal src: %s", err) - } - - err = json.Unmarshal(jsonBytes, dst) - if err != nil { - return fmt.Errorf("unable to unmarshal into dst: %s", err) - } - - return nil -} - -type AerospikeConfSpec struct { - version string - network map[string]interface{} - service map[string]interface{} - security map[string]interface{} - namespaces []interface{} -} - -func getOperatorCert() *asdbv1.AerospikeOperatorClientCertSpec { - return &asdbv1.AerospikeOperatorClientCertSpec{ - TLSClientName: "aerospike-a-0.test-runner", - AerospikeOperatorCertSource: asdbv1.AerospikeOperatorCertSource{ - SecretCertSource: &asdbv1.AerospikeSecretCertSource{ - SecretName: "aerospike-secret", - CaCertsFilename: "cacert.pem", - ClientCertFilename: "svc_cluster_chain.pem", - ClientKeyFilename: "svc_key.pem", - }, - }, - } -} -func getNetworkTLSConfig() map[string]interface{} { - return map[string]interface{}{ - "service": map[string]interface{}{ - "tls-name": "aerospike-a-0.test-runner", - "tls-port": serviceTLSPort, - "port": serviceNonTLSPort, - }, - "fabric": map[string]interface{}{ - "tls-name": "aerospike-a-0.test-runner", - "tls-port": 3011, - "port": 3001, - }, - "heartbeat": map[string]interface{}{ - "tls-name": "aerospike-a-0.test-runner", - "tls-port": 3012, - "port": 3002, - }, - - "tls": []interface{}{ - map[string]interface{}{ - "name": "aerospike-a-0.test-runner", - "cert-file": "/etc/aerospike/secret/svc_cluster_chain.pem", - "key-file": "/etc/aerospike/secret/svc_key.pem", - "ca-file": "/etc/aerospike/secret/cacert.pem", - }, - }, - } -} - -func getNetworkConfig() map[string]interface{} { - return map[string]interface{}{ - "service": map[string]interface{}{ - "port": serviceNonTLSPort, - }, - "fabric": map[string]interface{}{ - 
"port": 3001, - }, - "heartbeat": map[string]interface{}{ - "port": 3002, - }, - } -} -func NewAerospikeConfSpec(image string) (*AerospikeConfSpec, error) { - ver, err := asdbv1.GetImageVersion(image) - if err != nil { - return nil, err - } - - service := map[string]interface{}{ - "feature-key-file": "/etc/aerospike/secret/features.conf", - } - network := getNetworkConfig() - namespaces := []interface{}{ - map[string]interface{}{ - "name": "test", - "replication-factor": 1, - "storage-engine": map[string]interface{}{ - "type": "memory", - "data-size": 1073741824, - }, - }, +func BootStrapTestEnv(scheme *runtime.Scheme) (testEnv *envtest.Environment, cfg *rest.Config, + k8sClient client.Client, k8sClientSet *kubernetes.Clientset, err error) { + t := true + testEnv = &envtest.Environment{ + UseExistingCluster: &t, } - return &AerospikeConfSpec{ - version: ver, - service: service, - network: network, - namespaces: namespaces, - security: nil, - }, nil -} - -func (acs *AerospikeConfSpec) getVersion() string { - return acs.version -} + cfg, err = testEnv.Start() -func (acs *AerospikeConfSpec) setEnableSecurity(enableSecurity bool) error { - cmpVal, err := lib.CompareVersions(acs.version, "5.7.0") if err != nil { - return err + return testEnv, cfg, k8sClient, k8sClientSet, err } - if cmpVal >= 0 { - if enableSecurity { - security := map[string]interface{}{} - acs.security = security - } - - return nil - } - - acs.security = map[string]interface{}{} - acs.security["enable-security"] = enableSecurity - - return nil -} - -func (acs *AerospikeConfSpec) setEnableQuotas(enableQuotas bool) error { - cmpVal, err := lib.CompareVersions(acs.version, "5.6.0") - if err != nil { - return err + if cfg == nil { + err = fmt.Errorf("cfg is nil") + return testEnv, cfg, k8sClient, k8sClientSet, err } - if cmpVal >= 0 { - if acs.security == nil { - acs.security = map[string]interface{}{} - } + utilRuntime.Must(clientgoscheme.AddToScheme(scheme)) + 
utilRuntime.Must(asdbv1.AddToScheme(scheme)) + utilRuntime.Must(admissionv1.AddToScheme(scheme)) + utilRuntime.Must(asdbv1beta1.AddToScheme(scheme)) - acs.security["enable-quotas"] = enableQuotas - } - - return nil -} - -func (acs *AerospikeConfSpec) getSpec() map[string]interface{} { - spec := map[string]interface{}{ - "service": acs.service, - "network": acs.network, - "namespaces": acs.namespaces, - } - if acs.security != nil { - spec["security"] = acs.security - } - - return spec -} - -func ValidateAttributes( - actual []map[string]string, expected map[string]string, -) bool { - for key, val := range expected { - for i := 0; i < len(actual); i++ { - m := actual[i] - - v, ok := m[key] - if ok && v == val { - return true - } - } - } - - return false -} + // +kubebuilder:scaffold:scheme -func getAeroClusterConfig( - namespace types.NamespacedName, image string, -) (*asdbv1.AerospikeCluster, error) { - version, err := asdbv1.GetImageVersion(image) - if err != nil { - return nil, err - } - - cmpVal1, err := lib.CompareVersions(version, "5.7.0") - if err != nil { - return nil, err - } - - cmpVal2, err := lib.CompareVersions(version, "7.0.0") - if err != nil { - return nil, err - } - - switch { - case cmpVal2 >= 0: - return createAerospikeClusterPost640( - namespace, 2, image, - ), nil - - case cmpVal1 >= 0: - return createAerospikeClusterPost560( - namespace, 2, image, - ), nil - - case cmpVal1 < 0: - return createAerospikeClusterPost460( - namespace, 2, image, - ), nil - - default: - return nil, fmt.Errorf("invalid image version %s", version) - } -} - -func getAerospikeStorageConfig( - containerName string, inputCascadeDelete bool, - storageSize string, - cloudProvider CloudProvider, -) *asdbv1.AerospikeStorageSpec { - // Create pods and storage devices write data to the devices. - // - deletes cluster without cascade delete of volumes. - // - recreate and check if volumes are reinitialized correctly. 
- fileDeleteInitMethod := asdbv1.AerospikeVolumeMethodDeleteFiles - ddInitMethod := asdbv1.AerospikeVolumeMethodDD - blkDiscardInitMethod := asdbv1.AerospikeVolumeMethodBlkdiscard - blkDiscardWipeMethod := asdbv1.AerospikeVolumeMethodBlkdiscard - - if cloudProvider == CloudProviderAWS { - // Blkdiscard method is not supported in AWS, so it is initialized as DD Method - blkDiscardInitMethod = asdbv1.AerospikeVolumeMethodDD - blkDiscardWipeMethod = asdbv1.AerospikeVolumeMethodDD - } - - return &asdbv1.AerospikeStorageSpec{ - BlockVolumePolicy: asdbv1.AerospikePersistentVolumePolicySpec{ - InputCascadeDelete: &inputCascadeDelete, - }, - FileSystemVolumePolicy: asdbv1.AerospikePersistentVolumePolicySpec{ - InputCascadeDelete: &inputCascadeDelete, - }, - Volumes: []asdbv1.VolumeSpec{ - { - Name: "file-noinit", - Source: asdbv1.VolumeSource{ - PersistentVolume: &asdbv1.PersistentVolumeSpec{ - Size: resource.MustParse(storageSize), - StorageClass: storageClass, - VolumeMode: corev1.PersistentVolumeFilesystem, - }, - }, - Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ - Path: "/opt/aerospike/filesystem-noinit", - }, - }, - { - Name: "file-init", - AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ - InputInitMethod: &fileDeleteInitMethod, - }, - Source: asdbv1.VolumeSource{ - PersistentVolume: &asdbv1.PersistentVolumeSpec{ - Size: resource.MustParse(storageSize), - StorageClass: storageClass, - VolumeMode: corev1.PersistentVolumeFilesystem, - }, - }, - Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ - Path: "/opt/aerospike/filesystem-init", - }, - }, - { - Name: "device-noinit", - Source: asdbv1.VolumeSource{ - PersistentVolume: &asdbv1.PersistentVolumeSpec{ - Size: resource.MustParse(storageSize), - StorageClass: storageClass, - VolumeMode: corev1.PersistentVolumeBlock, - }, - }, - Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ - Path: "/opt/aerospike/blockdevice-noinit", - }, - }, - { - Name: "device-dd", - 
AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ - InputInitMethod: &ddInitMethod, - }, - Source: asdbv1.VolumeSource{ - PersistentVolume: &asdbv1.PersistentVolumeSpec{ - Size: resource.MustParse(storageSize), - StorageClass: storageClass, - VolumeMode: corev1.PersistentVolumeBlock, - }, - }, - Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ - Path: "/opt/aerospike/blockdevice-init-dd", - }, - }, - { - Name: "device-blkdiscard", - AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ - InputInitMethod: &blkDiscardInitMethod, - InputWipeMethod: &blkDiscardWipeMethod, - }, - Source: asdbv1.VolumeSource{ - PersistentVolume: &asdbv1.PersistentVolumeSpec{ - Size: resource.MustParse(storageSize), - StorageClass: storageClass, - VolumeMode: corev1.PersistentVolumeBlock, - }, - }, - Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ - Path: "/opt/aerospike/blockdevice-init-blkdiscard", - }, - }, - { - Name: "file-noinit-1", - Source: asdbv1.VolumeSource{ - PersistentVolume: &asdbv1.PersistentVolumeSpec{ - Size: resource.MustParse(storageSize), - StorageClass: storageClass, - VolumeMode: corev1.PersistentVolumeFilesystem, - }, - }, - Sidecars: []asdbv1.VolumeAttachment{ - { - ContainerName: containerName, - Path: "/opt/aerospike/filesystem-noinit", - }, - }, - }, - { - Name: "device-dd-1", - AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ - InputInitMethod: &ddInitMethod, - }, - Source: asdbv1.VolumeSource{ - PersistentVolume: &asdbv1.PersistentVolumeSpec{ - Size: resource.MustParse(storageSize), - StorageClass: storageClass, - VolumeMode: corev1.PersistentVolumeBlock, - }, - }, - Sidecars: []asdbv1.VolumeAttachment{ - { - ContainerName: containerName, - Path: "/opt/aerospike/blockdevice-init-dd", - }, - }, - }, - getStorageVolumeForSecret(), - }, - } -} - -//nolint:unparam // generic function -func contains(elems []string, v string) bool { - for _, s := range elems { - if v == 
s { - return true - } - } - - return false -} - -func getGitRepoRootPath() (string, error) { - path, err := exec.Command("git", "rev-parse", "--show-toplevel").Output() - if err != nil { - return "", err - } - - return strings.TrimSpace(string(path)), nil -} - -func getAerospikeConfigFromNode(log logr.Logger, k8sClient client.Client, ctx goctx.Context, - clusterNamespacedName types.NamespacedName, configContext string, pod *asdbv1.AerospikePodStatus) (lib.Stats, error) { - aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) - if err != nil { - return nil, err - } - - host, err := createHost(pod) - if err != nil { - return nil, err - } - - asinfo := info.NewAsInfo( - log, host, getClientPolicy(aeroCluster, k8sClient), + k8sClient, err = client.New( + cfg, client.Options{Scheme: scheme}, ) - confs, err := getAsConfig(asinfo, configContext) - if err != nil { - return nil, err - } - - return confs[configContext].(lib.Stats), nil -} - -func getPasswordFromSecret(k8sClient client.Client, - secretNamespcedName types.NamespacedName, passFileName string, -) (string, error) { - secret := &corev1.Secret{} - - err := k8sClient.Get(goctx.TODO(), secretNamespcedName, secret) if err != nil { - return "", fmt.Errorf("failed to get secret %s: %v", secretNamespcedName, err) + return testEnv, cfg, k8sClient, k8sClientSet, err } - passBytes, ok := secret.Data[passFileName] - if !ok { - return "", fmt.Errorf( - "failed to get password file in secret %s, fileName %s", - secretNamespcedName, passFileName, - ) + if k8sClient == nil { + err = fmt.Errorf("k8sClient is nil") + return testEnv, cfg, k8sClient, k8sClientSet, err } - return string(passBytes), nil -} - -func getAerospikeClient(aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client) (*as.Client, error) { - policy := getClientPolicy(aeroCluster, k8sClient) - policy.FailIfNotConnected = false - policy.Timeout = time.Minute * 2 - policy.UseServicesAlternate = true - policy.ConnectionQueueSize = 100 - 
policy.LimitConnectionsToQueueSize = true - - hostList := make([]*as.Host, 0, len(aeroCluster.Status.Pods)) - - for podName := range aeroCluster.Status.Pods { - pod := aeroCluster.Status.Pods[podName] - - host, err := createHost(&pod) - if err != nil { - return nil, err - } - - hostList = append(hostList, host) - } - - asClient, err := as.NewClientWithPolicyAndHost(policy, hostList...) - if asClient == nil { - return nil, fmt.Errorf( - "failed to create aerospike cluster asClient: %v", err, - ) - } - - _, _ = asClient.WarmUp(-1) - - // Wait for 5 minutes for cluster to connect - for j := 0; j < 150; j++ { - if isConnected := asClient.IsConnected(); isConnected { - break - } + k8sClientSet = kubernetes.NewForConfigOrDie(cfg) - time.Sleep(time.Second * 2) + if k8sClientSet == nil { + err = fmt.Errorf("k8sClientSet is nil") + return testEnv, cfg, k8sClient, k8sClientSet, err } - return asClient, nil + return testEnv, cfg, k8sClient, k8sClientSet, nil }