From 3327e02e8727b3398a6130fb564abbe2b0298230 Mon Sep 17 00:00:00 2001 From: Abhisek Dwivedi Date: Wed, 17 Jul 2024 14:43:02 +0530 Subject: [PATCH] Refactored existing Cluster test-suite and added Backup test-suite --- Makefile | 29 +- api/v1beta1/aerospikebackup_types.go | 5 +- api/v1beta1/aerospikebackup_webhook.go | 10 +- api/v1beta1/zz_generated.deepcopy.go | 6 + .../asdb.aerospike.com_aerospikebackups.yaml | 18 + .../samples/asdb_v1beta1_aerospikebackup.yaml | 6 +- controllers/backup/reconciler.go | 20 +- ...n_aerospikebackups.asdb.aerospike.com.yaml | 18 + test/backup/backup_suite_test.go | 96 ++ test/backup/backup_test.go | 197 ++++ test/backup/test_utils.go | 292 ++++++ .../backup_service_suite_test.go | 49 +- test/backup_service/backup_service_test.go | 24 +- .../{helper_test.go => test_utils.go} | 17 +- test/{ => cluster}/access_control_test.go | 53 +- test/{ => cluster}/aero_info.go | 176 +--- test/{ => cluster}/batch_restart_pods_test.go | 2 +- .../batch_scaledown_pods_test.go | 2 +- test/{ => cluster}/cluster_helper.go | 80 +- test/{ => cluster}/cluster_resource_test.go | 2 +- .../cluster_storage_cleanup_test.go | 2 +- test/{ => cluster}/cluster_test.go | 11 +- test/{ => cluster}/dynamic_config_test.go | 7 +- test/{ => cluster}/host_network_test.go | 4 +- test/{ => cluster}/k8snode_block_list_test.go | 5 +- test/{ => cluster}/large_reconcile_test.go | 4 +- test/{ => cluster}/ldap_auth_test.go | 5 +- test/{ => cluster}/multicluster_test.go | 12 +- test/{ => cluster}/network_policy_test.go | 17 +- .../on_demand_operations_test.go | 2 +- .../{ => cluster}/poddisruptionbudget_test.go | 2 +- test/{ => cluster}/podspec_test.go | 2 +- .../rack_enabled_cluster_test.go | 2 +- test/{ => cluster}/rack_management_test.go | 2 +- test/{ => cluster}/rack_utils.go | 18 +- test/{ => cluster}/sample_files_test.go | 4 +- test/{ => cluster}/security_context_test.go | 2 +- test/{ => cluster}/services_test.go | 2 +- .../{ => cluster}/statefulset_storage_test.go | 6 +- test/{ => cluster}/storage_init_test.go | 13 +- test/{ => cluster}/storage_test.go | 5 +- test/{ => cluster}/storage_wipe_test.go | 5 +- test/{ => cluster}/strong_consistency_test.go | 2 +- test/cluster/suite_test.go | 122 +++ test/{ => cluster}/test_client.go | 2 +- .../tls_authenticate_client_test.go | 11 +- test/cluster/utils.go | 873 ++++++++++++++++++ test/{ => cluster}/warm_restart_test.go | 6 +- test/cluster_prereq.go | 200 +++- test/suite_test.go | 128 +-- test/test.sh | 2 +- test/utils.go | 862 +---------------- 52 files changed, 2126 insertions(+), 1316 deletions(-) create mode 100644 test/backup/backup_suite_test.go create mode 100644 test/backup/backup_test.go create mode 100644 test/backup/test_utils.go rename test/backup_service/{helper_test.go => test_utils.go} (94%) rename test/{ => cluster}/access_control_test.go (98%) rename test/{ => cluster}/aero_info.go (62%) rename test/{ => cluster}/batch_restart_pods_test.go (99%) rename test/{ => cluster}/batch_scaledown_pods_test.go (99%) rename test/{ => cluster}/cluster_helper.go (95%) rename test/{ => cluster}/cluster_resource_test.go (99%) rename test/{ => cluster}/cluster_storage_cleanup_test.go (99%) rename test/{ => cluster}/cluster_test.go (99%) rename test/{ => cluster}/dynamic_config_test.go (99%) rename test/{ => cluster}/host_network_test.go (98%) rename test/{ => cluster}/k8snode_block_list_test.go (99%) rename test/{ => cluster}/large_reconcile_test.go (99%) rename test/{ => cluster}/ldap_auth_test.go (97%) rename test/{ => cluster}/multicluster_test.go (96%) 
rename test/{ => cluster}/network_policy_test.go (99%) rename test/{ => cluster}/on_demand_operations_test.go (99%) rename test/{ => cluster}/poddisruptionbudget_test.go (99%) rename test/{ => cluster}/podspec_test.go (99%) rename test/{ => cluster}/rack_enabled_cluster_test.go (99%) rename test/{ => cluster}/rack_management_test.go (99%) rename test/{ => cluster}/rack_utils.go (98%) rename test/{ => cluster}/sample_files_test.go (98%) rename test/{ => cluster}/security_context_test.go (99%) rename test/{ => cluster}/services_test.go (99%) rename test/{ => cluster}/statefulset_storage_test.go (99%) rename test/{ => cluster}/storage_init_test.go (99%) rename test/{ => cluster}/storage_test.go (99%) rename test/{ => cluster}/storage_wipe_test.go (99%) rename test/{ => cluster}/strong_consistency_test.go (99%) create mode 100644 test/cluster/suite_test.go rename test/{ => cluster}/test_client.go (99%) rename test/{ => cluster}/tls_authenticate_client_test.go (98%) create mode 100644 test/cluster/utils.go rename test/{ => cluster}/warm_restart_test.go (98%) diff --git a/Makefile b/Makefile index 5654df55f..39c617a1a 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -# # /bin/sh does not support source command needed in make test +# # /bin/sh does not support source command needed in make test-all #SHELL := /bin/bash ROOT_DIR=$(shell git rev-parse --show-toplevel) @@ -140,10 +140,31 @@ $(GOLANGCI_LINT): $(LOCALBIN) go-lint: golanci-lint ## Run golangci-lint against code. $(GOLANGCI_LINT) run -.PHONY: test -test: manifests generate fmt vet envtest ## Run tests. +.PHONY: all-test +all-test: manifests generate fmt vet envtest ## Run tests. # KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" cd $(shell pwd)/test; go run github.com/onsi/ginkgo/v2/ginkgo -coverprofile cover.out -show-node-events -v -timeout=12h0m0s -focus=${FOCUS} --junit-report="junit.xml" -- ${ARGS} + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" cd $(shell pwd)/test; go run github.com/onsi/ginkgo/v2/ginkgo -r --keep-going -coverprofile cover.out -show-node-events -v -timeout=12h0m0s --junit-report="junit.xml" -- ${ARGS} + +.PHONY: cluster-test +cluster-test: manifests generate fmt vet envtest ## Run tests. + # KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" cd $(shell pwd)/test; go run github.com/onsi/ginkgo/v2/ginkgo -v . ./cluster -coverprofile cover.out -show-node-events -timeout=12h0m0s --junit-report="junit.xml" -- ${ARGS} + + +.PHONY: backup-service-test +backup-service-test: manifests generate fmt vet envtest ## Run tests. + # KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" cd $(shell pwd)/test; go run github.com/onsi/ginkgo/v2/ginkgo -v . ./backup_service -coverprofile cover.out -show-node-events -timeout=12h0m0s --junit-report="junit.xml" -- ${ARGS} + +.PHONY: backup-test +backup-test: manifests generate fmt vet envtest ## Run tests. + # KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... 
-coverprofile cover.out + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" cd $(shell pwd)/test; go run github.com/onsi/ginkgo/v2/ginkgo -v . ./backup -coverprofile cover.out -show-node-events -timeout=12h0m0s --junit-report="junit.xml" -- ${ARGS} + +.PHONY: restore-test +restore-test: manifests generate fmt vet envtest ## Run tests. + # KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" cd $(shell pwd)/test; go run github.com/onsi/ginkgo/v2/ginkgo -v . ./restore -coverprofile cover.out -show-node-events -timeout=12h0m0s --junit-report="junit.xml" -- ${ARGS} ##@ Build diff --git a/api/v1beta1/aerospikebackup_types.go b/api/v1beta1/aerospikebackup_types.go index 7edd57cc3..0b904bd2c 100644 --- a/api/v1beta1/aerospikebackup_types.go +++ b/api/v1beta1/aerospikebackup_types.go @@ -60,8 +60,9 @@ type OnDemandSpec struct { // AerospikeBackupStatus defines the observed state of AerospikeBackup type AerospikeBackupStatus struct { - OnDemand []OnDemandSpec `json:"onDemand,omitempty"` - + BackupService *BackupService `json:"backupService"` + Config runtime.RawExtension `json:"config"` + OnDemand []OnDemandSpec `json:"onDemand,omitempty"` // TODO: finalize the status and phase } diff --git a/api/v1beta1/aerospikebackup_webhook.go b/api/v1beta1/aerospikebackup_webhook.go index 92d0a374a..4bc6d2e51 100644 --- a/api/v1beta1/aerospikebackup_webhook.go +++ b/api/v1beta1/aerospikebackup_webhook.go @@ -79,6 +79,10 @@ func (r *AerospikeBackup) ValidateUpdate(old runtime.Object) (admission.Warnings oldObj := old.(*AerospikeBackup) + if !reflect.DeepEqual(r.Spec.BackupService, oldObj.Spec.BackupService) { + return nil, fmt.Errorf("backup service cannot be updated") + } + if err := r.validateBackupConfig(); err != nil { return nil, err } @@ -155,6 +159,10 @@ func (r *AerospikeBackup) validateBackupConfig() error { return err } + if len(aeroClusters) != 1 { + return fmt.Errorf("only one aerospike cluster is allowed in backup config") + } + if len(config.AerospikeClusters) == 0 { config.AerospikeClusters = make(map[string]*model.AerospikeCluster) } @@ -205,7 +213,7 @@ func (r *AerospikeBackup) validateBackupConfig() error { } if err := config.Validate(); err != nil { - return nil + return err } // Validate on-demand backup diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 7a363cd1b..6c0b45a69 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -253,6 +253,12 @@ func (in *AerospikeBackupSpec) DeepCopy() *AerospikeBackupSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AerospikeBackupStatus) DeepCopyInto(out *AerospikeBackupStatus) { *out = *in + if in.BackupService != nil { + in, out := &in.BackupService, &out.BackupService + *out = new(BackupService) + **out = **in + } + in.Config.DeepCopyInto(&out.Config) if in.OnDemand != nil { in, out := &in.OnDemand, &out.OnDemand *out = make([]OnDemandSpec, len(*in)) diff --git a/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml b/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml index fd5bc6256..906b860c8 100644 --- a/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml +++ b/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml @@ -88,6 +88,21 @@ spec: status: description: AerospikeBackupStatus defines the observed state of AerospikeBackup properties: + backupService: + properties: + name: + description: Backup service name + type: string + namespace: + description: Backup service namespace + type: string + required: + - name + - namespace + type: object + config: + type: object + x-kubernetes-preserve-unknown-fields: true onDemand: items: properties: @@ -106,6 +121,9 @@ spec: - routineName type: object type: array + required: + - backupService + - config type: object type: object served: true diff --git a/config/samples/asdb_v1beta1_aerospikebackup.yaml b/config/samples/asdb_v1beta1_aerospikebackup.yaml index 7a7df8676..a5267e420 100644 --- a/config/samples/asdb_v1beta1_aerospikebackup.yaml +++ b/config/samples/asdb_v1beta1_aerospikebackup.yaml @@ -12,9 +12,9 @@ spec: backupService: name: aerospikebackupservice-sample namespace: aerospike - onDemand: - - id: first-ad-hoc-backup - routineName: test-routine +# onDemand: +# - id: first-ad-hoc-backup +# routineName: test-routine config: aerospike-cluster: test-cluster: diff --git a/controllers/backup/reconciler.go b/controllers/backup/reconciler.go index 637926eb8..60d425052 100644 --- a/controllers/backup/reconciler.go +++ b/controllers/backup/reconciler.go @@ -303,6 +303,22 @@ func (r *SingleBackupReconciler) ScheduleOnDemandBackup() error { } func (r *SingleBackupReconciler) reconcileBackup() error { + specHash, err := utils.GetHash(string(r.aeroBackup.Spec.Config.Raw)) + if err != nil { + return err + } + + statusHash, err := utils.GetHash(string(r.aeroBackup.Status.Config.Raw)) + if err != nil { + return err + } + + if specHash == statusHash { + r.Log.Info("Backup config not changed", + "name", r.aeroBackup.Name, "namespace", r.aeroBackup.Namespace) + return nil + } + r.Log.Info("Registering backup", "name", r.aeroBackup.Name, "namespace", r.aeroBackup.Namespace) serviceClient, err := backup_service.GetBackupServiceClient(r.Client, r.aeroBackup.Spec.BackupService) @@ -451,9 +467,9 @@ func (r *SingleBackupReconciler) unregisterBackup() error { } func (r *SingleBackupReconciler) updateStatus() error { + r.aeroBackup.Status.BackupService = r.aeroBackup.Spec.BackupService + r.aeroBackup.Status.Config = r.aeroBackup.Spec.Config r.aeroBackup.Status.OnDemand = r.aeroBackup.Spec.OnDemand - r.Log.Info(fmt.Sprintf("Updating status: %+v", r.aeroBackup.Status)) - return r.Client.Status().Update(context.Background(), r.aeroBackup) } diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml index fd5bc6256..906b860c8 100644 --- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml +++ 
b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml @@ -88,6 +88,21 @@ spec: status: description: AerospikeBackupStatus defines the observed state of AerospikeBackup properties: + backupService: + properties: + name: + description: Backup service name + type: string + namespace: + description: Backup service namespace + type: string + required: + - name + - namespace + type: object + config: + type: object + x-kubernetes-preserve-unknown-fields: true onDemand: items: properties: @@ -106,6 +121,9 @@ spec: - routineName type: object type: array + required: + - backupService + - config type: object type: object served: true diff --git a/test/backup/backup_suite_test.go b/test/backup/backup_suite_test.go new file mode 100644 index 000000000..c7c18147d --- /dev/null +++ b/test/backup/backup_suite_test.go @@ -0,0 +1,96 @@ +package backup + +import ( + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8Runtime "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" + "github.com/aerospike/aerospike-kubernetes-operator/test" + backupservice "github.com/aerospike/aerospike-kubernetes-operator/test/backup_service" + "github.com/aerospike/aerospike-kubernetes-operator/test/cluster" +) + +var testEnv *envtest.Environment + +var k8sClient client.Client + +var scheme = k8Runtime.NewScheme() + +func TestBackup(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Backup Suite") +} + +var _ = BeforeSuite( + func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("Bootstrapping test environment") + var err error + + testEnv, _, k8sClient, _, err = test.BootStrapTestEnv(scheme) + Expect(err).NotTo(HaveOccurred()) + + By("Deploy Backup Service") + backupService, err := backupservice.NewBackupService() + Expect(err).ToNot(HaveOccurred()) + + backupService.Spec.Service = &asdbv1beta1.Service{ + Type: corev1.ServiceTypeLoadBalancer, + } + + backupServiceName = backupService.Name + backupServiceNamespace = backupService.Namespace + + err = backupservice.DeployBackupService(k8sClient, backupService) + Expect(err).ToNot(HaveOccurred()) + + By("Deploy Aerospike Cluster") + aeroCluster := cluster.CreateDummyAerospikeCluster(aerospikeNsNm, 2) + + err = cluster.DeployCluster(k8sClient, testCtx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + }) + +var _ = AfterSuite( + func() { + By("Delete Aerospike Cluster") + aeroCluster := asdbv1.AerospikeCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: aerospikeNsNm.Name, + Namespace: aerospikeNsNm.Namespace, + }, + } + + err := cluster.DeleteCluster(k8sClient, testCtx, &aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Delete Backup Service") + backupService := asdbv1beta1.AerospikeBackupService{ + ObjectMeta: metav1.ObjectMeta{ + Name: backupServiceName, + Namespace: backupServiceNamespace, + }, + } + + err = backupservice.DeleteBackupService(k8sClient, &backupService) + Expect(err).ToNot(HaveOccurred()) + + By("tearing down the test environment") + gexec.KillAndWait(5 * time.Second) + err = testEnv.Stop() + 
Expect(err).ToNot(HaveOccurred()) + }, +) diff --git a/test/backup/backup_test.go b/test/backup/backup_test.go new file mode 100644 index 000000000..dec7c764a --- /dev/null +++ b/test/backup/backup_test.go @@ -0,0 +1,197 @@ +package backup + +import ( + "encoding/json" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" +) + +var _ = Describe( + "Backup Service Test", func() { + + var ( + backup *asdbv1beta1.AerospikeBackup + err error + ) + + AfterEach(func() { + Expect(deleteBackup(k8sClient, backup)).ToNot(HaveOccurred()) + }) + + Context( + "When doing Invalid operations", func() { + It("Should fail when wrong format backup config is given", func() { + backup, err = newBackup() + Expect(err).ToNot(HaveOccurred()) + + badConfig, gErr := getWrongBackupConfBytes() + Expect(gErr).ToNot(HaveOccurred()) + backup.Spec.Config.Raw = badConfig + + err = deployBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when more than 1 cluster is given in backup config", func() { + config := getBackupConfigInMap() + aeroCluster := config[common.AerospikeClusterKey].(map[string]interface{}) + aeroCluster["cluster-two"] = aeroCluster["test-cluster"] + config[common.AerospikeClusterKey] = aeroCluster + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(configBytes) + err = deployBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when on-demand backup is given at the time of creation", func() { + backup, err = newBackup() + Expect(err).ToNot(HaveOccurred()) + + backup.Spec.OnDemand = []asdbv1beta1.OnDemandSpec{ + { + ID: "on-demand", + RoutineName: "test-routine", + }, + } + + err = deployBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when non-existing routine is given in on-demand backup", func() { + backup, err = newBackup() + Expect(err).ToNot(HaveOccurred()) + + err = deployBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + backup, err = getBackupObj(k8sClient, backup.Name, backup.Namespace) + Expect(err).ToNot(HaveOccurred()) + + backup.Spec.OnDemand = []asdbv1beta1.OnDemandSpec{ + { + ID: "on-demand", + RoutineName: "non-existing-routine", + }, + } + + err = k8sClient.Update(testCtx, backup) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when backup service is not present", func() { + backup, err = newBackup() + Expect(err).ToNot(HaveOccurred()) + + backup.Spec.BackupService.Name = "wrong-backup-service" + + err = deployBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when backup service reference is updated", func() { + backup, err = newBackup() + Expect(err).ToNot(HaveOccurred()) + + err = deployBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + backup, err = getBackupObj(k8sClient, backup.Name, backup.Namespace) + Expect(err).ToNot(HaveOccurred()) + + backup.Spec.BackupService.Name = "updated-backup-service" + + err = k8sClient.Update(testCtx, backup) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when non-existing policy is referred in Backup routine", func() { + config := getBackupConfigInMap() + routines := config[common.BackupRoutinesKey].(map[string]interface{}) + routines["test-routine"].(map[string]interface{})["backup-policy"] = "non-existing-policy" + 
config[common.BackupRoutinesKey] = routines + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(configBytes) + err = deployBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when non-existing cluster is referred in Backup routine", func() { + config := getBackupConfigInMap() + routines := config[common.BackupRoutinesKey].(map[string]interface{}) + routines["test-routine"].(map[string]interface{})["source-cluster"] = "non-existing-cluster" + config[common.BackupRoutinesKey] = routines + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(configBytes) + err = deployBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + }) + + It("Should fail when non-existing storage is referred in Backup routine", func() { + config := getBackupConfigInMap() + routines := config[common.BackupRoutinesKey].(map[string]interface{}) + routines["test-routine"].(map[string]interface{})["storage"] = "non-existing-storage" + config[common.BackupRoutinesKey] = routines + + configBytes, mErr := json.Marshal(config) + Expect(mErr).ToNot(HaveOccurred()) + + backup = newBackupWithConfig(configBytes) + err = deployBackup(k8sClient, backup) + Expect(err).To(HaveOccurred()) + }) + + }, + ) + + Context("When doing Valid operations", func() { + It("Should trigger backup when correct backup config is given", func() { + backup, err = newBackup() + Expect(err).ToNot(HaveOccurred()) + err = deployBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + err = validateTriggeredBackup(k8sClient, backupServiceName, backupServiceNamespace, backup) + Expect(err).ToNot(HaveOccurred()) + + }) + + It("Should trigger on-demand backup when given", func() { + backup, err = newBackup() + Expect(err).ToNot(HaveOccurred()) + err = deployBackup(k8sClient, backup) + Expect(err).ToNot(HaveOccurred()) + + backup, err = getBackupObj(k8sClient, backup.Name, backup.Namespace) + Expect(err).ToNot(HaveOccurred()) + + backup.Spec.OnDemand = []asdbv1beta1.OnDemandSpec{ + { + ID: "on-demand", + RoutineName: "test-routine", + }, + } + + err = k8sClient.Update(testCtx, backup) + Expect(err).ToNot(HaveOccurred()) + + err = validateTriggeredBackup(k8sClient, backupServiceName, backupServiceNamespace, backup) + Expect(err).ToNot(HaveOccurred()) + }) + + }) + }, +) diff --git a/test/backup/test_utils.go b/test/backup/test_utils.go new file mode 100644 index 000000000..4debc1457 --- /dev/null +++ b/test/backup/test_utils.go @@ -0,0 +1,292 @@ +package backup + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "reflect" + "time" + + "github.com/abhishekdwivedi3060/aerospike-backup-service/pkg/model" + "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" + corev1 "k8s.io/api/core/v1" + + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" +) + +const ( + timeout = 2 * time.Minute + interval = 2 * time.Second + name = "sample-backup" + namespace = "test" +) + +var testCtx = context.TODO() + +var backupServiceName, backupServiceNamespace string + +var pkgLog = ctrl.Log.WithName("backup") + +var aerospikeNsNm = 
types.NamespacedName{ + Name: "aerocluster", + Namespace: namespace, +} + +func newBackup() (*asdbv1beta1.AerospikeBackup, error) { + configBytes, err := getBackupConfBytes() + if err != nil { + return nil, err + } + + return &asdbv1beta1.AerospikeBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: asdbv1beta1.AerospikeBackupSpec{ + BackupService: &asdbv1beta1.BackupService{ + Name: backupServiceName, + Namespace: backupServiceNamespace, + }, + Config: runtime.RawExtension{ + Raw: configBytes, + }, + }, + }, nil +} + +func newBackupWithConfig(conf []byte) *asdbv1beta1.AerospikeBackup { + return &asdbv1beta1.AerospikeBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: asdbv1beta1.AerospikeBackupSpec{ + BackupService: &asdbv1beta1.BackupService{ + Name: backupServiceName, + Namespace: backupServiceNamespace, + }, + Config: runtime.RawExtension{ + Raw: conf, + }, + }, + } +} + +func getBackupObj(cl client.Client, name, namespace string) (*asdbv1beta1.AerospikeBackup, error) { + var backup asdbv1beta1.AerospikeBackup + + if err := cl.Get(testCtx, types.NamespacedName{Name: name, Namespace: namespace}, &backup); err != nil { + return nil, err + } + + return &backup, nil +} + +func deployBackup(cl client.Client, backup *asdbv1beta1.AerospikeBackup) error { + if err := cl.Create(testCtx, backup); err != nil { + return err + } + + return waitForBackup(cl, backup, timeout) +} + +func deleteBackup(cl client.Client, backup *asdbv1beta1.AerospikeBackup) error { + if err := cl.Delete(testCtx, backup); err != nil && !k8serrors.IsNotFound(err) { + return err + } + + return nil +} + +func waitForBackup(cl client.Client, backup *asdbv1beta1.AerospikeBackup, + timeout time.Duration) error { + namespaceName := types.NamespacedName{ + Name: backup.Name, Namespace: backup.Namespace, + } + + return wait.PollUntilContextTimeout( + testCtx, 1*time.Second, + timeout, true, func(ctx context.Context) (bool, error) { + if err := cl.Get(ctx, namespaceName, backup); err != nil { + return false, nil + } + + status := asdbv1beta1.AerospikeBackupStatus{} + status.BackupService = backup.Spec.BackupService + status.Config = backup.Spec.Config + status.OnDemand = backup.Spec.OnDemand + + if !reflect.DeepEqual(status, backup.Status) { + pkgLog.Info("Backup status not updated yet") + return false, nil + } + return true, nil + }) +} + +func getBackupConfBytes() ([]byte, error) { + backupConfig := getBackupConfigInMap() + + configBytes, err := json.Marshal(backupConfig) + if err != nil { + return nil, err + } + + pkgLog.Info(string(configBytes)) + + return configBytes, nil +} + +func getBackupConfigInMap() map[string]interface{} { + return map[string]interface{}{ + common.AerospikeClusterKey: map[string]interface{}{ + "test-cluster": map[string]interface{}{ + "credentials": map[string]interface{}{ + "password": "admin123", + "user": "admin", + }, + "seed-nodes": []map[string]interface{}{ + { + "host-name": "aerocluster.aerospike.svc.cluster.local", + "port": 3000, + }, + }, + }, + }, + common.BackupRoutinesKey: map[string]interface{}{ + "test-routine": map[string]interface{}{ + "backup-policy": "test-policy", + "interval-cron": "@daily", + "incr-interval-cron": "@hourly", + "namespaces": []string{"test"}, + "source-cluster": "test-cluster", + "storage": "local", + }, + }, + } +} + +func getWrongBackupConfBytes() ([]byte, error) { + backupConfig := getBackupConfigInMap() + + // change the format from map to list + 
backupConfig[common.BackupRoutinesKey] = []interface{}{ + backupConfig[common.BackupRoutinesKey], + } + + configBytes, err := json.Marshal(backupConfig) + if err != nil { + return nil, err + } + + pkgLog.Info(string(configBytes)) + + return configBytes, nil +} + +// validateTriggeredBackup validates if the backup is triggered by checking the current config of backup-service +func validateTriggeredBackup(k8sClient client.Client, backupServiceName, backupServiceNamespace string, + backup *asdbv1beta1.AerospikeBackup) error { + var backupK8sService corev1.Service + + // Wait for Service LB IP to be populated + if err := wait.PollUntilContextTimeout(testCtx, interval, timeout, true, + func(ctx context.Context) (bool, error) { + if err := k8sClient.Get(testCtx, + types.NamespacedName{Name: backupServiceName, Namespace: backupServiceNamespace}, + &backupK8sService); err != nil { + return false, err + } + + if backupK8sService.Status.LoadBalancer.Ingress == nil { + return false, nil + } + + return true, nil + }); err != nil { + return err + } + + var body []byte + + // Wait for Backup service to be ready + if err := wait.PollUntilContextTimeout(testCtx, interval, timeout, true, + func(ctx context.Context) (bool, error) { + resp, err := http.Get("http://" + backupK8sService.Status.LoadBalancer.Ingress[0].IP + ":8081/v1/config") + if err != nil { + pkgLog.Error(err, "Failed to get backup service config") + return false, nil + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return false, fmt.Errorf("backup service config fetch failed with status code: %d", + resp.StatusCode) + } + + // Validate the config + body, err = io.ReadAll(resp.Body) + if err != nil { + return false, err + } + + return true, nil + }); err != nil { + return err + } + + var config model.Config + + if err := yaml.Unmarshal(body, &config); err != nil { + return err + } + + desiredConfigMap := make(map[string]interface{}) + + if err := yaml.Unmarshal(backup.Spec.Config.Raw, &desiredConfigMap); err != nil { + return err + } + + if _, ok := desiredConfigMap[common.AerospikeClusterKey]; !ok { + return fmt.Errorf("aerospike-cluster key not found in backup config") + } + + if _, ok := desiredConfigMap[common.BackupRoutinesKey]; !ok { + return fmt.Errorf("backup-routines key not found in backup config") + } + + newCluster := desiredConfigMap[common.AerospikeClusterKey].(map[string]interface{}) + + for name := range newCluster { + if _, ok := config.AerospikeClusters[name]; !ok { + return fmt.Errorf("cluster %s not found in backup config", name) + } + } + + pkgLog.Info("Backup cluster info is found in backup config") + + routines := desiredConfigMap[common.BackupRoutinesKey].(map[string]interface{}) + + for name := range routines { + if _, ok := config.BackupRoutines[name]; !ok { + return fmt.Errorf("routine %s not found in backup config", name) + } + } + + pkgLog.Info("Backup routines info is found in backup config") + + return nil +} diff --git a/test/backup_service/backup_service_suite_test.go b/test/backup_service/backup_service_suite_test.go index f8391366f..2479fb3da 100644 --- a/test/backup_service/backup_service_suite_test.go +++ b/test/backup_service/backup_service_suite_test.go @@ -1,48 +1,31 @@ package backupservice import ( - "context" "testing" "time" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/onsi/gomega/gexec" - admissionv1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8Runtime "k8s.io/apimachinery/pkg/runtime" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" - asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" - asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" "github.com/aerospike/aerospike-kubernetes-operator/controllers/common" + "github.com/aerospike/aerospike-kubernetes-operator/test" ) -var cfg *rest.Config - var testEnv *envtest.Environment var k8sClient client.Client var scheme = k8Runtime.NewScheme() -var testCtx = context.TODO() - -var pkgLog = ctrl.Log.WithName("backupservice") - -const ( - name = "backup-service" - namespace = "test" -) - func TestBackupService(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "BackupService Suite") @@ -53,35 +36,10 @@ var _ = BeforeSuite( logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) By("Bootstrapping test environment") - t := true - testEnv = &envtest.Environment{ - UseExistingCluster: &t, - } - var err error - - cfg, err = testEnv.Start() - Expect(err).NotTo(HaveOccurred()) - Expect(cfg).NotTo(BeNil()) - - err = clientgoscheme.AddToScheme(scheme) - Expect(err).NotTo(HaveOccurred()) - - err = asdbv1.AddToScheme(scheme) - Expect(err).NotTo(HaveOccurred()) - - err = asdbv1beta1.AddToScheme(scheme) - Expect(err).NotTo(HaveOccurred()) - err = admissionv1.AddToScheme(scheme) - Expect(err).NotTo(HaveOccurred()) - - // +kubebuilder:scaffold:scheme - - k8sClient, err = client.New( - cfg, client.Options{Scheme: scheme}, - ) + var err error + testEnv, _, k8sClient, _, err = test.BootStrapTestEnv(scheme) Expect(err).NotTo(HaveOccurred()) - Expect(k8sClient).NotTo(BeNil()) sa := corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ @@ -98,6 +56,7 @@ var _ = BeforeSuite( var _ = AfterSuite( func() { + By("tearing down the test environment") gexec.KillAndWait(5 * time.Second) err := testEnv.Stop() diff --git a/test/backup_service/backup_service_test.go b/test/backup_service/backup_service_test.go index f49b21cc1..ed53cdd34 100644 --- a/test/backup_service/backup_service_test.go +++ b/test/backup_service/backup_service_test.go @@ -20,26 +20,26 @@ var _ = Describe( ) AfterEach(func() { - Expect(deleteBackupService(k8sClient, backupService)).ToNot(HaveOccurred()) + Expect(DeleteBackupService(k8sClient, backupService)).ToNot(HaveOccurred()) }) Context( "When doing Invalid operations", func() { It("Should fail when wrong format backup config is given", func() { - backupService, err = newBackupService() + backupService, err = NewBackupService() Expect(err).ToNot(HaveOccurred()) badConfig, gErr := getWrongBackupServiceConfBytes() Expect(gErr).ToNot(HaveOccurred()) backupService.Spec.Config.Raw = badConfig - err = deployBackupService(k8sClient, backupService) + err = DeployBackupService(k8sClient, backupService) Expect(err).To(HaveOccurred()) }, ) It("Should fail when wrong image is given", func() { - backupService, err = newBackupService() + backupService, err = NewBackupService() Expect(err).ToNot(HaveOccurred()) backupService.Spec.Image = "wrong-image" @@ -53,16 +53,16 @@ var _ = Describe( 
Context("When doing Valid operations", func() { It("Should deploy backup service components when correct backup config is given", func() { - backupService, err = newBackupService() + backupService, err = NewBackupService() Expect(err).ToNot(HaveOccurred()) - err = deployBackupService(k8sClient, backupService) + err = DeployBackupService(k8sClient, backupService) Expect(err).ToNot(HaveOccurred()) }) It("Should restart backup service deployment pod when config is changed", func() { - backupService, err = newBackupService() + backupService, err = NewBackupService() Expect(err).ToNot(HaveOccurred()) - err = deployBackupService(k8sClient, backupService) + err = DeployBackupService(k8sClient, backupService) Expect(err).ToNot(HaveOccurred()) podList, gErr := getBackupServicePodList(k8sClient, backupService) @@ -88,9 +88,9 @@ var _ = Describe( }) It("Should restart backup service deployment pod when pod spec is changed", func() { - backupService, err = newBackupService() + backupService, err = NewBackupService() Expect(err).ToNot(HaveOccurred()) - err = deployBackupService(k8sClient, backupService) + err = DeployBackupService(k8sClient, backupService) Expect(err).ToNot(HaveOccurred()) podList, gErr := getBackupServicePodList(k8sClient, backupService) @@ -121,9 +121,9 @@ var _ = Describe( }) It("Should change K8s service type when service type is changed in CR", func() { - backupService, err = newBackupService() + backupService, err = NewBackupService() Expect(err).ToNot(HaveOccurred()) - err := deployBackupService(k8sClient, backupService) + err := DeployBackupService(k8sClient, backupService) Expect(err).ToNot(HaveOccurred()) svc, err := getBackupK8sServiceObj(k8sClient, name, namespace) diff --git a/test/backup_service/helper_test.go b/test/backup_service/test_utils.go similarity index 94% rename from test/backup_service/helper_test.go rename to test/backup_service/test_utils.go index fe709c863..22c6a5152 100644 --- a/test/backup_service/helper_test.go +++ b/test/backup_service/test_utils.go @@ -14,6 +14,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" @@ -24,11 +25,17 @@ import ( const BackupServiceImage = "aerospike.jfrog.io/ecosystem-container-prod-local/aerospike-backup-service:1.0.0" const ( - timeout = 2 * time.Minute - interval = 2 * time.Second + timeout = 2 * time.Minute + interval = 2 * time.Second + name = "backup-service" + namespace = "test" ) -func newBackupService() (*asdbv1beta1.AerospikeBackupService, error) { +var testCtx = context.TODO() + +var pkgLog = ctrl.Log.WithName("backupservice") + +func NewBackupService() (*asdbv1beta1.AerospikeBackupService, error) { configBytes, err := getBackupServiceConfBytes() if err != nil { return nil, err @@ -68,7 +75,7 @@ func getBackupK8sServiceObj(cl client.Client, name, namespace string) (*corev1.S return &svc, nil } -func deployBackupService(cl client.Client, backupService *asdbv1beta1.AerospikeBackupService) error { +func DeployBackupService(cl client.Client, backupService *asdbv1beta1.AerospikeBackupService) error { if err := cl.Create(testCtx, backupService); err != nil { return err } @@ -236,7 +243,7 @@ storage: return configBytes, nil } -func deleteBackupService( +func DeleteBackupService( k8sClient client.Client, backService *asdbv1beta1.AerospikeBackupService, ) error { diff --git a/test/access_control_test.go b/test/cluster/access_control_test.go similarity index 
98% rename from test/access_control_test.go rename to test/cluster/access_control_test.go index 449c838f1..64ebee231 100644 --- a/test/access_control_test.go +++ b/test/cluster/access_control_test.go @@ -1,6 +1,6 @@ //go:build !noac -package test +package cluster import ( goctx "context" @@ -21,6 +21,7 @@ import ( as "github.com/aerospike/aerospike-client-go/v7" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" aerospikecluster "github.com/aerospike/aerospike-kubernetes-operator/controllers" + "github.com/aerospike/aerospike-kubernetes-operator/test" ) const ( @@ -1244,7 +1245,7 @@ var _ = Describe( { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -1367,7 +1368,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ // Missing required user admin role. "sys-admin", @@ -1376,7 +1377,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", "sys-admin", @@ -1385,7 +1386,7 @@ var _ = Describe( { Name: "userToDrop", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", }, @@ -1453,7 +1454,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ // Missing required user admin role. "sys-admin", @@ -1462,7 +1463,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", "sys-admin", @@ -1471,7 +1472,7 @@ var _ = Describe( { Name: "userToDrop", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", }, @@ -1673,7 +1674,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretNameForUpdate, + SecretName: test.AuthSecretNameForUpdate, Roles: []string{ "sys-admin", "user-admin", @@ -1682,7 +1683,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretNameForUpdate, + SecretName: test.AuthSecretNameForUpdate, Roles: []string{ "data-admin", "read-write-udf", @@ -1759,7 +1760,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -1768,7 +1769,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", "sys-admin", @@ -1777,7 +1778,7 @@ var _ = Describe( { Name: "userToDrop", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", }, @@ -1825,7 +1826,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretNameForUpdate, + SecretName: test.AuthSecretNameForUpdate, Roles: []string{ "sys-admin", "user-admin", @@ -1834,7 +1835,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretNameForUpdate, + SecretName: test.AuthSecretNameForUpdate, Roles: []string{ "data-admin", "read-write-udf", @@ -1921,7 +1922,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -1930,7 +1931,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: 
[]string{ "profiler", "sys-admin", @@ -1939,7 +1940,7 @@ var _ = Describe( { Name: "userToDrop", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", }, @@ -1997,7 +1998,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -2006,7 +2007,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", "sys-admin", @@ -2015,7 +2016,7 @@ var _ = Describe( { Name: "userToDrop", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", }, @@ -2075,7 +2076,7 @@ var _ = Describe( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -2084,7 +2085,7 @@ var _ = Describe( { Name: "profileUser", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", "sys-admin", @@ -2093,7 +2094,7 @@ var _ = Describe( { Name: "userToDrop", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "profiler", }, @@ -2205,7 +2206,7 @@ var _ = Describe( // Get default password from secret. secretNamespcedName := types.NamespacedName{ - Name: aerospikeSecretName, + Name: test.AerospikeSecretName, Namespace: aeroCluster.Namespace, } passFileName := "password.conf" @@ -2238,7 +2239,7 @@ var _ = Describe( Expect(err).ToNot(HaveOccurred()) // Set correct secret name for admin user credentials. - aeroCluster.Spec.AerospikeAccessControl.Users[0].SecretName = authSecretName + aeroCluster.Spec.AerospikeAccessControl.Users[0].SecretName = test.AuthSecretName err = updateCluster(k8sClient, ctx, aeroCluster) Expect(err).ToNot(HaveOccurred()) diff --git a/test/aero_info.go b/test/cluster/aero_info.go similarity index 62% rename from test/aero_info.go rename to test/cluster/aero_info.go index 9269eb1bd..a0bf81b75 100644 --- a/test/aero_info.go +++ b/test/cluster/aero_info.go @@ -1,4 +1,4 @@ -package test +package cluster // Aerospike client and info testing utilities. 
// @@ -6,50 +6,20 @@ package test import ( goctx "context" "fmt" - "strings" "time" "github.com/go-logr/logr" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" as "github.com/aerospike/aerospike-client-go/v7" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" - "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" lib "github.com/aerospike/aerospike-management-lib" "github.com/aerospike/aerospike-management-lib/deployment" "github.com/aerospike/aerospike-management-lib/info" ) -type CloudProvider int - -const ( - CloudProviderUnknown CloudProvider = iota - CloudProviderAWS - CloudProviderGCP -) - -func getServiceForPod( - pod *corev1.Pod, k8sClient client.Client, -) (*corev1.Service, error) { - service := &corev1.Service{} - err := k8sClient.Get( - goctx.TODO(), - types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, service, - ) - - if err != nil { - return nil, fmt.Errorf( - "failed to get service for pod %s: %v", pod.Name, err, - ) - } - - return service, nil -} - func newAsConn( _ logr.Logger, aeroCluster *asdbv1.AerospikeCluster, pod *corev1.Pod, k8sClient client.Client, @@ -245,133 +215,6 @@ func newHostConn( return deployment.NewHostConn(log, host, asConn), nil } -func getPodList( - aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client, -) (*corev1.PodList, error) { - podList := &corev1.PodList{} - labelSelector := labels.SelectorFromSet(utils.LabelsForAerospikeCluster(aeroCluster.Name)) - listOps := &client.ListOptions{ - Namespace: aeroCluster.Namespace, LabelSelector: labelSelector, - } - - if err := k8sClient.List(goctx.TODO(), podList, listOps); err != nil { - return nil, err - } - - return podList, nil -} - -func getSTSList( - aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client, -) (*appsv1.StatefulSetList, error) { - stsList := &appsv1.StatefulSetList{} - labelSelector := labels.SelectorFromSet(utils.LabelsForAerospikeCluster(aeroCluster.Name)) - listOps := &client.ListOptions{ - Namespace: aeroCluster.Namespace, LabelSelector: labelSelector, - } - - if err := k8sClient.List(goctx.TODO(), stsList, listOps); err != nil { - return nil, err - } - - return stsList, nil -} - -func getNodeList(ctx goctx.Context, k8sClient client.Client) ( - *corev1.NodeList, error, -) { - nodeList := &corev1.NodeList{} - if err := k8sClient.List(ctx, nodeList); err != nil { - return nil, err - } - - return nodeList, nil -} - -func getZones(ctx goctx.Context, k8sClient client.Client) ([]string, error) { - unqZones := map[string]int{} - - nodes, err := getNodeList(ctx, k8sClient) - if err != nil { - return nil, err - } - - for idx := range nodes.Items { - unqZones[nodes.Items[idx].Labels[zoneKey]] = 1 - } - - zones := make([]string, 0, len(unqZones)) - - for zone := range unqZones { - zones = append(zones, zone) - } - - return zones, nil -} - -func getRegion(ctx goctx.Context, k8sClient client.Client) (string, error) { - nodes, err := getNodeList(ctx, k8sClient) - if err != nil { - return "", err - } - - if len(nodes.Items) == 0 { - return "", fmt.Errorf("node list empty: %v", nodes.Items) - } - - return nodes.Items[0].Labels[regionKey], nil -} - -func getCloudProvider( - ctx goctx.Context, k8sClient client.Client, -) (CloudProvider, error) { - labelKeys := map[string]struct{}{} - - nodes, err := getNodeList(ctx, k8sClient) - if err != nil { - return CloudProviderUnknown, err - } - - for idx := range nodes.Items { - for labelKey := 
range nodes.Items[idx].Labels { - if strings.Contains(labelKey, "cloud.google.com") { - return CloudProviderGCP, nil - } - - if strings.Contains(labelKey, "eks.amazonaws.com") { - return CloudProviderAWS, nil - } - - labelKeys[labelKey] = struct{}{} - } - - provider := determineByProviderID(&nodes.Items[idx]) - if provider != CloudProviderUnknown { - return provider, nil - } - } - - labelKeysSlice := make([]string, 0, len(labelKeys)) - - for labelKey := range labelKeys { - labelKeysSlice = append(labelKeysSlice, labelKey) - } - - return CloudProviderUnknown, fmt.Errorf( - "can't determin cloud platform by node's labels: %v", labelKeysSlice, - ) -} - -func determineByProviderID(node *corev1.Node) CloudProvider { - if strings.Contains(node.Spec.ProviderID, "gce") { - return CloudProviderGCP - } else if strings.Contains(node.Spec.ProviderID, "aws") { - return CloudProviderAWS - } - // TODO add cloud provider detection for Azure - return CloudProviderUnknown -} - func newAllHostConn( log logr.Logger, aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client, @@ -399,23 +242,6 @@ func newAllHostConn( return hostConns, nil } -func getAeroClusterPVCList( - aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client, -) ([]corev1.PersistentVolumeClaim, error) { - // List the pvc for this aeroCluster's statefulset - pvcList := &corev1.PersistentVolumeClaimList{} - labelSelector := labels.SelectorFromSet(utils.LabelsForAerospikeCluster(aeroCluster.Name)) - listOps := &client.ListOptions{ - Namespace: aeroCluster.Namespace, LabelSelector: labelSelector, - } - - if err := k8sClient.List(goctx.TODO(), pvcList, listOps); err != nil { - return nil, err - } - - return pvcList.Items, nil -} - func getAsConfig(asinfo *info.AsInfo, cmd string) (lib.Stats, error) { var ( confs lib.Stats diff --git a/test/batch_restart_pods_test.go b/test/cluster/batch_restart_pods_test.go similarity index 99% rename from test/batch_restart_pods_test.go rename to test/cluster/batch_restart_pods_test.go index 6e05d9cab..e5c1dc87e 100644 --- a/test/batch_restart_pods_test.go +++ b/test/cluster/batch_restart_pods_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/batch_scaledown_pods_test.go b/test/cluster/batch_scaledown_pods_test.go similarity index 99% rename from test/batch_scaledown_pods_test.go rename to test/cluster/batch_scaledown_pods_test.go index c216ef48a..13a33c870 100644 --- a/test/batch_scaledown_pods_test.go +++ b/test/cluster/batch_scaledown_pods_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/cluster_helper.go b/test/cluster/cluster_helper.go similarity index 95% rename from test/cluster_helper.go rename to test/cluster/cluster_helper.go index 22a9a64c0..a129f5410 100644 --- a/test/cluster_helper.go +++ b/test/cluster/cluster_helper.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -20,11 +20,13 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" internalerrors "github.com/aerospike/aerospike-kubernetes-operator/errors" "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" + "github.com/aerospike/aerospike-kubernetes-operator/test" lib "github.com/aerospike/aerospike-management-lib" "github.com/aerospike/aerospike-management-lib/info" ) @@ -42,6 +44,19 @@ const ( 
latestSchemaVersion = "7.1.0" ) +var ( + storageClass = "ssd" + namespace = "test" + pkgLog = ctrl.Log.WithName("cluster") +) + +const aerospikeConfigSecret string = "aerospike-config-secret" //nolint:gosec // for testing + +const serviceTLSPort = 4333 +const serviceNonTLSPort = 3000 + +var aerospikeVolumeInitMethodDeleteFiles = asdbv1.AerospikeVolumeMethodDeleteFiles + var ( retryInterval = time.Second * 5 cascadeDeleteFalse = false @@ -70,7 +85,7 @@ func rollingRestartClusterByEnablingTLS( aeroCluster.Spec.OperatorClientCertSpec = &asdbv1.AerospikeOperatorClientCertSpec{ AerospikeOperatorCertSource: asdbv1.AerospikeOperatorCertSource{ SecretCertSource: &asdbv1.AerospikeSecretCertSource{ - SecretName: aerospikeSecretName, + SecretName: test.AerospikeSecretName, CaCertsFilename: "cacert.pem", ClientCertFilename: "svc_cluster_chain.pem", ClientKeyFilename: "svc_key.pem", @@ -746,6 +761,15 @@ func deleteCluster( return nil } +// DeleteCluster is the public variant of deleteCluster +// Remove this when deleteCluster will be made public +func DeleteCluster( + k8sClient client.Client, ctx goctx.Context, + aeroCluster *asdbv1.AerospikeCluster, +) error { + return deleteCluster(k8sClient, ctx, aeroCluster) +} + func deployCluster( k8sClient client.Client, ctx goctx.Context, aeroCluster *asdbv1.AerospikeCluster, @@ -756,6 +780,15 @@ func deployCluster( ) } +// DeployCluster is the public variant of deployCluster +// Remove this when deployCluster will be made public +func DeployCluster( + k8sClient client.Client, ctx goctx.Context, + aeroCluster *asdbv1.AerospikeCluster, +) error { + return deployCluster(k8sClient, ctx, aeroCluster) +} + func deployClusterWithTO( k8sClient client.Client, ctx goctx.Context, aeroCluster *asdbv1.AerospikeCluster, @@ -854,7 +887,7 @@ func createAerospikeClusterPost460( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -869,7 +902,7 @@ func createAerospikeClusterPost460( OperatorClientCertSpec: &asdbv1.AerospikeOperatorClientCertSpec{ AerospikeOperatorCertSource: asdbv1.AerospikeOperatorCertSource{ SecretCertSource: &asdbv1.AerospikeSecretCertSource{ - SecretName: aerospikeSecretName, + SecretName: test.AerospikeSecretName, CaCertsFilename: "cacert.pem", ClientCertFilename: "svc_cluster_chain.pem", ClientKeyFilename: "svc_key.pem", @@ -917,7 +950,7 @@ func createAerospikeClusterPost560( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -932,7 +965,7 @@ func createAerospikeClusterPost560( OperatorClientCertSpec: &asdbv1.AerospikeOperatorClientCertSpec{ AerospikeOperatorCertSource: asdbv1.AerospikeOperatorCertSource{ SecretCertSource: &asdbv1.AerospikeSecretCertSource{ - SecretName: aerospikeSecretName, + SecretName: test.AerospikeSecretName, CaCertsFilename: "cacert.pem", ClientCertFilename: "svc_cluster_chain.pem", ClientKeyFilename: "svc_key.pem", @@ -1040,7 +1073,7 @@ func createDummyAerospikeClusterWithRFAndStorage( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -1108,7 +1141,7 @@ func createDummyAerospikeCluster( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -1144,6 +1177,14 @@ func createDummyAerospikeCluster( 
return aeroCluster } +// CreateDummyAerospikeCluster func is a public variant of createDummyAerospikeCluster +// Remove this when createDummyAerospikeCluster will be made public +func CreateDummyAerospikeCluster( + clusterNamespacedName types.NamespacedName, size int32, +) *asdbv1.AerospikeCluster { + return createDummyAerospikeCluster(clusterNamespacedName, size) +} + func UpdateClusterImage( aerocluster *asdbv1.AerospikeCluster, image string, ) error { @@ -1301,7 +1342,7 @@ func createBasicTLSCluster( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -1317,7 +1358,7 @@ func createBasicTLSCluster( OperatorClientCertSpec: &asdbv1.AerospikeOperatorClientCertSpec{ AerospikeOperatorCertSource: asdbv1.AerospikeOperatorCertSource{ SecretCertSource: &asdbv1.AerospikeSecretCertSource{ - SecretName: aerospikeSecretName, + SecretName: test.AerospikeSecretName, CaCertsFilename: "cacert.pem", ClientCertFilename: "svc_cluster_chain.pem", ClientKeyFilename: "svc_key.pem", @@ -1540,7 +1581,7 @@ func getStorageVolumeForSecret() asdbv1.VolumeSpec { Name: aerospikeConfigSecret, Source: asdbv1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: aerospikeSecretName, + SecretName: test.AerospikeSecretName, }, }, Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ @@ -1639,3 +1680,20 @@ func getNonRootPodSpec() asdbv1.AerospikePodSpec { }, } } + +func getAeroClusterPVCList( + aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client, +) ([]corev1.PersistentVolumeClaim, error) { + // List the pvc for this aeroCluster's statefulset + pvcList := &corev1.PersistentVolumeClaimList{} + labelSelector := labels.SelectorFromSet(utils.LabelsForAerospikeCluster(aeroCluster.Name)) + listOps := &client.ListOptions{ + Namespace: aeroCluster.Namespace, LabelSelector: labelSelector, + } + + if err := k8sClient.List(goctx.TODO(), pvcList, listOps); err != nil { + return nil, err + } + + return pvcList.Items, nil +} diff --git a/test/cluster_resource_test.go b/test/cluster/cluster_resource_test.go similarity index 99% rename from test/cluster_resource_test.go rename to test/cluster/cluster_resource_test.go index 9b4ab8699..32cbad0ee 100644 --- a/test/cluster_resource_test.go +++ b/test/cluster/cluster_resource_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/cluster_storage_cleanup_test.go b/test/cluster/cluster_storage_cleanup_test.go similarity index 99% rename from test/cluster_storage_cleanup_test.go rename to test/cluster/cluster_storage_cleanup_test.go index 480d4eb33..23e5ee5d7 100644 --- a/test/cluster_storage_cleanup_test.go +++ b/test/cluster/cluster_storage_cleanup_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/cluster_test.go b/test/cluster/cluster_test.go similarity index 99% rename from test/cluster_test.go rename to test/cluster/cluster_test.go index 1c19952c9..85fe56623 100644 --- a/test/cluster_test.go +++ b/test/cluster/cluster_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -15,6 +15,7 @@ import ( asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" + "github.com/aerospike/aerospike-kubernetes-operator/test" ) var _ = Describe( @@ -757,10 +758,10 @@ func UpdateTLSClusterTest(ctx goctx.Context) { network["tls"] = tlsList aeroCluster.Spec.AerospikeConfig.Value["network"] = 
network secretVolume := asdbv1.VolumeSpec{ - Name: tlsCacertSecretName, + Name: test.TLSCacertSecretName, Source: asdbv1.VolumeSource{ Secret: &v1.SecretVolumeSource{ - SecretName: tlsCacertSecretName, + SecretName: test.TLSCacertSecretName, }, }, Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ @@ -771,7 +772,7 @@ func UpdateTLSClusterTest(ctx goctx.Context) { operatorClientCertSpec := getOperatorCert() operatorClientCertSpec.AerospikeOperatorCertSource.SecretCertSource.CaCertsFilename = "" cacertPath := &asdbv1.CaCertsSource{ - SecretName: tlsCacertSecretName, + SecretName: test.TLSCacertSecretName, } operatorClientCertSpec.AerospikeOperatorCertSource.SecretCertSource.CaCertsSource = cacertPath aeroCluster.Spec.OperatorClientCertSpec = operatorClientCertSpec @@ -1227,7 +1228,7 @@ func UpdateClusterTest(ctx goctx.Context) { aeroCluster.Spec.OperatorClientCertSpec = &asdbv1.AerospikeOperatorClientCertSpec{ AerospikeOperatorCertSource: asdbv1.AerospikeOperatorCertSource{ SecretCertSource: &asdbv1.AerospikeSecretCertSource{ - SecretName: aerospikeSecretName, + SecretName: test.AerospikeSecretName, CaCertsFilename: "cacert.pem", ClientCertFilename: "svc_cluster_chain.pem", ClientKeyFilename: "svc_key.pem", diff --git a/test/dynamic_config_test.go b/test/cluster/dynamic_config_test.go similarity index 99% rename from test/dynamic_config_test.go rename to test/cluster/dynamic_config_test.go index becbf0756..e318c9fb2 100644 --- a/test/dynamic_config_test.go +++ b/test/cluster/dynamic_config_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -20,6 +20,7 @@ import ( asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" "github.com/aerospike/aerospike-kubernetes-operator/pkg/configschema" "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" + "github.com/aerospike/aerospike-kubernetes-operator/test" lib "github.com/aerospike/aerospike-management-lib" "github.com/aerospike/aerospike-management-lib/asconfig" "github.com/aerospike/aerospike-management-lib/info" @@ -492,7 +493,7 @@ var _ = Describe( admin2 := asdbv1.AerospikeUserSpec{ Name: "admin2", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", @@ -643,7 +644,7 @@ func getPodIDs(ctx context.Context, aeroCluster *asdbv1.AerospikeCluster) (map[s } stdout, _, execErr := utils.Exec( - utils.GetNamespacedName(pod), asdbv1.AerospikeServerContainerName, cmd, k8sClientset, + utils.GetNamespacedName(pod), asdbv1.AerospikeServerContainerName, cmd, k8sClientSet, cfg, ) diff --git a/test/host_network_test.go b/test/cluster/host_network_test.go similarity index 98% rename from test/host_network_test.go rename to test/cluster/host_network_test.go index 136c74a1c..40b9b5721 100644 --- a/test/host_network_test.go +++ b/test/cluster/host_network_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( "bufio" @@ -97,7 +97,7 @@ func checkAdvertisedAddress( // intraClusterAdvertisesNodeIp indicates if the pod advertises k8s node IP. 
func intraClusterAdvertisesNodeIP(ctx goctx.Context, pod *corev1.Pod) bool { podNodeIP := pod.Status.HostIP - logs := getPodLogs(k8sClientset, ctx, pod) + logs := getPodLogs(k8sClientSet, ctx, pod) scanner := bufio.NewScanner(strings.NewReader(logs)) hbAdvertisesNodeID := false fabricAdvertisesNodeID := false diff --git a/test/k8snode_block_list_test.go b/test/cluster/k8snode_block_list_test.go similarity index 99% rename from test/k8snode_block_list_test.go rename to test/cluster/k8snode_block_list_test.go index a298944e8..5c3975d6e 100644 --- a/test/k8snode_block_list_test.go +++ b/test/cluster/k8snode_block_list_test.go @@ -1,16 +1,15 @@ -package test +package cluster import ( "context" "fmt" - "k8s.io/utils/ptr" - . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" ) diff --git a/test/large_reconcile_test.go b/test/cluster/large_reconcile_test.go similarity index 99% rename from test/large_reconcile_test.go rename to test/cluster/large_reconcile_test.go index 470b41a55..a3e834b24 100644 --- a/test/large_reconcile_test.go +++ b/test/cluster/large_reconcile_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -7,7 +7,6 @@ import ( "strconv" "time" - as "github.com/aerospike/aerospike-client-go/v7" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -15,6 +14,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" + as "github.com/aerospike/aerospike-client-go/v7" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" ) diff --git a/test/ldap_auth_test.go b/test/cluster/ldap_auth_test.go similarity index 97% rename from test/ldap_auth_test.go rename to test/cluster/ldap_auth_test.go index fbeacc410..5f49adfc5 100644 --- a/test/ldap_auth_test.go +++ b/test/cluster/ldap_auth_test.go @@ -2,7 +2,7 @@ // Tests Aerospike ldap external authentication. -package test +package cluster import ( goctx "context" @@ -16,6 +16,7 @@ import ( as "github.com/aerospike/aerospike-client-go/v7" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/test" ) var _ = Describe( @@ -112,7 +113,7 @@ func getAerospikeClusterSpecWithLDAP( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", diff --git a/test/multicluster_test.go b/test/cluster/multicluster_test.go similarity index 96% rename from test/multicluster_test.go rename to test/cluster/multicluster_test.go index 8728f837a..d3ea3ca54 100644 --- a/test/multicluster_test.go +++ b/test/cluster/multicluster_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -6,6 +6,8 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/types" + + "github.com/aerospike/aerospike-kubernetes-operator/test" ) var _ = Describe( @@ -17,13 +19,13 @@ var _ = Describe( // 1st cluster clusterName1 := "multicluster" clusterNamespacedName1 := getNamespacedName( - clusterName1, multiClusterNs1, + clusterName1, test.MultiClusterNs1, ) // 2nd cluster clusterName2 := "multicluster" clusterNamespacedName2 := getNamespacedName( - clusterName2, multiClusterNs2, + clusterName2, test.MultiClusterNs2, ) Context( @@ -48,13 +50,13 @@ var _ = Describe( // 1st cluster clusterName1 := "multicluster1" clusterNamespacedName1 := getNamespacedName( - clusterName1, multiClusterNs1, + clusterName1, test.MultiClusterNs1, ) // 2nd cluster clusterName2 := "multicluster2" clusterNamespacedName2 := getNamespacedName( - clusterName2, multiClusterNs1, + clusterName2, test.MultiClusterNs1, ) Context( diff --git a/test/network_policy_test.go b/test/cluster/network_policy_test.go similarity index 99% rename from test/network_policy_test.go rename to test/cluster/network_policy_test.go index 3a1ba41bc..fd72ba5b7 100644 --- a/test/network_policy_test.go +++ b/test/cluster/network_policy_test.go @@ -2,7 +2,7 @@ // Tests Aerospike network policy settings. -package test +package cluster import ( goctx "context" @@ -23,6 +23,7 @@ import ( asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" aerospikecluster "github.com/aerospike/aerospike-kubernetes-operator/controllers" + "github.com/aerospike/aerospike-kubernetes-operator/test" "github.com/aerospike/aerospike-management-lib/deployment" ) @@ -274,7 +275,7 @@ func negativeDeployNetworkPolicyTest(ctx goctx.Context, multiPodPerHost, enableT Context( "Negative cases for configuredIP", func() { - clusterNamespacedName := getNamespacedName("np-configured-ip", multiClusterNs1) + clusterNamespacedName := getNamespacedName("np-configured-ip", test.MultiClusterNs1) BeforeEach( func() { @@ -649,7 +650,7 @@ func doTestNetworkPolicy( It( "DefaultNetworkPolicy", func() { clusterNamespacedName := getNamespacedName( - "np-default", multiClusterNs1, + "np-default", test.MultiClusterNs1, ) // Ensures that default network policy is applied. @@ -670,7 +671,7 @@ func doTestNetworkPolicy( It( "PodAndExternal", func() { clusterNamespacedName := getNamespacedName( - "np-pod-external", multiClusterNs1, + "np-pod-external", test.MultiClusterNs1, ) // Ensures that default network policy is applied. @@ -697,7 +698,7 @@ func doTestNetworkPolicy( if multiPodPerHost { It("OnlyPodNetwork: should create cluster without nodePort service", func() { clusterNamespacedName := getNamespacedName( - "pod-network-cluster", multiClusterNs1) + "pod-network-cluster", test.MultiClusterNs1) networkPolicy := asdbv1.AerospikeNetworkPolicy{ AccessType: asdbv1.AerospikeNetworkTypePod, @@ -767,7 +768,7 @@ func doTestNetworkPolicy( Context( "When using configuredIP", func() { - clusterNamespacedName := getNamespacedName("np-configured-ip", multiClusterNs1) + clusterNamespacedName := getNamespacedName("np-configured-ip", test.MultiClusterNs1) BeforeEach( func() { err := deleteNodeLabels(ctx, []string{labelAccessAddress, labelAlternateAccessAddress}) @@ -858,7 +859,7 @@ func doTestNetworkPolicy( // Test cases with NetworkAttachmentDefinition of different namespaces can't be tested with current mocking. 
Context("customInterface", func() { clusterNamespacedName := getNamespacedName( - "np-custom-interface", multiClusterNs1, + "np-custom-interface", test.MultiClusterNs1, ) // Skip this test when multiPodPerHost is true and enabledTLS true because Network Policy contains all @@ -1338,7 +1339,7 @@ func getAerospikeClusterSpecWithNetworkPolicy( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", diff --git a/test/on_demand_operations_test.go b/test/cluster/on_demand_operations_test.go similarity index 99% rename from test/on_demand_operations_test.go rename to test/cluster/on_demand_operations_test.go index ed32c6bf2..3b7ee1260 100644 --- a/test/on_demand_operations_test.go +++ b/test/cluster/on_demand_operations_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/poddisruptionbudget_test.go b/test/cluster/poddisruptionbudget_test.go similarity index 99% rename from test/poddisruptionbudget_test.go rename to test/cluster/poddisruptionbudget_test.go index c6c16e009..788c9acba 100644 --- a/test/poddisruptionbudget_test.go +++ b/test/cluster/poddisruptionbudget_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( "context" diff --git a/test/podspec_test.go b/test/cluster/podspec_test.go similarity index 99% rename from test/podspec_test.go rename to test/cluster/podspec_test.go index 90a019e28..10a687d15 100644 --- a/test/podspec_test.go +++ b/test/cluster/podspec_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/rack_enabled_cluster_test.go b/test/cluster/rack_enabled_cluster_test.go similarity index 99% rename from test/rack_enabled_cluster_test.go rename to test/cluster/rack_enabled_cluster_test.go index bf3cb656d..13bdd4bdd 100644 --- a/test/rack_enabled_cluster_test.go +++ b/test/cluster/rack_enabled_cluster_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/rack_management_test.go b/test/cluster/rack_management_test.go similarity index 99% rename from test/rack_management_test.go rename to test/cluster/rack_management_test.go index 5d36c0dfc..add28302e 100644 --- a/test/rack_management_test.go +++ b/test/cluster/rack_management_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/rack_utils.go b/test/cluster/rack_utils.go similarity index 98% rename from test/rack_utils.go rename to test/cluster/rack_utils.go index 098dde03f..21f3bce9c 100644 --- a/test/rack_utils.go +++ b/test/cluster/rack_utils.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -428,6 +428,15 @@ func getConfiguredRackStateList(aeroCluster *asdbv1.AerospikeCluster) []RackStat return rackStateList } +func getRackID(pod *corev1.Pod) (int, error) { + rack, ok := pod.ObjectMeta.Labels["aerospike.com/rack-id"] + if !ok { + return 0, nil + } + + return strconv.Atoi(rack) +} + // TODO: Update this func splitRacks(nodeCount, rackCount int) []int { nodesPerRack, extraNodes := nodeCount/rackCount, nodeCount%rackCount @@ -446,13 +455,6 @@ func splitRacks(nodeCount, rackCount int) []int { return topology } -func getNamespacedName(name, namespace string) types.NamespacedName { - return types.NamespacedName{ - Name: name, - Namespace: namespace, - } -} - func getRackPodList( k8sClient client.Client, ctx goctx.Context, found *appsv1.StatefulSet, ) (*corev1.PodList, error) { diff --git 
a/test/sample_files_test.go b/test/cluster/sample_files_test.go similarity index 98% rename from test/sample_files_test.go rename to test/cluster/sample_files_test.go index d7d7827d9..157b416f1 100644 --- a/test/sample_files_test.go +++ b/test/cluster/sample_files_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( "context" @@ -117,7 +117,7 @@ func getSamplesFiles() ([]string, error) { err error ) - // getGitRepoRootPath is called here explicitly to get projectRoot at this point + // getGitRepoRootPath is called here explicitly to get ProjectRoot at this point // This may be empty if getSamplesFiles is called during var initialization phase if projectRoot == "" { projectRoot, err = getGitRepoRootPath() diff --git a/test/security_context_test.go b/test/cluster/security_context_test.go similarity index 99% rename from test/security_context_test.go rename to test/cluster/security_context_test.go index 9944e6de9..66471bb75 100644 --- a/test/security_context_test.go +++ b/test/cluster/security_context_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/services_test.go b/test/cluster/services_test.go similarity index 99% rename from test/services_test.go rename to test/cluster/services_test.go index 8c80d1329..01f0faecb 100644 --- a/test/services_test.go +++ b/test/cluster/services_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/statefulset_storage_test.go b/test/cluster/statefulset_storage_test.go similarity index 99% rename from test/statefulset_storage_test.go rename to test/cluster/statefulset_storage_test.go index d5f8bcf7f..ff39890dc 100644 --- a/test/statefulset_storage_test.go +++ b/test/cluster/statefulset_storage_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -395,7 +395,7 @@ func getSTSFromRackID(aeroCluster *asdbv1.AerospikeCluster, rackID int) ( found := &appsv1.StatefulSet{} err := k8sClient.Get( goctx.TODO(), - getNamespacedNameForSTS(aeroCluster, rackID), + GetNamespacedNameForSTS(aeroCluster, rackID), found, ) @@ -435,7 +435,7 @@ func validateExternalVolumeInContainer(sts *appsv1.StatefulSet, index int, isIni return false, nil } -func getNamespacedNameForSTS( +func GetNamespacedNameForSTS( aeroCluster *asdbv1.AerospikeCluster, rackID int, ) types.NamespacedName { return types.NamespacedName{ diff --git a/test/storage_init_test.go b/test/cluster/storage_init_test.go similarity index 99% rename from test/storage_init_test.go rename to test/cluster/storage_init_test.go index fbff805ac..77a391c8f 100644 --- a/test/storage_init_test.go +++ b/test/cluster/storage_init_test.go @@ -1,4 +1,4 @@ -package test +package cluster // Tests storage initialization works as expected. // If specified devices should be initialized only on first use. 
@@ -21,6 +21,7 @@ import ( asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" "github.com/aerospike/aerospike-kubernetes-operator/pkg/jsonpatch" "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" + "github.com/aerospike/aerospike-kubernetes-operator/test" lib "github.com/aerospike/aerospike-management-lib" ) @@ -709,7 +710,7 @@ func writeDataToVolumeBlock( magicBytes, path, ), } - _, _, err := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientset, cfg) + _, _, err := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientSet, cfg) if err != nil { return fmt.Errorf("error creating file %v", err) @@ -726,7 +727,7 @@ func writeDataToVolumeFileSystem( cmd := []string{ "bash", "-c", fmt.Sprintf("echo %s > %s/magic.txt", magicBytes, path), } - _, _, err := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientset, cfg) + _, _, err := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientSet, cfg) if err != nil { return fmt.Errorf("error creating file %v", err) @@ -741,7 +742,7 @@ func hasDataBlock(pod *corev1.Pod, volume *asdbv1.VolumeSpec) bool { cmd := []string{ "bash", "-c", fmt.Sprintf("dd if=%s count=1 status=none", path), } - stdout, _, _ := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientset, cfg) + stdout, _, _ := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientSet, cfg) return strings.HasPrefix(stdout, magicBytes) } @@ -750,7 +751,7 @@ func hasDataFilesystem(pod *corev1.Pod, volume *asdbv1.VolumeSpec) bool { cName, path := getContainerNameAndPath(volume) cmd := []string{"bash", "-c", fmt.Sprintf("cat %s/magic.txt", path)} - stdout, _, _ := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientset, cfg) + stdout, _, _ := utils.Exec(utils.GetNamespacedName(pod), cName, cmd, k8sClientSet, cfg) return strings.HasPrefix(stdout, magicBytes) } @@ -780,7 +781,7 @@ func getStorageInitAerospikeCluster( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", diff --git a/test/storage_test.go b/test/cluster/storage_test.go similarity index 99% rename from test/storage_test.go rename to test/cluster/storage_test.go index 26baae72e..e268d8d74 100644 --- a/test/storage_test.go +++ b/test/cluster/storage_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -10,6 +10,7 @@ import ( "k8s.io/apimachinery/pkg/types" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/test" ) // * Test @@ -560,7 +561,7 @@ var _ = Describe( volumes := aeroCluster.Spec.Storage.Volumes aeroCluster.Spec.Storage.Volumes[len(volumes)-1].Source = asdbv1.VolumeSource{ Secret: &v1.SecretVolumeSource{ - SecretName: authSecretName, + SecretName: test.AuthSecretName, }, } diff --git a/test/storage_wipe_test.go b/test/cluster/storage_wipe_test.go similarity index 99% rename from test/storage_wipe_test.go rename to test/cluster/storage_wipe_test.go index 9067a904d..7aae50c43 100644 --- a/test/storage_wipe_test.go +++ b/test/cluster/storage_wipe_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" @@ -15,6 +15,7 @@ import ( as "github.com/aerospike/aerospike-client-go/v7" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/test" ) const ( @@ -580,7 +581,7 @@ func getStorageWipeAerospikeCluster( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - 
SecretName: authSecretName, + SecretName: test.AuthSecretName, Roles: []string{ "sys-admin", "user-admin", diff --git a/test/strong_consistency_test.go b/test/cluster/strong_consistency_test.go similarity index 99% rename from test/strong_consistency_test.go rename to test/cluster/strong_consistency_test.go index f88373360..020431fa4 100644 --- a/test/strong_consistency_test.go +++ b/test/cluster/strong_consistency_test.go @@ -1,4 +1,4 @@ -package test +package cluster import ( goctx "context" diff --git a/test/cluster/suite_test.go b/test/cluster/suite_test.go new file mode 100644 index 000000000..8a6b4b9c9 --- /dev/null +++ b/test/cluster/suite_test.go @@ -0,0 +1,122 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + goctx "context" + "fmt" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + k8Runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + // +kubebuilder:scaffold:imports + + asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/test" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var testEnv *envtest.Environment + +var k8sClient client.Client + +var cfg *rest.Config + +var k8sClientSet *kubernetes.Clientset + +var projectRoot string + +var scheme = k8Runtime.NewScheme() + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeEach(func() { + By("Cleaning up all Aerospike clusters.") + + for idx := range test.Namespaces { + deleteAllClusters(test.Namespaces[idx]) + Expect(cleanupPVC(k8sClient, test.Namespaces[idx])).NotTo(HaveOccurred()) + } +}) + +func deleteAllClusters(namespace string) { + ctx := goctx.TODO() + list := &asdbv1.AerospikeClusterList{} + listOps := &client.ListOptions{Namespace: namespace} + + err := k8sClient.List(ctx, list, listOps) + Expect(err).NotTo(HaveOccurred()) + + for clusterIndex := range list.Items { + By(fmt.Sprintf("Deleting cluster \"%s/%s\".", list.Items[clusterIndex].Namespace, list.Items[clusterIndex].Name)) + err := deleteCluster(k8sClient, ctx, &list.Items[clusterIndex]) + Expect(err).NotTo(HaveOccurred()) + } +} + +// This is used when running tests on existing cluster +// user has to install its own operator then run cleanup and then start this + +var _ = BeforeSuite( + func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("Bootstrapping test environment") + pkgLog.Info(fmt.Sprintf("Client will connect through '%s' network to Aerospike Clusters.", + *defaultNetworkType)) + + var err error + testEnv, cfg, k8sClient, k8sClientSet, err = test.BootStrapTestEnv(scheme) + Expect(err).NotTo(HaveOccurred()) + + projectRoot, err = getGitRepoRootPath() + Expect(err).NotTo(HaveOccurred()) + + cloudProvider, err = getCloudProvider(goctx.TODO(), k8sClient) + Expect(err).ToNot(HaveOccurred()) + }) + +var _ = AfterSuite( + func() { + By("Cleaning up all pvcs") + + for idx := range test.Namespaces { + _ = cleanupPVC(k8sClient, test.Namespaces[idx]) + } + + By("tearing down the test environment") + gexec.KillAndWait(5 * time.Second) + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) + }, +) diff --git a/test/test_client.go b/test/cluster/test_client.go similarity index 99% rename from test/test_client.go rename to test/cluster/test_client.go index 295b07159..b02adf90c 100644 --- a/test/test_client.go +++ b/test/cluster/test_client.go @@ -1,4 +1,4 @@ -package test +package cluster // Aerospike client and info testing utilities. // diff --git a/test/tls_authenticate_client_test.go b/test/cluster/tls_authenticate_client_test.go similarity index 98% rename from test/tls_authenticate_client_test.go rename to test/cluster/tls_authenticate_client_test.go index ed4b0ba38..d412d966c 100644 --- a/test/tls_authenticate_client_test.go +++ b/test/cluster/tls_authenticate_client_test.go @@ -2,7 +2,7 @@ // Tests Aerospike TLS authenticate client settings. 
-package test +package cluster import ( goctx "context" @@ -17,6 +17,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + "github.com/aerospike/aerospike-kubernetes-operator/test" ) const tlsClusterName = "tls-auth-client" @@ -177,7 +178,7 @@ func getAerospikeConfig( Users: []asdbv1.AerospikeUserSpec{ { Name: "admin", - SecretName: authSecretNameForUpdate, + SecretName: test.AuthSecretNameForUpdate, Roles: []string{ "sys-admin", "user-admin", @@ -292,7 +293,7 @@ func doTestTLSAuthenticateClientAnyWithCapath(ctx goctx.Context) { operatorClientCertSpec.AerospikeOperatorCertSource.SecretCertSource.ClientCertFilename = "server-cert.pem" operatorClientCertSpec.AerospikeOperatorCertSource.SecretCertSource.ClientKeyFilename = "server_key.pem" cacertPath := &asdbv1.CaCertsSource{ - SecretName: tlsCacertSecretName, + SecretName: test.TLSCacertSecretName, } operatorClientCertSpec.AerospikeOperatorCertSource.SecretCertSource.CaCertsSource = cacertPath @@ -300,10 +301,10 @@ func doTestTLSAuthenticateClientAnyWithCapath(ctx goctx.Context) { networkConf, operatorClientCertSpec, ) secretVolume := asdbv1.VolumeSpec{ - Name: tlsCacertSecretName, + Name: test.TLSCacertSecretName, Source: asdbv1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: tlsCacertSecretName, + SecretName: test.TLSCacertSecretName, }, }, Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ diff --git a/test/cluster/utils.go b/test/cluster/utils.go new file mode 100644 index 000000000..0acc6a84a --- /dev/null +++ b/test/cluster/utils.go @@ -0,0 +1,873 @@ +package cluster + +import ( + "bytes" + goctx "context" + "encoding/json" + "flag" + "fmt" + "io" + "os/exec" + "reflect" + "strings" + "time" + + set "github.com/deckarep/golang-set/v2" + "github.com/go-logr/logr" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + + as "github.com/aerospike/aerospike-client-go/v7" + asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" + operatorUtils "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" + lib "github.com/aerospike/aerospike-management-lib" + "github.com/aerospike/aerospike-management-lib/info" +) + +var defaultNetworkType = flag.String("connect-through-network-type", "hostExternal", + "Network type is used to determine an appropriate access type. Can be 'pod',"+ + " 'hostInternal' or 'hostExternal'. AS client in the test will choose access type"+ + " which matches expected network type. 
See details in"+
+		" https://docs.aerospike.com/docs/cloud/kubernetes/operator/Cluster-configuration-settings.html#network-policy")
+
+type CloudProvider int
+
+const (
+	CloudProviderUnknown CloudProvider = iota
+	CloudProviderAWS
+	CloudProviderGCP
+)
+
+const zoneKey = "topology.kubernetes.io/zone"
+const regionKey = "topology.kubernetes.io/region"
+
+var cloudProvider CloudProvider
+
+func waitForAerospikeCluster(
+	k8sClient client.Client, ctx goctx.Context,
+	aeroCluster *asdbv1.AerospikeCluster, replicas int,
+	retryInterval, timeout time.Duration, expectedPhases []asdbv1.AerospikeClusterPhase,
+) error {
+	var isValid bool
+
+	err := wait.PollUntilContextTimeout(ctx,
+		retryInterval, timeout, true, func(ctx goctx.Context) (done bool, err error) {
+			// Fetch the AerospikeCluster instance
+			newCluster := &asdbv1.AerospikeCluster{}
+			err = k8sClient.Get(
+				ctx, types.NamespacedName{
+					Name: aeroCluster.Name, Namespace: aeroCluster.Namespace,
+				}, newCluster,
+			)
+			if err != nil {
+				if errors.IsNotFound(err) {
+					pkgLog.Info(
+						"Waiting for availability of AerospikeCluster",
+						"name", aeroCluster.Name,
+					)
+					return false, nil
+				}
+				return false, err
+			}
+
+			isValid = isClusterStateValid(aeroCluster, newCluster, replicas, expectedPhases)
+			return isValid, nil
+		},
+	)
+
+	if err != nil {
+		return err
+	}
+
+	pkgLog.Info("AerospikeCluster available")
+
+	// make info call
+	return nil
+}
+
+func isClusterStateValid(
+	aeroCluster *asdbv1.AerospikeCluster,
+	newCluster *asdbv1.AerospikeCluster, replicas int, expectedPhases []asdbv1.AerospikeClusterPhase,
+) bool {
+	if int(newCluster.Status.Size) != replicas {
+		pkgLog.Info("Cluster size is not correct")
+		return false
+	}
+
+	// Validate status
+	statusToSpec, err := asdbv1.CopyStatusToSpec(&newCluster.Status.AerospikeClusterStatusSpec)
+	if err != nil {
+		pkgLog.Error(err, "Failed to copy spec in status", "err", err)
+		return false
+	}
+
+	if !reflect.DeepEqual(statusToSpec, &newCluster.Spec) {
+		pkgLog.Info("Cluster status is not matching the spec")
+		return false
+	}
+
+	// TODO: This is not valid for tests where maxUnavailablePods flag is used.
+	// We can take the param in func to skip this check
+	// // Validate pods
+	// if len(newCluster.Status.Pods) != replicas {
+	//	pkgLog.Info("Cluster status doesn't have pod status for all nodes. Cluster status may not have fully updated")
+	//	return false
+	// }
+
+	for podName := range newCluster.Status.Pods {
+		if newCluster.Status.Pods[podName].Aerospike.NodeID == "" {
+			pkgLog.Info("Cluster pod's nodeID is empty")
+			return false
+		}
+
+		if operatorUtils.IsImageEqual(newCluster.Status.Pods[podName].Image, aeroCluster.Spec.Image) {
+			break
+		}
+
+		pkgLog.Info(
+			fmt.Sprintf("Cluster pod's image %s not same as spec %s", newCluster.Status.Pods[podName].Image,
+				aeroCluster.Spec.Image,
+			),
+		)
+	}
+
+	if newCluster.Labels[asdbv1.AerospikeAPIVersionLabel] != asdbv1.AerospikeAPIVersion {
+		pkgLog.Info("Cluster API version label is not correct")
+		return false
+	}
+
+	// Validate phase
+	phaseSet := set.NewSet(expectedPhases...)
+ if !phaseSet.Contains(newCluster.Status.Phase) { + pkgLog.Info("Cluster phase is not correct") + return false + } + + pkgLog.Info("Cluster state is validated successfully") + + return true +} + +func getTimeout(nodes int32) time.Duration { + return 3 * time.Minute * time.Duration(nodes) +} + +func getPodLogs( + k8sClientset *kubernetes.Clientset, ctx goctx.Context, pod *corev1.Pod, +) string { + podLogOpts := corev1.PodLogOptions{} + req := k8sClientset.CoreV1().Pods(pod.Namespace).GetLogs( + pod.Name, &podLogOpts, + ) + + podLogs, err := req.Stream(ctx) + if err != nil { + return "error in opening stream" + } + + defer func(podLogs io.ReadCloser) { + _ = podLogs.Close() + }(podLogs) + + buf := new(bytes.Buffer) + + _, err = io.Copy(buf, podLogs) + if err != nil { + return "error in copy information from podLogs to buf" + } + + str := buf.String() + + return str +} + +// Copy makes a deep copy from src into dst. +func Copy(dst, src interface{}) error { + if dst == nil { + return fmt.Errorf("dst cannot be nil") + } + + if src == nil { + return fmt.Errorf("src cannot be nil") + } + + jsonBytes, err := json.Marshal(src) + if err != nil { + return fmt.Errorf("unable to marshal src: %s", err) + } + + err = json.Unmarshal(jsonBytes, dst) + if err != nil { + return fmt.Errorf("unable to unmarshal into dst: %s", err) + } + + return nil +} + +type AerospikeConfSpec struct { + version string + network map[string]interface{} + service map[string]interface{} + security map[string]interface{} + namespaces []interface{} +} + +func (acs *AerospikeConfSpec) getVersion() string { + return acs.version +} + +func (acs *AerospikeConfSpec) setEnableSecurity(enableSecurity bool) error { + cmpVal, err := lib.CompareVersions(acs.version, "5.7.0") + if err != nil { + return err + } + + if cmpVal >= 0 { + if enableSecurity { + security := map[string]interface{}{} + acs.security = security + } + + return nil + } + + acs.security = map[string]interface{}{} + acs.security["enable-security"] = enableSecurity + + return nil +} + +func (acs *AerospikeConfSpec) setEnableQuotas(enableQuotas bool) error { + cmpVal, err := lib.CompareVersions(acs.version, "5.6.0") + if err != nil { + return err + } + + if cmpVal >= 0 { + if acs.security == nil { + acs.security = map[string]interface{}{} + } + + acs.security["enable-quotas"] = enableQuotas + } + + return nil +} + +func (acs *AerospikeConfSpec) getSpec() map[string]interface{} { + spec := map[string]interface{}{ + "service": acs.service, + "network": acs.network, + "namespaces": acs.namespaces, + } + if acs.security != nil { + spec["security"] = acs.security + } + + return spec +} + +func getOperatorCert() *asdbv1.AerospikeOperatorClientCertSpec { + return &asdbv1.AerospikeOperatorClientCertSpec{ + TLSClientName: "aerospike-a-0.test-runner", + AerospikeOperatorCertSource: asdbv1.AerospikeOperatorCertSource{ + SecretCertSource: &asdbv1.AerospikeSecretCertSource{ + SecretName: "aerospike-secret", + CaCertsFilename: "cacert.pem", + ClientCertFilename: "svc_cluster_chain.pem", + ClientKeyFilename: "svc_key.pem", + }, + }, + } +} + +func getNetworkTLSConfig() map[string]interface{} { + return map[string]interface{}{ + "service": map[string]interface{}{ + "tls-name": "aerospike-a-0.test-runner", + "tls-port": serviceTLSPort, + "port": serviceNonTLSPort, + }, + "fabric": map[string]interface{}{ + "tls-name": "aerospike-a-0.test-runner", + "tls-port": 3011, + "port": 3001, + }, + "heartbeat": map[string]interface{}{ + "tls-name": "aerospike-a-0.test-runner", + "tls-port": 3012, + 
"port": 3002, + }, + + "tls": []interface{}{ + map[string]interface{}{ + "name": "aerospike-a-0.test-runner", + "cert-file": "/etc/aerospike/secret/svc_cluster_chain.pem", + "key-file": "/etc/aerospike/secret/svc_key.pem", + "ca-file": "/etc/aerospike/secret/cacert.pem", + }, + }, + } +} + +func getNetworkConfig() map[string]interface{} { + return map[string]interface{}{ + "service": map[string]interface{}{ + "port": serviceNonTLSPort, + }, + "fabric": map[string]interface{}{ + "port": 3001, + }, + "heartbeat": map[string]interface{}{ + "port": 3002, + }, + } +} + +func NewAerospikeConfSpec(image string) (*AerospikeConfSpec, error) { + ver, err := asdbv1.GetImageVersion(image) + if err != nil { + return nil, err + } + + service := map[string]interface{}{ + "feature-key-file": "/etc/aerospike/secret/features.conf", + } + network := getNetworkConfig() + namespaces := []interface{}{ + map[string]interface{}{ + "name": "test", + "replication-factor": 1, + "storage-engine": map[string]interface{}{ + "type": "memory", + "data-size": 1073741824, + }, + }, + } + + return &AerospikeConfSpec{ + version: ver, + service: service, + network: network, + namespaces: namespaces, + security: nil, + }, nil +} + +func ValidateAttributes( + actual []map[string]string, expected map[string]string, +) bool { + for key, val := range expected { + for i := 0; i < len(actual); i++ { + m := actual[i] + + v, ok := m[key] + if ok && v == val { + return true + } + } + } + + return false +} + +func getAeroClusterConfig( + namespace types.NamespacedName, image string, +) (*asdbv1.AerospikeCluster, error) { + version, err := asdbv1.GetImageVersion(image) + if err != nil { + return nil, err + } + + cmpVal1, err := lib.CompareVersions(version, "5.7.0") + if err != nil { + return nil, err + } + + cmpVal2, err := lib.CompareVersions(version, "7.0.0") + if err != nil { + return nil, err + } + + switch { + case cmpVal2 >= 0: + return createAerospikeClusterPost640( + namespace, 2, image, + ), nil + + case cmpVal1 >= 0: + return createAerospikeClusterPost560( + namespace, 2, image, + ), nil + + case cmpVal1 < 0: + return createAerospikeClusterPost460( + namespace, 2, image, + ), nil + + default: + return nil, fmt.Errorf("invalid image version %s", version) + } +} + +func getAerospikeStorageConfig( + containerName string, inputCascadeDelete bool, + storageSize string, + cloudProvider CloudProvider, +) *asdbv1.AerospikeStorageSpec { + // Create pods and storage devices write data to the devices. + // - deletes cluster without cascade delete of volumes. + // - recreate and check if volumes are reinitialized correctly. 
+ fileDeleteInitMethod := asdbv1.AerospikeVolumeMethodDeleteFiles + ddInitMethod := asdbv1.AerospikeVolumeMethodDD + blkDiscardInitMethod := asdbv1.AerospikeVolumeMethodBlkdiscard + blkDiscardWipeMethod := asdbv1.AerospikeVolumeMethodBlkdiscard + + if cloudProvider == CloudProviderAWS { + // Blkdiscard method is not supported in AWS, so it is initialized as DD Method + blkDiscardInitMethod = asdbv1.AerospikeVolumeMethodDD + blkDiscardWipeMethod = asdbv1.AerospikeVolumeMethodDD + } + + return &asdbv1.AerospikeStorageSpec{ + BlockVolumePolicy: asdbv1.AerospikePersistentVolumePolicySpec{ + InputCascadeDelete: &inputCascadeDelete, + }, + FileSystemVolumePolicy: asdbv1.AerospikePersistentVolumePolicySpec{ + InputCascadeDelete: &inputCascadeDelete, + }, + Volumes: []asdbv1.VolumeSpec{ + { + Name: "file-noinit", + Source: asdbv1.VolumeSource{ + PersistentVolume: &asdbv1.PersistentVolumeSpec{ + Size: resource.MustParse(storageSize), + StorageClass: storageClass, + VolumeMode: corev1.PersistentVolumeFilesystem, + }, + }, + Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ + Path: "/opt/aerospike/filesystem-noinit", + }, + }, + { + Name: "file-init", + AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ + InputInitMethod: &fileDeleteInitMethod, + }, + Source: asdbv1.VolumeSource{ + PersistentVolume: &asdbv1.PersistentVolumeSpec{ + Size: resource.MustParse(storageSize), + StorageClass: storageClass, + VolumeMode: corev1.PersistentVolumeFilesystem, + }, + }, + Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ + Path: "/opt/aerospike/filesystem-init", + }, + }, + { + Name: "device-noinit", + Source: asdbv1.VolumeSource{ + PersistentVolume: &asdbv1.PersistentVolumeSpec{ + Size: resource.MustParse(storageSize), + StorageClass: storageClass, + VolumeMode: corev1.PersistentVolumeBlock, + }, + }, + Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ + Path: "/opt/aerospike/blockdevice-noinit", + }, + }, + { + Name: "device-dd", + AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ + InputInitMethod: &ddInitMethod, + }, + Source: asdbv1.VolumeSource{ + PersistentVolume: &asdbv1.PersistentVolumeSpec{ + Size: resource.MustParse(storageSize), + StorageClass: storageClass, + VolumeMode: corev1.PersistentVolumeBlock, + }, + }, + Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ + Path: "/opt/aerospike/blockdevice-init-dd", + }, + }, + { + Name: "device-blkdiscard", + AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ + InputInitMethod: &blkDiscardInitMethod, + InputWipeMethod: &blkDiscardWipeMethod, + }, + Source: asdbv1.VolumeSource{ + PersistentVolume: &asdbv1.PersistentVolumeSpec{ + Size: resource.MustParse(storageSize), + StorageClass: storageClass, + VolumeMode: corev1.PersistentVolumeBlock, + }, + }, + Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ + Path: "/opt/aerospike/blockdevice-init-blkdiscard", + }, + }, + { + Name: "file-noinit-1", + Source: asdbv1.VolumeSource{ + PersistentVolume: &asdbv1.PersistentVolumeSpec{ + Size: resource.MustParse(storageSize), + StorageClass: storageClass, + VolumeMode: corev1.PersistentVolumeFilesystem, + }, + }, + Sidecars: []asdbv1.VolumeAttachment{ + { + ContainerName: containerName, + Path: "/opt/aerospike/filesystem-noinit", + }, + }, + }, + { + Name: "device-dd-1", + AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ + InputInitMethod: &ddInitMethod, + }, + Source: asdbv1.VolumeSource{ + PersistentVolume: &asdbv1.PersistentVolumeSpec{ + 
Size: resource.MustParse(storageSize), + StorageClass: storageClass, + VolumeMode: corev1.PersistentVolumeBlock, + }, + }, + Sidecars: []asdbv1.VolumeAttachment{ + { + ContainerName: containerName, + Path: "/opt/aerospike/blockdevice-init-dd", + }, + }, + }, + getStorageVolumeForSecret(), + }, + } +} + +//nolint:unparam // generic function +func contains(elems []string, v string) bool { + for _, s := range elems { + if v == s { + return true + } + } + + return false +} + +func getAerospikeConfigFromNode(log logr.Logger, k8sClient client.Client, ctx goctx.Context, + clusterNamespacedName types.NamespacedName, configContext string, pod *asdbv1.AerospikePodStatus) (lib.Stats, error) { + aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) + if err != nil { + return nil, err + } + + host, err := createHost(pod) + if err != nil { + return nil, err + } + + asinfo := info.NewAsInfo( + log, host, getClientPolicy(aeroCluster, k8sClient), + ) + + confs, err := getAsConfig(asinfo, configContext) + if err != nil { + return nil, err + } + + return confs[configContext].(lib.Stats), nil +} + +func getPasswordFromSecret(k8sClient client.Client, + secretNamespcedName types.NamespacedName, passFileName string, +) (string, error) { + secret := &corev1.Secret{} + + err := k8sClient.Get(goctx.TODO(), secretNamespcedName, secret) + if err != nil { + return "", fmt.Errorf("failed to get secret %s: %v", secretNamespcedName, err) + } + + passBytes, ok := secret.Data[passFileName] + if !ok { + return "", fmt.Errorf( + "failed to get password file in secret %s, fileName %s", + secretNamespcedName, passFileName, + ) + } + + return string(passBytes), nil +} + +func getAerospikeClient(aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client) (*as.Client, error) { + policy := getClientPolicy(aeroCluster, k8sClient) + policy.FailIfNotConnected = false + policy.Timeout = time.Minute * 2 + policy.UseServicesAlternate = true + policy.ConnectionQueueSize = 100 + policy.LimitConnectionsToQueueSize = true + + hostList := make([]*as.Host, 0, len(aeroCluster.Status.Pods)) + + for podName := range aeroCluster.Status.Pods { + pod := aeroCluster.Status.Pods[podName] + + host, err := createHost(&pod) + if err != nil { + return nil, err + } + + hostList = append(hostList, host) + } + + asClient, err := as.NewClientWithPolicyAndHost(policy, hostList...) 
+ if asClient == nil { + return nil, fmt.Errorf( + "failed to create aerospike cluster asClient: %v", err, + ) + } + + _, _ = asClient.WarmUp(-1) + + // Wait for 5 minutes for cluster to connect + for j := 0; j < 150; j++ { + if isConnected := asClient.IsConnected(); isConnected { + break + } + + time.Sleep(time.Second * 2) + } + + return asClient, nil +} + +func getPodList( + aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client, +) (*corev1.PodList, error) { + podList := &corev1.PodList{} + labelSelector := labels.SelectorFromSet(operatorUtils.LabelsForAerospikeCluster(aeroCluster.Name)) + listOps := &client.ListOptions{ + Namespace: aeroCluster.Namespace, LabelSelector: labelSelector, + } + + if err := k8sClient.List(goctx.TODO(), podList, listOps); err != nil { + return nil, err + } + + return podList, nil +} + +func deletePVC(k8sClient client.Client, pvcNamespacedName types.NamespacedName) error { + pvc := &corev1.PersistentVolumeClaim{} + if err := k8sClient.Get(goctx.TODO(), pvcNamespacedName, pvc); err != nil { + if errors.IsNotFound(err) { + return nil + } + + return err + } + + if operatorUtils.IsPVCTerminating(pvc) { + return nil + } + + if err := k8sClient.Delete(goctx.TODO(), pvc); err != nil { + return fmt.Errorf("could not delete pvc %s: %w", pvc.Name, err) + } + + return nil +} + +func cleanupPVC(k8sClient client.Client, ns string) error { + // List the pvc for this aeroCluster's statefulset + pvcList := &corev1.PersistentVolumeClaimList{} + clLabels := map[string]string{"app": "aerospike-cluster"} + labelSelector := labels.SelectorFromSet(clLabels) + listOps := &client.ListOptions{Namespace: ns, LabelSelector: labelSelector} + + if err := k8sClient.List(goctx.TODO(), pvcList, listOps); err != nil { + return err + } + + for pvcIndex := range pvcList.Items { + pkgLog.Info("Found pvc, deleting it", "pvcName", + pvcList.Items[pvcIndex].Name, "namespace", pvcList.Items[pvcIndex].Namespace) + + if operatorUtils.IsPVCTerminating(&pvcList.Items[pvcIndex]) { + continue + } + // if utils.ContainsString(pvc.Finalizers, "kubernetes.io/pvc-protection") { + // pvc.Finalizers = utils.RemoveString(pvc.Finalizers, "kubernetes.io/pvc-protection") + // if err := k8sClient.Patch(goctx.TODO(), &pvc, client.Merge); err != nil { + // return fmt.Errorf("could not patch %s finalizer from following pvc: %s: %w", + // "kubernetes.io/pvc-protection", pvc.Name, err) + // } + //} + if err := k8sClient.Delete(goctx.TODO(), &pvcList.Items[pvcIndex]); err != nil { + return fmt.Errorf("could not delete pvc %s: %w", pvcList.Items[pvcIndex].Name, err) + } + } + + return nil +} + +func getSTSList( + aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client, +) (*appsv1.StatefulSetList, error) { + stsList := &appsv1.StatefulSetList{} + labelSelector := labels.SelectorFromSet(operatorUtils.LabelsForAerospikeCluster(aeroCluster.Name)) + listOps := &client.ListOptions{ + Namespace: aeroCluster.Namespace, LabelSelector: labelSelector, + } + + if err := k8sClient.List(goctx.TODO(), stsList, listOps); err != nil { + return nil, err + } + + return stsList, nil +} + +func getServiceForPod( + pod *corev1.Pod, k8sClient client.Client, +) (*corev1.Service, error) { + service := &corev1.Service{} + err := k8sClient.Get( + goctx.TODO(), + types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, service, + ) + + if err != nil { + return nil, fmt.Errorf( + "failed to get service for pod %s: %v", pod.Name, err, + ) + } + + return service, nil +} + +func getCloudProvider( + ctx goctx.Context, k8sClient 
client.Client,
+) (CloudProvider, error) {
+	labelKeys := map[string]struct{}{}
+
+	nodes, err := getNodeList(ctx, k8sClient)
+	if err != nil {
+		return CloudProviderUnknown, err
+	}
+
+	for idx := range nodes.Items {
+		for labelKey := range nodes.Items[idx].Labels {
+			if strings.Contains(labelKey, "cloud.google.com") {
+				return CloudProviderGCP, nil
+			}
+
+			if strings.Contains(labelKey, "eks.amazonaws.com") {
+				return CloudProviderAWS, nil
+			}
+
+			labelKeys[labelKey] = struct{}{}
+		}
+
+		provider := determineByProviderID(&nodes.Items[idx])
+		if provider != CloudProviderUnknown {
+			return provider, nil
+		}
+	}
+
+	labelKeysSlice := make([]string, 0, len(labelKeys))
+
+	for labelKey := range labelKeys {
+		labelKeysSlice = append(labelKeysSlice, labelKey)
+	}
+
+	return CloudProviderUnknown, fmt.Errorf(
+		"can't determine cloud platform by node's labels: %v", labelKeysSlice,
+	)
+}
+
+func determineByProviderID(node *corev1.Node) CloudProvider {
+	if strings.Contains(node.Spec.ProviderID, "gce") {
+		return CloudProviderGCP
+	} else if strings.Contains(node.Spec.ProviderID, "aws") {
+		return CloudProviderAWS
+	}
+	// TODO add cloud provider detection for Azure
+	return CloudProviderUnknown
+}
+
+func getZones(ctx goctx.Context, k8sClient client.Client) ([]string, error) {
+	unqZones := map[string]int{}
+
+	nodes, err := getNodeList(ctx, k8sClient)
+	if err != nil {
+		return nil, err
+	}
+
+	for idx := range nodes.Items {
+		unqZones[nodes.Items[idx].Labels[zoneKey]] = 1
+	}
+
+	zones := make([]string, 0, len(unqZones))
+
+	for zone := range unqZones {
+		zones = append(zones, zone)
+	}
+
+	return zones, nil
+}
+
+func getNodeList(ctx goctx.Context, k8sClient client.Client) (
+	*corev1.NodeList, error,
+) {
+	nodeList := &corev1.NodeList{}
+	if err := k8sClient.List(ctx, nodeList); err != nil {
+		return nil, err
+	}
+
+	return nodeList, nil
+}
+
+func getRegion(ctx goctx.Context, k8sClient client.Client) (string, error) {
+	nodes, err := getNodeList(ctx, k8sClient)
+	if err != nil {
+		return "", err
+	}
+
+	if len(nodes.Items) == 0 {
+		return "", fmt.Errorf("node list empty: %v", nodes.Items)
+	}
+
+	return nodes.Items[0].Labels[regionKey], nil
+}
+
+func getGitRepoRootPath() (string, error) {
+	path, err := exec.Command("git", "rev-parse", "--show-toplevel").Output()
+	if err != nil {
+		return "", err
+	}
+
+	return strings.TrimSpace(string(path)), nil
+}
+
+func getNamespacedName(name, namespace string) types.NamespacedName {
+	return types.NamespacedName{
+		Name:      name,
+		Namespace: namespace,
+	}
+}
diff --git a/test/warm_restart_test.go b/test/cluster/warm_restart_test.go
similarity index 98%
rename from test/warm_restart_test.go
rename to test/cluster/warm_restart_test.go
index 99ed226fc..df24b8273 100644
--- a/test/warm_restart_test.go
+++ b/test/cluster/warm_restart_test.go
@@ -1,4 +1,4 @@
-package test
+package cluster
 
 import (
 	goCtx "context"
@@ -103,7 +103,7 @@ func createMarkerFile(
 	}
 
 	_, _, err := utils.Exec(
-		utils.GetNamespacedName(pod), asdbv1.AerospikeServerContainerName, cmd, k8sClientset,
+		utils.GetNamespacedName(pod), asdbv1.AerospikeServerContainerName, cmd, k8sClientSet,
 		cfg,
 	)
 
@@ -137,7 +137,7 @@ func isMarkerPresent(
 	}
 
 	_, _, err := utils.Exec(
-		utils.GetNamespacedName(pod), asdbv1.AerospikeServerContainerName, cmd, k8sClientset,
+		utils.GetNamespacedName(pod), asdbv1.AerospikeServerContainerName, cmd, k8sClientSet,
 		cfg,
 	)
 
diff --git a/test/cluster_prereq.go b/test/cluster_prereq.go
index 4f3b6ad22..0284dceb0 100644
--- a/test/cluster_prereq.go
+++ b/test/cluster_prereq.go
@@ -2,6 +2,9 @@ package test import ( goctx "context" + "fmt" + "os" + "path/filepath" corev1 "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" @@ -18,13 +21,36 @@ const ( aeroClusterCR string = "aerospike-cluster" ) +var secrets map[string][]byte +var cacertSecrets map[string][]byte + +const secretDir = "../config/samples/secrets" //nolint:gosec // for testing +const cacertSecretDir = "../config/samples/secrets/cacerts" //nolint:gosec // for testing + +const AerospikeSecretName = "aerospike-secret" +const TLSCacertSecretName = "aerospike-cacert-secret" //nolint:gosec // for testing +const AuthSecretName = "auth-secret" +const AuthSecretNameForUpdate = "auth-update" + +const MultiClusterNs1 string = "test1" +const MultiClusterNs2 string = "test2" +const AerospikeNs string = "aerospike" +const namespace = "test" + +// Namespaces is the list of all the namespaces used in test-suite +var Namespaces = []string{namespace, MultiClusterNs1, MultiClusterNs2, AerospikeNs} + +func getLabels() map[string]string { + return map[string]string{"app": "aerospike-cluster"} +} + func createClusterRBAC(k8sClient client.Client, ctx goctx.Context) error { - subjects := make([]rbac.Subject, 0, len(testNamespaces)) + subjects := make([]rbac.Subject, 0, len(Namespaces)) - for idx := range testNamespaces { + for idx := range Namespaces { // Create service account for getting access in cluster specific namespaces if err := createServiceAccount( - k8sClient, ctx, aeroClusterServiceAccountName, testNamespaces[idx], + k8sClient, ctx, aeroClusterServiceAccountName, Namespaces[idx], ); err != nil { return err } @@ -33,7 +59,7 @@ func createClusterRBAC(k8sClient client.Client, ctx goctx.Context) error { subjects = append(subjects, rbac.Subject{ Kind: "ServiceAccount", Name: aeroClusterServiceAccountName, - Namespace: testNamespaces[idx], + Namespace: Namespaces[idx], }) } @@ -90,3 +116,169 @@ func createServiceAccount( return nil } + +func initConfigSecret(secretDirectory string) (map[string][]byte, error) { + initSecrets := make(map[string][]byte) + + fileInfo, err := os.ReadDir(secretDirectory) + if err != nil { + return nil, err + } + + if len(fileInfo) == 0 { + return nil, fmt.Errorf("no secret file available in %s", secretDirectory) + } + + for _, file := range fileInfo { + if file.IsDir() { + // no need to check recursively + continue + } + + secret, err := os.ReadFile(filepath.Join(secretDirectory, file.Name())) + if err != nil { + return nil, fmt.Errorf("wrong secret file %s: %v", file.Name(), err) + } + + initSecrets[file.Name()] = secret + } + + return initSecrets, nil +} + +func setupByUser(k8sClient client.Client, ctx goctx.Context) error { + var err error + // Create configSecret + if secrets, err = initConfigSecret(secretDir); err != nil { + return fmt.Errorf("failed to init secrets: %v", err) + } + + // Create cacertSecret + if cacertSecrets, err = initConfigSecret(cacertSecretDir); err != nil { + return fmt.Errorf("failed to init secrets: %v", err) + } + + // Create preReq for namespaces used for testing + for idx := range Namespaces { + if err := createClusterPreReq(k8sClient, ctx, Namespaces[idx]); err != nil { + return err + } + } + + // Create another authSecret. 
Used in access-control tests + passUpdate := "admin321" + labels := getLabels() + + if err := createAuthSecret( + k8sClient, ctx, namespace, labels, AuthSecretNameForUpdate, passUpdate, + ); err != nil { + return err + } + + return createClusterRBAC(k8sClient, ctx) +} + +func createClusterPreReq( + k8sClient client.Client, ctx goctx.Context, namespace string, +) error { + labels := getLabels() + + if err := createNamespace(k8sClient, ctx, namespace); err != nil { + return err + } + + if err := createConfigSecret( + k8sClient, ctx, namespace, labels, + ); err != nil { + return err + } + + if err := createCacertSecret( + k8sClient, ctx, namespace, labels, + ); err != nil { + return err + } + + // Create authSecret + pass := "admin123" + + return createAuthSecret( + k8sClient, ctx, namespace, labels, AuthSecretName, pass, + ) +} + +func createCacertSecret( + k8sClient client.Client, ctx goctx.Context, namespace string, + labels map[string]string, +) error { + // Create configSecret + s := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: TLSCacertSecretName, + Namespace: namespace, + Labels: labels, + }, + Type: corev1.SecretTypeOpaque, + Data: cacertSecrets, + } + + // Remove old object + _ = k8sClient.Delete(ctx, s) + + // use test context's create helper to create the object and add a cleanup + // function for the new object + err := k8sClient.Create(ctx, s) + if err != nil { + return err + } + + return nil +} + +func createConfigSecret( + k8sClient client.Client, ctx goctx.Context, namespace string, + labels map[string]string, +) error { + // Create configSecret + s := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: AerospikeSecretName, + Namespace: namespace, + Labels: labels, + }, + Type: corev1.SecretTypeOpaque, + Data: secrets, + } + + // Remove old object + _ = k8sClient.Delete(ctx, s) + + // use test context's create helper to create the object and add a cleanup + // function for the new object + return k8sClient.Create(ctx, s) +} + +func createAuthSecret( + k8sClient client.Client, ctx goctx.Context, namespace string, + labels map[string]string, secretName, pass string, +) error { + // Create authSecret + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: namespace, + Labels: labels, + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + "password": []byte(pass), + }, + } + // use test context's create helper to create the object and add a cleanup function for the new object + err := k8sClient.Create(ctx, secret) + if !errors.IsAlreadyExists(err) { + return err + } + + return nil +} diff --git a/test/suite_test.go b/test/suite_test.go index b3c3ff2d3..82f3f4f41 100644 --- a/test/suite_test.go +++ b/test/suite_test.go @@ -18,8 +18,6 @@ package test import ( goctx "context" - "flag" - "fmt" "testing" "time" @@ -27,12 +25,7 @@ import ( . "github.com/onsi/gomega" "github.com/onsi/gomega/gexec" admissionv1 "k8s.io/api/admission/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" k8Runtime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/client-go/rest" @@ -44,115 +37,22 @@ import ( // +kubebuilder:scaffold:imports asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" - "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" ) // These tests use Ginkgo (BDD-style Go testing framework). 
Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. -var cfg *rest.Config - var testEnv *envtest.Environment -var k8sClient client.Client - -var k8sClientset *kubernetes.Clientset - -var cloudProvider CloudProvider - -var projectRoot string +var scheme = k8Runtime.NewScheme() -var ( - scheme = k8Runtime.NewScheme() -) +var cfg *rest.Config -var defaultNetworkType = flag.String("connect-through-network-type", "hostExternal", - "Network type is used to determine an appropriate access type. Can be 'pod',"+ - " 'hostInternal' or 'hostExternal'. AS client in the test will choose access type"+ - " which matches expected network type. See details in"+ - " https://docs.aerospike.com/docs/cloud/kubernetes/operator/Cluster-configuration-settings.html#network-policy") +var k8sClient client.Client func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Controller Suite") -} - -var _ = BeforeEach(func() { - By("Cleaning up all Aerospike clusters.") - - for idx := range testNamespaces { - deleteAllClusters(testNamespaces[idx]) - Expect(cleanupPVC(k8sClient, testNamespaces[idx])).NotTo(HaveOccurred()) - } -}) - -func deleteAllClusters(namespace string) { - ctx := goctx.TODO() - list := &asdbv1.AerospikeClusterList{} - listOps := &client.ListOptions{Namespace: namespace} - - err := k8sClient.List(ctx, list, listOps) - Expect(err).NotTo(HaveOccurred()) - - for clusterIndex := range list.Items { - By(fmt.Sprintf("Deleting cluster \"%s/%s\".", list.Items[clusterIndex].Namespace, list.Items[clusterIndex].Name)) - err := deleteCluster(k8sClient, ctx, &list.Items[clusterIndex]) - Expect(err).NotTo(HaveOccurred()) - } -} - -func cleanupPVC(k8sClient client.Client, ns string) error { - // List the pvc for this aeroCluster's statefulset - pvcList := &corev1.PersistentVolumeClaimList{} - clLabels := map[string]string{"app": "aerospike-cluster"} - labelSelector := labels.SelectorFromSet(clLabels) - listOps := &client.ListOptions{Namespace: ns, LabelSelector: labelSelector} - - if err := k8sClient.List(goctx.TODO(), pvcList, listOps); err != nil { - return err - } - - for pvcIndex := range pvcList.Items { - pkgLog.Info("Found pvc, deleting it", "pvcName", - pvcList.Items[pvcIndex].Name, "namespace", pvcList.Items[pvcIndex].Namespace) - - if utils.IsPVCTerminating(&pvcList.Items[pvcIndex]) { - continue - } - // if utils.ContainsString(pvc.Finalizers, "kubernetes.io/pvc-protection") { - // pvc.Finalizers = utils.RemoveString(pvc.Finalizers, "kubernetes.io/pvc-protection") - // if err := k8sClient.Patch(goctx.TODO(), &pvc, client.Merge); err != nil { - // return fmt.Errorf("could not patch %s finalizer from following pvc: %s: %w", - // "kubernetes.io/pvc-protection", pvc.Name, err) - // } - //} - if err := k8sClient.Delete(goctx.TODO(), &pvcList.Items[pvcIndex]); err != nil { - return fmt.Errorf("could not delete pvc %s: %w", pvcList.Items[pvcIndex].Name, err) - } - } - - return nil -} - -func deletePVC(k8sClient client.Client, pvcNamespacedName types.NamespacedName) error { - pvc := &corev1.PersistentVolumeClaim{} - if err := k8sClient.Get(goctx.TODO(), pvcNamespacedName, pvc); err != nil { - if errors.IsNotFound(err) { - return nil - } - - return err - } - - if utils.IsPVCTerminating(pvc) { - return nil - } - - if err := k8sClient.Delete(goctx.TODO(), pvc); err != nil { - return fmt.Errorf("could not delete pvc %s: %w", pvc.Name, err) - } - - return nil + RunSpecs(t, "Setup Suite") } // This is used when running tests on existing cluster @@ -163,7 +63,7 @@ var _ = BeforeSuite( 
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) By("Bootstrapping test environment") - pkgLog.Info(fmt.Sprintf("Client will connect through '%s' network to Aerospike Clusters.", *defaultNetworkType)) + t := true testEnv = &envtest.Environment{ UseExistingCluster: &t, @@ -191,14 +91,6 @@ var _ = BeforeSuite( Expect(err).NotTo(HaveOccurred()) Expect(k8sClient).NotTo(BeNil()) - k8sClientset = kubernetes.NewForConfigOrDie(cfg) - Expect(k8sClient).NotTo(BeNil()) - - projectRoot, err = getGitRepoRootPath() - Expect(err).NotTo(HaveOccurred()) - - ctx := goctx.TODO() - // Setup by user function // test creating resource // IN operator namespace @@ -225,20 +117,12 @@ var _ = BeforeSuite( // ClusterRoleBinding: aerospike-cluster // Need to create storageClass if not created already - err = setupByUser(k8sClient, ctx) - Expect(err).ToNot(HaveOccurred()) - cloudProvider, err = getCloudProvider(ctx, k8sClient) + err = setupByUser(k8sClient, goctx.TODO()) Expect(err).ToNot(HaveOccurred()) }) var _ = AfterSuite( func() { - By("Cleaning up all pvcs") - - for idx := range testNamespaces { - _ = cleanupPVC(k8sClient, testNamespaces[idx]) - } - By("tearing down the test environment") gexec.KillAndWait(5 * time.Second) err := testEnv.Stop() diff --git a/test/test.sh b/test/test.sh index 8b93e24f2..8ef8900f2 100755 --- a/test/test.sh +++ b/test/test.sh @@ -62,4 +62,4 @@ echo "---------------------" export CUSTOM_INIT_REGISTRY="$REGISTRY" export IMAGE_PULL_SECRET_NAME="$IMAGE_PULL_SECRET" -make test FOCUS="$FOCUS" ARGS="$ARGS" +make test-all FOCUS="$FOCUS" ARGS="$ARGS" diff --git a/test/utils.go b/test/utils.go index cf6fb89cf..8db02085e 100644 --- a/test/utils.go +++ b/test/utils.go @@ -1,861 +1,65 @@ package test import ( - "bytes" - goctx "context" - "encoding/json" "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "reflect" - "strconv" - "strings" - "time" - set "github.com/deckarep/golang-set/v2" - "github.com/go-logr/logr" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" + admissionv1 "k8s.io/api/admission/v1" + "k8s.io/apimachinery/pkg/runtime" + utilRuntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes" - ctrl "sigs.k8s.io/controller-runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" - as "github.com/aerospike/aerospike-client-go/v7" asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1" - operatorUtils "github.com/aerospike/aerospike-kubernetes-operator/pkg/utils" - lib "github.com/aerospike/aerospike-management-lib" - "github.com/aerospike/aerospike-management-lib/info" + asdbv1beta1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1beta1" ) -var ( - namespace = "test" - storageClass = "ssd" - pkgLog = ctrl.Log.WithName("test") -) - -var secrets map[string][]byte -var cacertSecrets map[string][]byte - -const secretDir = "../config/samples/secrets" //nolint:gosec // for testing -const cacertSecretDir = "../config/samples/secrets/cacerts" //nolint:gosec // for testing - -const aerospikeSecretName = "aerospike-secret" -const tlsCacertSecretName = "aerospike-cacert-secret" //nolint:gosec // for testing -const authSecretName = "auth-secret" -const authSecretNameForUpdate = "auth-update" - -const multiClusterNs1 string = "test1" -const 
multiClusterNs2 string = "test2" -const aerospikeNs string = "aerospike" - -const zoneKey = "topology.kubernetes.io/zone" -const regionKey = "topology.kubernetes.io/region" - -const serviceTLSPort = 4333 -const serviceNonTLSPort = 3000 - -// list of all the namespaces used in test-suite -var testNamespaces = []string{namespace, multiClusterNs1, multiClusterNs2, aerospikeNs} - -const aerospikeConfigSecret string = "aerospike-config-secret" //nolint:gosec // for testing - -var aerospikeVolumeInitMethodDeleteFiles = asdbv1.AerospikeVolumeMethodDeleteFiles - -func initConfigSecret(secretDirectory string) (map[string][]byte, error) { - initSecrets := make(map[string][]byte) - - fileInfo, err := os.ReadDir(secretDirectory) - if err != nil { - return nil, err - } - - if len(fileInfo) == 0 { - return nil, fmt.Errorf("no secret file available in %s", secretDirectory) - } - - for _, file := range fileInfo { - if file.IsDir() { - // no need to check recursively - continue - } - - secret, err := os.ReadFile(filepath.Join(secretDirectory, file.Name())) - if err != nil { - return nil, fmt.Errorf("wrong secret file %s: %v", file.Name(), err) - } - - initSecrets[file.Name()] = secret - } - - return initSecrets, nil -} - -func setupByUser(k8sClient client.Client, ctx goctx.Context) error { - var err error - // Create configSecret - if secrets, err = initConfigSecret(secretDir); err != nil { - return fmt.Errorf("failed to init secrets: %v", err) - } - - // Create cacertSecret - if cacertSecrets, err = initConfigSecret(cacertSecretDir); err != nil { - return fmt.Errorf("failed to init secrets: %v", err) - } - - // Create preReq for namespaces used for testing - for idx := range testNamespaces { - if err := createClusterPreReq(k8sClient, ctx, testNamespaces[idx]); err != nil { - return err - } - } - - // Create another authSecret. 
Used in access-control tests - passUpdate := "admin321" - labels := getLabels() - - if err := createAuthSecret( - k8sClient, ctx, namespace, labels, authSecretNameForUpdate, passUpdate, - ); err != nil { - return err - } - - return createClusterRBAC(k8sClient, ctx) -} - -func createClusterPreReq( - k8sClient client.Client, ctx goctx.Context, namespace string, -) error { - labels := getLabels() - - if err := createNamespace(k8sClient, ctx, namespace); err != nil { - return err - } - - if err := createConfigSecret( - k8sClient, ctx, namespace, labels, - ); err != nil { - return err - } - - if err := createCacertSecret( - k8sClient, ctx, namespace, labels, - ); err != nil { - return err - } - - // Create authSecret - pass := "admin123" - - return createAuthSecret( - k8sClient, ctx, namespace, labels, authSecretName, pass, - ) -} - -func createCacertSecret( - k8sClient client.Client, ctx goctx.Context, namespace string, - labels map[string]string, -) error { - // Create configSecret - s := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: tlsCacertSecretName, - Namespace: namespace, - Labels: labels, - }, - Type: corev1.SecretTypeOpaque, - Data: cacertSecrets, - } - - // Remove old object - _ = k8sClient.Delete(ctx, s) - - // use test context's create helper to create the object and add a cleanup - // function for the new object - err := k8sClient.Create(ctx, s) - if err != nil { - return err - } - - return nil -} - -func createConfigSecret( - k8sClient client.Client, ctx goctx.Context, namespace string, - labels map[string]string, -) error { - // Create configSecret - s := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: aerospikeSecretName, - Namespace: namespace, - Labels: labels, - }, - Type: corev1.SecretTypeOpaque, - Data: secrets, - } - - // Remove old object - _ = k8sClient.Delete(ctx, s) - - // use test context's create helper to create the object and add a cleanup - // function for the new object - return k8sClient.Create(ctx, s) -} - -func createAuthSecret( - k8sClient client.Client, ctx goctx.Context, namespace string, - labels map[string]string, secretName, pass string, -) error { - // Create authSecret - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: namespace, - Labels: labels, - }, - Type: corev1.SecretTypeOpaque, - Data: map[string][]byte{ - "password": []byte(pass), - }, +func BootStrapTestEnv(scheme *runtime.Scheme) (testEnv *envtest.Environment, cfg *rest.Config, + k8sClient client.Client, k8sClientSet *kubernetes.Clientset, err error) { + t := true + testEnv = &envtest.Environment{ + UseExistingCluster: &t, } - // use test context's create helper to create the object and add a cleanup function for the new object - err := k8sClient.Create(ctx, secret) - if !errors.IsAlreadyExists(err) { - return err - } - - return nil -} -func getLabels() map[string]string { - return map[string]string{"app": "aerospike-cluster"} -} - -func waitForAerospikeCluster( - k8sClient client.Client, ctx goctx.Context, - aeroCluster *asdbv1.AerospikeCluster, replicas int, - retryInterval, timeout time.Duration, expectedPhases []asdbv1.AerospikeClusterPhase, -) error { - var isValid bool - - err := wait.PollUntilContextTimeout(ctx, - retryInterval, timeout, true, func(ctx goctx.Context) (done bool, err error) { - // Fetch the AerospikeCluster instance - newCluster := &asdbv1.AerospikeCluster{} - err = k8sClient.Get( - ctx, types.NamespacedName{ - Name: aeroCluster.Name, Namespace: aeroCluster.Namespace, - }, newCluster, - ) - if err != nil { 
- if errors.IsNotFound(err) { - pkgLog.Info( - "Waiting for availability of %s AerospikeCluster\n", - "name", aeroCluster.Name, - ) - return false, nil - } - return false, err - } - - isValid = isClusterStateValid(aeroCluster, newCluster, replicas, expectedPhases) - return isValid, nil - }, - ) + cfg, err = testEnv.Start() if err != nil { - return err - } - - pkgLog.Info("AerospikeCluster available\n") - - // make info call - return nil -} - -func isClusterStateValid( - aeroCluster *asdbv1.AerospikeCluster, - newCluster *asdbv1.AerospikeCluster, replicas int, expectedPhases []asdbv1.AerospikeClusterPhase, -) bool { - if int(newCluster.Status.Size) != replicas { - pkgLog.Info("Cluster size is not correct") - return false - } - - // Validate status - statusToSpec, err := asdbv1.CopyStatusToSpec(&newCluster.Status.AerospikeClusterStatusSpec) - if err != nil { - pkgLog.Error(err, "Failed to copy spec in status", "err", err) - return false - } - - if !reflect.DeepEqual(statusToSpec, &newCluster.Spec) { - pkgLog.Info("Cluster status is not matching the spec") - return false - } - - // TODO: This is not valid for tests where maxUnavailablePods flag is used. - // We can take the param in func to skip this check - // // Validate pods - // if len(newCluster.Status.Pods) != replicas { - // pkgLog.Info("Cluster status doesn't have pod status for all nodes. Cluster status may not have fully updated") - // return false - // } - - for podName := range newCluster.Status.Pods { - if newCluster.Status.Pods[podName].Aerospike.NodeID == "" { - pkgLog.Info("Cluster pod's nodeID is empty") - return false - } - - if operatorUtils.IsImageEqual(newCluster.Status.Pods[podName].Image, aeroCluster.Spec.Image) { - break - } - - pkgLog.Info( - fmt.Sprintf("Cluster pod's image %s not same as spec %s", newCluster.Status.Pods[podName].Image, - aeroCluster.Spec.Image, - ), - ) + return testEnv, cfg, k8sClient, k8sClientSet, err } - if newCluster.Labels[asdbv1.AerospikeAPIVersionLabel] != asdbv1.AerospikeAPIVersion { - pkgLog.Info("Cluster API version label is not correct") - return false + if cfg == nil { + err = fmt.Errorf("cfg is nil") + return testEnv, cfg, k8sClient, k8sClientSet, err } - // Validate phase - phaseSet := set.NewSet(expectedPhases...) 
- if !phaseSet.Contains(newCluster.Status.Phase) { - pkgLog.Info("Cluster phase is not correct") - return false - } - - pkgLog.Info("Cluster state is validated successfully") + utilRuntime.Must(clientgoscheme.AddToScheme(scheme)) + utilRuntime.Must(asdbv1.AddToScheme(scheme)) + utilRuntime.Must(admissionv1.AddToScheme(scheme)) + utilRuntime.Must(asdbv1beta1.AddToScheme(scheme)) - return true -} + // +kubebuilder:scaffold:scheme -func getTimeout(nodes int32) time.Duration { - return 3 * time.Minute * time.Duration(nodes) -} - -func getPodLogs( - k8sClientset *kubernetes.Clientset, ctx goctx.Context, pod *corev1.Pod, -) string { - podLogOpts := corev1.PodLogOptions{} - req := k8sClientset.CoreV1().Pods(pod.Namespace).GetLogs( - pod.Name, &podLogOpts, + k8sClient, err = client.New( + cfg, client.Options{Scheme: scheme}, ) - podLogs, err := req.Stream(ctx) - if err != nil { - return "error in opening stream" - } - - defer func(podLogs io.ReadCloser) { - _ = podLogs.Close() - }(podLogs) - - buf := new(bytes.Buffer) - - _, err = io.Copy(buf, podLogs) if err != nil { - return "error in copy information from podLogs to buf" + return testEnv, cfg, k8sClient, k8sClientSet, err } - str := buf.String() - - return str -} - -func getRackID(pod *corev1.Pod) (int, error) { - rack, ok := pod.ObjectMeta.Labels["aerospike.com/rack-id"] - if !ok { - return 0, nil + if k8sClient == nil { + err = fmt.Errorf("k8sClient is nil") + return testEnv, cfg, k8sClient, k8sClientSet, err } - return strconv.Atoi(rack) -} - -// Copy makes a deep copy from src into dst. -func Copy(dst, src interface{}) error { - if dst == nil { - return fmt.Errorf("dst cannot be nil") - } - - if src == nil { - return fmt.Errorf("src cannot be nil") - } - - jsonBytes, err := json.Marshal(src) - if err != nil { - return fmt.Errorf("unable to marshal src: %s", err) - } - - err = json.Unmarshal(jsonBytes, dst) - if err != nil { - return fmt.Errorf("unable to unmarshal into dst: %s", err) - } - - return nil -} - -type AerospikeConfSpec struct { - version string - network map[string]interface{} - service map[string]interface{} - security map[string]interface{} - namespaces []interface{} -} - -func getOperatorCert() *asdbv1.AerospikeOperatorClientCertSpec { - return &asdbv1.AerospikeOperatorClientCertSpec{ - TLSClientName: "aerospike-a-0.test-runner", - AerospikeOperatorCertSource: asdbv1.AerospikeOperatorCertSource{ - SecretCertSource: &asdbv1.AerospikeSecretCertSource{ - SecretName: "aerospike-secret", - CaCertsFilename: "cacert.pem", - ClientCertFilename: "svc_cluster_chain.pem", - ClientKeyFilename: "svc_key.pem", - }, - }, - } -} -func getNetworkTLSConfig() map[string]interface{} { - return map[string]interface{}{ - "service": map[string]interface{}{ - "tls-name": "aerospike-a-0.test-runner", - "tls-port": serviceTLSPort, - "port": serviceNonTLSPort, - }, - "fabric": map[string]interface{}{ - "tls-name": "aerospike-a-0.test-runner", - "tls-port": 3011, - "port": 3001, - }, - "heartbeat": map[string]interface{}{ - "tls-name": "aerospike-a-0.test-runner", - "tls-port": 3012, - "port": 3002, - }, - - "tls": []interface{}{ - map[string]interface{}{ - "name": "aerospike-a-0.test-runner", - "cert-file": "/etc/aerospike/secret/svc_cluster_chain.pem", - "key-file": "/etc/aerospike/secret/svc_key.pem", - "ca-file": "/etc/aerospike/secret/cacert.pem", - }, - }, - } -} - -func getNetworkConfig() map[string]interface{} { - return map[string]interface{}{ - "service": map[string]interface{}{ - "port": serviceNonTLSPort, - }, - "fabric": 
map[string]interface{}{ - "port": 3001, - }, - "heartbeat": map[string]interface{}{ - "port": 3002, - }, - } -} -func NewAerospikeConfSpec(image string) (*AerospikeConfSpec, error) { - ver, err := asdbv1.GetImageVersion(image) - if err != nil { - return nil, err - } - - service := map[string]interface{}{ - "feature-key-file": "/etc/aerospike/secret/features.conf", - } - network := getNetworkConfig() - namespaces := []interface{}{ - map[string]interface{}{ - "name": "test", - "replication-factor": 1, - "storage-engine": map[string]interface{}{ - "type": "memory", - "data-size": 1073741824, - }, - }, - } - - return &AerospikeConfSpec{ - version: ver, - service: service, - network: network, - namespaces: namespaces, - security: nil, - }, nil -} - -func (acs *AerospikeConfSpec) getVersion() string { - return acs.version -} - -func (acs *AerospikeConfSpec) setEnableSecurity(enableSecurity bool) error { - cmpVal, err := lib.CompareVersions(acs.version, "5.7.0") - if err != nil { - return err - } - - if cmpVal >= 0 { - if enableSecurity { - security := map[string]interface{}{} - acs.security = security - } - - return nil - } - - acs.security = map[string]interface{}{} - acs.security["enable-security"] = enableSecurity - - return nil -} - -func (acs *AerospikeConfSpec) setEnableQuotas(enableQuotas bool) error { - cmpVal, err := lib.CompareVersions(acs.version, "5.6.0") - if err != nil { - return err - } - - if cmpVal >= 0 { - if acs.security == nil { - acs.security = map[string]interface{}{} - } - - acs.security["enable-quotas"] = enableQuotas - } - - return nil -} - -func (acs *AerospikeConfSpec) getSpec() map[string]interface{} { - spec := map[string]interface{}{ - "service": acs.service, - "network": acs.network, - "namespaces": acs.namespaces, - } - if acs.security != nil { - spec["security"] = acs.security - } - - return spec -} - -func ValidateAttributes( - actual []map[string]string, expected map[string]string, -) bool { - for key, val := range expected { - for i := 0; i < len(actual); i++ { - m := actual[i] - - v, ok := m[key] - if ok && v == val { - return true - } - } - } - - return false -} - -func getAeroClusterConfig( - namespace types.NamespacedName, image string, -) (*asdbv1.AerospikeCluster, error) { - version, err := asdbv1.GetImageVersion(image) - if err != nil { - return nil, err - } - - cmpVal1, err := lib.CompareVersions(version, "5.7.0") - if err != nil { - return nil, err - } - - cmpVal2, err := lib.CompareVersions(version, "7.0.0") - if err != nil { - return nil, err - } - - switch { - case cmpVal2 >= 0: - return createAerospikeClusterPost640( - namespace, 2, image, - ), nil - - case cmpVal1 >= 0: - return createAerospikeClusterPost560( - namespace, 2, image, - ), nil - - case cmpVal1 < 0: - return createAerospikeClusterPost460( - namespace, 2, image, - ), nil - - default: - return nil, fmt.Errorf("invalid image version %s", version) - } -} - -func getAerospikeStorageConfig( - containerName string, inputCascadeDelete bool, - storageSize string, - cloudProvider CloudProvider, -) *asdbv1.AerospikeStorageSpec { - // Create pods and storage devices write data to the devices. - // - deletes cluster without cascade delete of volumes. - // - recreate and check if volumes are reinitialized correctly. 
- fileDeleteInitMethod := asdbv1.AerospikeVolumeMethodDeleteFiles - ddInitMethod := asdbv1.AerospikeVolumeMethodDD - blkDiscardInitMethod := asdbv1.AerospikeVolumeMethodBlkdiscard - blkDiscardWipeMethod := asdbv1.AerospikeVolumeMethodBlkdiscard - - if cloudProvider == CloudProviderAWS { - // Blkdiscard method is not supported in AWS, so it is initialized as DD Method - blkDiscardInitMethod = asdbv1.AerospikeVolumeMethodDD - blkDiscardWipeMethod = asdbv1.AerospikeVolumeMethodDD - } - - return &asdbv1.AerospikeStorageSpec{ - BlockVolumePolicy: asdbv1.AerospikePersistentVolumePolicySpec{ - InputCascadeDelete: &inputCascadeDelete, - }, - FileSystemVolumePolicy: asdbv1.AerospikePersistentVolumePolicySpec{ - InputCascadeDelete: &inputCascadeDelete, - }, - Volumes: []asdbv1.VolumeSpec{ - { - Name: "file-noinit", - Source: asdbv1.VolumeSource{ - PersistentVolume: &asdbv1.PersistentVolumeSpec{ - Size: resource.MustParse(storageSize), - StorageClass: storageClass, - VolumeMode: corev1.PersistentVolumeFilesystem, - }, - }, - Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ - Path: "/opt/aerospike/filesystem-noinit", - }, - }, - { - Name: "file-init", - AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ - InputInitMethod: &fileDeleteInitMethod, - }, - Source: asdbv1.VolumeSource{ - PersistentVolume: &asdbv1.PersistentVolumeSpec{ - Size: resource.MustParse(storageSize), - StorageClass: storageClass, - VolumeMode: corev1.PersistentVolumeFilesystem, - }, - }, - Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ - Path: "/opt/aerospike/filesystem-init", - }, - }, - { - Name: "device-noinit", - Source: asdbv1.VolumeSource{ - PersistentVolume: &asdbv1.PersistentVolumeSpec{ - Size: resource.MustParse(storageSize), - StorageClass: storageClass, - VolumeMode: corev1.PersistentVolumeBlock, - }, - }, - Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ - Path: "/opt/aerospike/blockdevice-noinit", - }, - }, - { - Name: "device-dd", - AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ - InputInitMethod: &ddInitMethod, - }, - Source: asdbv1.VolumeSource{ - PersistentVolume: &asdbv1.PersistentVolumeSpec{ - Size: resource.MustParse(storageSize), - StorageClass: storageClass, - VolumeMode: corev1.PersistentVolumeBlock, - }, - }, - Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ - Path: "/opt/aerospike/blockdevice-init-dd", - }, - }, - { - Name: "device-blkdiscard", - AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ - InputInitMethod: &blkDiscardInitMethod, - InputWipeMethod: &blkDiscardWipeMethod, - }, - Source: asdbv1.VolumeSource{ - PersistentVolume: &asdbv1.PersistentVolumeSpec{ - Size: resource.MustParse(storageSize), - StorageClass: storageClass, - VolumeMode: corev1.PersistentVolumeBlock, - }, - }, - Aerospike: &asdbv1.AerospikeServerVolumeAttachment{ - Path: "/opt/aerospike/blockdevice-init-blkdiscard", - }, - }, - { - Name: "file-noinit-1", - Source: asdbv1.VolumeSource{ - PersistentVolume: &asdbv1.PersistentVolumeSpec{ - Size: resource.MustParse(storageSize), - StorageClass: storageClass, - VolumeMode: corev1.PersistentVolumeFilesystem, - }, - }, - Sidecars: []asdbv1.VolumeAttachment{ - { - ContainerName: containerName, - Path: "/opt/aerospike/filesystem-noinit", - }, - }, - }, - { - Name: "device-dd-1", - AerospikePersistentVolumePolicySpec: asdbv1.AerospikePersistentVolumePolicySpec{ - InputInitMethod: &ddInitMethod, - }, - Source: asdbv1.VolumeSource{ - PersistentVolume: &asdbv1.PersistentVolumeSpec{ - 
Size: resource.MustParse(storageSize), - StorageClass: storageClass, - VolumeMode: corev1.PersistentVolumeBlock, - }, - }, - Sidecars: []asdbv1.VolumeAttachment{ - { - ContainerName: containerName, - Path: "/opt/aerospike/blockdevice-init-dd", - }, - }, - }, - getStorageVolumeForSecret(), - }, - } -} - -//nolint:unparam // generic function -func contains(elems []string, v string) bool { - for _, s := range elems { - if v == s { - return true - } - } - - return false -} - -func getGitRepoRootPath() (string, error) { - path, err := exec.Command("git", "rev-parse", "--show-toplevel").Output() - if err != nil { - return "", err - } - - return strings.TrimSpace(string(path)), nil -} - -func getAerospikeConfigFromNode(log logr.Logger, k8sClient client.Client, ctx goctx.Context, - clusterNamespacedName types.NamespacedName, configContext string, pod *asdbv1.AerospikePodStatus) (lib.Stats, error) { - aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) - if err != nil { - return nil, err - } - - host, err := createHost(pod) - if err != nil { - return nil, err - } - - asinfo := info.NewAsInfo( - log, host, getClientPolicy(aeroCluster, k8sClient), - ) - - confs, err := getAsConfig(asinfo, configContext) - if err != nil { - return nil, err - } - - return confs[configContext].(lib.Stats), nil -} - -func getPasswordFromSecret(k8sClient client.Client, - secretNamespcedName types.NamespacedName, passFileName string, -) (string, error) { - secret := &corev1.Secret{} - - err := k8sClient.Get(goctx.TODO(), secretNamespcedName, secret) - if err != nil { - return "", fmt.Errorf("failed to get secret %s: %v", secretNamespcedName, err) - } - - passBytes, ok := secret.Data[passFileName] - if !ok { - return "", fmt.Errorf( - "failed to get password file in secret %s, fileName %s", - secretNamespcedName, passFileName, - ) - } - - return string(passBytes), nil -} - -func getAerospikeClient(aeroCluster *asdbv1.AerospikeCluster, k8sClient client.Client) (*as.Client, error) { - policy := getClientPolicy(aeroCluster, k8sClient) - policy.FailIfNotConnected = false - policy.Timeout = time.Minute * 2 - policy.UseServicesAlternate = true - policy.ConnectionQueueSize = 100 - policy.LimitConnectionsToQueueSize = true - - hostList := make([]*as.Host, 0, len(aeroCluster.Status.Pods)) - - for podName := range aeroCluster.Status.Pods { - pod := aeroCluster.Status.Pods[podName] - - host, err := createHost(&pod) - if err != nil { - return nil, err - } - - hostList = append(hostList, host) - } - - asClient, err := as.NewClientWithPolicyAndHost(policy, hostList...) - if asClient == nil { - return nil, fmt.Errorf( - "failed to create aerospike cluster asClient: %v", err, - ) - } - - _, _ = asClient.WarmUp(-1) - - // Wait for 5 minutes for cluster to connect - for j := 0; j < 150; j++ { - if isConnected := asClient.IsConnected(); isConnected { - break - } + k8sClientSet = kubernetes.NewForConfigOrDie(cfg) - time.Sleep(time.Second * 2) + if k8sClientSet == nil { + err = fmt.Errorf("k8sClientSet is nil") + return testEnv, cfg, k8sClient, k8sClientSet, err } - return asClient, nil + return testEnv, cfg, k8sClient, k8sClientSet, nil }
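With the environment bootstrap consolidated into BootStrapTestEnv, each per-package suite (test/cluster, test/backup, test/backup_service) only has to wire the returned clients into its own package-level variables. A rough sketch of that call pattern follows, assuming the suite keeps testEnv, cfg, k8sClient, and k8sClientset as package vars and imports the parent test package; the variable names and imports are illustrative, not lifted verbatim from the new suite files:

var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

	By("Bootstrapping the shared test environment")

	var err error
	// BootStrapTestEnv registers the client-go, admission, asdbv1 and
	// asdbv1beta1 types on the given scheme before building the clients.
	testEnv, cfg, k8sClient, k8sClientset, err = test.BootStrapTestEnv(scheme)
	Expect(err).NotTo(HaveOccurred())
})

var _ = AfterSuite(func() {
	By("Tearing down the test environment")
	gexec.KillAndWait(5 * time.Second)
	Expect(testEnv.Stop()).NotTo(HaveOccurred())
})

This keeps suite wiring uniform across packages while the top-level "Setup Suite" remains responsible only for the one-time, user-level prerequisites created by setupByUser.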