From 7c0e38548aa5569b039f2da041acc16b7f42eb70 Mon Sep 17 00:00:00 2001 From: Abhisek Dwivedi Date: Sat, 10 Aug 2024 13:42:16 +0530 Subject: [PATCH] Bumped golangci-lint and fixed go-lint --- .github/workflows/golangci-lint.yaml | 2 +- .golangci.yml | 12 +++++- Makefile | 4 +- .../cluster/aerospikecluster_controller.go | 4 +- internal/controller/cluster/client_policy.go | 30 +++++++++---- internal/controller/cluster/configmap.go | 3 ++ internal/controller/cluster/pod.go | 4 +- pkg/jsonpatch/jsonpatch.go | 4 +- test/backup/test_utils.go | 9 ++-- test/cluster/batch_restart_pods_test.go | 21 +++++++++ test/cluster/cluster_helper.go | 2 + test/cluster/cluster_resource_test.go | 2 + test/cluster/cluster_test.go | 43 ++++++++++++++++--- test/cluster/large_reconcile_test.go | 6 +++ test/cluster/network_policy_test.go | 18 ++++++++ test/cluster/security_context_test.go | 9 ++++ test/cluster/test_client.go | 17 +++++--- test/cluster/tls_authenticate_client_test.go | 1 + test/cluster/utils.go | 4 ++ 19 files changed, 160 insertions(+), 35 deletions(-) diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index 7b11c0627..1a3be2218 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -25,5 +25,5 @@ jobs: - name: Run golangci-lint uses: golangci/golangci-lint-action@v3 with: - version: v1.57 + version: v1.59.1 args: --timeout=5m \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml index 48e7e986f..6327cccdf 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,12 @@ linters-settings: + revive: + rules: + - name: dot-imports + arguments: + - allowedPackages: # Allow dot imports for testing packages + - "github.com/onsi/ginkgo/v2" + - "github.com/onsi/gomega" goconst: min-len: 2 min-occurrences: 3 @@ -11,9 +18,10 @@ linters-settings: - performance - style govet: - check-shadowing: true enable: - fieldalignment + - shadow + nolintlint: require-explanation: true require-specific: true @@ -56,7 +64,7 @@ linters: run: issues-exit-code: 1 - go: '1.21' + go: '1.22' # skip-dirs: # - sample # skip-files: diff --git a/Makefile b/Makefile index 731deb0a8..e538015cc 100644 --- a/Makefile +++ b/Makefile @@ -130,7 +130,7 @@ vet: ## Run go vet against code. go vet ./... .PHONY: go-lint -go-lint: golanci-lint ## Run golangci-lint against code. +go-lint: golangci-lint ## Run golangci-lint against code. $(GOLANGCI_LINT) run .PHONY: go-lint-fix @@ -245,7 +245,7 @@ GOLANGCI_LINT = $(LOCALBIN)/golangci-lint-$(GOLANGCI_LINT_VERSION) KUSTOMIZE_VERSION ?= v5.3.0 CONTROLLER_TOOLS_VERSION ?= v0.14.0 ENVTEST_VERSION ?= release-0.17 -GOLANGCI_LINT_VERSION ?= v1.57.2 +GOLANGCI_LINT_VERSION ?= v1.59.1 # Set the Operator SDK version to use. By default, what is installed on the system is used. # This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. 
OPERATOR_SDK_VERSION ?= v1.36.0 diff --git a/internal/controller/cluster/aerospikecluster_controller.go b/internal/controller/cluster/aerospikecluster_controller.go index abab624e2..1443c6a0c 100644 --- a/internal/controller/cluster/aerospikecluster_controller.go +++ b/internal/controller/cluster/aerospikecluster_controller.go @@ -42,10 +42,10 @@ func (r *AerospikeClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { Owns( &appsv1.StatefulSet{}, builder.WithPredicates( predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { + CreateFunc: func(_ event.CreateEvent) bool { return false }, - UpdateFunc: func(e event.UpdateEvent) bool { + UpdateFunc: func(_ event.UpdateEvent) bool { return false }, }, diff --git a/internal/controller/cluster/client_policy.go b/internal/controller/cluster/client_policy.go index 1cd6f8290..d9b224a0f 100644 --- a/internal/controller/cluster/client_policy.go +++ b/internal/controller/cluster/client_policy.go @@ -231,15 +231,19 @@ func (r *SingleClusterReconciler) appendCACertFromFileOrPath( if err != nil { return err } + if !d.IsDir() { var caData []byte + if caData, err = os.ReadFile(path); err != nil { return err } + serverPool.AppendCertsFromPEM(caData) r.Log.Info("Loaded CA certs from file.", "ca-path", caPath, "file", path) } + return nil }, ) @@ -357,28 +361,36 @@ func (r *SingleClusterReconciler) loadCertAndKeyFromSecret( return nil, err } - if crtData, crtExists := found.Data[secretSource.ClientCertFilename]; !crtExists { + crtData, crtExists := found.Data[secretSource.ClientCertFilename] + if !crtExists { return nil, fmt.Errorf( "can't find certificate `%s` in secret %+v", secretSource.ClientCertFilename, secretName, ) - } else if keyData, keyExists := found.Data[secretSource.ClientKeyFilename]; !keyExists { + } + + keyData, keyExists := found.Data[secretSource.ClientKeyFilename] + if !keyExists { return nil, fmt.Errorf( "can't find client key `%s` in secret %+v", secretSource.ClientKeyFilename, secretName, ) - } else if cert, err := tls.X509KeyPair(crtData, keyData); err != nil { + } + + cert, err := tls.X509KeyPair(crtData, keyData) + if err != nil { return nil, fmt.Errorf( "failed to load X509 key pair for cluster from secret %+v: %w", secretName, err, ) - } else { - r.Log.Info( - "Loading Aerospike Cluster client cert from secret", "secret", - secretName, - ) - return &cert, nil } + + r.Log.Info( + "Loading Aerospike Cluster client cert from secret", "secret", + secretName, + ) + + return &cert, nil } func namespacedSecret( diff --git a/internal/controller/cluster/configmap.go b/internal/controller/cluster/configmap.go index 93d1f41a6..6e218388c 100644 --- a/internal/controller/cluster/configmap.go +++ b/internal/controller/cluster/configmap.go @@ -66,6 +66,7 @@ func init() { if err != nil { return err } + if !d.IsDir() { content, err := fs.ReadFile(scripts, path) if err != nil { @@ -76,9 +77,11 @@ func init() { if err != nil { return err } + key := filepath.Base(path) scriptTemplates[key] = evaluated } + return nil }, ) diff --git a/internal/controller/cluster/pod.go b/internal/controller/cluster/pod.go index 127ee4de0..2b3c93315 100644 --- a/internal/controller/cluster/pod.go +++ b/internal/controller/cluster/pod.go @@ -872,6 +872,7 @@ func (r *SingleClusterReconciler) getIgnorablePods(racksToDelete []asdbv1.Rack, } ignorablePodNames.Insert(failedPod[podIdx]) + failedAllowed-- } } @@ -1474,7 +1475,7 @@ func (r *SingleClusterReconciler) patchPodStatus(ctx context.Context, patches [] constantPatch := client.RawPatch(types.JSONPatchType, 
jsonPatchJSON) - return retry.OnError(retry.DefaultBackoff, func(err error) bool { + return retry.OnError(retry.DefaultBackoff, func(_ error) bool { // Customize the error check for retrying, return true to retry, false to stop retrying return true }, func() error { @@ -1488,6 +1489,7 @@ func (r *SingleClusterReconciler) patchPodStatus(ctx context.Context, patches [] } r.Log.Info("Pod status patched successfully") + return nil }) } diff --git a/pkg/jsonpatch/jsonpatch.go b/pkg/jsonpatch/jsonpatch.go index d259e0ef7..b555fb0fc 100644 --- a/pkg/jsonpatch/jsonpatch.go +++ b/pkg/jsonpatch/jsonpatch.go @@ -229,7 +229,7 @@ func handleValues( // array replaced by non-array patch = append(patch, NewPatch("replace", p, bv)) } else { - minLen := min(len(at), len(bt)) + minLen := minimum(len(at), len(bt)) for i := 0; i < minLen; i++ { // Patch matching indices. patch, err = handleValues(at[i], bt[i], makePath(p, i), patch) @@ -299,7 +299,7 @@ func compareArray(av, bv []interface{}, p string) []PatchOperation { //nolint:un return retVal } -func min(x, y int) int { +func minimum(x, y int) int { if x < y { return x } diff --git a/test/backup/test_utils.go b/test/backup/test_utils.go index 4abe70ddd..87d8a9334 100644 --- a/test/backup/test_utils.go +++ b/test/backup/test_utils.go @@ -213,6 +213,7 @@ func waitForBackup(cl client.Client, backup *asdbv1beta1.AerospikeBackup, pkgLog.Info("Backup status not updated yet") return false, nil } + return true, nil }) } @@ -280,7 +281,7 @@ func validateTriggeredBackup(k8sClient client.Client, backup *asdbv1beta1.Aerosp // Wait for Service LB IP to be populated if err := wait.PollUntilContextTimeout(testCtx, interval, timeout, true, func(ctx context.Context) (bool, error) { - if err := k8sClient.Get(testCtx, + if err := k8sClient.Get(ctx, types.NamespacedName{ Name: backup.Spec.BackupService.Name, Namespace: backup.Spec.BackupService.Namespace, @@ -305,7 +306,7 @@ func validateTriggeredBackup(k8sClient client.Client, backup *asdbv1beta1.Aerosp // Wait for Backup service to be ready if err := wait.PollUntilContextTimeout(testCtx, interval, timeout, true, - func(ctx context.Context) (bool, error) { + func(_ context.Context) (bool, error) { config, err := serviceClient.GetBackupServiceConfig() if err != nil { pkgLog.Error(err, "Failed to get backup service config") @@ -331,7 +332,7 @@ func GetBackupDataPaths(k8sClient client.Client, backup *asdbv1beta1.AerospikeBa // Wait for Service LB IP to be populated if err := wait.PollUntilContextTimeout(testCtx, interval, timeout, true, func(ctx context.Context) (bool, error) { - if err := k8sClient.Get(testCtx, + if err := k8sClient.Get(ctx, types.NamespacedName{ Name: backup.Spec.BackupService.Name, Namespace: backup.Spec.BackupService.Namespace, @@ -365,7 +366,7 @@ func GetBackupDataPaths(k8sClient client.Client, backup *asdbv1beta1.AerospikeBa } if err := wait.PollUntilContextTimeout(testCtx, interval, timeout, true, - func(ctx context.Context) (bool, error) { + func(_ context.Context) (bool, error) { for routineName := range config.BackupRoutines { backups, err := serviceClient.GetFullBackupsForRoutine(routineName) if err != nil { diff --git a/test/cluster/batch_restart_pods_test.go b/test/cluster/batch_restart_pods_test.go index e5c1dc87e..27803a1fb 100644 --- a/test/cluster/batch_restart_pods_test.go +++ b/test/cluster/batch_restart_pods_test.go @@ -246,15 +246,19 @@ func BatchRollingRestart(ctx goctx.Context, clusterNamespacedName types.Namespac // Restart 1 node at a time It("Should restart one pod at a time", func() { 
By("Using default RollingUpdateBatchSize PCT/RollingUpdateBatchSize Count") + aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.PodSpec.AerospikeContainerSpec.Resources = schedulableResource("1Gi") err = updateCluster(k8sClient, ctx, aeroCluster) Expect(err).ToNot(HaveOccurred()) By("Using RollingUpdateBatchSize PCT which is not enough eg. 1%") + aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.RackConfig.RollingUpdateBatchSize = percent("1%") aeroCluster.Spec.PodSpec.AerospikeContainerSpec.Resources = nil err = updateCluster(k8sClient, ctx, aeroCluster) @@ -291,6 +295,7 @@ func BatchRollingRestart(ctx goctx.Context, clusterNamespacedName types.Namespac // Restart batch of nodes It("Should do BatchRollingRestart", func() { By("Use RollingUpdateBatchSize PCT") + err := batchRollingRestartTest(k8sClient, ctx, clusterNamespacedName, percent("90%")) Expect(err).ToNot(HaveOccurred()) @@ -298,6 +303,7 @@ func BatchRollingRestart(ctx goctx.Context, clusterNamespacedName types.Namespac Expect(err).ToNot(HaveOccurred()) By("Update RollingUpdateBatchSize Count") + err = batchRollingRestartTest(k8sClient, ctx, clusterNamespacedName, count(3)) Expect(err).ToNot(HaveOccurred()) @@ -308,8 +314,10 @@ func BatchRollingRestart(ctx goctx.Context, clusterNamespacedName types.Namespac // User should be able to change RollingUpdateBatchSize PCT/RollingUpdateBatchSize Count when restart is going on It("Should allow multiple changes in RollingUpdateBatchSize PCT/RollingUpdateBatchSize Count", func() { By("Update RollingUpdateBatchSize Count") + aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.RackConfig.RollingUpdateBatchSize = count(3) aeroCluster.Spec.PodSpec.AerospikeContainerSpec.Resources = schedulableResource("1Gi") err = k8sClient.Update(ctx, aeroCluster) @@ -318,8 +326,10 @@ func BatchRollingRestart(ctx goctx.Context, clusterNamespacedName types.Namespac time.Sleep(time.Second * 1) By("Again Update RollingUpdateBatchSize Count") + aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.RackConfig.RollingUpdateBatchSize = count(1) aeroCluster.Spec.PodSpec.AerospikeContainerSpec.Resources = nil err = k8sClient.Update(ctx, aeroCluster) @@ -328,6 +338,7 @@ func BatchRollingRestart(ctx goctx.Context, clusterNamespacedName types.Namespac time.Sleep(time.Second * 1) By("Again Update RollingUpdateBatchSize Count") + err = rollingRestartTest(k8sClient, ctx, clusterNamespacedName, count(3), "1Gi") Expect(err).ToNot(HaveOccurred()) }) @@ -357,13 +368,16 @@ func BatchUpgrade(ctx goctx.Context, clusterNamespacedName types.NamespacedName) // Restart 1 node at a time It("Should upgrade one pod at a time", func() { By("Using default RollingUpdateBatchSize PCT/RollingUpdateBatchSize Count") + aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.Image = availableImage1 err = updateCluster(k8sClient, ctx, aeroCluster) Expect(err).ToNot(HaveOccurred()) By("Using RollingUpdateBatchSize PCT which is not enough eg. 
1%") + err = upgradeTest(k8sClient, ctx, clusterNamespacedName, percent("1%"), availableImage1) Expect(err).ToNot(HaveOccurred()) }) @@ -398,6 +412,7 @@ func BatchUpgrade(ctx goctx.Context, clusterNamespacedName types.NamespacedName) // Restart batch of nodes It("Should do BatchUpgrade", func() { By("Use RollingUpdateBatchSize PCT") + err := batchUpgradeTest(k8sClient, ctx, clusterNamespacedName, percent("90%")) Expect(err).ToNot(HaveOccurred()) @@ -405,6 +420,7 @@ func BatchUpgrade(ctx goctx.Context, clusterNamespacedName types.NamespacedName) Expect(err).ToNot(HaveOccurred()) By("Update RollingUpdateBatchSize Count") + err = batchUpgradeTest(k8sClient, ctx, clusterNamespacedName, count(3)) Expect(err).ToNot(HaveOccurred()) @@ -415,8 +431,10 @@ func BatchUpgrade(ctx goctx.Context, clusterNamespacedName types.NamespacedName) // User should be able to change RollingUpdateBatchSize PCT/RollingUpdateBatchSize Count when restart is going on It("Should allow multiple changes in RollingUpdateBatchSize PCT/RollingUpdateBatchSize Count", func() { By("Update RollingUpdateBatchSize Count") + aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.RackConfig.RollingUpdateBatchSize = count(3) aeroCluster.Spec.Image = availableImage1 err = k8sClient.Update(ctx, aeroCluster) @@ -425,8 +443,10 @@ func BatchUpgrade(ctx goctx.Context, clusterNamespacedName types.NamespacedName) time.Sleep(time.Second * 1) By("Again Update RollingUpdateBatchSize Count") + aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.RackConfig.RollingUpdateBatchSize = count(1) aeroCluster.Spec.Image = latestImage err = k8sClient.Update(ctx, aeroCluster) @@ -435,6 +455,7 @@ func BatchUpgrade(ctx goctx.Context, clusterNamespacedName types.NamespacedName) time.Sleep(time.Second * 1) By("Again Update RollingUpdateBatchSize Count") + err = upgradeTest(k8sClient, ctx, clusterNamespacedName, count(3), availableImage1) Expect(err).ToNot(HaveOccurred()) }) diff --git a/test/cluster/cluster_helper.go b/test/cluster/cluster_helper.go index 8911c0752..3eaa477bd 100644 --- a/test/cluster/cluster_helper.go +++ b/test/cluster/cluster_helper.go @@ -578,7 +578,9 @@ func validateMigrateFillDelay( if err != nil { return false, err } + svcConfs := confs["service"].(lib.Stats) + current, exists := svcConfs["migrate-fill-delay"] if !exists { return false, fmt.Errorf("migrate-fill-delay missing from the Aerospike Service config") diff --git a/test/cluster/cluster_resource_test.go b/test/cluster/cluster_resource_test.go index 32cbad0ee..68c4a08af 100644 --- a/test/cluster/cluster_resource_test.go +++ b/test/cluster/cluster_resource_test.go @@ -95,6 +95,7 @@ func invalidResourceTest(ctx goctx.Context, checkAeroServer, checkAeroInit bool) if checkAeroServer { aeroCluster.Spec.PodSpec.AerospikeContainerSpec.Resources = resources } + if checkAeroInit { aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.Resources = resources } @@ -145,6 +146,7 @@ func invalidResourceTest(ctx goctx.Context, checkAeroServer, checkAeroInit bool) if checkAeroServer { aeroCluster.Spec.PodSpec.AerospikeContainerSpec.Resources = resources } + if checkAeroInit { aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.Resources = resources } diff --git a/test/cluster/cluster_test.go b/test/cluster/cluster_test.go index ca4e5824d..7286d3cd8 100644 --- a/test/cluster/cluster_test.go +++ b/test/cluster/cluster_test.go @@ -144,6 +144,7 @@ func 
ScaleDownWithMigrateFillDelay(ctx goctx.Context) { "migrate-fill-delay-cluster", namespace, ) migrateFillDelay := int64(120) + BeforeEach( func() { aeroCluster := createDummyAerospikeCluster(clusterNamespacedName, 4) @@ -220,13 +221,16 @@ func clusterWithMaxIgnorablePod(ctx goctx.Context) { func() { nodeList, err = getNodeList(ctx, k8sClient) Expect(err).ToNot(HaveOccurred()) + size := len(nodeList.Items) deployClusterForMaxIgnorablePods(ctx, clusterNamespacedName, size) By("Scale up 1 pod to make that pod pending due to lack of k8s nodes") + aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.Size++ err = k8sClient.Update(ctx, aeroCluster) Expect(err).ToNot(HaveOccurred()) @@ -262,6 +266,7 @@ func clusterWithMaxIgnorablePod(ctx goctx.Context) { }, 1*time.Minute).ShouldNot(HaveOccurred()) By("Verify pending pod") + podList, err = getPodList(aeroCluster, k8sClient) var counter int @@ -292,6 +297,7 @@ func clusterWithMaxIgnorablePod(ctx goctx.Context) { }, 1*time.Minute).ShouldNot(HaveOccurred()) By("Verify pending pod") + podList, err = getPodList(aeroCluster, k8sClient) counter = 0 @@ -305,8 +311,10 @@ func clusterWithMaxIgnorablePod(ctx goctx.Context) { Expect(counter).To(Equal(1)) By("Scale down 1 pod") + aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.Size-- // As pod is in pending state, CR object won't reach the final phase. // So expectedPhases can be InProgress or Completed @@ -314,6 +322,7 @@ func clusterWithMaxIgnorablePod(ctx goctx.Context) { Expect(err).ToNot(HaveOccurred()) By("Verify if all pods are running") + podList, err = getPodList(aeroCluster, k8sClient) Expect(err).ToNot(HaveOccurred()) @@ -340,6 +349,7 @@ func clusterWithMaxIgnorablePod(ctx goctx.Context) { It( "Should allow rack deletion with failed pods in different rack", func() { By("Fail 1-1 aerospike pod") + ignorePodName := clusterNamespacedName.Name + "-1-1" pod := &v1.Pod{} @@ -353,8 +363,10 @@ func clusterWithMaxIgnorablePod(ctx goctx.Context) { // Underlying kubernetes cluster should have atleast 6 nodes to run this test successfully. 
By("Delete rack with id 2") + aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName) Expect(err).ToNot(HaveOccurred()) + val := intstr.FromInt32(1) aeroCluster.Spec.RackConfig.MaxIgnorablePods = &val aeroCluster.Spec.RackConfig.Racks = getDummyRackConf(1) @@ -381,6 +393,7 @@ func clusterWithMaxIgnorablePod(ctx goctx.Context) { It( "Should allow namespace addition and removal with failed pod", func() { By("Fail 1-1 aerospike pod") + ignorePodName := clusterNamespacedName.Name + "-1-1" pod := &v1.Pod{} @@ -393,8 +406,10 @@ func clusterWithMaxIgnorablePod(ctx goctx.Context) { Expect(err).ToNot(HaveOccurred()) By("Set MaxIgnorablePod and Rolling restart by removing namespace") + aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName) Expect(err).ToNot(HaveOccurred()) + val := intstr.FromInt32(1) aeroCluster.Spec.RackConfig.MaxIgnorablePods = &val nsList := aeroCluster.Spec.AerospikeConfig.Value["namespaces"].([]interface{}) @@ -407,8 +422,10 @@ func clusterWithMaxIgnorablePod(ctx goctx.Context) { Expect(err).ToNot(HaveOccurred()) By("RollingRestart by re-using previously removed namespace storage") + aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName) Expect(err).ToNot(HaveOccurred()) + nsList = aeroCluster.Spec.AerospikeConfig.Value["namespaces"].([]interface{}) nsList = append(nsList, getNonSCNamespaceConfig("barnew", "/test/dev/xvdf1")) aeroCluster.Spec.AerospikeConfig.Value["namespaces"] = nsList @@ -484,6 +501,7 @@ func DeployClusterForAllImagesPost490(ctx goctx.Context) { Expect(err).ToNot(HaveOccurred()) By("Validating Readiness probe") + err = validateReadinessProbe(ctx, k8sClient, aeroCluster, serviceTLSPort) Expect(err).ToNot(HaveOccurred()) @@ -503,7 +521,7 @@ func DeployClusterForDiffStorageTest( } repFact := nHosts - + //nolint:wsl //Comments are for test-case description Context( "Positive", func() { // Cluster with n nodes, enterprise can be more than 8 @@ -525,6 +543,7 @@ func DeployClusterForDiffStorageTest( err := deployCluster(k8sClient, ctx, aeroCluster) Expect(err).ToNot(HaveOccurred()) + _ = deleteCluster(k8sClient, ctx, aeroCluster) }, ) @@ -715,6 +734,7 @@ func UpdateTLSClusterTest(ctx goctx.Context) { Expect(err).ToNot(HaveOccurred()) By("Modifying unused TLS configuration") + aeroCluster, err = getCluster( k8sClient, ctx, clusterNamespacedName, ) @@ -732,6 +752,7 @@ func UpdateTLSClusterTest(ctx goctx.Context) { Expect(err).ToNot(HaveOccurred()) By("Removing unused TLS configuration") + aeroCluster, err = getCluster( k8sClient, ctx, clusterNamespacedName, ) @@ -745,6 +766,7 @@ func UpdateTLSClusterTest(ctx goctx.Context) { Expect(err).ToNot(HaveOccurred()) By("Changing ca-file to ca-path in TLS configuration") + aeroCluster, err = getCluster( k8sClient, ctx, clusterNamespacedName, ) @@ -789,6 +811,7 @@ func UpdateTLSClusterTest(ctx goctx.Context) { It( "Try update operations", func() { By("Modifying name of used TLS configuration") + aeroCluster, err := getCluster( k8sClient, ctx, clusterNamespacedName, ) @@ -805,6 +828,7 @@ func UpdateTLSClusterTest(ctx goctx.Context) { Expect(err).Should(HaveOccurred()) By("Modifying ca-file of used TLS configuration") + aeroCluster, err = getCluster( k8sClient, ctx, clusterNamespacedName, ) @@ -821,6 +845,7 @@ func UpdateTLSClusterTest(ctx goctx.Context) { Expect(err).Should(HaveOccurred()) By("Updating both ca-file and ca-path in TLS configuration") + aeroCluster, err = getCluster( k8sClient, ctx, clusterNamespacedName, ) @@ -837,6 +862,7 @@ func UpdateTLSClusterTest(ctx goctx.Context) { 
Expect(err).Should(HaveOccurred()) By("Updating tls-name in service network config") + aeroCluster, err = getCluster( k8sClient, ctx, clusterNamespacedName, ) @@ -851,6 +877,7 @@ func UpdateTLSClusterTest(ctx goctx.Context) { Expect(err).Should(HaveOccurred()) By("Updating tls-port in service network config") + aeroCluster, err = getCluster( k8sClient, ctx, clusterNamespacedName, ) @@ -867,6 +894,7 @@ func UpdateTLSClusterTest(ctx goctx.Context) { // Should fail when changing network config from tls to non-tls in a single step. // Ideally first tls and non-tls config both has to set and then remove tls config. By("Updating tls to non-tls in single step in service network config") + aeroCluster, err = getCluster( k8sClient, ctx, clusterNamespacedName, ) @@ -1460,6 +1488,7 @@ func negativeDeployClusterValidationTest( ) namespaceConfig := aeroCluster.Spec.AerospikeConfig.Value["namespaces"].([]interface{})[0].(map[string]interface{}) + if _, ok := namespaceConfig["storage-engine"].(map[string]interface{})["devices"]; ok { namespaceConfig["storage-engine"].(map[string]interface{})["devices"] = nil @@ -1481,6 +1510,7 @@ func negativeDeployClusterValidationTest( ) namespaceConfig := aeroCluster.Spec.AerospikeConfig.Value["namespaces"].([]interface{})[0].(map[string]interface{}) + if _, ok := namespaceConfig["storage-engine"].(map[string]interface{})["devices"]; ok { aeroCluster.Spec.Storage.Volumes = []asdbv1.VolumeSpec{ @@ -1546,6 +1576,7 @@ func negativeDeployClusterValidationTest( ) namespaceConfig := aeroCluster.Spec.AerospikeConfig.Value["namespaces"].([]interface{})[0].(map[string]interface{}) + if _, ok := namespaceConfig["storage-engine"].(map[string]interface{})["files"]; ok { namespaceConfig["storage-engine"].(map[string]interface{})["files"] = nil aeroCluster.Spec.AerospikeConfig.Value["namespaces"].([]interface{})[0] = namespaceConfig @@ -1566,6 +1597,7 @@ func negativeDeployClusterValidationTest( ) namespaceConfig := aeroCluster.Spec.AerospikeConfig.Value["namespaces"].([]interface{})[0].(map[string]interface{}) + if _, ok := namespaceConfig["storage-engine"].(map[string]interface{})["devices"]; ok { devList := namespaceConfig["storage-engine"].(map[string]interface{})["devices"].([]interface{}) devList = append( @@ -1615,6 +1647,7 @@ func negativeDeployClusterValidationTest( ) namespaceConfig := aeroCluster.Spec.AerospikeConfig.Value["namespaces"].([]interface{})[0].(map[string]interface{}) + if _, ok := namespaceConfig["storage-engine"].(map[string]interface{})["devices"]; ok { aeroCluster.Spec.Storage = asdbv1.AerospikeStorageSpec{} aeroCluster.Spec.AerospikeConfig.Value["xdr"] = map[string]interface{}{ @@ -1721,9 +1754,6 @@ func negativeDeployClusterValidationTest( Expect(err).Should(HaveOccurred()) }, ) - - // Logging conf - // XDR conf }, ) }, @@ -1953,6 +1983,7 @@ func negativeUpdateClusterValidationTest( k8sClient, ctx, clusterNamespacedName, ) Expect(err).ToNot(HaveOccurred()) + defaultDNS := v1.DNSDefault aeroCluster.Spec.PodSpec.InputDNSPolicy = &defaultDNS err = updateCluster(k8sClient, ctx, aeroCluster) @@ -1967,6 +1998,7 @@ func negativeUpdateClusterValidationTest( k8sClient, ctx, clusterNamespacedName, ) Expect(err).ToNot(HaveOccurred()) + noneDNS := v1.DNSNone aeroCluster.Spec.PodSpec.InputDNSPolicy = &noneDNS err = updateCluster(k8sClient, ctx, aeroCluster) @@ -2249,9 +2281,6 @@ func negativeUpdateClusterValidationTest( Expect(err).Should(HaveOccurred()) }, ) - - // Logging conf - // XDR conf }, ) }, diff --git a/test/cluster/large_reconcile_test.go 
b/test/cluster/large_reconcile_test.go index a3e834b24..9cf625f98 100644 --- a/test/cluster/large_reconcile_test.go +++ b/test/cluster/large_reconcile_test.go @@ -272,10 +272,12 @@ func waitForClusterScaleDown( Name: aeroCluster.Name, Namespace: aeroCluster.Namespace, }, newCluster, ) + if err != nil { if apierrors.IsNotFound(err) { return false, nil } + return false, err } @@ -289,6 +291,7 @@ func waitForClusterScaleDown( if err != nil { return false, err } + if len(podList.Items) < replicas { err := fmt.Errorf("cluster pods number can not go below replica size") return false, err @@ -318,10 +321,12 @@ func waitForClusterRollingRestart( Name: aeroCluster.Name, Namespace: aeroCluster.Namespace, }, newCluster, ) + if err != nil { if apierrors.IsNotFound(err) { return false, nil } + return false, err } @@ -360,6 +365,7 @@ func waitForClusterUpgrade( Name: aeroCluster.Name, Namespace: aeroCluster.Namespace, }, newCluster, ) + if err != nil { if apierrors.IsNotFound(err) { return false, nil diff --git a/test/cluster/network_policy_test.go b/test/cluster/network_policy_test.go index 78417a55c..634772d87 100644 --- a/test/cluster/network_policy_test.go +++ b/test/cluster/network_policy_test.go @@ -357,6 +357,7 @@ func negativeDeployNetworkPolicyTest(ctx goctx.Context, multiPodPerHost, enableT func negativeUpdateNetworkPolicyTest(ctx goctx.Context) { Context("Negative cases for customInterface", func() { clusterNamespacedName := getNamespacedName("np-custom-interface", namespace) + Context( "InvalidAerospikeCustomInterface", func() { BeforeEach( @@ -389,6 +390,7 @@ func negativeUpdateNetworkPolicyTest(ctx goctx.Context) { k8sClient, ctx, clusterNamespacedName, ) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.AerospikeNetworkPolicy.AccessType = asdbv1.AerospikeNetworkTypeCustomInterface err = updateCluster(k8sClient, ctx, aeroCluster) Expect(err).Should(HaveOccurred()) @@ -403,6 +405,7 @@ func negativeUpdateNetworkPolicyTest(ctx goctx.Context) { k8sClient, ctx, clusterNamespacedName, ) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.AerospikeNetworkPolicy.AlternateAccessType = asdbv1.AerospikeNetworkTypeCustomInterface err = updateCluster(k8sClient, ctx, aeroCluster) Expect(err).Should(HaveOccurred()) @@ -417,6 +420,7 @@ func negativeUpdateNetworkPolicyTest(ctx goctx.Context) { k8sClient, ctx, clusterNamespacedName, ) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.AerospikeNetworkPolicy.TLSAccessType = asdbv1.AerospikeNetworkTypeCustomInterface err = updateCluster(k8sClient, ctx, aeroCluster) Expect(err).Should(HaveOccurred()) @@ -431,6 +435,7 @@ func negativeUpdateNetworkPolicyTest(ctx goctx.Context) { k8sClient, ctx, clusterNamespacedName, ) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.AerospikeNetworkPolicy.TLSAlternateAccessType = asdbv1.AerospikeNetworkTypeCustomInterface err = updateCluster(k8sClient, ctx, aeroCluster) Expect(err).Should(HaveOccurred()) @@ -444,6 +449,7 @@ func negativeUpdateNetworkPolicyTest(ctx goctx.Context) { k8sClient, ctx, clusterNamespacedName, ) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.AerospikeNetworkPolicy.FabricType = asdbv1.AerospikeNetworkTypeCustomInterface aeroCluster.Spec.AerospikeNetworkPolicy.CustomFabricNetworkNames = []string{networkOne} aeroCluster.Spec.PodSpec.AerospikeObjectMeta.Annotations = map[string]string{ @@ -461,6 +467,7 @@ func negativeUpdateNetworkPolicyTest(ctx goctx.Context) { k8sClient, ctx, clusterNamespacedName, ) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.AerospikeNetworkPolicy.TLSFabricType 
= asdbv1.AerospikeNetworkTypeCustomInterface aeroCluster.Spec.AerospikeNetworkPolicy.CustomTLSFabricNetworkNames = []string{networkOne} aeroCluster.Spec.PodSpec.AerospikeObjectMeta.Annotations = map[string]string{ @@ -481,6 +488,7 @@ func negativeUpdateNetworkPolicyTest(ctx goctx.Context) { k8sClient, ctx, clusterNamespacedName, ) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.AerospikeNetworkPolicy.AccessType = asdbv1.AerospikeNetworkTypeCustomInterface aeroCluster.Spec.AerospikeNetworkPolicy.CustomAccessNetworkNames = []string{networkOne} @@ -501,6 +509,7 @@ func negativeUpdateNetworkPolicyTest(ctx goctx.Context) { k8sClient, ctx, clusterNamespacedName, ) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.AerospikeNetworkPolicy.AccessType = asdbv1.AerospikeNetworkTypeCustomInterface aeroCluster.Spec.AerospikeNetworkPolicy.CustomAccessNetworkNames = []string{networkOne} aeroCluster.Spec.PodSpec.AerospikeObjectMeta.Annotations = map[string]string{ @@ -520,6 +529,7 @@ func negativeUpdateNetworkPolicyTest(ctx goctx.Context) { k8sClient, ctx, clusterNamespacedName, ) Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.AerospikeNetworkPolicy.AccessType = asdbv1.AerospikeNetworkTypeCustomInterface aeroCluster.Spec.AerospikeNetworkPolicy.CustomAccessNetworkNames = []string{"random/ipvlan-conf-1"} @@ -579,6 +589,7 @@ func negativeUpdateNetworkPolicyTest(ctx goctx.Context) { } By("Creating cluster with custom fabric interface") + err := deployCluster(k8sClient, ctx, aeroCluster) Expect(err).ToNot(HaveOccurred()) @@ -588,6 +599,7 @@ func negativeUpdateNetworkPolicyTest(ctx goctx.Context) { Expect(err).ToNot(HaveOccurred()) By("Updating custom fabric interface network list") + aeroCluster.Spec.AerospikeNetworkPolicy.CustomFabricNetworkNames = []string{nsNetworkTwo} aeroCluster.Spec.PodSpec.AerospikeObjectMeta.Annotations = map[string]string{ networkAnnotationKey: nsNetworkTwo, @@ -614,6 +626,7 @@ func negativeUpdateNetworkPolicyTest(ctx goctx.Context) { } By("Creating cluster with custom tlsfabric interface") + err := deployCluster(k8sClient, ctx, aeroCluster) Expect(err).ToNot(HaveOccurred()) @@ -623,6 +636,7 @@ func negativeUpdateNetworkPolicyTest(ctx goctx.Context) { Expect(err).ToNot(HaveOccurred()) By("Updating custom tlsFabric interface network list") + aeroCluster.Spec.AerospikeNetworkPolicy.CustomTLSFabricNetworkNames = []string{nsNetworkTwo} aeroCluster.Spec.PodSpec.AerospikeObjectMeta.Annotations = map[string]string{ networkAnnotationKey: nsNetworkTwo, @@ -742,6 +756,7 @@ func doTestNetworkPolicy( validateSvcExistence(aeroCluster, false) By("Updating AccessType to hostInternal") + aeroCluster.Spec.AerospikeNetworkPolicy.AccessType = asdbv1.AerospikeNetworkTypeHostExternal err = aerospikeClusterCreateUpdate(k8sClient, aeroCluster, ctx) Expect(err).ToNot(HaveOccurred()) @@ -755,6 +770,7 @@ func doTestNetworkPolicy( validateSvcExistence(aeroCluster, true) By("Reverting AccessType to pod") + aeroCluster.Spec.AerospikeNetworkPolicy.AccessType = asdbv1.AerospikeNetworkTypePod err = aerospikeClusterCreateUpdate(k8sClient, aeroCluster, ctx) Expect(err).ToNot(HaveOccurred()) @@ -769,6 +785,7 @@ func doTestNetworkPolicy( Context( "When using configuredIP", func() { clusterNamespacedName := getNamespacedName("np-configured-ip", test.MultiClusterNs1) + BeforeEach( func() { err := deleteNodeLabels(ctx, []string{labelAccessAddress, labelAlternateAccessAddress}) @@ -988,6 +1005,7 @@ func doTestNetworkPolicy( Expect(err).ToNot(HaveOccurred()) By("Updating correct custom network name") + 
aeroCluster.Spec.AerospikeNetworkPolicy.CustomAccessNetworkNames = []string{nsNetworkOne, nsNetworkTwo} err = updateCluster(k8sClient, ctx, aeroCluster) Expect(err).ToNot(HaveOccurred()) diff --git a/test/cluster/security_context_test.go b/test/cluster/security_context_test.go index 66471bb75..91f079b56 100644 --- a/test/cluster/security_context_test.go +++ b/test/cluster/security_context_test.go @@ -38,6 +38,7 @@ func securityContextTest( It( "Validate SecurityContext applied", func() { By("DeployCluster with SecurityContext") + clusterNamespacedName := getNamespacedName( "security-context-create", namespace, ) @@ -50,9 +51,11 @@ func securityContextTest( SupplementalGroups: []int64{1000}, } } + if checkAeroServer { aeroCluster.Spec.PodSpec.AerospikeContainerSpec.SecurityContext = &corev1.SecurityContext{Privileged: new(bool)} } + if checkAeroInit { aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.SecurityContext = &corev1.SecurityContext{Privileged: new(bool)} } @@ -73,6 +76,7 @@ func securityContextTest( It( "Validate SecurityContext updated", func() { By("DeployCluster") + clusterNamespacedName := getNamespacedName( "security-context-updated", namespace, ) @@ -84,6 +88,7 @@ func securityContextTest( Expect(err).ToNot(HaveOccurred()) By("UpdateCluster with SecurityContext") + aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName) Expect(err).ToNot(HaveOccurred()) @@ -92,9 +97,11 @@ func securityContextTest( SupplementalGroups: []int64{1000}, } } + if checkAeroServer { aeroCluster.Spec.PodSpec.AerospikeContainerSpec.SecurityContext = &corev1.SecurityContext{Privileged: new(bool)} } + if checkAeroInit { aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.SecurityContext = &corev1.SecurityContext{Privileged: new(bool)} } @@ -113,9 +120,11 @@ func securityContextTest( if checkPodSpec { aeroCluster.Spec.PodSpec.SecurityContext = nil } + if checkAeroServer { aeroCluster.Spec.PodSpec.AerospikeContainerSpec.SecurityContext = nil } + if checkAeroInit { aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.SecurityContext = nil } diff --git a/test/cluster/test_client.go b/test/cluster/test_client.go index 4a000ee23..dd9d0521b 100644 --- a/test/cluster/test_client.go +++ b/test/cluster/test_client.go @@ -397,24 +397,31 @@ func loadCertAndKeyFromSecret( return nil, err } - if crtData, crtExists := found.Data[secretSource.ClientCertFilename]; !crtExists { + crtData, crtExists := found.Data[secretSource.ClientCertFilename] + if !crtExists { return nil, fmt.Errorf( "can't find certificate \"%s\" in secret %+v", secretSource.ClientCertFilename, secretName, ) - } else if keyData, keyExists := found.Data[secretSource.ClientKeyFilename]; !keyExists { + } + + keyData, keyExists := found.Data[secretSource.ClientKeyFilename] + if !keyExists { return nil, fmt.Errorf( "can't find certificate \"%s\" in secret %+v", secretSource.ClientKeyFilename, secretName, ) - } else if cert, err := tls.X509KeyPair(crtData, keyData); err != nil { + } + + cert, err := tls.X509KeyPair(crtData, keyData) + if err != nil { return nil, fmt.Errorf( "failed to load X509 key pair for cluster from secret %+v: %w", secretName, err, ) - } else { - return &cert, nil } + + return &cert, nil } func namespacedSecret( diff --git a/test/cluster/tls_authenticate_client_test.go b/test/cluster/tls_authenticate_client_test.go index d412d966c..4b0de590c 100644 --- a/test/cluster/tls_authenticate_client_test.go +++ b/test/cluster/tls_authenticate_client_test.go @@ -446,6 +446,7 @@ func doTestTLSAuthenticateClientDomainList(ctx 
goctx.Context) { ) err := aerospikeClusterCreateUpdate(k8sClient, aeroCluster, ctx) Expect(err).ToNot(HaveOccurred()) + tlsAuthenticateClient, err := getTLSAuthenticateClient(aeroCluster) if err != nil { Expect(err).ToNot(HaveOccurred()) diff --git a/test/cluster/utils.go b/test/cluster/utils.go index 0acc6a84a..42d4d22ad 100644 --- a/test/cluster/utils.go +++ b/test/cluster/utils.go @@ -66,18 +66,22 @@ func waitForAerospikeCluster( Name: aeroCluster.Name, Namespace: aeroCluster.Namespace, }, newCluster, ) + if err != nil { if errors.IsNotFound(err) { pkgLog.Info( "Waiting for availability of %s AerospikeCluster\n", "name", aeroCluster.Name, ) + return false, nil } + return false, err } isValid = isClusterStateValid(aeroCluster, newCluster, replicas, expectedPhases) + return isValid, nil }, )
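
Notes on recurring patterns in this patch (annotations, not part of the applyable diff):

The .golangci.yml changes interact with the test code: govet's check-shadowing option (deprecated upstream) is replaced by enabling the shadow analyzer, and revive's dot-imports rule is relaxed for the Ginkgo and Gomega packages the test suites dot-import. A minimal sketch of the import style the allowedPackages entry permits (hypothetical file, not from this repository):

    package cluster_test

    import (
    	"testing"

    	. "github.com/onsi/ginkgo/v2" // dot import allowed by revive's allowedPackages
    	. "github.com/onsi/gomega"
    )

    func TestCluster(t *testing.T) {
    	RegisterFailHandler(Fail)
    	RunSpecs(t, "Cluster Suite")
    }

    var _ = Describe("Example", func() {
    	It("uses Ginkgo identifiers without a package prefix", func() {
    		Expect(1 + 1).To(Equal(2))
    	})
    })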
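The largest share of the Go changes is one mechanical refactor, visible in loadCertAndKeyFromSecret in both client_policy.go and test_client.go: if/else-if chains that declare variables inside their conditions are flattened into early returns, with blank lines added after blocks and before returns to satisfy wsl. A standalone sketch of the resulting shape (simplified, hypothetical names; a plain map stands in for the Secret data):

    package main

    import (
    	"crypto/tls"
    	"fmt"
    )

    // loadKeyPair mirrors the refactored control flow: hoist each lookup out
    // of the condition so its result stays in scope, fail fast on any missing
    // piece, and only then build the key pair.
    func loadKeyPair(data map[string][]byte, certFile, keyFile string) (*tls.Certificate, error) {
    	crtData, crtExists := data[certFile]
    	if !crtExists {
    		return nil, fmt.Errorf("can't find certificate %q", certFile)
    	}

    	keyData, keyExists := data[keyFile]
    	if !keyExists {
    		return nil, fmt.Errorf("can't find client key %q", keyFile)
    	}

    	cert, err := tls.X509KeyPair(crtData, keyData)
    	if err != nil {
    		return nil, fmt.Errorf("failed to load X509 key pair: %w", err)
    	}

    	return &cert, nil
    }

    func main() {
    	if _, err := loadKeyPair(map[string][]byte{}, "tls.crt", "tls.key"); err != nil {
    		fmt.Println(err) // can't find certificate "tls.crt"
    	}
    }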
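One rename deserves a note: pkg/jsonpatch's helper min becomes minimum. With the run.go version bumped to 1.22, and min a predeclared builtin since Go 1.21, a package-level func min shadows the builtin, which rules such as revive's redefines-builtin-id flag (the exact rule that fired here is an assumption). After the rename the two coexist:

    package main

    import "fmt"

    // minimum is the renamed helper; redefining the predeclared min would
    // shadow the builtin within the package and trip builtin-shadowing lints.
    func minimum(x, y int) int {
    	if x < y {
    		return x
    	}

    	return y
    }

    func main() {
    	fmt.Println(minimum(3, 7)) // prints 3 (renamed helper)
    	fmt.Println(min(3, 7))     // prints 3 (Go 1.21+ predeclared builtin)
    }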