From 7d34e31d757460dba7d909aaeb3afcb4afaa486b Mon Sep 17 00:00:00 2001
From: Abhisek Dwivedi
Date: Thu, 11 Apr 2024 00:05:01 +0530
Subject: [PATCH] Added test-cases

---
 api/v1/aerospikecluster_validating_webhook.go |  2 +-
 controllers/rack.go                           | 28 ++++++-------------
 go.sum                                        |  2 --
 test/batch_restart_pods_test.go               | 21 ++++++++++++++
 4 files changed, 30 insertions(+), 23 deletions(-)

diff --git a/api/v1/aerospikecluster_validating_webhook.go b/api/v1/aerospikecluster_validating_webhook.go
index 3f6aaaa41..19813a150 100644
--- a/api/v1/aerospikecluster_validating_webhook.go
+++ b/api/v1/aerospikecluster_validating_webhook.go
@@ -2198,7 +2198,7 @@ func (c *AerospikeCluster) validateBatchSize(batchSize *intstr.IntOrString, fiel
 	// when old rackConfig is not valid for batch-size
 	if c.Status.AerospikeConfig != nil {
 		if err := validateRacksForBatchSize(c.Status.RackConfig); err != nil {
-			return fmt.Errorf("status invalid for %s: %v", fieldPath, err)
+			return fmt.Errorf("status invalid for %s: update, %v", fieldPath, err)
 		}
 	}
 
diff --git a/controllers/rack.go b/controllers/rack.go
index 36f96c30d..885f52c9e 100644
--- a/controllers/rack.go
+++ b/controllers/rack.go
@@ -805,7 +805,7 @@ func (r *SingleClusterReconciler) upgradeRack(statefulSet *appsv1.StatefulSet, r
 		podsBatchList[0] = podsToUpgrade
 	} else {
 		// Create batch of pods
-		podsBatchList = r.getPodBatchToRestart(podsToUpgrade, len(podList))
+		podsBatchList = r.getPodBatch(podsToUpgrade, len(podList))
 	}
 
 	if len(podsBatchList) > 0 {
@@ -896,7 +896,7 @@ func (r *SingleClusterReconciler) scaleDownRack(
 	policy := r.getClientPolicy()
 	diffPods := *found.Spec.Replicas - desiredSize
 
-	podsBatchList := r.getPodBatchToScaleDown(oldPodList[:diffPods], len(oldPodList))
+	podsBatchList := r.getPodBatch(oldPodList[:diffPods], len(oldPodList))
 
 	// Handle one batch
 	podsBatch := podsBatchList[0]
@@ -958,8 +958,9 @@ func (r *SingleClusterReconciler) scaleDownRack(
 		)
 	}
 
-	// No need for these checks if pod was not running.
-	// These checks will fail if there is any other pod in failed state.
+	// Perform these checks only if at least one pod in the batch is running and ready.
+	// If none of the pods are running, these checks can be safely skipped.
+	// These checks would fail if any pod outside the batch is in a failed state.
 	if isAnyPodRunningAndReady {
 		// Wait for pods to get terminated
 		if err = r.waitForSTSToBeReady(found, ignorablePodNames); err != nil {
@@ -1115,7 +1116,7 @@ func (r *SingleClusterReconciler) rollingRestartRack(found *appsv1.StatefulSet,
 		podsBatchList[0] = podsToRestart
 	} else {
 		// Create batch of pods
-		podsBatchList = r.getPodBatchToRestart(podsToRestart, len(podList))
+		podsBatchList = r.getPodBatch(podsToRestart, len(podList))
 	}
 
 	// Restart batch of pods
@@ -1204,7 +1205,7 @@ func (r *SingleClusterReconciler) handleK8sNodeBlockListPods(statefulSet *appsv1
 		}
 	}
 
-	podsBatchList := r.getPodBatchToRestart(podsToRestart, len(podList))
+	podsBatchList := r.getPodBatch(podsToRestart, len(podList))
 
 	// Restart batch of pods
 	if len(podsBatchList) > 0 {
@@ -1861,7 +1862,7 @@ func getOriginalPath(path string) string {
 	return path
 }
 
-func (r *SingleClusterReconciler) getPodBatchToRestart(podList []*corev1.Pod, rackSize int) [][]*corev1.Pod {
+func (r *SingleClusterReconciler) getPodBatch(podList []*corev1.Pod, rackSize int) [][]*corev1.Pod {
 	// Error is already handled in validation
 	rollingUpdateBatchSize, _ := intstr.GetScaledValueFromIntOrPercent(
 		r.aeroCluster.Spec.RackConfig.RollingUpdateBatchSize, rackSize, false,
@@ -1870,19 +1871,6 @@ func (r *SingleClusterReconciler) getPodBatchToRestart(podList []*corev1.Pod, ra
 	return chunkBy(podList, rollingUpdateBatchSize)
 }
 
-func (r *SingleClusterReconciler) getPodBatchToScaleDown(podList []*corev1.Pod, rackSize int) [][]*corev1.Pod {
-	// Error is already handled in validation
-	scaleDownBatchSize, _ := intstr.GetScaledValueFromIntOrPercent(
-		r.aeroCluster.Spec.RackConfig.ScaleDownBatchSize, rackSize, false,
-	)
-
-	if len(podList) < scaleDownBatchSize {
-		scaleDownBatchSize = len(podList)
-	}
-
-	return chunkBy(podList, scaleDownBatchSize)
-}
-
 func chunkBy[T any](items []*T, chunkSize int) (chunks [][]*T) {
 	if chunkSize <= 0 {
 		chunkSize = 1
diff --git a/go.sum b/go.sum
index 947f1be32..f51cff785 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,5 @@
 github.com/aerospike/aerospike-client-go/v7 v7.1.0 h1:yvCTKdbpqZxHvv7sWsFHV1j49jZcC8yXRooWsDFqKtA=
 github.com/aerospike/aerospike-client-go/v7 v7.1.0/go.mod h1:AkHiKvCbqa1c16gCNGju3c5X/yzwLVvblNczqjxNwNk=
-github.com/aerospike/aerospike-management-lib v1.2.1-0.20240325134810-f8046fe9872e h1:Q/AfYe++0ouO5csLS8l99kCQqJJvDKlfHwhuWbECpaQ=
-github.com/aerospike/aerospike-management-lib v1.2.1-0.20240325134810-f8046fe9872e/go.mod h1:E4dk798IikCp9a8fugpYoeQVIXuvdxogHvt6sKhaORQ=
 github.com/aerospike/aerospike-management-lib v1.3.1-0.20240404063536-2adfbedf9687 h1:d7oDvHmiKhq4rzcD/w3z9tP3wH0+iaDvxKDk3IYuqeU=
 github.com/aerospike/aerospike-management-lib v1.3.1-0.20240404063536-2adfbedf9687/go.mod h1:E4dk798IikCp9a8fugpYoeQVIXuvdxogHvt6sKhaORQ=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
diff --git a/test/batch_restart_pods_test.go b/test/batch_restart_pods_test.go
index 118127941..8de4ff045 100644
--- a/test/batch_restart_pods_test.go
+++ b/test/batch_restart_pods_test.go
@@ -121,6 +121,27 @@ var _ = Describe("BatchRestart", func() {
 		err = updateClusterForBatchRestart(k8sClient, ctx, aeroCluster)
 		Expect(err).To(HaveOccurred())
 	})
+
+	It("Should fail update when spec is valid and status is invalid for RollingUpdateBatchSize", func() {
+		By("Remove 2nd rack")
+		aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName)
+		Expect(err).ToNot(HaveOccurred())
+
+		aeroCluster.Spec.RackConfig.Racks = aeroCluster.Spec.RackConfig.Racks[:1]
+		aeroCluster.Spec.RackConfig.Namespaces = nil
+		err = updateCluster(k8sClient, ctx, aeroCluster)
+		Expect(err).ToNot(HaveOccurred())
+
+		By("Add 2nd rack with RollingUpdateBatchSize")
+		aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName)
+		Expect(err).ToNot(HaveOccurred())
+		aeroCluster.Spec.RackConfig.Racks = getDummyRackConf(1, 2)
+		aeroCluster.Spec.RackConfig.RollingUpdateBatchSize = percent("100%")
+
+		err = k8sClient.Update(ctx, aeroCluster)
+		Expect(err).To(HaveOccurred())
+	})
+
 })
 
 Context("When doing namespace related operations", func() {
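
Reviewer note (not part of the patch): the consolidated getPodBatch above scales RollingUpdateBatchSize against the rack size with intstr.GetScaledValueFromIntOrPercent and then chunks the pod list with chunkBy. Below is a minimal, standalone Go sketch of that behaviour; the pod names, the "40%" value, and the use of plain strings instead of []*corev1.Pod are illustrative assumptions, not code from this repository.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// chunkBy mirrors the generic helper kept in controllers/rack.go: it splits
// items into consecutive batches of at most chunkSize elements. The operator's
// version works on []*corev1.Pod; strings keep this sketch self-contained.
func chunkBy[T any](items []T, chunkSize int) (chunks [][]T) {
	if chunkSize <= 0 {
		chunkSize = 1
	}
	for chunkSize < len(items) {
		items, chunks = items[chunkSize:], append(chunks, items[0:chunkSize:chunkSize])
	}
	return append(chunks, items)
}

func main() {
	// Hypothetical rack of 5 pods, identified by name only.
	pods := []string{"pod-0", "pod-1", "pod-2", "pod-3", "pod-4"}

	// RollingUpdateBatchSize may be an absolute count or a percentage of the rack size.
	batchSize := intstr.FromString("40%")

	// Same call used by getPodBatch: scale the percentage against the rack size,
	// rounding down (40% of 5 pods -> batches of 2).
	scaled, err := intstr.GetScaledValueFromIntOrPercent(&batchSize, len(pods), false)
	if err != nil {
		panic(err)
	}

	for i, batch := range chunkBy(pods, scaled) {
		fmt.Printf("batch %d: %v\n", i, batch)
	}
	// Output:
	// batch 0: [pod-0 pod-1]
	// batch 1: [pod-2 pod-3]
	// batch 2: [pod-4]
}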