Added test-cases
abhishekdwivedi3060 committed Apr 10, 2024
1 parent 22dfa83 commit 4373ba6
Showing 4 changed files with 30 additions and 24 deletions.
2 changes: 1 addition & 1 deletion api/v1/aerospikecluster_validating_webhook.go
@@ -2198,7 +2198,7 @@ func (c *AerospikeCluster) validateBatchSize(batchSize *intstr.IntOrString, fiel
// when old rackConfig is not valid for batch-size
if c.Status.AerospikeConfig != nil {
if err := validateRacksForBatchSize(c.Status.RackConfig); err != nil {
-			return fmt.Errorf("status invalid for %s: %v", fieldPath, err)
+			return fmt.Errorf("status invalid for %s: update, %v", fieldPath, err)
}
}

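An editorial aside on the one-line webhook change above: validateBatchSize checks the requested batch size against both the desired spec and the rack layout recorded in the cluster's status, since a rolling update runs against the racks that are already deployed. Below is a minimal, runnable sketch of that two-sided check; the rackConfig type and the single-rack rule inside validateRacksForBatchSize are assumptions for illustration only, not the operator's actual definitions.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// rackConfig is a hypothetical stand-in for the operator's rack config;
// the real type in api/v1 carries many more fields.
type rackConfig struct {
	racks                  []int // rack IDs
	rollingUpdateBatchSize *intstr.IntOrString
}

// validateRacksForBatchSize encodes one assumed rule for illustration
// (batching across racks needs at least two racks); the operator's real
// function has its own conditions.
func validateRacksForBatchSize(rc rackConfig) error {
	if len(rc.racks) < 2 {
		return fmt.Errorf("batch size requires more than one rack")
	}
	return nil
}

func main() {
	batch := intstr.FromString("100%")

	spec := rackConfig{racks: []int{1, 2}, rollingUpdateBatchSize: &batch} // desired layout
	status := rackConfig{racks: []int{1}}                                  // deployed layout

	// The desired spec passes...
	if err := validateRacksForBatchSize(spec); err != nil {
		fmt.Println("spec invalid:", err)
	}
	// ...but the update is still rejected, because the deployed (status)
	// racks cannot support batching yet. This is the error path whose
	// message the hunk above rewords.
	if err := validateRacksForBatchSize(status); err != nil {
		fmt.Println("status invalid for update:", err)
	}
}

Run against the scenario in the new test case further down (drop to one rack, then re-add a rack together with a 100% RollingUpdateBatchSize), only the status-side check fails.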
29 changes: 8 additions & 21 deletions controllers/rack.go
@@ -550,7 +550,6 @@ func (r *SingleClusterReconciler) reconcileRack(
"Reconcile existing Aerospike cluster statefulset", "stsName",
found.Name,
	)
-
	var res reconcileResult

[GitHub Actions / lint annotation on controllers/rack.go line 553: declarations should never be cuddled (wsl)]

r.Log.Info(
@@ -805,7 +804,7 @@ func (r *SingleClusterReconciler) upgradeRack(statefulSet *appsv1.StatefulSet, r
podsBatchList[0] = podsToUpgrade
} else {
// Create batch of pods
-		podsBatchList = r.getPodBatchToRestart(podsToUpgrade, len(podList))
+		podsBatchList = r.getPodBatch(podsToUpgrade, len(podList))
}

if len(podsBatchList) > 0 {
@@ -896,7 +895,7 @@ func (r *SingleClusterReconciler) scaleDownRack(
policy := r.getClientPolicy()
diffPods := *found.Spec.Replicas - desiredSize

-	podsBatchList := r.getPodBatchToScaleDown(oldPodList[:diffPods], len(oldPodList))
+	podsBatchList := r.getPodBatch(oldPodList[:diffPods], len(oldPodList))

// Handle one batch
podsBatch := podsBatchList[0]
@@ -958,8 +957,9 @@
)
}

-	// No need for these checks if pod was not running.
-	// These checks will fail if there is any other pod in failed state.
+	// Consider these checks if any pod in the batch is running and ready.
+	// If all the pods are not running then we can safely ignore these checks.
+	// These checks will fail if there is any other pod in failed state outside the batch.
if isAnyPodRunningAndReady {
// Wait for pods to get terminated
if err = r.waitForSTSToBeReady(found, ignorablePodNames); err != nil {
@@ -1115,7 +1115,7 @@ func (r *SingleClusterReconciler) rollingRestartRack(found *appsv1.StatefulSet,
podsBatchList[0] = podsToRestart
} else {
// Create batch of pods
-		podsBatchList = r.getPodBatchToRestart(podsToRestart, len(podList))
+		podsBatchList = r.getPodBatch(podsToRestart, len(podList))
}

// Restart batch of pods
@@ -1204,7 +1204,7 @@ func (r *SingleClusterReconciler) handleK8sNodeBlockListPods(statefulSet *appsv1
}
}

-	podsBatchList := r.getPodBatchToRestart(podsToRestart, len(podList))
+	podsBatchList := r.getPodBatch(podsToRestart, len(podList))

// Restart batch of pods
if len(podsBatchList) > 0 {
@@ -1861,7 +1861,7 @@ func getOriginalPath(path string) string {
return path
}

-func (r *SingleClusterReconciler) getPodBatchToRestart(podList []*corev1.Pod, rackSize int) [][]*corev1.Pod {
+func (r *SingleClusterReconciler) getPodBatch(podList []*corev1.Pod, rackSize int) [][]*corev1.Pod {
// Error is already handled in validation
rollingUpdateBatchSize, _ := intstr.GetScaledValueFromIntOrPercent(
r.aeroCluster.Spec.RackConfig.RollingUpdateBatchSize, rackSize, false,
@@ -1870,19 +1870,6 @@ func (r *SingleClusterReconciler) getPodBatchToRestart(podList []*corev1.Pod, ra
return chunkBy(podList, rollingUpdateBatchSize)
}

-func (r *SingleClusterReconciler) getPodBatchToScaleDown(podList []*corev1.Pod, rackSize int) [][]*corev1.Pod {
-	// Error is already handled in validation
-	scaleDownBatchSize, _ := intstr.GetScaledValueFromIntOrPercent(
-		r.aeroCluster.Spec.RackConfig.ScaleDownBatchSize, rackSize, false,
-	)
-
-	if len(podList) < scaleDownBatchSize {
-		scaleDownBatchSize = len(podList)
-	}
-
-	return chunkBy(podList, scaleDownBatchSize)
-}
-

func chunkBy[T any](items []*T, chunkSize int) (chunks [][]*T) {
if chunkSize <= 0 {
chunkSize = 1
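Because the collapsed hunk above hides the rest of chunkBy, here is a runnable sketch of how the now-unified getPodBatch path turns a RollingUpdateBatchSize into concrete batches. The chunkBy body below is a standard Go chunking implementation consistent with the visible signature and guard, and the pod names are placeholders; treat it as an illustration rather than the repository's exact code.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// chunkBy splits items into chunks of at most chunkSize. Only the guard is
// visible in the diff; this loop is the usual slicing idiom that fits the
// signature.
func chunkBy[T any](items []*T, chunkSize int) (chunks [][]*T) {
	if chunkSize <= 0 {
		chunkSize = 1
	}
	for chunkSize < len(items) {
		items, chunks = items[chunkSize:], append(chunks, items[0:chunkSize:chunkSize])
	}
	return append(chunks, items)
}

func main() {
	// A rack of 5 pods with RollingUpdateBatchSize set to "40%".
	batchSize := intstr.FromString("40%")
	rackSize := 5

	// As in getPodBatch, the error is ignored because the validating
	// webhook has already vetted the value.
	n, _ := intstr.GetScaledValueFromIntOrPercent(&batchSize, rackSize, false)

	names := []string{"pod-0", "pod-1", "pod-2", "pod-3", "pod-4"}
	pods := make([]*string, len(names))
	for i := range names {
		pods[i] = &names[i]
	}

	// 40% of 5 rounds down to 2, so the batches come out as 2, 2, 1.
	for i, batch := range chunkBy(pods, n) {
		fmt.Printf("batch %d has %d pods\n", i, len(batch))
	}
}

Note that deleting getPodBatchToScaleDown also drops its extra clamp of the batch size to len(podList); chunkBy tolerates an oversized chunkSize anyway, since the loop never runs and the final append returns whatever remains as a single batch.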
2 changes: 0 additions & 2 deletions go.sum
@@ -1,7 +1,5 @@
github.com/aerospike/aerospike-client-go/v7 v7.1.0 h1:yvCTKdbpqZxHvv7sWsFHV1j49jZcC8yXRooWsDFqKtA=
github.com/aerospike/aerospike-client-go/v7 v7.1.0/go.mod h1:AkHiKvCbqa1c16gCNGju3c5X/yzwLVvblNczqjxNwNk=
-github.com/aerospike/aerospike-management-lib v1.2.1-0.20240325134810-f8046fe9872e h1:Q/AfYe++0ouO5csLS8l99kCQqJJvDKlfHwhuWbECpaQ=
-github.com/aerospike/aerospike-management-lib v1.2.1-0.20240325134810-f8046fe9872e/go.mod h1:E4dk798IikCp9a8fugpYoeQVIXuvdxogHvt6sKhaORQ=
github.com/aerospike/aerospike-management-lib v1.3.1-0.20240404063536-2adfbedf9687 h1:d7oDvHmiKhq4rzcD/w3z9tP3wH0+iaDvxKDk3IYuqeU=
github.com/aerospike/aerospike-management-lib v1.3.1-0.20240404063536-2adfbedf9687/go.mod h1:E4dk798IikCp9a8fugpYoeQVIXuvdxogHvt6sKhaORQ=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
21 changes: 21 additions & 0 deletions test/batch_restart_pods_test.go
@@ -121,6 +121,27 @@ var _ = Describe("BatchRestart", func() {
err = updateClusterForBatchRestart(k8sClient, ctx, aeroCluster)
Expect(err).To(HaveOccurred())
})

+	It("Should fail update when spec is valid and status is invalid for RollingUpdateBatchSize", func() {
+		By("Remove 2nd rack")
+		aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName)
+		Expect(err).ToNot(HaveOccurred())
+
+		aeroCluster.Spec.RackConfig.Racks = aeroCluster.Spec.RackConfig.Racks[:1]
+		aeroCluster.Spec.RackConfig.Namespaces = nil
+		err = updateCluster(k8sClient, ctx, aeroCluster)
+		Expect(err).ToNot(HaveOccurred())
+
+		By("Add 2nd rack with RollingUpdateBatchSize")
+		aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName)
+		Expect(err).ToNot(HaveOccurred())
+		aeroCluster.Spec.RackConfig.Racks = getDummyRackConf(1, 2)
+		aeroCluster.Spec.RackConfig.RollingUpdateBatchSize = percent("100%")
+
+		err = k8sClient.Update(ctx, aeroCluster)
+		Expect(err).To(HaveOccurred())
+	})

})

Context("When doing namespace related operations", func() {
