From eb23ca77c7c7539c165649e4e4b0ab5ae43a0f1c Mon Sep 17 00:00:00 2001
From: sayalasomayajula-px
Date: Tue, 23 Jul 2024 13:09:57 +0530
Subject: [PATCH 1/2] [PTX-25288] Fix AUTFunctional testcase

---
 tests/basic/autopilot_test.go | 194 ++++++++++++++++++----------
 1 file changed, 101 insertions(+), 93 deletions(-)

diff --git a/tests/basic/autopilot_test.go b/tests/basic/autopilot_test.go
index 2390f0021..1f4556d46 100644
--- a/tests/basic/autopilot_test.go
+++ b/tests/basic/autopilot_test.go
@@ -1931,85 +1931,17 @@ var _ = Describe(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), func() {
 		StartTorpedoTest(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), "Perform several autopilot functional tests", nil, testrailID)
 		runID = testrailuttils.AddRunsToMilestone(testrailID)
 	})
-
-	It("has to run rebalance and resize pools, validate rebalance, validate pools and teardown apps", func() {
-		var contexts []*scheduler.Context
-		testName := strings.ToLower(fmt.Sprintf("%srebalance", testSuiteName))
-		poolLabel := map[string]string{"autopilot": "resizedisk"}
-		storageNodes := node.GetStorageNodes()
-		// check if we have enough storage nodes to run the test
-		Expect(len(storageNodes)).Should(BeNumerically(">=", 4))
-
-		apRules := []apapi.AutopilotRule{
-			aututils.PoolRuleRebalanceByProvisionedMean([]string{"-10", "15"}, false),
-			aututils.PoolRuleByTotalSize((getTotalPoolSize(storageNodes[0])*120/100)/units.GiB, 50, aututils.RuleScaleTypeResizeDisk, poolLabel),
-			aututils.PoolRuleByTotalSize((getTotalPoolSize(storageNodes[1])*120/100)/units.GiB, 50, aututils.RuleScaleTypeResizeDisk, poolLabel),
-			aututils.PoolRuleByTotalSize((getTotalPoolSize(storageNodes[2])*120/100)/units.GiB, 50, aututils.RuleScaleTypeResizeDisk, poolLabel),
-		}
-
-		for i := range apRules {
-			apRules[i].Spec.ActionsCoolDownPeriod = int64(60)
-		}
-
-		storageNodeIds := []string{}
-		// take first 3 (default replicaset for volumes is 3) storage node IDs, label and schedule volumes onto them
-		for _, n := range storageNodes[0:3] {
-			for k, v := range poolLabel {
-				Inst().S.AddLabelOnNode(n, k, v)
-			}
-			storageNodeIds = append(storageNodeIds, n.Id)
-		}
-
-		numberOfVolumes := 3
-		// 0.35 value is the 35% of total provisioned size which will trigger rebalance for above autopilot rule
-		volumeSize := getVolumeSizeByProvisionedPercentage(storageNodes[0], numberOfVolumes, 0.35)
-
-		Step("schedule apps with autopilot rules", func() {
-			contexts = scheduleAppsWithAutopilot(testName, numberOfVolumes, apRules,
-				scheduler.ScheduleOptions{PvcNodesAnnotation: storageNodeIds, PvcSize: volumeSize})
-		})
-
-		Step("validate rebalance jobs", func() {
-			apRule := apRules[0]
-
-			err := aututils.WaitForAutopilotEvent(apRule, "", []string{aututils.AnyToTriggeredEvent})
-			Expect(err).NotTo(HaveOccurred())
-
-			err = Inst().V.ValidateRebalanceJobs()
-			Expect(err).NotTo(HaveOccurred())
-
-			err = aututils.WaitForAutopilotEvent(apRule, "", []string{aututils.ActiveActionTakenToAny})
-			Expect(err).NotTo(HaveOccurred())
-		})
-
-		Step("validating and verifying size of storage pools", func() {
-			ValidateStoragePools(contexts)
-		})
-
-		Step("destroy apps", func() {
-			opts := make(map[string]bool)
-			opts[scheduler.OptionsWaitForResourceLeakCleanup] = true
-			for _, ctx := range contexts {
-				TearDownContext(ctx, opts)
-			}
-			for _, apRule := range apRules {
-				Inst().S.DeleteAutopilotRule(apRule.Name)
-			}
-			for _, storageNode := range storageNodes {
-				for key := range poolLabel {
-					Inst().S.RemoveLabelOnNode(storageNode, key)
-				}
-			}
-		})
-	})
-
-	It("has to fill up 100 volumes completely, resize the volumes, validate and teardown apps", func() {
+	itLog := "has to fill up 100 volumes completely, resize the volumes, validate and teardown apps"
+	It(itLog, func() {
+		log.InfoD(itLog)
 		var contexts []*scheduler.Context
 		var scaleFactor = 100
 		var appName = "aut-vol-only"
 		testName := strings.ToLower(fmt.Sprintf("%sPvcBasicInScale", testSuiteName))
-		Step("schedule applications", func() {
+		stepLog = "schedule applications"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			for i := 0; i < scaleFactor; i++ {
 				id := 0
 				apRule := aututils.PVCRuleByTotalSize(20, 50, "18Gi")
@@ -2030,21 +1962,24 @@ var _ = Describe(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), func() {
 				contexts = append(contexts, context...)
 			}
 		})
-
-		Step("wait until workload completes on volume", func() {
+		stepLog = "wait until workload completes on volume"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			for _, ctx := range contexts {
 				err := Inst().S.WaitForRunning(ctx, workloadTimeout, retryInterval)
 				Expect(err).NotTo(HaveOccurred())
 			}
 		})
-
-		Step("validating volumes and verifying size of volumes", func() {
+		stepLog = "validating volumes and verifying size of volumes"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			for _, ctx := range contexts {
 				ValidateVolumes(ctx)
 			}
 		})
-
-		Step("destroy apps", func() {
+		stepLog = "destroy apps"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			opts := make(map[string]bool)
 			opts[scheduler.OptionsWaitForResourceLeakCleanup] = true
 			for _, ctx := range contexts {
@@ -2052,14 +1987,15 @@ var _ = Describe(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), func() {
 			}
 		})
 	})
-
 	It("has to fill up the sharedv4 volume completely, resize the volume, validate and teardown apps", func() {
+		log.InfoD("has to fill up the sharedv4 volume completely, resize the volume, validate and teardown apps")
 		var contexts []*scheduler.Context
 		testName := strings.ToLower(fmt.Sprintf("%sPvcBasic", testSuiteName))
 		var scaleFactor = 2
 		var appName = "aut-postgres-sharedv4"
-
-		Step("schedule applications", func() {
+		stepLog = "schedule applications"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			for i := 0; i < scaleFactor; i++ {
 				for id, apRule := range autopilotruleBasicTestCases {
 					taskName := fmt.Sprintf("%s-%d-aprule%d", testName, i, id)
@@ -2080,31 +2016,36 @@ var _ = Describe(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), func() {
 				}
 			}
 		})
-
-		Step("wait until workload completes on volume", func() {
+		stepLog = "wait until workload completes on volume"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			for _, ctx := range contexts {
 				err := Inst().S.WaitForRunning(ctx, workloadTimeout, retryInterval)
 				Expect(err).NotTo(HaveOccurred())
 			}
 		})
-
-		Step("validating volumes and verifying size of volumes", func() {
+		stepLog = "validating volumes and verifying size of volumes"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			for _, ctx := range contexts {
 				ValidateVolumes(ctx)
 			}
 		})
-
-		Step(fmt.Sprintf("wait for unscheduled resize of volume (%s)", unscheduledResizeTimeout), func() {
+		stepLog = fmt.Sprintf("wait for unscheduled resize of volume (%s)", unscheduledResizeTimeout)
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			time.Sleep(unscheduledResizeTimeout)
 		})
-
-		Step("validating volumes and verifying size of volumes", func() {
+		stepLog = "validating volumes and verifying size of volumes"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			for _, ctx := range contexts {
 				ValidateVolumes(ctx)
 			}
 		})
-
-		Step("destroy apps", func() {
+		stepLog = "destroy apps"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			opts := make(map[string]bool)
 			opts[scheduler.OptionsWaitForResourceLeakCleanup] = true
 			for _, ctx := range contexts {
@@ -2112,7 +2053,74 @@ var _ = Describe(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), func() {
 			}
 		})
 	})
+	It("has to run rebalance and resize pools, validate rebalance, validate pools and teardown apps", func() {
+		log.InfoD("has to run rebalance and resize pools, validate rebalance, validate pools and teardown apps")
+		var contexts []*scheduler.Context
+		testName := strings.ToLower(fmt.Sprintf("%srebalance", testSuiteName))
+		poolLabel := map[string]string{"autopilot": "resizedisk"}
+		storageNodes := node.GetStorageNodes()
+		// check if we have enough storage nodes to run the test
+		Expect(len(storageNodes)).Should(BeNumerically(">=", 4))
+
+		apRules := []apapi.AutopilotRule{
+			aututils.PoolRuleRebalanceByProvisionedMean([]string{"-10", "15"}, false),
+			aututils.PoolRuleByTotalSize((getTotalPoolSize(storageNodes[0])*120/100)/units.GiB, 30, aututils.RuleScaleTypeResizeDisk, poolLabel),
+		}
+		storageNodeIds := []string{}
+		for _, n := range storageNodes {
+			for k, v := range poolLabel {
+				Inst().S.AddLabelOnNode(n, k, v)
+			}
+			storageNodeIds = append(storageNodeIds, n.Id)
+		}
+
+		numberOfVolumes := 10
+		// 0.35 value is the 35% of total provisioned size which will trigger rebalance for above autopilot rule
+		volumeSize := getVolumeSizeByProvisionedPercentage(storageNodes[0], numberOfVolumes, 0.35)
+
+		stepLog := "schedule apps with autopilot rules"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
+			contexts = scheduleAppsWithAutopilot(testName, numberOfVolumes, apRules,
+				scheduler.ScheduleOptions{PvcNodesAnnotation: storageNodeIds, PvcSize: volumeSize})
+		})
+		stepLog = "validate rebalance jobs"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
+			apRule := apRules[0]
+
+			err := aututils.WaitForAutopilotEvent(apRule, "", []string{aututils.AnyToTriggeredEvent})
+			Expect(err).NotTo(HaveOccurred())
+
+			err = Inst().V.ValidateRebalanceJobs()
+			Expect(err).NotTo(HaveOccurred())
+
+			err = aututils.WaitForAutopilotEvent(apRule, "", []string{aututils.ActiveActionTakenToAny})
+			Expect(err).NotTo(HaveOccurred())
+		})
+		stepLog = "validating and verifying size of storage pools"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
+			ValidateStoragePools(contexts)
+		})
+		stepLog = "destroy apps"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
+			opts := make(map[string]bool)
+			opts[scheduler.OptionsWaitForResourceLeakCleanup] = true
+			for _, ctx := range contexts {
+				TearDownContext(ctx, opts)
+			}
+			for _, apRule := range apRules {
+				Inst().S.DeleteAutopilotRule(apRule.Name)
+			}
+			for _, storageNode := range storageNodes {
+				for key := range poolLabel {
+					Inst().S.RemoveLabelOnNode(storageNode, key)
+				}
+			}
+		})
+	})
 	JustAfterEach(func() {
 		defer EndTorpedoTest()
 		AfterEachTest(contexts, testrailID, runID)

From 4f2f8fdfaa50738b23ff531974bc96acc7e42118 Mon Sep 17 00:00:00 2001
From: sayalasomayajula-px
Date: Mon, 5 Aug 2024 08:59:37 +0530
Subject: [PATCH 2/2] minor changes

---
 tests/basic/autopilot_test.go | 142 ++++++++++++++++++----------
 1 file changed, 74 insertions(+), 68 deletions(-)

diff --git a/tests/basic/autopilot_test.go b/tests/basic/autopilot_test.go
index 1f4556d46..eb44f63f2 100644
--- a/tests/basic/autopilot_test.go
+++ b/tests/basic/autopilot_test.go
@@ -1931,6 +1931,80 @@ var _ = Describe(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), func() {
 		StartTorpedoTest(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), "Perform several autopilot functional tests", nil, testrailID)
 		runID = testrailuttils.AddRunsToMilestone(testrailID)
 	})
+	It("has to run rebalance and resize pools, validate rebalance, validate pools and teardown apps", func() {
+		var contexts []*scheduler.Context
+		var wg sync.WaitGroup
+		poolLabel := map[string]string{"autopilot": "resizedisk"}
+		storageNodes := node.GetStorageNodes()
+		// check if we have enough storage nodes to run the test
+		Expect(len(storageNodes)).Should(BeNumerically(">=", 4))
+
+		apRules := []apapi.AutopilotRule{
+			aututils.PoolRuleRebalanceByProvisionedMean([]string{"-10", "10"}, false),
+			aututils.PoolRuleByTotalSize((getTotalPoolSize(storageNodes[0])*120/100)/units.GiB, 50, aututils.RuleScaleTypeResizeDisk, poolLabel),
+		}
+
+		for i := range apRules {
+			apRules[i].Spec.ActionsCoolDownPeriod = int64(60)
+		}
+		storageNodeIds := []string{}
+		// take first 3 (default replicaset for volumes is 3) storage node IDs, label and schedule volumes onto them
+		for _, n := range storageNodes[0:3] {
+			for k, v := range poolLabel {
+				Inst().S.AddLabelOnNode(n, k, v)
+			}
+			storageNodeIds = append(storageNodeIds, n.Id)
+		}
+
+		numberOfVolumes := 3
+		// 0.35 sizes the volumes at 35% of the total provisioned size, which will trigger rebalance for the above autopilot rule
+		volumeSize := getVolumeSizeByProvisionedPercentage(storageNodes[0], numberOfVolumes, 0.35)
+		testName := strings.ToLower(fmt.Sprintf("%srebalance", testSuiteName))
+		Step("schedule apps with autopilot rules", func() {
+			contexts = scheduleAppsWithAutopilot(testName, numberOfVolumes, apRules,
+				scheduler.ScheduleOptions{PvcNodesAnnotation: storageNodeIds, PvcSize: volumeSize})
+		})
+		wg.Add(2)
+		go func() {
+			defer wg.Done()
+			stepLog := "validating and verifying size of storage pools"
+			Step(stepLog, func() {
+				log.InfoD(stepLog)
+				ValidateStoragePools(contexts)
+			})
+		}()
+		go func() {
+			defer wg.Done()
+			stepLog := "validate rebalance jobs"
+			Step(stepLog, func() {
+				log.InfoD(stepLog)
+				err := Inst().S.WaitForRebalanceAROToComplete()
+				Expect(err).NotTo(HaveOccurred())
+				log.InfoD("rebalance completed")
+				err = Inst().V.ValidateRebalanceJobs()
+				Expect(err).NotTo(HaveOccurred())
+				log.InfoD("validated rebalance jobs")
+			})
+		}()
+		wg.Wait()
+		stepLog = "destroy apps"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
+			opts := make(map[string]bool)
+			opts[scheduler.OptionsWaitForResourceLeakCleanup] = true
+			for _, ctx := range contexts {
+				TearDownContext(ctx, opts)
+			}
+			for _, apRule := range apRules {
+				Inst().S.DeleteAutopilotRule(apRule.Name)
+			}
+			for _, storageNode := range storageNodes {
+				for key := range poolLabel {
+					Inst().S.RemoveLabelOnNode(storageNode, key)
+				}
+			}
+		})
+	})
 	itLog := "has to fill up 100 volumes completely, resize the volumes, validate and teardown apps"
 	It(itLog, func() {
 		log.InfoD(itLog)
@@ -2053,74 +2127,6 @@ var _ = Describe(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), func() {
 			}
 		})
 	})
-	It("has to run rebalance and resize pools, validate rebalance, validate pools and teardown apps", func() {
-		log.InfoD("has to run rebalance and resize pools, validate rebalance, validate pools and teardown apps")
-		var contexts []*scheduler.Context
-		testName := strings.ToLower(fmt.Sprintf("%srebalance", testSuiteName))
-		poolLabel := map[string]string{"autopilot": "resizedisk"}
-		storageNodes := node.GetStorageNodes()
-		// check if we have enough storage nodes to run the test
-		Expect(len(storageNodes)).Should(BeNumerically(">=", 4))
-
-		apRules := []apapi.AutopilotRule{
-			aututils.PoolRuleRebalanceByProvisionedMean([]string{"-10", "15"}, false),
-			aututils.PoolRuleByTotalSize((getTotalPoolSize(storageNodes[0])*120/100)/units.GiB, 30, aututils.RuleScaleTypeResizeDisk, poolLabel),
-		}
-		storageNodeIds := []string{}
-		for _, n := range storageNodes {
-			for k, v := range poolLabel {
-				Inst().S.AddLabelOnNode(n, k, v)
-			}
-			storageNodeIds = append(storageNodeIds, n.Id)
-		}
-
-		numberOfVolumes := 10
-		// 0.35 value is the 35% of total provisioned size which will trigger rebalance for above autopilot rule
-		volumeSize := getVolumeSizeByProvisionedPercentage(storageNodes[0], numberOfVolumes, 0.35)
-
-		stepLog := "schedule apps with autopilot rules"
-		Step(stepLog, func() {
-			log.InfoD(stepLog)
-			contexts = scheduleAppsWithAutopilot(testName, numberOfVolumes, apRules,
-				scheduler.ScheduleOptions{PvcNodesAnnotation: storageNodeIds, PvcSize: volumeSize})
-		})
-		stepLog = "validate rebalance jobs"
-		Step(stepLog, func() {
-			log.InfoD(stepLog)
-			apRule := apRules[0]
-
-			err := aututils.WaitForAutopilotEvent(apRule, "", []string{aututils.AnyToTriggeredEvent})
-			Expect(err).NotTo(HaveOccurred())
-
-			err = Inst().V.ValidateRebalanceJobs()
-			Expect(err).NotTo(HaveOccurred())
-
-			err = aututils.WaitForAutopilotEvent(apRule, "", []string{aututils.ActiveActionTakenToAny})
-			Expect(err).NotTo(HaveOccurred())
-		})
-		stepLog = "validating and verifying size of storage pools"
-		Step(stepLog, func() {
-			log.InfoD(stepLog)
-			ValidateStoragePools(contexts)
-		})
-		stepLog = "destroy apps"
-		Step(stepLog, func() {
-			log.InfoD(stepLog)
-			opts := make(map[string]bool)
-			opts[scheduler.OptionsWaitForResourceLeakCleanup] = true
-			for _, ctx := range contexts {
-				TearDownContext(ctx, opts)
-			}
-			for _, apRule := range apRules {
-				Inst().S.DeleteAutopilotRule(apRule.Name)
-			}
-			for _, storageNode := range storageNodes {
-				for key := range poolLabel {
-					Inst().S.RemoveLabelOnNode(storageNode, key)
-				}
-			}
-		})
-	})
 	JustAfterEach(func() {
 		defer EndTorpedoTest()
 		AfterEachTest(contexts, testrailID, runID)
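
Note on the pattern introduced by PATCH 2/2: the pool-size validation and the rebalance validation, previously sequential Steps, now run concurrently under a sync.WaitGroup, and teardown only begins once both goroutines have finished. A minimal, self-contained Go sketch of that fan-out/fan-in pattern follows; validatePools and validateRebalance are hypothetical stand-ins for the torpedo helpers (ValidateStoragePools, WaitForRebalanceAROToComplete, and Inst().V.ValidateRebalanceJobs), not real APIs.

package main

import (
	"fmt"
	"sync"
	"time"
)

// validatePools stands in for polling storage pools until they reach the expected size.
func validatePools() error {
	time.Sleep(100 * time.Millisecond) // simulate waiting on pool resize
	return nil
}

// validateRebalance stands in for waiting on the rebalance action and checking its jobs.
func validateRebalance() error {
	time.Sleep(150 * time.Millisecond) // simulate waiting on rebalance completion
	return nil
}

func main() {
	var wg sync.WaitGroup
	errs := make(chan error, 2) // buffered so neither goroutine blocks on send

	wg.Add(2)
	go func() {
		defer wg.Done()
		errs <- validatePools()
	}()
	go func() {
		defer wg.Done()
		errs <- validateRebalance()
	}()

	// Fan-in: teardown must not start until both validations have finished.
	wg.Wait()
	close(errs)
	for err := range errs {
		if err != nil {
			fmt.Println("validation failed:", err)
			return
		}
	}
	fmt.Println("both validations passed; safe to tear down apps")
}

In the actual test, Ginkgo's Expect calls inside each goroutine play the role of the error channel; collecting errors and asserting after wg.Wait() is an alternative that keeps assertions on the main goroutine.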