diff --git a/tests/basic/autopilot_test.go b/tests/basic/autopilot_test.go
index 2390f0021..eb44f63f2 100644
--- a/tests/basic/autopilot_test.go
+++ b/tests/basic/autopilot_test.go
@@ -1931,26 +1931,22 @@ var _ = Describe(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), func() {
 		StartTorpedoTest(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), "Perform several autopilot functional tests", nil, testrailID)
 		runID = testrailuttils.AddRunsToMilestone(testrailID)
 	})
-
 	It("has to run rebalance and resize pools, validate rebalance, validate pools and teardown apps", func() {
 		var contexts []*scheduler.Context
-		testName := strings.ToLower(fmt.Sprintf("%srebalance", testSuiteName))
+		var wg sync.WaitGroup
 		poolLabel := map[string]string{"autopilot": "resizedisk"}
 		storageNodes := node.GetStorageNodes()
 		// check if we have enough storage nodes to run the test
 		Expect(len(storageNodes)).Should(BeNumerically(">=", 4))
 		apRules := []apapi.AutopilotRule{
-			aututils.PoolRuleRebalanceByProvisionedMean([]string{"-10", "15"}, false),
+			aututils.PoolRuleRebalanceByProvisionedMean([]string{"-10", "10"}, false),
 			aututils.PoolRuleByTotalSize((getTotalPoolSize(storageNodes[0])*120/100)/units.GiB, 50, aututils.RuleScaleTypeResizeDisk, poolLabel),
-			aututils.PoolRuleByTotalSize((getTotalPoolSize(storageNodes[1])*120/100)/units.GiB, 50, aututils.RuleScaleTypeResizeDisk, poolLabel),
-			aututils.PoolRuleByTotalSize((getTotalPoolSize(storageNodes[2])*120/100)/units.GiB, 50, aututils.RuleScaleTypeResizeDisk, poolLabel),
 		}
 		for i := range apRules {
 			apRules[i].Spec.ActionsCoolDownPeriod = int64(60)
 		}
-
 		storageNodeIds := []string{}
 		// take first 3 (default replicaset for volumes is 3) storage node IDs, label and schedule volumes onto them
 		for _, n := range storageNodes[0:3] {
@@ -1963,30 +1959,39 @@ var _ = Describe(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), func() {
 		numberOfVolumes := 3
 		// 0.35 value is the 35% of total provisioned size which will trigger rebalance for above autopilot rule
 		volumeSize := getVolumeSizeByProvisionedPercentage(storageNodes[0], numberOfVolumes, 0.35)
-
+		testName := strings.ToLower(fmt.Sprintf("%srebalance", testSuiteName))
 		Step("schedule apps with autopilot rules", func() {
 			contexts = scheduleAppsWithAutopilot(testName, numberOfVolumes, apRules, scheduler.ScheduleOptions{PvcNodesAnnotation: storageNodeIds, PvcSize: volumeSize})
 		})
-
-		Step("validate rebalance jobs", func() {
-			apRule := apRules[0]
-
-			err := aututils.WaitForAutopilotEvent(apRule, "", []string{aututils.AnyToTriggeredEvent})
-			Expect(err).NotTo(HaveOccurred())
-
-			err = Inst().V.ValidateRebalanceJobs()
-			Expect(err).NotTo(HaveOccurred())
-
-			err = aututils.WaitForAutopilotEvent(apRule, "", []string{aututils.ActiveActionTakenToAny})
-			Expect(err).NotTo(HaveOccurred())
-		})
-
-		Step("validating and verifying size of storage pools", func() {
-			ValidateStoragePools(contexts)
-		})
-
-		Step("destroy apps", func() {
+		wg.Add(2)
+		go func() {
+			defer GinkgoRecover() // assertions in a goroutine must be guarded by GinkgoRecover
+			defer wg.Done()
+			stepLog := "validating and verifying size of storage pools"
+			Step(stepLog, func() {
+				log.InfoD(stepLog)
+				ValidateStoragePools(contexts)
+			})
+		}()
+		go func() {
+			defer GinkgoRecover() // assertions in a goroutine must be guarded by GinkgoRecover
+			defer wg.Done()
+			stepLog := "validate rebalance jobs"
+			Step(stepLog, func() {
+				log.InfoD(stepLog)
+				err := Inst().S.WaitForRebalanceAROToComplete()
+				Expect(err).NotTo(HaveOccurred())
+				log.InfoD("rebalance completed")
+				err = Inst().V.ValidateRebalanceJobs()
+				Expect(err).NotTo(HaveOccurred())
+				log.InfoD("validated rebalance jobs")
+			})
+		}()
+		wg.Wait()
+		stepLog = "destroy apps"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			opts := make(map[string]bool)
 			opts[scheduler.OptionsWaitForResourceLeakCleanup] = true
 			for _, ctx := range contexts {
@@ -2002,14 +2007,17 @@ var _ = Describe(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), func() {
 			}
 		})
 	})
-
-	It("has to fill up 100 volumes completely, resize the volumes, validate and teardown apps", func() {
+	itLog := "has to fill up 100 volumes completely, resize the volumes, validate and teardown apps"
+	It(itLog, func() {
+		log.InfoD(itLog)
 		var contexts []*scheduler.Context
 		var scaleFactor = 100
 		var appName = "aut-vol-only"
 		testName := strings.ToLower(fmt.Sprintf("%sPvcBasicInScale", testSuiteName))
-
-		Step("schedule applications", func() {
+		stepLog = "schedule applications"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			for i := 0; i < scaleFactor; i++ {
 				id := 0
 				apRule := aututils.PVCRuleByTotalSize(20, 50, "18Gi")
@@ -2030,21 +2038,24 @@ var _ = Describe(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), func() {
 				contexts = append(contexts, context...)
 			}
 		})
-
-		Step("wait until workload completes on volume", func() {
+		stepLog = "wait until workload completes on volume"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			for _, ctx := range contexts {
 				err := Inst().S.WaitForRunning(ctx, workloadTimeout, retryInterval)
 				Expect(err).NotTo(HaveOccurred())
 			}
 		})
-
-		Step("validating volumes and verifying size of volumes", func() {
+		stepLog = "validating volumes and verifying size of volumes"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			for _, ctx := range contexts {
 				ValidateVolumes(ctx)
 			}
 		})
-
-		Step("destroy apps", func() {
+		stepLog = "destroy apps"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			opts := make(map[string]bool)
 			opts[scheduler.OptionsWaitForResourceLeakCleanup] = true
 			for _, ctx := range contexts {
@@ -2052,14 +2063,17 @@ var _ = Describe(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), func() {
 			}
 		})
 	})
-	It("has to fill up the sharedv4 volume completely, resize the volume, validate and teardown apps", func() {
+	itLog = "has to fill up the sharedv4 volume completely, resize the volume, validate and teardown apps"
+	It(itLog, func() {
+		log.InfoD(itLog)
 		var contexts []*scheduler.Context
 		testName := strings.ToLower(fmt.Sprintf("%sPvcBasic", testSuiteName))
 		var scaleFactor = 2
 		var appName = "aut-postgres-sharedv4"
-
-		Step("schedule applications", func() {
+		stepLog = "schedule applications"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			for i := 0; i < scaleFactor; i++ {
 				for id, apRule := range autopilotruleBasicTestCases {
 					taskName := fmt.Sprintf("%s-%d-aprule%d", testName, i, id)
@@ -2080,31 +2094,36 @@ var _ = Describe(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), func() {
 				}
 			}
 		})
-
-		Step("wait until workload completes on volume", func() {
+		stepLog = "wait until workload completes on volume"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			for _, ctx := range contexts {
 				err := Inst().S.WaitForRunning(ctx, workloadTimeout, retryInterval)
 				Expect(err).NotTo(HaveOccurred())
 			}
 		})
-
-		Step("validating volumes and verifying size of volumes", func() {
+		stepLog = "validating volumes and verifying size of volumes"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			for _, ctx := range contexts {
 				ValidateVolumes(ctx)
 			}
 		})
-
-		Step(fmt.Sprintf("wait for unscheduled resize of volume (%s)", unscheduledResizeTimeout), func() {
+		stepLog = fmt.Sprintf("wait for unscheduled resize of volume (%s)", unscheduledResizeTimeout)
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			time.Sleep(unscheduledResizeTimeout)
 		})
-
-		Step("validating and verifying size of volumes", func() {
+		stepLog = "validating volumes and verifying size of volumes"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			for _, ctx := range contexts {
 				ValidateVolumes(ctx)
 			}
 		})
-
-		Step("destroy apps", func() {
+		stepLog = "destroy apps"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
 			opts := make(map[string]bool)
 			opts[scheduler.OptionsWaitForResourceLeakCleanup] = true
 			for _, ctx := range contexts {
@@ -2112,7 +2131,6 @@ var _ = Describe(fmt.Sprintf("{%sFunctionalTests}", testSuiteName), func() {
 			}
 		})
 	})
-
 	JustAfterEach(func() {
 		defer EndTorpedoTest()
 		AfterEachTest(contexts, testrailID, runID)