diff --git a/drivers/volume/common.go b/drivers/volume/common.go
index 44264d861..592e49aac 100644
--- a/drivers/volume/common.go
+++ b/drivers/volume/common.go
@@ -1055,6 +1055,14 @@ func (d *DefaultDriver) GetTrashCanVolumeIds(n node.Node) ([]string, error) {
 	}
 }
 
+// GetTrashCanVolumeNames returns the volume names in the trashcan and an error if any
+func (d *DefaultDriver) GetTrashCanVolumeNames(n node.Node) ([]string, error) {
+	return nil, &errors.ErrNotSupported{
+		Type:      "Function",
+		Operation: "GetTrashCanVolumeNames()",
+	}
+}
+
 // IsPureFileVolume returns true if volume is FB volumes else false
 func (d *DefaultDriver) IsPureFileVolume(volume *Volume) (bool, error) {
 	return false, &errors.ErrNotSupported{
diff --git a/drivers/volume/portworx/portworx.go b/drivers/volume/portworx/portworx.go
index 725825248..cfccd3a93 100644
--- a/drivers/volume/portworx/portworx.go
+++ b/drivers/volume/portworx/portworx.go
@@ -5659,6 +5659,69 @@ func (d *portworx) GetTrashCanVolumeIds(n node.Node) ([]string, error) {
 	return trashcanVols, nil
 }
 
+// GetTrashCanVolumeNames returns the list of volume names present in the trashcan
+func (d *portworx) GetTrashCanVolumeNames(n node.Node) ([]string, error) {
+	opts := node.ConnectionOpts{
+		IgnoreError:     false,
+		TimeBeforeRetry: defaultRetryInterval,
+		Timeout:         defaultTimeout,
+	}
+
+	pxctlPath := d.getPxctlPath(n)
+
+	// Create context
+	if len(d.token) > 0 {
+		_, err := d.nodeDriver.RunCommand(n, fmt.Sprintf("%s context create admin --token=%s", pxctlPath, d.token), opts)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create pxctl context. cause: %v", err)
+		}
+	}
+
+	out, err := d.nodeDriver.RunCommand(n, fmt.Sprintf("%s v l --trashcan -j", pxctlPath), opts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to list trashcan volumes. cause: %v", err)
+	}
+	log.Info(out)
+
+	var data interface{}
+	err = json.Unmarshal([]byte(out), &data)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal trashcan volume list. cause: %v", err)
+	}
+
+	// Delete context
+	if len(d.token) > 0 {
+		_, err := d.nodeDriver.RunCommand(n, fmt.Sprintf("%s context delete admin", pxctlPath), opts)
+		if err != nil {
+			return nil, fmt.Errorf("failed to delete pxctl context. cause: %v", err)
+		}
+	}
+
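+	// Parse the JSON volume list. Each entry is expected to be an object with
+	// an "id" field and a "locator" map carrying the volume name, e.g.
+	// [{"id":"1089...","locator":{"name":"pvc-..."},...}] (abridged; the exact
+	// schema can vary across PX versions), so both shapes are asserted below.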
cause: %v", err) + } + } + + res := data.([]interface{}) + + trashcanVols := make([]string, 50) + + for _, v := range res { + var tp map[string]interface{} = v.(map[string]interface{}) + str := fmt.Sprintf("%v", tp["id"]) + // Access the "locator" field and then the "name" field within it + if locator, ok := tp["locator"].(map[string]interface{}); ok { + name := fmt.Sprintf("%v", locator["name"]) + trashcanVols = append(trashcanVols, strings.Trim(str, " ")) + fmt.Println("Name:", name) + } else { + + fmt.Println("Locator field not found or not a map") + return nil, err + } + } + + log.Infof("trash vols: %v", trashcanVols) + + return trashcanVols, nil + +} + // GetNodePureVolumeAttachedCountMap return Map of nodeName and number of pure volume attached on that node func (d *portworx) GetNodePureVolumeAttachedCountMap() (map[string]int, error) { // nodePureVolAttachedCountMap maintains count of attached volume @@ -6316,4 +6379,4 @@ func (d *portworx) UpdateSkinnySnapReplNum(repl string) error { break } return nil -} \ No newline at end of file +} diff --git a/drivers/volume/volume.go b/drivers/volume/volume.go index edf295a95..532defc03 100644 --- a/drivers/volume/volume.go +++ b/drivers/volume/volume.go @@ -458,9 +458,12 @@ type Driver interface { // GetNodeStats returns the node stats of the given node and an error if any GetNodeStats(n node.Node) (map[string]map[string]int, error) - // GetTrashCanVolumeIds returns the node stats of the given node and an error if any + // GetTrashCanVolumeIds returns list of volume ids present in the trashcan GetTrashCanVolumeIds(n node.Node) ([]string, error) + // GetTrashCanVolumeName returns the list of volume names present in trashcan + GetTrashCanVolumeNames(n node.Node) ([]string, error) + //GetKvdbMembers returns KVDB memebers of the PX cluster GetKvdbMembers(n node.Node) (map[string]*MetadataNode, error) diff --git a/tests/basic/pure_test.go b/tests/basic/pure_test.go index 4c5d0281a..84caa723c 100644 --- a/tests/basic/pure_test.go +++ b/tests/basic/pure_test.go @@ -2450,7 +2450,7 @@ var _ = Describe("{VolAttachFAPxRestart}", func() { // select a random node to run the test n := node.GetStorageDriverNodes()[0] - stepLog := "get the secrete of FA which is not present in pure secret" + stepLog := "get the secret of FA which is not present in pure secret" Step(stepLog, func() { log.InfoD(stepLog) //get the flash array details @@ -4575,9 +4575,10 @@ var _ = Describe("{CreateAndValidatePVCWithIopsAndBandwidth}", func() { }) //GetVolumeNameFromPvc will collect volume name from pvc which indirect will be the px volume name and this name is suffix to the volumes created in FA backend - GetVolumeNameFromPvc := func(namespace string, pvclist []string) []string { + GetVolumeNameFromPvc := func(namespace string) []string { + pvclist := make([]string, 0) allPvcList, err := core.Instance().GetPersistentVolumeClaims(namespace, nil) - log.FailOnError(err, fmt.Sprintf("error getting pvcs from namespace [%s]", FadaAppNameSpace)) + log.FailOnError(err, fmt.Sprintf("error getting pvcs from namespace [%s]", namespace)) for _, p := range allPvcList.Items { pvclist = append(pvclist, p.Spec.VolumeName) } @@ -4586,8 +4587,8 @@ var _ = Describe("{CreateAndValidatePVCWithIopsAndBandwidth}", func() { log.InfoD("waiting for a minute for volume name to populate") time.Sleep(1 * time.Minute) //collect volumes names which are required to find out the volumes in FA and FB backend - listofFadaPvc = GetVolumeNameFromPvc(FadaAppNameSpace, listofFadaPvc) - listofFbdaPvc = 
+		stepLog := "Enable trashcan"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
+			//Enable trashcan in the cluster
+			err = Inst().V.SetClusterOptsWithConfirmation(node.GetStorageNodes()[0], map[string]string{
+				"--volume-expiration-minutes": "600",
+			})
+			log.FailOnError(err, "error while enabling trashcan")
+			log.InfoD("Trashcan is successfully enabled")
+		})
+
+		stepLog = "Schedule applications"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
+			for i := 0; i < Inst().GlobalScaleFactor; i++ {
+				ns := fmt.Sprintf("enabletrashcan-%d", i)
+				contexts = append(contexts, ScheduleApplications(ns)...)
+			}
+		})
+		ValidateApplications(contexts)
+
+		// before destroying the apps, collect their volume names
+		var volumeList []string
+		for _, ctx := range contexts {
+			vol := GetVolumeNameFromPvc(ctx.App.NameSpace)
+			volumeList = append(volumeList, vol...)
+		}
+
+		stepLog = "Destroy the apps; their volumes should not be placed in the trashcan"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
+			DestroyApps(contexts, nil)
+		})
+
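+		// Note: these are Pure Direct Access (FADA/FBDA) volumes, which this
+		// test expects to bypass the trashcan, i.e. deleting them removes the
+		// backing volume from the FlashArray/FlashBlade even with the trashcan
+		// enabled.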
+		stepLog = "Check that the volumes are not placed in the trashcan and are deleted from the FA"
+		Step(stepLog, func() {
+			log.InfoD(stepLog)
+			//Get pure secrets
+			volDriverNamespace, err := Inst().V.GetVolumeDriverNamespace()
+			log.FailOnError(err, "failed to get volume driver [%s] namespace", Inst().V.String())
+
+			pxPureSecret, err := pureutils.GetPXPureSecret(volDriverNamespace)
+			log.FailOnError(err, "failed to get px pure secret from namespace [%s]", volDriverNamespace)
+			flashArraysInSecret := pxPureSecret.Arrays
+			flashBladesInSecret := pxPureSecret.Blades
+
+			//Check that none of the destroyed volumes are in the trashcan
+			volumeNamesInTrashCan, err := Inst().V.GetTrashCanVolumeNames(node.GetStorageDriverNodes()[0])
+			log.FailOnError(err, "Failed to get volume names in trashcan")
+
+			for _, vol := range volumeNamesInTrashCan {
+				if len(vol) == 0 {
+					continue
+				}
+				for _, volName := range volumeList {
+					if strings.Contains(volName, vol) {
+						log.FailOnError(fmt.Errorf("volume [%v] is placed in trashcan", volName), "Volume is placed in trashcan")
+					}
+				}
+			}
+
+			for _, volName := range volumeList {
+				pureVolName := getPureVolName(volName)
+
+				exists, err := CheckIfVolumeExistsInFBorFA(flashBladesInSecret, flashArraysInSecret, pureVolName)
+				log.FailOnError(err, "Failed to check if volume exists in FB or FA")
+				dash.VerifyFatal(exists, false, fmt.Sprintf("Volume [%v] present in backend?", pureVolName))
+			}
+		})
+	})
+
+	JustAfterEach(func() {
+		defer EndTorpedoTest()
+		AfterEachTest(contexts)
+	})
+})
diff --git a/tests/basic/storage_pool_test.go b/tests/basic/storage_pool_test.go
index 3d0133cbd..1bf1bc36e 100644
--- a/tests/basic/storage_pool_test.go
+++ b/tests/basic/storage_pool_test.go
@@ -8662,6 +8662,12 @@ var _ = Describe("{DriveAddAsJournal}", func() {
 	It(stepLog, func() {
 		log.InfoD(stepLog)
 
+		isDmthin, err := IsDMthin()
+		log.FailOnError(err, "Error while checking cluster type")
+		if isDmthin {
+			Skip("Adding a drive as a journal device is not supported on DMThin")
+		}
+
 		contexts = make([]*scheduler.Context, 0)
 		for i := 0; i < Inst().GlobalScaleFactor; i++ {
 			contexts = append(contexts, ScheduleApplications(fmt.Sprintf("adddriveasjournal-%d", i))...)
@@ -11754,4 +11760,4 @@ var _ = Describe("{PoolDeleteMultiplePools}", func() {
 		AfterEachTest(contexts)
 	})
 
-})
\ No newline at end of file
+})
diff --git a/tests/common.go b/tests/common.go
index accd1c858..c80a9c878 100644
--- a/tests/common.go
+++ b/tests/common.go
@@ -13387,6 +13387,45 @@ func CheckVolumesExistinFB(flashBlades []pureutils.FlashBladeEntry, listofFbdaPv
 	}
 	return nil
 }
+
+// CheckIfVolumeExistsInFBorFA checks if the given volume exists on any of the configured FlashBlades or FlashArrays.
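+// It returns (true, nil) as soon as the volume is found on any backend, and
+// (false, nil) when it is found on none of them or when the cluster is not a
+// Pure cluster; the first backend lookup that fails returns (false, err).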
+func CheckIfVolumeExistsInFBorFA(flashBlades []pureutils.FlashBladeEntry, flashArrays []pureutils.FlashArrayEntry, volume string) (bool, error) {
+	if IsPureCluster() {
+		log.InfoD("checking if volume [%v] exists in FB or FA", volume)
+		for _, fb := range flashBlades {
+			fbClient, err := pureutils.PureCreateFbClientAndConnect(fb.MgmtEndPoint, fb.APIToken)
+			if err != nil {
+				return false, err
+			}
+			fsFullName, err := pureutils.GetFilesystemFullName(fbClient, volume)
+			if err != nil {
+				return false, fmt.Errorf("failed to get filesystem name for volume [%v] on FB [%v]: %v", volume, fb.MgmtEndPoint, err)
+			}
+			if fsFullName != "" {
+				log.Infof("Volume [%v] exists on FB [%v]", volume, fb.MgmtEndPoint)
+				return true, nil
+			}
+		}
+		for _, fa := range flashArrays {
+			faClient, err := pureutils.PureCreateClientAndConnect(fa.MgmtEndPoint, fa.APIToken)
+			if err != nil {
+				return false, err
+			}
+			volName, err := GetVolumeCompleteNameOnFA(faClient, volume)
+			if err != nil {
+				return false, err
+			}
+			if volName != "" {
+				log.Infof("Volume [%v] exists on FA [%v]", volName, fa.MgmtEndPoint)
+				return true, nil
+			}
+		}
+	}
+	return false, nil
+}
+
 func CheckIopsandBandwidthinFA(flashArrays []pureutils.FlashArrayEntry, listofFadaPvc []string, reqBandwidth uint64, reqIops uint64) error {
 	pvcFadaMap := make(map[string]bool)
 	for _, volumeName := range listofFadaPvc {
@@ -13686,4 +13725,4 @@ func GetVolumeNamefromPVC(namespace string) ([]string, error) {
 		return pvclist, nil
 	}
 	return nil, fmt.Errorf("No PVCs found in namespace [%s]", namespace)
-}
\ No newline at end of file
+}