diff --git a/tests/backup/backup_basic_test.go b/tests/backup/backup_basic_test.go index de5a4415c..2827dca5a 100644 --- a/tests/backup/backup_basic_test.go +++ b/tests/backup/backup_basic_test.go @@ -48,9 +48,12 @@ func getGlobalBucketName(provider string) string { } func getGlobalLockedBucketName(provider string) string { - if provider == drivers.ProviderAws { + switch provider { + case drivers.ProviderAws: return GlobalAWSLockedBucketName - } else { + case drivers.ProviderAzure: + return GlobalAzureLockedBucketName + default: log.Errorf("environment variable [%s] not provided with valid values", "PROVIDERS") return "" } diff --git a/tests/backup/backup_delete_test.go b/tests/backup/backup_delete_test.go index b8499cbc7..545733387 100644 --- a/tests/backup/backup_delete_test.go +++ b/tests/backup/backup_delete_test.go @@ -580,7 +580,7 @@ var _ = Describe("{DeleteBucketVerifyCloudBackupMissing}", Label(TestCaseLabelsM err = DestroyAppsWithData(scheduledAppContexts, opts, controlChannel, errorGroup) log.FailOnError(err, "Data validations failed") for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) } log.Infof("Deleting backup schedule policy") diff --git a/tests/backup/backup_dr_test.go b/tests/backup/backup_dr_test.go index 23a704fab..c4467b910 100644 --- a/tests/backup/backup_dr_test.go +++ b/tests/backup/backup_dr_test.go @@ -338,7 +338,7 @@ var _ = Describe("{BackupAndRestoreSyncDR}", Label(TestCaseLabelsMap[BackupAndRe dash.VerifySafely(err, nil, fmt.Sprintf("Deleting restore [%s]", restoreName)) } for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) 
dash.VerifySafely(err, nil, fmt.Sprintf("Deleting schedule [%s]", scheduleName)) } for migrationName, migrationNamespace := range migrationNamespaceMap { diff --git a/tests/backup/backup_kdmp_test.go b/tests/backup/backup_kdmp_test.go index c2bb1a72a..da7e96c4a 100644 --- a/tests/backup/backup_kdmp_test.go +++ b/tests/backup/backup_kdmp_test.go @@ -740,7 +740,7 @@ var _ = Describe("{ExcludeDirectoryFileBackup}", Label(TestCaseLabelsMap[Exclude } for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting schedule [%s]", scheduleName)) } @@ -1392,7 +1392,7 @@ var _ = Describe("{ExcludeInvalidDirectoryFileBackup}", Label(TestCaseLabelsMap[ } for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting schedule [%s]", scheduleName)) } diff --git a/tests/backup/backup_kubevirt_test.go b/tests/backup/backup_kubevirt_test.go index 421a121e5..c1e9c3362 100644 --- a/tests/backup/backup_kubevirt_test.go +++ b/tests/backup/backup_kubevirt_test.go @@ -2506,7 +2506,7 @@ var _ = Describe("{DefaultBackupRestoreWithKubevirtAndNonKubevirtNS}", Label(Tes go func(scheduleName string) { defer GinkgoRecover() defer wg.Done() - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting backup schedules [%s]", scheduleName)) if err != nil { mutex.Lock() @@ -2811,7 +2811,7 @@ var _ = Describe("{KubevirtScheduledVMDelete}", Label(TestCaseLabelsMap[Kubevirt opts := make(map[string]bool) opts[SkipClusterScopedObjects] = true for _, scheduleName := range scheduleNames { - err = 
DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) } log.Infof("Deleting backup schedule policy") @@ -3254,7 +3254,7 @@ var _ = Describe("{CustomBackupRestoreWithKubevirtAndNonKubevirtNS}", Label(Test go func(scheduleName string) { defer GinkgoRecover() defer wg.Done() - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting backup schedules [%s]", scheduleName)) if err != nil { mutex.Lock() diff --git a/tests/backup/backup_locked_bucket_test.go b/tests/backup/backup_locked_bucket_test.go index 4722d772b..9e7d65a8b 100644 --- a/tests/backup/backup_locked_bucket_test.go +++ b/tests/backup/backup_locked_bucket_test.go @@ -2,6 +2,8 @@ package tests import ( "fmt" + "github.com/portworx/torpedo/drivers" + "reflect" "strings" "sync" "time" @@ -25,11 +27,12 @@ import ( // This testcase verifies alternating backups between locked and unlocked bucket var _ = Describe("{BackupAlternatingBetweenLockedAndUnlockedBuckets}", Label(TestCaseLabelsMap[BackupAlternatingBetweenLockedAndUnlockedBuckets]...), func() { var ( - appList = Inst().AppList - credName string - restoreNames []string - controlChannel chan string - errorGroup *errgroup.Group + appList = Inst().AppList + credName string + restoreNames []string + controlChannel chan string + errorGroup *errgroup.Group + cloudCredentials map[string]string ) var preRuleNameList []string var postRuleNameList []string @@ -44,7 +47,7 @@ var _ = Describe("{BackupAlternatingBetweenLockedAndUnlockedBuckets}", Label(Tes var clusterStatus api.ClusterInfo_StatusInfo_Status bkpNamespaces = make([]string, 0) JustBeforeEach(func() { - 
StartPxBackupTorpedoTest("BackupAlternatingBetweenLockedAndUnlockedBuckets", "Deploying backup", nil, 60018, Kshithijiyer, Q4FY23) + StartPxBackupTorpedoTest("BackupAlternatingBetweenLockedAndUnlockedBuckets", "Alternate backup between locked and unlocked buckets", nil, 60018, Kshithijiyer, Q4FY23) log.InfoD("Verifying if the pre/post rules for the required apps are present in the list or not") for i := 0; i < len(appList); i++ { if Contains(PostRuleApp, appList[i]) { @@ -101,27 +104,54 @@ var _ = Describe("{BackupAlternatingBetweenLockedAndUnlockedBuckets}", Label(Tes ctx, err := backup.GetAdminCtxFromSecret() log.FailOnError(err, "Fetching px-central-admin ctx") for _, provider := range providers { - credName = fmt.Sprintf("%s-%s-%v", "cred", provider, time.Now().Unix()) - CloudCredUID = uuid.New() - CloudCredUIDMap[CloudCredUID] = credName - err := CreateCloudCredential(provider, credName, CloudCredUID, BackupOrgID, ctx) - dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of cloud credential named [%s] for org [%s] with [%s] as provider", CredName, BackupOrgID, provider)) + if provider == drivers.ProviderAws { + credName = fmt.Sprintf("%s-%s-%v", "cred", provider, time.Now().Unix()) + CloudCredUID = uuid.New() + CloudCredUIDMap[CloudCredUID] = credName + err := CreateCloudCredential(provider, credName, CloudCredUID, BackupOrgID, ctx) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of cloud credential named [%s] for org [%s] with [%s] as provider", CredName, BackupOrgID, provider)) + } else if provider == drivers.ProviderAzure { + cloudCredentials, err = CreateAzureCredentialsForImmutableBackupLocations(ctx, false) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of cloud credentials")) + credName = fmt.Sprintf("%s-%s-%v", "cred", provider, time.Now().Unix()) + CloudCredUID = uuid.New() + CloudCredUIDMap[CloudCredUID] = credName + err = CreateCloudCredential(provider, credName, CloudCredUID, BackupOrgID, ctx) + 
dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of cloud credential named [%s] for org [%s] with [%s] as provider", CredName, BackupOrgID, provider)) + + } } }) Step("Creating a locked bucket and backup location", func() { log.InfoD("Creating locked buckets and backup location") - modes := [2]string{"GOVERNANCE", "COMPLIANCE"} for _, provider := range providers { - for _, mode := range modes { - bucketName := fmt.Sprintf("%s-%s-%v", getGlobalLockedBucketName(provider), strings.ToLower(mode), time.Now().Unix()) - backupLocation = fmt.Sprintf("%s-%s-lock-%v", getGlobalLockedBucketName(provider), strings.ToLower(mode), time.Now().Unix()) - err := CreateS3Bucket(bucketName, true, 3, mode) - log.FailOnError(err, "Unable to create locked s3 bucket %s", bucketName) - BackupLocationUID = uuid.New() - err = CreateBackupLocation(provider, backupLocation, BackupLocationUID, credName, CloudCredUID, bucketName, BackupOrgID, "", true) - dash.VerifyFatal(err, nil, fmt.Sprintf("Creating backup location %s", backupLocation)) - BackupLocationMap[BackupLocationUID] = backupLocation + bucketMap, err := CreateLockedBucket(provider, 3, false) + dash.VerifyFatal(err, nil, "Check if locked buckets are created or not") + if drivers.ProviderAws == provider { + for mode, bucketName := range bucketMap { + backupLocation = fmt.Sprintf("%s-%s-%v", getGlobalLockedBucketName(provider), strings.ToLower(mode), time.Now().Unix()) + log.FailOnError(err, "Unable to create locked s3 bucket %s", bucketName) + BackupLocationUID = uuid.New() + err = CreateBackupLocation(provider, backupLocation, BackupLocationUID, credName, CloudCredUID, bucketName, BackupOrgID, "", true) + dash.VerifyFatal(err, nil, fmt.Sprintf("Creating backup location %s", backupLocation)) + BackupLocationMap[BackupLocationUID] = backupLocation + } + } else if drivers.ProviderAzure == provider { + modes := reflect.ValueOf(bucketMap).MapKeys() + credentials := reflect.ValueOf(cloudCredentials).MapKeys() + 
dash.VerifyFatal(len(modes), len(credentials), "Checking if length of creds and modes are same or not") + for i := 0; i < len(modes); i++ { + mode := modes[i].String() + bucketName := bucketMap[mode] + lockedCredName := credentials[i].String() + lockedCredUid := cloudCredentials[lockedCredName] + backupLocation = fmt.Sprintf("%s%v", getGlobalLockedBucketName(provider), time.Now().Unix()) + BackupLocationUID = uuid.New() + err = CreateAzureBackupLocation(backupLocation, BackupLocationUID, lockedCredName, lockedCredUid, bucketName, BackupOrgID, true, true) + dash.VerifyFatal(err, nil, fmt.Sprintf("Creating backup location %s", backupLocation)) + BackupLocationMap[BackupLocationUID] = backupLocation + } } } log.InfoD("Successfully created locked buckets and backup location") @@ -209,10 +239,8 @@ var _ = Describe("{BackupAlternatingBetweenLockedAndUnlockedBuckets}", Label(Tes log.FailOnError(err, "Fetching px-central-admin ctx") log.Infof("Deleting registered clusters for admin context") - err = DeleteCluster(SourceClusterName, BackupOrgID, ctx, true) + err = DeleteCluster(SourceClusterName, BackupOrgID, ctx, false) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting cluster %s", SourceClusterName)) - err = DeleteCluster(DestinationClusterName, BackupOrgID, ctx, true) - dash.VerifySafely(err, nil, fmt.Sprintf("Deleting cluster %s", DestinationClusterName)) }) }) @@ -235,6 +263,7 @@ var _ = Describe("{LockedBucketResizeOnRestoredVolume}", Label(TestCaseLabelsMap restoreNames []string controlChannel chan string errorGroup *errgroup.Group + cloudCredentials map[string]string ) labelSelectors := make(map[string]string) CloudCredUIDMap := make(map[string]string) @@ -306,27 +335,49 @@ var _ = Describe("{LockedBucketResizeOnRestoredVolume}", Label(TestCaseLabelsMap ctx, err := backup.GetAdminCtxFromSecret() log.FailOnError(err, "Fetching px-central-admin ctx") for _, provider := range providers { - credName = fmt.Sprintf("%s-%s-%v", "cred", provider, time.Now().Unix()) - 
CloudCredUID = uuid.New() - CloudCredUIDMap[CloudCredUID] = credName - err := CreateCloudCredential(provider, credName, CloudCredUID, BackupOrgID, ctx) - dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of cloud credential named [%s] for org [%s] with [%s] as provider", credName, BackupOrgID, provider)) + if provider == drivers.ProviderAws { + credName = fmt.Sprintf("%s-%s-%v", "cred", provider, time.Now().Unix()) + CloudCredUID = uuid.New() + CloudCredUIDMap[CloudCredUID] = credName + err := CreateCloudCredential(provider, credName, CloudCredUID, BackupOrgID, ctx) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of cloud credential named [%s] for org [%s] with [%s] as provider", CredName, BackupOrgID, provider)) + } else if provider == drivers.ProviderAzure { + cloudCredentials, err = CreateAzureCredentialsForImmutableBackupLocations(ctx, false) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of cloud credentials")) + + } } }) Step("Creating a locked bucket and backup location", func() { log.InfoD("Creating locked buckets and backup location") - modes := [2]string{"GOVERNANCE", "COMPLIANCE"} for _, provider := range providers { - for _, mode := range modes { - bucketName := fmt.Sprintf("%s-%v", getGlobalLockedBucketName(provider), time.Now().Unix()) - backupLocation = fmt.Sprintf("%s-%s-lock-%v", getGlobalLockedBucketName(provider), strings.ToLower(mode), time.Now().Unix()) - err := CreateS3Bucket(bucketName, true, 3, mode) - log.FailOnError(err, "Unable to create locked s3 bucket %s", bucketName) - BackupLocationUID = uuid.New() - err = CreateBackupLocation(provider, backupLocation, BackupLocationUID, credName, CloudCredUID, bucketName, BackupOrgID, "", true) - dash.VerifyFatal(err, nil, fmt.Sprintf("Creating backup location %s", backupLocation)) - BackupLocationMap[BackupLocationUID] = backupLocation + bucketMap, err := CreateLockedBucket(provider, 3, false) + dash.VerifyFatal(err, nil, "Check if locked buckets are created or 
not") + if drivers.ProviderAws == provider { + for mode, bucketName := range bucketMap { + backupLocation = fmt.Sprintf("%s-%s-lock-%v", getGlobalLockedBucketName(provider), strings.ToLower(mode), time.Now().Unix()) + log.FailOnError(err, "Unable to create locked s3 bucket %s", bucketName) + BackupLocationUID = uuid.New() + err = CreateBackupLocation(provider, backupLocation, BackupLocationUID, credName, CloudCredUID, bucketName, BackupOrgID, "", true) + dash.VerifyFatal(err, nil, fmt.Sprintf("Creating backup location %s", backupLocation)) + BackupLocationMap[BackupLocationUID] = backupLocation + } + } else if drivers.ProviderAzure == provider { + modes := reflect.ValueOf(bucketMap).MapKeys() + credentials := reflect.ValueOf(cloudCredentials).MapKeys() + dash.VerifyFatal(len(modes), len(credentials), "Checking if length of creds and modes are same or not") + for i := 0; i < len(modes); i++ { + mode := modes[i].String() + bucketName := bucketMap[mode] + lockedCredName := credentials[i].String() + lockedCredUid := cloudCredentials[lockedCredName] + backupLocation = fmt.Sprintf("%s%v", getGlobalLockedBucketName(provider), time.Now().Unix()) + BackupLocationUID = uuid.New() + err = CreateAzureBackupLocation(backupLocation, BackupLocationUID, lockedCredName, lockedCredUid, bucketName, BackupOrgID, true, true) + dash.VerifyFatal(err, nil, fmt.Sprintf("Creating backup location %s", backupLocation)) + BackupLocationMap[BackupLocationUID] = backupLocation + } } } log.InfoD("Successfully created locked buckets and backup location") @@ -461,7 +512,12 @@ var _ = Describe("{LockedBucketResizeOnRestoredVolume}", Label(TestCaseLabelsMap log.InfoD("Deleting backup location, cloud creds and clusters") ctx, err = backup.GetAdminCtxFromSecret() log.FailOnError(err, "Fetching px-central-admin ctx") - CleanupCloudSettingsAndClusters(BackupLocationMap, credName, CloudCredUID, ctx) + + log.Infof("Deleting registered clusters for admin context") + err = DeleteCluster(SourceClusterName, 
BackupOrgID, ctx, false) + dash.VerifySafely(err, nil, fmt.Sprintf("Deleting cluster %s", SourceClusterName)) + err = DeleteCluster(DestinationClusterName, BackupOrgID, ctx, false) + dash.VerifySafely(err, nil, fmt.Sprintf("Deleting cluster %s", DestinationClusterName)) }) }) @@ -473,7 +529,6 @@ var _ = Describe("{LockedBucketResizeVolumeOnScheduleBackup}", Label(TestCaseLab periodicSchedulePolicyName string periodicSchedulePolicyUid string scheduleName string - cloudCredUID string backupLocation string appList = Inst().AppList scheduledAppContexts []*scheduler.Context @@ -486,6 +541,7 @@ var _ = Describe("{LockedBucketResizeVolumeOnScheduleBackup}", Label(TestCaseLab podList []v1.Pod controlChannel chan string errorGroup *errgroup.Group + cloudCredentials map[string]string ) labelSelectors := make(map[string]string) cloudCredUIDMap := make(map[string]string) @@ -495,7 +551,6 @@ var _ = Describe("{LockedBucketResizeVolumeOnScheduleBackup}", Label(TestCaseLab AppContextsMapping := make(map[string]*scheduler.Context) volListBeforeSizeMap := make(map[string]int) volListAfterSizeMap := make(map[string]int) - modes := [2]string{"GOVERNANCE", "COMPLIANCE"} JustBeforeEach(func() { StartPxBackupTorpedoTest("LockedBucketResizeVolumeOnScheduleBackup", "Verify schedule backups are successful while volume resize is in progress for locked bucket", nil, 59899, Apimpalgaonkar, Q1FY24) log.InfoD("Verifying if the pre/post rules for the required apps are present in the list or not") @@ -550,25 +605,47 @@ var _ = Describe("{LockedBucketResizeVolumeOnScheduleBackup}", Label(TestCaseLab ctx, err := backup.GetAdminCtxFromSecret() log.FailOnError(err, "Unable to px-central-admin ctx") for _, provider := range providers { - credName = fmt.Sprintf("%s-%s-%v", "cred", provider, time.Now().Unix()) - cloudCredUID = uuid.New() - cloudCredUIDMap[cloudCredUID] = credName - err = CreateCloudCredential(provider, credName, cloudCredUID, BackupOrgID, ctx) - dash.VerifyFatal(err, nil, 
fmt.Sprintf("Creating cloud credentials %v", credName)) + if provider == drivers.ProviderAws { + credName = fmt.Sprintf("%s-%s-%v", "cred", provider, time.Now().Unix()) + CloudCredUID = uuid.New() + cloudCredUIDMap[CloudCredUID] = credName + err := CreateCloudCredential(provider, credName, CloudCredUID, BackupOrgID, ctx) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of cloud credential named [%s] for org [%s] with [%s] as provider", CredName, BackupOrgID, provider)) + } else if provider == drivers.ProviderAzure { + cloudCredentials, err = CreateAzureCredentialsForImmutableBackupLocations(ctx, false) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of cloud credentials")) + } } }) Step("Creating a locked bucket and backup location", func() { log.InfoD("Creating a locked bucket and backup location") for _, provider := range providers { - for _, mode := range modes { - bucketName := fmt.Sprintf("%s-%v", getGlobalLockedBucketName(provider), time.Now().Unix()) - backupLocation = fmt.Sprintf("%s-%s-lock-%v", getGlobalLockedBucketName(provider), strings.ToLower(mode), time.Now().Unix()) - err := CreateS3Bucket(bucketName, true, 3, mode) - dash.VerifyFatal(err, nil, fmt.Sprintf("Creating locked s3 bucket %s", bucketName)) - BackupLocationUID = uuid.New() - backupLocationMap[BackupLocationUID] = backupLocation - err = CreateBackupLocation(provider, backupLocation, BackupLocationUID, credName, cloudCredUID, bucketName, BackupOrgID, "", true) - dash.VerifyFatal(err, nil, fmt.Sprintf("Creating backup location %s", backupLocation)) + bucketMap, err := CreateLockedBucket(provider, 3, false) + dash.VerifyFatal(err, nil, "Check if locked buckets are created or not") + if drivers.ProviderAws == provider { + for mode, bucketName := range bucketMap { + backupLocation = fmt.Sprintf("%s-%s-lock-%v", getGlobalLockedBucketName(provider), strings.ToLower(mode), time.Now().Unix()) + log.FailOnError(err, "Unable to create locked s3 bucket %s", bucketName) + 
BackupLocationUID = uuid.New() + err = CreateBackupLocation(provider, backupLocation, BackupLocationUID, credName, CloudCredUID, bucketName, BackupOrgID, "", true) + dash.VerifyFatal(err, nil, fmt.Sprintf("Creating backup location %s", backupLocation)) + backupLocationMap[BackupLocationUID] = backupLocation + } + } else if drivers.ProviderAzure == provider { + modes := reflect.ValueOf(bucketMap).MapKeys() + credentials := reflect.ValueOf(cloudCredentials).MapKeys() + dash.VerifyFatal(len(modes), len(credentials), "Checking if length of creds and modes are same or not") + for i := 0; i < len(modes); i++ { + mode := modes[i].String() + bucketName := bucketMap[mode] + lockedCredName := credentials[i].String() + lockedCredUid := cloudCredentials[lockedCredName] + backupLocation = fmt.Sprintf("%s%v", getGlobalLockedBucketName(provider), time.Now().Unix()) + BackupLocationUID = uuid.New() + err = CreateAzureBackupLocation(backupLocation, BackupLocationUID, lockedCredName, lockedCredUid, bucketName, BackupOrgID, true, true) + dash.VerifyFatal(err, nil, fmt.Sprintf("Creating backup location %s", backupLocation)) + backupLocationMap[BackupLocationUID] = backupLocation + } } } }) @@ -714,7 +791,7 @@ var _ = Describe("{LockedBucketResizeVolumeOnScheduleBackup}", Label(TestCaseLab ctx, err := backup.GetAdminCtxFromSecret() log.FailOnError(err, "Unable to px-central-admin ctx") for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, false) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) } err = Inst().Backup.DeleteBackupSchedulePolicy(BackupOrgID, []string{periodicSchedulePolicyName}) @@ -723,10 +800,16 @@ var _ = Describe("{LockedBucketResizeVolumeOnScheduleBackup}", Label(TestCaseLab opts[SkipClusterScopedObjects] = true err = DestroyAppsWithData(scheduledAppContexts, opts, 
controlChannel, errorGroup) log.FailOnError(err, "Data validations failed") - CleanupCloudSettingsAndClusters(backupLocationMap, credName, cloudCredUID, ctx) + + log.Infof("Deleting registered clusters for admin context") + err = DeleteCluster(SourceClusterName, BackupOrgID, ctx, false) + dash.VerifySafely(err, nil, fmt.Sprintf("Deleting cluster %s", SourceClusterName)) + err = DeleteCluster(DestinationClusterName, BackupOrgID, ctx, false) + dash.VerifySafely(err, nil, fmt.Sprintf("Deleting cluster %s", DestinationClusterName)) }) }) +// TODO: Split this testcase according to the new architecture and mark it automated // DeleteLockedBucketUserObjectsFromAdmin delete backups, backup schedules, restore and cluster objects created with locked bucket from the admin var _ = Describe("{DeleteLockedBucketUserObjectsFromAdmin}", Label(TestCaseLabelsMap[DeleteLockedBucketUserObjectsFromAdmin]...), func() { var ( @@ -786,19 +869,17 @@ var _ = Describe("{DeleteLockedBucketUserObjectsFromAdmin}", Label(TestCaseLabel log.InfoD(fmt.Sprintf("Creating cloud credential and locked bucket backup location from the user %s", user)) nonAdminCtx, err := backup.GetNonAdminCtx(user, CommonPassword) log.FailOnError(err, "failed to fetch user %s ctx", user) - modes := [2]string{"GOVERNANCE", "COMPLIANCE"} for _, provider := range providers { userCloudCredentialName := fmt.Sprintf("autogenerated-cred-%v", time.Now().Unix()) userCloudCredentialUID := uuid.New() err = CreateCloudCredential(provider, userCloudCredentialName, userCloudCredentialUID, BackupOrgID, nonAdminCtx) log.FailOnError(err, "failed to create cloud credential %s using provider %s for the user", userCloudCredentialName, provider) userCloudCredentialMap[user] = map[string]string{userCloudCredentialUID: userCloudCredentialName} - for _, mode := range modes { + bucketMap, err := CreateLockedBucket(provider, 3, false) + dash.VerifyFatal(err, nil, "Check if locked buckets are created or not") + for _, lockedBucketName := range 
bucketMap { userBackupLocationName := fmt.Sprintf("autogenerated-backup-location-%v", time.Now().Unix()) userBackupLocationUID := uuid.New() - lockedBucketName := fmt.Sprintf("%s-%s-%s-locked", provider, getGlobalLockedBucketName(provider), strings.ToLower(mode)) - err := CreateS3Bucket(lockedBucketName, true, 3, mode) - log.FailOnError(err, "failed to create locked s3 bucket %s", lockedBucketName) err = CreateBackupLocationWithContext(provider, userBackupLocationName, userBackupLocationUID, userCloudCredentialName, userCloudCredentialUID, lockedBucketName, BackupOrgID, "", nonAdminCtx, true) log.FailOnError(err, "failed to create locked bucket backup location %s using provider %s for the user", userBackupLocationName, provider) userBackupLocationMap[user] = map[string]string{userBackupLocationUID: userBackupLocationName} @@ -1109,6 +1190,7 @@ var _ = Describe("{BackupToLockedBucketWithSharedObjects}", Label(TestCaseLabels clusterUid string scheduleList []string clusterStatus api.ClusterInfo_StatusInfo_Status + cloudCredentials map[string]string labelSelectors = make(map[string]string) CloudCredUIDMap = make(map[string]string) BackupLocationMap = make(map[string]string) @@ -1191,30 +1273,49 @@ var _ = Describe("{BackupToLockedBucketWithSharedObjects}", Label(TestCaseLabels ctx, err := backup.GetAdminCtxFromSecret() log.FailOnError(err, "Fetching px-central-admin ctx") for _, provider := range providers { - credName = fmt.Sprintf("%s-%s-%v", "cred", provider, time.Now().Unix()) - CloudCredUID = uuid.New() - CloudCredUIDMap[CloudCredUID] = credName - err := CreateCloudCredential(provider, credName, CloudCredUID, BackupOrgID, ctx) - dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of cloud credential named [%s] for org [%s] with [%s] as provider", CredName, BackupOrgID, provider)) + if provider == drivers.ProviderAws { + credName = fmt.Sprintf("%s-%s-%v", "cred", provider, time.Now().Unix()) + CloudCredUID = uuid.New() + CloudCredUIDMap[CloudCredUID] = 
credName + err := CreateCloudCredential(provider, credName, CloudCredUID, BackupOrgID, ctx) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of cloud credential named [%s] for org [%s] with [%s] as provider", CredName, BackupOrgID, provider)) + } else if provider == drivers.ProviderAzure { + cloudCredentials, err = CreateAzureCredentialsForImmutableBackupLocations(ctx, false) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of cloud credentials")) + + } } }) Step("Creating a locked bucket and backup location", func() { log.InfoD("Creating locked buckets and backup location") - modes := [2]string{"GOVERNANCE", "COMPLIANCE"} for _, provider := range providers { - for _, mode := range modes { - - bucketName := fmt.Sprintf("%s-%s-%v", getGlobalLockedBucketName(provider), strings.ToLower(mode), time.Now().Unix()) - backupLocation = fmt.Sprintf("%s-%s-lock-%v", getGlobalLockedBucketName(provider), strings.ToLower(mode), time.Now().Unix()) - err := CreateS3Bucket(bucketName, true, 3, mode) - log.FailOnError(err, "Unable to create locked s3 bucket %s", bucketName) - - BackupLocationUID = uuid.New() - err = CreateBackupLocation(provider, backupLocation, BackupLocationUID, credName, CloudCredUID, bucketName, BackupOrgID, "", true) - dash.VerifyFatal(err, nil, fmt.Sprintf("Creating backup location %s", backupLocation)) - - BackupLocationMap[BackupLocationUID] = backupLocation + bucketMap, err := CreateLockedBucket(provider, 3, false) + dash.VerifyFatal(err, nil, "Check if locked buckets are created or not") + if drivers.ProviderAws == provider { + for mode, bucketName := range bucketMap { + backupLocation = fmt.Sprintf("%s-%s-lock-%v", getGlobalLockedBucketName(provider), strings.ToLower(mode), time.Now().Unix()) + log.FailOnError(err, "Unable to create locked s3 bucket %s", bucketName) + BackupLocationUID = uuid.New() + err = CreateBackupLocation(provider, backupLocation, BackupLocationUID, credName, CloudCredUID, bucketName, BackupOrgID, "", true) + 
dash.VerifyFatal(err, nil, fmt.Sprintf("Creating backup location %s", backupLocation)) + BackupLocationMap[BackupLocationUID] = backupLocation + } + } else if drivers.ProviderAzure == provider { + modes := reflect.ValueOf(bucketMap).MapKeys() + credentials := reflect.ValueOf(cloudCredentials).MapKeys() + dash.VerifyFatal(len(modes), len(credentials), "Checking if length of creds and modes are same or not") + for i := 0; i < len(modes); i++ { + mode := modes[i].String() + bucketName := bucketMap[mode] + lockedCredName := credentials[i].String() + lockedCredUid := cloudCredentials[lockedCredName] + backupLocation = fmt.Sprintf("%s%v", getGlobalLockedBucketName(provider), time.Now().Unix()) + BackupLocationUID = uuid.New() + err = CreateAzureBackupLocation(backupLocation, BackupLocationUID, lockedCredName, lockedCredUid, bucketName, BackupOrgID, true, true) + dash.VerifyFatal(err, nil, fmt.Sprintf("Creating backup location %s", backupLocation)) + BackupLocationMap[BackupLocationUID] = backupLocation + } } } log.InfoD("Successfully created locked buckets and backup location") diff --git a/tests/backup/backup_multiple_provisioner_test.go b/tests/backup/backup_multiple_provisioner_test.go index adcd4f896..2e401e5ad 100644 --- a/tests/backup/backup_multiple_provisioner_test.go +++ b/tests/backup/backup_multiple_provisioner_test.go @@ -616,7 +616,7 @@ var _ = Describe("{MultipleProvisionerCsiKdmpBackupAndRestore}", Label(TestCaseL ctx, err := backup.GetAdminCtxFromSecret() log.FailOnError(err, "Fetching px-central-admin ctx") for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) } diff --git a/tests/backup/backup_namespace_labelled_test.go b/tests/backup/backup_namespace_labelled_test.go index 9ef4defe3..b026cd271 100644 --- 
a/tests/backup/backup_namespace_labelled_test.go +++ b/tests/backup/backup_namespace_labelled_test.go @@ -419,7 +419,7 @@ var _ = Describe("{BackupScheduleForOldAndNewNS}", Label(TestCaseLabelsMap[Backu defer EndPxBackupTorpedoTest(contexts) ctx, err := backup.GetAdminCtxFromSecret() dash.VerifySafely(err, nil, "Fetching px-central-admin ctx") - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) err = Inst().Backup.DeleteBackupSchedulePolicy(BackupOrgID, []string{periodicSchPolicyName}) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting backup schedule policies %s ", []string{periodicSchPolicyName})) @@ -616,7 +616,7 @@ var _ = Describe("{ManualAndScheduledBackupUsingNamespaceAndResourceLabel}", Lab defer EndPxBackupTorpedoTest(scheduledAppContexts) ctx, err := backup.GetAdminCtxFromSecret() log.FailOnError(err, "Unable to fetch px-central-admin ctx") - err = DeleteSchedule(scheduleBackupName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleBackupName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleBackupName)) err = Inst().Backup.DeleteBackupSchedulePolicy(BackupOrgID, []string{periodicSchPolicyName}) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting backup schedule policies %s ", []string{periodicSchPolicyName})) @@ -848,7 +848,7 @@ var _ = Describe("{ScheduleBackupWithAdditionAndRemovalOfNS}", Label(TestCaseLab defer EndPxBackupTorpedoTest(scheduledAppContexts) ctx, err := backup.GetAdminCtxFromSecret() log.FailOnError(err, "Fetching px-central-admin ctx") - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, 
fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) time.Sleep(1 * time.Minute) err = Inst().Backup.DeleteBackupSchedulePolicy(BackupOrgID, []string{periodicSchPolicyName}) @@ -1216,7 +1216,7 @@ var _ = Describe("{ManualAndScheduleBackupUsingNSLabelWithMaxCharLimit}", Label( ctx, err := backup.GetAdminCtxFromSecret() log.FailOnError(err, "Fetching px-central-admin ctx") for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) } err = Inst().Backup.DeleteBackupSchedulePolicy(BackupOrgID, []string{periodicSchPolicyName}) @@ -1417,7 +1417,7 @@ var _ = Describe("{NamespaceLabelledBackupOfEmptyNamespace}", Label(TestCaseLabe log.InfoD("Deleting the restores taken") err = DeleteRestore(restoreWithNamespaces, BackupOrgID, ctx) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting restore %s", restoreWithNamespaces)) - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) err = Inst().Backup.DeleteBackupSchedulePolicy(BackupOrgID, []string{periodicSchedulePolicyName}) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting backup schedule policies %s ", []string{periodicSchedulePolicyName})) diff --git a/tests/backup/backup_nfs_test.go b/tests/backup/backup_nfs_test.go index 12c5e25bb..026e5ee7f 100644 --- a/tests/backup/backup_nfs_test.go +++ b/tests/backup/backup_nfs_test.go @@ -206,7 +206,7 @@ var _ = Describe("{DeleteNfsExecutorPodWhileBackupAndRestoreInProgress}", Label( err = DeleteRestore(restoreName, BackupOrgID, ctx) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting restore %s", restoreName)) } - err = 
DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) err = Inst().Backup.DeleteBackupSchedulePolicy(BackupOrgID, []string{schedulePolicyName}) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting backup schedule policies %s ", []string{schedulePolicyName})) @@ -664,7 +664,7 @@ var _ = Describe("{CloudSnapshotMissingValidationForNFSLocation}", Label(TestCas go func(schedule string) { defer GinkgoRecover() defer wg.Done() - err = DeleteSchedule(schedule, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(schedule, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting Backup Schedule [%s]", schedule)) }(schedule) } diff --git a/tests/backup/backup_partial_test.go b/tests/backup/backup_partial_test.go index d79c5e683..a5f255eb0 100644 --- a/tests/backup/backup_partial_test.go +++ b/tests/backup/backup_partial_test.go @@ -240,7 +240,7 @@ var _ = Describe("{BackupCSIVolumesWithPartialSuccess}", Label(TestCaseLabelsMap log.FailOnError(err, "Fetching px-central-admin ctx") for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) } @@ -712,7 +712,7 @@ var _ = Describe("{PartialBackupSuccessWithPxAndKDMPVolumes}", Label(TestCaseLab ctx, err := backup.GetAdminCtxFromSecret() log.FailOnError(err, "Fetching px-central-admin ctx") for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup 
schedule - %s", scheduleName)) } err = Inst().Backup.DeleteBackupSchedulePolicy(BackupOrgID, []string{schedulePolicyName}) @@ -966,7 +966,7 @@ var _ = Describe("{BackupStateTransitionForScheduledBackups}", Label(TestCaseLab DestroyApps(scheduledAppContexts, opts) log.InfoD("Deleting all the backup schedules") for _, schBkpName := range scheduledNames { - err = DeleteSchedule(schBkpName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(schBkpName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verifying deletion of backup schedule [%s]", schBkpName)) } log.InfoD("Deleting all the backups") @@ -1199,7 +1199,7 @@ var _ = Describe("{PartialBackupWithLowerStorkVersion}", Label(TestCaseLabelsMap ctx, err := backup.GetAdminCtxFromSecret() log.FailOnError(err, "Fetching px-central-admin ctx") for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) } // Cleanup all backups diff --git a/tests/backup/backup_portworx_test.go b/tests/backup/backup_portworx_test.go index e6f918779..1694956ce 100644 --- a/tests/backup/backup_portworx_test.go +++ b/tests/backup/backup_portworx_test.go @@ -943,7 +943,7 @@ var _ = Describe("{ResizeVolumeOnScheduleBackup}", Label(TestCaseLabelsMap[Resiz ctx, err := backup.GetAdminCtxFromSecret() dash.VerifySafely(err, nil, "Fetching px-central-admin ctx") for i := 0; i < len(scheduleNames); i++ { - err = DeleteSchedule(scheduleNames[i], SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleNames[i], SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verifying deletion of schedule named [%s] and schedule policies [%v]", scheduleNames[i], periodicSchedulePolicyNames[i])) } log.InfoD("Deleting created restores") diff 
--git a/tests/backup/backup_psa_test.go b/tests/backup/backup_psa_test.go index f1699df08..b80ab844d 100644 --- a/tests/backup/backup_psa_test.go +++ b/tests/backup/backup_psa_test.go @@ -1543,7 +1543,7 @@ var _ = Describe("{PsaTakeBackupInLowerPrivilegeRestoreInHigherPrivilege}", Labe log.Info("Deleting schedules") for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting schedule [%s]", scheduleName)) } err = DeleteBackupSchedulePolicyWithContext(BackupOrgID, []string{periodicSchedulePolicyName}, ctx) @@ -1971,7 +1971,7 @@ var _ = Describe("{PSALowerPrivilegeToHigherPrivilegeWithProjectMapping}", Label DestroyApps(scheduledAppContexts, opts) log.Info("Deleting schedules") for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting schedule [%s]", scheduleName)) } log.Infof("Deleting pre & post exec rules") diff --git a/tests/backup/backup_rbac_test.go b/tests/backup/backup_rbac_test.go index 93a87866a..94200fe62 100644 --- a/tests/backup/backup_rbac_test.go +++ b/tests/backup/backup_rbac_test.go @@ -260,7 +260,7 @@ var _ = Describe("{VerifyRBACForInfraAdmin}", Label(TestCaseLabelsMap[VerifyRBAC log.InfoD(fmt.Sprintf("Delete Infra Admin %s backup schedule ", infraAdminUser)) nonAdminCtx, err := backup.GetNonAdminCtx(infraAdminUser, CommonPassword) log.FailOnError(err, "failed to fetch user %s ctx", infraAdminUser) - err = DeleteSchedule(backupScheduleWithLabel, SourceClusterName, BackupOrgID, nonAdminCtx) + err = DeleteSchedule(backupScheduleWithLabel, SourceClusterName, BackupOrgID, nonAdminCtx, true) dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting Backup Schedule [%s] for user [%s]", 
backupScheduleWithLabel, infraAdminUser)) }) @@ -421,7 +421,7 @@ var _ = Describe("{VerifyRBACForInfraAdmin}", Label(TestCaseLabelsMap[VerifyRBAC log.InfoD(fmt.Sprintf("Delete user %s backup schedule ", user)) nonAdminCtx, err := backup.GetNonAdminCtx(user, CommonPassword) log.FailOnError(err, "failed to fetch user %s ctx", user) - err = DeleteSchedule(scheduleNameMap[user], SourceClusterName, BackupOrgID, nonAdminCtx) + err = DeleteSchedule(scheduleNameMap[user], SourceClusterName, BackupOrgID, nonAdminCtx, true) dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting Backup Schedule [%s] for user [%s]", scheduleNameMap[user], user)) }) Step(fmt.Sprintf("Delete user %s source and destination cluster from the user context", user), func() { @@ -639,7 +639,7 @@ var _ = Describe("{VerifyRBACForInfraAdmin}", Label(TestCaseLabelsMap[VerifyRBAC log.InfoD(fmt.Sprintf("Delete custom user %s backup schedule ", customUser)) nonAdminCtx, err := backup.GetNonAdminCtx(customUser, CommonPassword) log.FailOnError(err, "failed to fetch user %s ctx", customUser) - err = DeleteSchedule(backupScheduleWithLabel, SourceClusterName, BackupOrgID, nonAdminCtx) + err = DeleteSchedule(backupScheduleWithLabel, SourceClusterName, BackupOrgID, nonAdminCtx, true) dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting Backup Schedule [%s] for user [%s]", backupScheduleWithLabel, customUser)) }) @@ -733,7 +733,7 @@ var _ = Describe("{VerifyRBACForInfraAdmin}", Label(TestCaseLabelsMap[VerifyRBAC currentSchedulesForCustomUser, err := backupDriver.EnumerateBackupSchedule(nonAdminCtx, bkpScheduleEnumerateReq) log.FailOnError(err, "Getting a list of all schedules for Custom user") for _, sch := range currentSchedulesForCustomUser.GetBackupSchedules() { - err = DeleteSchedule(sch.Name, sch.Cluster, BackupOrgID, nonAdminCtx) + err = DeleteSchedule(sch.Name, sch.Cluster, BackupOrgID, nonAdminCtx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting Backup Schedule [%s] for user [%s]", sch.Name, customUser)) } 
nonAdminCtx, err = backup.GetNonAdminCtx(infraAdminUser, CommonPassword) @@ -741,7 +741,7 @@ var _ = Describe("{VerifyRBACForInfraAdmin}", Label(TestCaseLabelsMap[VerifyRBAC currentSchedulesForInfraAdmin, err := backupDriver.EnumerateBackupSchedule(nonAdminCtx, bkpScheduleEnumerateReq) log.FailOnError(err, "Getting a list of all schedules for Infra admin") for _, sch := range currentSchedulesForInfraAdmin.GetBackupSchedules() { - err = DeleteSchedule(sch.Name, sch.Cluster, BackupOrgID, nonAdminCtx) + err = DeleteSchedule(sch.Name, sch.Cluster, BackupOrgID, nonAdminCtx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting Backup Schedule [%s] for user [%s]", sch.Name, infraAdminUser)) } log.InfoD("Deleting the deployed apps after the testcase") @@ -1033,7 +1033,7 @@ var _ = Describe("{VerifyRBACForPxAdmin}", Label(TestCaseLabelsMap[VerifyRBACFor log.InfoD(fmt.Sprintf("Delete user %s backup schedule ", user)) nonAdminCtx, err := backup.GetNonAdminCtx(user, CommonPassword) log.FailOnError(err, "failed to fetch user %s ctx", user) - err = DeleteSchedule(scheduleNameMap[user], SourceClusterName, BackupOrgID, nonAdminCtx) + err = DeleteSchedule(scheduleNameMap[user], SourceClusterName, BackupOrgID, nonAdminCtx, true) dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting Backup Schedule [%s] for user [%s]", scheduleNameMap[user], user)) }) Step(fmt.Sprintf("Delete user %s source and destination cluster from the user context", user), func() { @@ -1410,7 +1410,7 @@ var _ = Describe("{VerifyRBACForAppAdmin}", Label(TestCaseLabelsMap[VerifyRBACFo log.InfoD(fmt.Sprintf("Delete user %s backup schedule ", user)) nonAdminCtx, err := backup.GetNonAdminCtx(user, CommonPassword) log.FailOnError(err, "failed to fetch user %s ctx", user) - err = DeleteSchedule(scheduleNameMap[user], SourceClusterName, BackupOrgID, nonAdminCtx) + err = DeleteSchedule(scheduleNameMap[user], SourceClusterName, BackupOrgID, nonAdminCtx, true) dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting Backup Schedule 
[%s] for user [%s]", scheduleNameMap[user], user)) }) Step(fmt.Sprintf("Delete user %s source and destination cluster from the user context", user), func() { @@ -1578,9 +1578,9 @@ var _ = Describe("{VerifyRBACForAppAdmin}", Label(TestCaseLabelsMap[VerifyRBACFo log.InfoD(fmt.Sprintf("Delete App-admin user %s backup schedule ", appAdminUser)) nonAdminCtx, err := backup.GetNonAdminCtx(appAdminUser, CommonPassword) log.FailOnError(err, "failed to fetch user %s ctx", appAdminUser) - err = DeleteSchedule(scheduleNameMap[appAdminUser], SourceClusterName, BackupOrgID, nonAdminCtx) + err = DeleteSchedule(scheduleNameMap[appAdminUser], SourceClusterName, BackupOrgID, nonAdminCtx, true) dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting Backup Schedule [%s] for user [%s]", scheduleNameMap[appAdminUser], appAdminUser)) - err = DeleteSchedule(backupScheduleWithLabel, SourceClusterName, BackupOrgID, nonAdminCtx) + err = DeleteSchedule(backupScheduleWithLabel, SourceClusterName, BackupOrgID, nonAdminCtx, true) dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting Backup Schedule [%s] for user [%s]", backupScheduleWithLabel, appAdminUser)) }) @@ -1633,7 +1633,7 @@ var _ = Describe("{VerifyRBACForAppAdmin}", Label(TestCaseLabelsMap[VerifyRBACFo currentSchedules, err := backupDriver.EnumerateBackupSchedule(nonAdminCtx, bkpScheduleEnumerateReq) log.FailOnError(err, "Getting a list of all schedules") for _, sch := range currentSchedules.GetBackupSchedules() { - err = DeleteSchedule(sch.Name, SourceClusterName, BackupOrgID, nonAdminCtx) + err = DeleteSchedule(sch.Name, SourceClusterName, BackupOrgID, nonAdminCtx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting Backup Schedule [%s] for user [%s]", sch.Name, appAdminUser)) } ctx, err := backup.GetAdminCtxFromSecret() @@ -1999,7 +1999,7 @@ var _ = Describe("{VerifyRBACForAppUser}", Label(TestCaseLabelsMap[VerifyRBACFor nonAdminCtx, err := backup.GetNonAdminCtx(appUser, CommonPassword) log.FailOnError(err, "failed to fetch user %s ctx", 
appUser) for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, nonAdminCtx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, nonAdminCtx, true) dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting Backup Schedule [%s] for user [%s]", scheduleName, appUser)) } }) @@ -2032,7 +2032,7 @@ var _ = Describe("{VerifyRBACForAppUser}", Label(TestCaseLabelsMap[VerifyRBACFor currentSchedules, err := backupDriver.EnumerateBackupSchedule(nonAdminCtx, bkpScheduleEnumerateReq) log.FailOnError(err, "Getting a list of all schedules") for _, sch := range currentSchedules.GetBackupSchedules() { - err = DeleteSchedule(sch.Name, SourceClusterName, BackupOrgID, nonAdminCtx) + err = DeleteSchedule(sch.Name, SourceClusterName, BackupOrgID, nonAdminCtx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting Backup Schedule [%s] for user [%s]", sch.Name, appUser)) } ctx, err := backup.GetAdminCtxFromSecret() diff --git a/tests/backup/backup_restore_basic_test.go b/tests/backup/backup_restore_basic_test.go index 3650bcd36..8251265bb 100644 --- a/tests/backup/backup_restore_basic_test.go +++ b/tests/backup/backup_restore_basic_test.go @@ -765,7 +765,7 @@ var _ = Describe("{ScheduleBackupCreationAllNS}", Label(TestCaseLabelsMap[Schedu log.InfoD("Clean up objects after test execution") log.Infof("Deleting backup schedules") for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) } log.Infof("Deleting backup schedule policy") @@ -1849,7 +1849,7 @@ var _ = Describe("{AddMultipleNamespaceLabels}", Label(TestCaseLabelsMap[AddMult defer EndPxBackupTorpedoTest(scheduledAppContexts) ctx, err := backup.GetAdminCtxFromSecret() log.FailOnError(err, "Unable to px-central-admin ctx") - err 
= DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) err = Inst().Backup.DeleteBackupSchedulePolicy(BackupOrgID, []string{periodicSchedulePolicyName}) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting backup schedule policies %s ", []string{periodicSchedulePolicyName})) @@ -2497,7 +2497,7 @@ var _ = Describe("{SetUnsetNSLabelDuringScheduleBackup}", Label(TestCaseLabelsMa ctx, err := backup.GetAdminCtxFromSecret() log.FailOnError(err, "Unable to fetch px-central-admin ctx") log.InfoD("Deleting schedule named [%s] along with its backups [%v] and schedule policies [%v]", scheduleName, allScheduleBackupNames, []string{periodicSchedulePolicyName}) - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) err = Inst().Backup.DeleteBackupSchedulePolicy(BackupOrgID, []string{periodicSchedulePolicyName}) dash.VerifySafely(err, nil, fmt.Sprintf("Deleting backup schedule policies %s ", []string{periodicSchedulePolicyName})) @@ -3192,7 +3192,7 @@ var _ = Describe("{ScheduleBackupDeleteAndRecreateNS}", Label(TestCaseLabelsMap[ defer EndPxBackupTorpedoTest(scheduledAppContexts) ctx, err := backup.GetAdminCtxFromSecret() log.FailOnError(err, "Fetching px-central-admin ctx") - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) log.Infof("Deleting backup schedule policy") err = Inst().Backup.DeleteBackupSchedulePolicy(BackupOrgID, []string{schedulePolicyName}) @@ -4126,7 +4126,7 @@ var _ = 
Describe("{KubeAndPxNamespacesSkipOnAllNSBackup}", Label(TestCaseLabelsM err = SetSourceKubeConfig() log.FailOnError(err, "failed to switch context to source cluster") - err = DeleteSchedule(scheduleName, DestinationClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, DestinationClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) log.Infof("Deleting backup schedule policy") diff --git a/tests/backup/backup_sanity_test.go b/tests/backup/backup_sanity_test.go index 78eff6800..261c0f66a 100644 --- a/tests/backup/backup_sanity_test.go +++ b/tests/backup/backup_sanity_test.go @@ -26,16 +26,8 @@ var _ = Describe("{BackupClusterVerification}", Label(TestCaseLabelsMap[BackupCl It("Backup Cluster Verification", func() { Step("Check the status of backup pods", func() { log.InfoD("Check the status of backup pods") - //err := ValidateAllPodsInPxBackupNamespace() - //dash.VerifyFatal(err, nil, "Backup Cluster Verification successful") - data := make(map[string]string) - data["backups"] = "backupA,backupB,backupC" - data["restores"] = "restore1,backupB,backupC" - log.InfoD("%s", data) - err := UpdateConfigmap("backupclusterverification", data) - data["kshithij"] = "restore1,backupB,backupC" - err = UpdateConfigmap("backupclusterverification", data) - log.InfoD("%s", err) + err := ValidateAllPodsInPxBackupNamespace() + dash.VerifyFatal(err, nil, "Backup Cluster Verification successful") }) }) JustAfterEach(func() { diff --git a/tests/backup/backup_share_test.go b/tests/backup/backup_share_test.go index a8bfa5d14..e61329fdc 100644 --- a/tests/backup/backup_share_test.go +++ b/tests/backup/backup_share_test.go @@ -2420,7 +2420,7 @@ var _ = Describe("{ClusterBackupShareToggle}", Label(TestCaseLabelsMap[ClusterBa //Delete Schedule Backup- log.Infof("Deleting backup schedule") for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, backupClusterName, 
BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, backupClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) } log.Infof("Deleting backup schedule policy") diff --git a/tests/backup/backup_sse_test.go b/tests/backup/backup_sse_test.go index 2b087ac83..6202ba10e 100644 --- a/tests/backup/backup_sse_test.go +++ b/tests/backup/backup_sse_test.go @@ -522,7 +522,7 @@ var _ = Describe("{CreateBackupAndRestoreForAllCombinationsOfSSES3AndDenyPolicy} } // Delete backup schedule log.Infof("Deleting backup schedule") - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) CleanupCloudSettingsAndClusters(backupLocationMap, credName, cloudCredUID, ctx) // Delete custom buckets diff --git a/tests/backup/backup_stork_test.go b/tests/backup/backup_stork_test.go index b5dcd6f4d..c2094204d 100644 --- a/tests/backup/backup_stork_test.go +++ b/tests/backup/backup_stork_test.go @@ -355,7 +355,7 @@ var _ = Describe("{BackupAndRestoreWithNonExistingAdminNamespaceAndUpdatedResume opts[SkipClusterScopedObjects] = true log.Infof("Deleting backup schedule policy") for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) } log.InfoD("Deleting deployed applications") diff --git a/tests/backup/backup_upgrade_test.go b/tests/backup/backup_upgrade_test.go index 0bf34e222..8b3d18c02 100644 --- a/tests/backup/backup_upgrade_test.go +++ b/tests/backup/backup_upgrade_test.go @@ -208,7 +208,7 @@ var _ = Describe("{StorkUpgradeWithBackup}", 
Label(TestCaseLabelsMap[StorkUpgrad log.InfoD("Clean up objects after test execution") log.Infof("Deleting backup schedule") for _, scheduleName := range scheduleNames { - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verification of deleting backup schedule - %s", scheduleName)) } log.Infof("Deleting backup schedule policy") @@ -1014,15 +1014,15 @@ var _ = Describe("{PXBackupEndToEndBackupAndRestoreWithUpgrade}", Label(TestCase log.FailOnError(err, "Fetching px-central-admin ctx") deleteSingleNSScheduleTask := func(scheduleName string) { log.InfoD("Deleting single namespace backup schedule [%s]", scheduleName) - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verifying deletion of backup schedule [%s]", scheduleName)) } _ = TaskHandler(singleNSScheduleNames, deleteSingleNSScheduleTask, Parallel) log.InfoD("Deleting all namespaces backup schedule [%s]", allNSScheduleName) - err = DeleteSchedule(allNSScheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(allNSScheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verifying deletion of backup schedule [%s]", allNSScheduleName)) log.InfoD("Deleting partial backup schedule [%s]", partialScheduledBackupName) - err = DeleteSchedule(partialScheduledBackupName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(partialScheduledBackupName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verifying deletion of backup schedule [%s]", partialScheduledBackupName)) log.InfoD("Deleting pre exec rules %s", preRuleNames) for _, preRuleName := range preRuleNames { @@ -1516,7 +1516,7 @@ var _ = Describe("{PXBackupClusterUpgradeTest}", 
Label(TestCaseLabelsMap[PXBacku log.FailOnError(err, "Fetching px-central-admin ctx") deleteSingleNSScheduleTask := func(scheduleName string) { log.InfoD("Deleting single namespace backup schedule [%s]", scheduleName) - err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx) + err = DeleteSchedule(scheduleName, SourceClusterName, BackupOrgID, ctx, true) dash.VerifySafely(err, nil, fmt.Sprintf("Verifying deletion of backup schedule [%s]", scheduleName)) } _ = TaskHandler(singleNSScheduleNames, deleteSingleNSScheduleTask, Parallel) diff --git a/tests/backuptestTriggers.go b/tests/backuptestTriggers.go index 48235453a..1bd720b24 100644 --- a/tests/backuptestTriggers.go +++ b/tests/backuptestTriggers.go @@ -496,10 +496,14 @@ func getGlobalBucketName(provider string) string { return bucketName } +// getGlobalLockedBucketName Returns a global locked bucket string func getGlobalLockedBucketName(provider string) string { - if provider == drivers.ProviderAws { + switch provider { + case drivers.ProviderAws: return GlobalAWSLockedBucketName - } else { + case drivers.ProviderAzure: + return GlobalAzureLockedBucketName + default: log.Errorf("environment variable [%s] not provided with valid values", "PROVIDERS") return "" } diff --git a/tests/common.go b/tests/common.go index 69155d9f1..066856053 100644 --- a/tests/common.go +++ b/tests/common.go @@ -4898,7 +4898,7 @@ func DeleteBackupLocationWithContext(name string, backupLocationUID string, orgI } // DeleteSchedule deletes backup schedule -func DeleteSchedule(backupScheduleName string, clusterName string, orgID string, ctx context1.Context) error { +func DeleteSchedule(backupScheduleName string, clusterName string, orgID string, ctx context1.Context, deleteBackups bool) error { backupDriver := Inst().Backup backupScheduleInspectRequest := &api.BackupScheduleInspectRequest{ Name: backupScheduleName, @@ -4915,7 +4915,7 @@ func DeleteSchedule(backupScheduleName string, clusterName string, orgID string, Name: 
backupScheduleName, // DeleteBackups indicates whether the cloud backup files need to // be deleted or retained. - DeleteBackups: true, + DeleteBackups: deleteBackups, Uid: backupScheduleUID, } _, err = backupDriver.DeleteBackupSchedule(ctx, bkpScheduleDeleteRequest) @@ -5394,7 +5394,7 @@ func CreateBackupLocation(provider, name, uid, credName, credUID, bucketName, or case drivers.ProviderAws: err = CreateS3BackupLocation(name, uid, credName, credUID, bucketName, orgID, encryptionKey, validate) case drivers.ProviderAzure: - err = CreateAzureBackupLocation(name, uid, credName, credUID, bucketName, orgID, validate) + err = CreateAzureBackupLocation(name, uid, credName, credUID, bucketName, orgID, validate, false) case drivers.ProviderGke: err = CreateGCPBackupLocation(name, uid, credName, credUID, bucketName, orgID, validate) case drivers.ProviderNfs: @@ -5642,6 +5642,40 @@ func CreateBackupLocation(provider, name, uid, credName, credUID, bucketName, or return nil } +// CreateAzureCredentialsForImmutableBackupLocations creates azure cloud credentials for immutable backup locations +func CreateAzureCredentialsForImmutableBackupLocations(ctx context1.Context, softDelete bool) (map[string]string, error) { + tenantID, clientID, clientSecret, subscriptionID, _, _ := GetAzureCredsFromEnv() + _, containerLevelSA, containerLevelSAKey, storageAccountLevelSA, storageAccountLevelSAKey, softDeleteAccountLevelSA, softAccountLevelSAKey := GetAzureImmutabilityCredsFromEnv() + + // Creating a key-value pair of StorageAccount and its keys + storageAccountKeys := map[string]string{ + storageAccountLevelSA: storageAccountLevelSAKey, + containerLevelSA: containerLevelSAKey, + } + if softDelete { + storageAccountKeys[softDeleteAccountLevelSA] = softAccountLevelSAKey + } + storageAccountWithKey := make(map[string]string) + for storageAccount, storageAccountKey := range storageAccountKeys { + credUidWithAllFields := uuid.New() + azureConfigFields := &api.AzureConfig{ + TenantId: tenantID, 
ClientId: clientID, + ClientSecret: clientSecret, + AccountName: storageAccount, + AccountKey: storageAccountKey, + SubscriptionId: subscriptionID, + } + azureCredNameWithAllFields := fmt.Sprintf("azure-cloud-cred-%s-%s", storageAccount, RandomString(10)) + err := CreateAzureCloudCredential(azureCredNameWithAllFields, credUidWithAllFields, BackupOrgID, azureConfigFields, ctx) + if err != nil { + return nil, err + } + storageAccountWithKey[azureCredNameWithAllFields] = credUidWithAllFields + } + return storageAccountWithKey, nil +} + // CreateAzureCloudCredential creates azure cloud credentials func CreateAzureCloudCredential(credName string, uid, orgID string, azConfig *api.AzureConfig, ctx context1.Context) error { log.Infof("Create azure cloud credential with name [%s] for org [%s]", credName, orgID) @@ -5824,7 +5858,8 @@ func UpdateS3BackupLocation(name string, uid string, orgID string, cloudCred str } // CreateAzureBackupLocation creates backup location for Azure -func CreateAzureBackupLocation(name string, uid string, cloudCred string, cloudCredUID string, bucketName string, orgID string, validate bool) error { +func CreateAzureBackupLocation(name string, uid string, cloudCred string, cloudCredUID string, bucketName string, orgID string, validate bool, immutability bool) error { + var bLocationCreateReq *api.BackupLocationCreateRequest backupDriver := Inst().Backup encryptionKey := "torpedo" azureRegion := os.Getenv("AZURE_ENDPOINT") @@ -5832,29 +5867,58 @@ func CreateAzureBackupLocation(name string, uid string, cloudCred string, cloudC if azureRegion == "CHINA" { environmentType = api.S3Config_AzureEnvironmentType_AZURE_CHINA } - bLocationCreateReq := &api.BackupLocationCreateRequest{ - CreateMetadata: &api.CreateMetadata{ - Name: name, - OrgId: orgID, - Uid: uid, - }, - BackupLocation: &api.BackupLocationInfo{ - Path: bucketName, - EncryptionKey: encryptionKey, - ValidateCloudCredential: validate, - CloudCredentialRef: &api.ObjectRef{ - Name: cloudCred, - 
Uid: cloudCredUID, + if immutability { + resourceGroup := os.Getenv("AZURE_RESOURCE_GROUP") + bLocationCreateReq = &api.BackupLocationCreateRequest{ + CreateMetadata: &api.CreateMetadata{ + Name: name, + OrgId: orgID, + Uid: uid, }, - Type: api.BackupLocationInfo_Azure, - Config: &api.BackupLocationInfo_S3Config{ - S3Config: &api.S3Config{ - AzureEnvironment: &api.S3Config_AzureEnvironmentType{ - Type: environmentType, + BackupLocation: &api.BackupLocationInfo{ + Path: bucketName, + EncryptionKey: encryptionKey, + ValidateCloudCredential: validate, + CloudCredentialRef: &api.ObjectRef{ + Name: cloudCred, + Uid: cloudCredUID, + }, + Type: api.BackupLocationInfo_Azure, + Config: &api.BackupLocationInfo_S3Config{ + S3Config: &api.S3Config{ + AzureEnvironment: &api.S3Config_AzureEnvironmentType{ + Type: environmentType, + }, + AzureResourceGroupName: resourceGroup, }, }, }, - }, + } + } else { + bLocationCreateReq = &api.BackupLocationCreateRequest{ + CreateMetadata: &api.CreateMetadata{ + Name: name, + OrgId: orgID, + Uid: uid, + }, + BackupLocation: &api.BackupLocationInfo{ + Path: bucketName, + EncryptionKey: encryptionKey, + ValidateCloudCredential: validate, + CloudCredentialRef: &api.ObjectRef{ + Name: cloudCred, + Uid: cloudCredUID, + }, + Type: api.BackupLocationInfo_Azure, + Config: &api.BackupLocationInfo_S3Config{ + S3Config: &api.S3Config{ + AzureEnvironment: &api.S3Config_AzureEnvironmentType{ + Type: environmentType, + }, + }, + }, + }, + } } ctx, err := backup.GetAdminCtxFromSecret() if err != nil { @@ -6981,6 +7045,36 @@ func CreateBucket(provider string, bucketName string) { }) } +// CreateLockedBucket creates buckets with all the different modes for locked s3 bucket or immutable azure bucket +func CreateLockedBucket(provider string, retentionPeriod int, softDelete bool) (map[string]string, error) { + log.Info(fmt.Sprintf("Creating multiple locked buckets with different modes on %s", provider)) + bucketMap := make(map[string]string) + switch provider { 
+ case drivers.ProviderAws: + modes := [2]string{"GOVERNANCE", "COMPLIANCE"} + for _, mode := range modes { + lockedBucketName := fmt.Sprintf("%s-%s-%v", getGlobalLockedBucketName(provider), strings.ToLower(mode), time.Now().Unix()) + CreateS3Bucket(lockedBucketName, true, int64(retentionPeriod), mode) + bucketMap[mode] = lockedBucketName + } + return bucketMap, nil + case drivers.ProviderAzure: + modes := [2]Mode{SA_level, Container_level} + for _, mode := range modes { + lockedBucketName := fmt.Sprintf("%s%v", getGlobalLockedBucketName(provider), time.Now().Unix()) + CreateAzureBucket(lockedBucketName, true, mode, retentionPeriod, false) + bucketMap[string(mode)] = lockedBucketName + } + if softDelete { + lockedBucketName := fmt.Sprintf("%ssoft-%v", getGlobalLockedBucketName(provider), time.Now().Unix()) + CreateAzureBucket(lockedBucketName, true, SA_level, retentionPeriod, true) + bucketMap[string(SA_level)+"_soft"] = lockedBucketName + } + return bucketMap, nil + } + return nil, fmt.Errorf("function does not support %s provider", provider) +} + // IsBackupLocationEmpty returns true if the bucket for a provider is empty func IsBackupLocationEmpty(provider, bucketName string) (bool, error) { switch provider { @@ -7292,9 +7386,9 @@ const ( ) // CreateAzureBucket creates bucket in Azure -func CreateAzureBucket(bucketName string, immutability bool, mode Mode, retentionDays int, safeMode bool) { +func CreateAzureBucket(bucketName string, immutability bool, mode Mode, retentionDays int, softDeleteMode bool) { // From the Azure portal, get your Storage account blob service URL endpoint. 
- _, _, _, _, accountName, accountKey := GetAzureCredsFromEnv() + accountName, accountKey := "", "" azureRegion := os.Getenv("AZURE_ENDPOINT") if immutability { tenantID, clientID, clientSecret, _, _, _ := GetAzureCredsFromEnv() @@ -7327,7 +7421,7 @@ func CreateAzureBucket(bucketName string, immutability bool, mode Mode, retentio } if mode == SA_level { // Create a ContainerURL object that wraps a soon-to-be-created container's URL and a default pipeline. - if safeMode == true { + if softDeleteMode { accountName, accountKey = safeAccountLevelSA, safeAccountLevelSAKey } else { accountName, accountKey = storageAccountLevelSA, storageAccountLevelSAKey @@ -7352,6 +7446,7 @@ func CreateAzureBucket(bucketName string, immutability bool, mode Mode, retentio } } else { + _, _, _, _, accountName, accountKey := GetAzureCredsFromEnv() urlStr := fmt.Sprintf("https://%s.blob.core.windows.net/%s", accountName, bucketName) // Default value if azureRegion == "CHINA" { urlStr = fmt.Sprintf("https://%s.blob.core.chinacloudapi.cn/%s", accountName, bucketName)