diff --git a/controllers/drclusters.go b/controllers/drclusters.go index 82241a2c7..5db431194 100644 --- a/controllers/drclusters.go +++ b/controllers/drclusters.go @@ -232,7 +232,7 @@ func drClusterUndeploy( for i := range drpolicies.Items { drpolicy1 := &drpolicies.Items[i] - clusterNames = clusterNames.Insert(util.DrpolicyClusterNames(drpolicy1)...) + clusterNames = clusterNames.Insert(util.DRPolicyClusterNames(drpolicy1)...) } if clusterNames.Has(drcluster.Name) { diff --git a/controllers/drplacementcontrol.go b/controllers/drplacementcontrol.go index eab0cf011..14672d112 100644 --- a/controllers/drplacementcontrol.go +++ b/controllers/drplacementcontrol.go @@ -1307,7 +1307,7 @@ func (d *DRPCInstance) moveVRGToSecondaryEverywhere() bool { failedCount := 0 - for _, clusterName := range rmnutil.DrpolicyClusterNames(d.drPolicy) { + for _, clusterName := range rmnutil.DRPolicyClusterNames(d.drPolicy) { _, err := d.updateVRGState(clusterName, rmn.Secondary) if err != nil { if errors.IsNotFound(err) { @@ -1331,7 +1331,7 @@ func (d *DRPCInstance) moveVRGToSecondaryEverywhere() bool { } func (d *DRPCInstance) cleanupSecondaries(skipCluster string) (bool, error) { - for _, clusterName := range rmnutil.DrpolicyClusterNames(d.drPolicy) { + for _, clusterName := range rmnutil.DRPolicyClusterNames(d.drPolicy) { if skipCluster == clusterName { continue } @@ -1527,6 +1527,7 @@ func (d *DRPCInstance) generateVRG(dstCluster string, repState rmn.ReplicationSt }, Spec: rmn.VolumeReplicationGroupSpec{ PVCSelector: d.instance.Spec.PVCSelector, + ProtectedNamespaces: d.instance.Spec.ProtectedNamespaces, ReplicationState: repState, S3Profiles: AvailableS3Profiles(d.drClusters), KubeObjectProtection: d.instance.Spec.KubeObjectProtection, @@ -1571,7 +1572,7 @@ func dRPolicySupportsMetro(drpolicy *rmn.DRPolicy, drclusters []rmn.DRCluster) ( allRegionsMap := make(map[rmn.Region][]string) metroMap = make(map[rmn.Region][]string) - for _, managedCluster := range rmnutil.DrpolicyClusterNames(drpolicy) { + for _, managedCluster := range rmnutil.DRPolicyClusterNames(drpolicy) { for _, v := range drclusters { if v.Name == managedCluster { allRegionsMap[v.Spec.Region] = append( @@ -1729,7 +1730,7 @@ func (d *DRPCInstance) cleanupForVolSync(clusterToSkip string) error { peersReady := true - for _, clusterName := range rmnutil.DrpolicyClusterNames(d.drPolicy) { + for _, clusterName := range rmnutil.DRPolicyClusterNames(d.drPolicy) { if clusterToSkip == clusterName { continue } @@ -1816,7 +1817,7 @@ func (d *DRPCInstance) ensureVRGManifestWorkOnClusterDeleted(clusterName string) // a cluster if provided. It returns true if all clusters report secondary for the VRG, // otherwise, it returns false func (d *DRPCInstance) ensureVRGIsSecondaryEverywhere(clusterToSkip string) bool { - for _, clusterName := range rmnutil.DrpolicyClusterNames(d.drPolicy) { + for _, clusterName := range rmnutil.DRPolicyClusterNames(d.drPolicy) { if clusterToSkip == clusterName { continue } @@ -1874,7 +1875,7 @@ func (d *DRPCInstance) ensureVRGIsSecondaryOnCluster(clusterName string) bool { // has to be ensured. This can only be done at the other cluster which has been moved to // secondary by now. 
func (d *DRPCInstance) ensureDataProtected(targetCluster string) bool { - for _, clusterName := range rmnutil.DrpolicyClusterNames(d.drPolicy) { + for _, clusterName := range rmnutil.DRPolicyClusterNames(d.drPolicy) { if targetCluster == clusterName { continue } diff --git a/controllers/drplacementcontrol_controller.go b/controllers/drplacementcontrol_controller.go index 93ba95fb7..b361445ff 100644 --- a/controllers/drplacementcontrol_controller.go +++ b/controllers/drplacementcontrol_controller.go @@ -9,6 +9,8 @@ import ( "reflect" "time" + "golang.org/x/exp/slices" + "github.com/go-logr/logr" "github.com/google/uuid" ocmworkv1 "github.com/open-cluster-management/api/work/v1" @@ -745,6 +747,20 @@ func (r *DRPlacementControlReconciler) Reconcile(ctx context.Context, req ctrl.R return ctrl.Result{}, nil } + err = ensureDRPCValidNamespace(drpc, ramenConfig) + if err != nil { + r.recordFailure(ctx, drpc, placementObj, "Error", err.Error(), logger) + + return ctrl.Result{}, err + } + + err = r.ensureNoConflictingDRPCs(ctx, drpc, ramenConfig, logger) + if err != nil { + r.recordFailure(ctx, drpc, placementObj, "Error", err.Error(), logger) + + return ctrl.Result{}, err + } + drPolicy, err := r.getAndEnsureValidDRPolicy(ctx, drpc, logger) if err != nil { r.recordFailure(ctx, drpc, placementObj, "Error", err.Error(), logger) @@ -1090,7 +1106,7 @@ func (r DRPlacementControlReconciler) updateObjectMetadata(ctx context.Context, func getDRClusters(ctx context.Context, client client.Client, drPolicy *rmn.DRPolicy) ([]rmn.DRCluster, error) { drClusters := []rmn.DRCluster{} - for _, managedCluster := range rmnutil.DrpolicyClusterNames(drPolicy) { + for _, managedCluster := range rmnutil.DRPolicyClusterNames(drPolicy) { drCluster := &rmn.DRCluster{} err := client.Get(ctx, types.NamespacedName{Name: managedCluster}, drCluster) @@ -1199,7 +1215,7 @@ func (r *DRPlacementControlReconciler) finalizeDRPC(ctx context.Context, drpc *r } // delete manifestworks (VRGs) - for _, drClusterName := range rmnutil.DrpolicyClusterNames(drPolicy) { + for _, drClusterName := range rmnutil.DRPolicyClusterNames(drPolicy) { err := mwu.DeleteManifestWorksForCluster(drClusterName) if err != nil { return fmt.Errorf("%w", err) @@ -1211,7 +1227,7 @@ func (r *DRPlacementControlReconciler) finalizeDRPC(ctx context.Context, drpc *r } // delete MCVs used in the previous call - if err := r.deleteAllManagedClusterViews(drpc, rmnutil.DrpolicyClusterNames(drPolicy)); err != nil { + if err := r.deleteAllManagedClusterViews(drpc, rmnutil.DRPolicyClusterNames(drPolicy)); err != nil { return fmt.Errorf("error in deleting MCV (%w)", err) } @@ -1640,11 +1656,11 @@ func (r *DRPlacementControlReconciler) deleteClonedPlacementRule(ctx context.Con func (r *DRPlacementControlReconciler) addClusterPeersToPlacementRule( drPolicy *rmn.DRPolicy, plRule *plrv1.PlacementRule, log logr.Logger, ) error { - if len(rmnutil.DrpolicyClusterNames(drPolicy)) == 0 { + if len(rmnutil.DRPolicyClusterNames(drPolicy)) == 0 { return fmt.Errorf("DRPolicy %s is missing DR clusters", drPolicy.Name) } - for _, v := range rmnutil.DrpolicyClusterNames(drPolicy) { + for _, v := range rmnutil.DRPolicyClusterNames(drPolicy) { plRule.Spec.Clusters = append(plRule.Spec.Clusters, plrv1.GenericClusterReference{Name: v}) } @@ -2650,3 +2666,150 @@ func constructVRGFromView(viewVRG *rmn.VolumeReplicationGroup) *rmn.VolumeReplic return vrg } + +func ensureDRPCValidNamespace(drpc *rmn.DRPlacementControl, ramenConfig *rmn.RamenConfig) error { + if drpcInAdminNamespace(drpc, ramenConfig) 
{ + if !ramenConfig.MultiNamespace.FeatureEnabled { + return fmt.Errorf("drpc cannot be in admin namespace when multinamespace feature is disabled") + } + + if drpc.Spec.ProtectedNamespaces == nil || len(*drpc.Spec.ProtectedNamespaces) == 0 { + return fmt.Errorf("drpc in admin namespace must have protected namespaces") + } + + adminNamespace := drpcAdminNamespaceName(*ramenConfig) + if slices.Contains(*drpc.Spec.ProtectedNamespaces, adminNamespace) { + return fmt.Errorf("admin namespace cannot be a protected namespace, admin namespace: %s", adminNamespace) + } + + return nil + } + + if drpc.Spec.ProtectedNamespaces != nil && len(*drpc.Spec.ProtectedNamespaces) > 0 { + adminNamespace := drpcAdminNamespaceName(*ramenConfig) + + return fmt.Errorf("drpc in non-admin namespace(%v) cannot have protected namespaces, admin-namespaces: %v", + drpc.Namespace, adminNamespace) + } + + return nil +} + +func drpcsProtectCommonNamespace(drpcProtectedNs []string, otherDRPCProtectedNs []string) bool { + for _, ns := range drpcProtectedNs { + if slices.Contains(otherDRPCProtectedNs, ns) { + return true + } + } + + return false +} + +func (r *DRPlacementControlReconciler) getProtectedNamespaces(drpc *rmn.DRPlacementControl, + log logr.Logger, +) ([]string, error) { + if drpc.Spec.ProtectedNamespaces != nil && len(*drpc.Spec.ProtectedNamespaces) > 0 { + return *drpc.Spec.ProtectedNamespaces, nil + } + + placementObj, err := getPlacementOrPlacementRule(context.TODO(), r.Client, drpc, log) + if err != nil { + return []string{}, err + } + + protectedNamespace, err := selectVRGNamespace(r.Client, log, drpc, placementObj) + if err != nil { + return []string{}, err + } + + return []string{protectedNamespace}, nil +} + +func (r *DRPlacementControlReconciler) ensureNoConflictingDRPCs(ctx context.Context, + drpc *rmn.DRPlacementControl, ramenConfig *rmn.RamenConfig, log logr.Logger, +) error { + drpcList := &rmn.DRPlacementControlList{} + if err := r.Client.List(ctx, drpcList); err != nil { + return fmt.Errorf("failed to list DRPlacementControls (%w)", err) + } + + for i := range drpcList.Items { + otherDRPC := &drpcList.Items[i] + + // Skip the drpc itself + if otherDRPC.Name == drpc.Name && otherDRPC.Namespace == drpc.Namespace { + continue + } + + if err := r.twoDRPCsConflict(ctx, drpc, otherDRPC, ramenConfig, log); err != nil { + return err + } + } + + return nil +} + +func (r *DRPlacementControlReconciler) twoDRPCsConflict(ctx context.Context, + drpc *rmn.DRPlacementControl, otherDRPC *rmn.DRPlacementControl, ramenConfig *rmn.RamenConfig, log logr.Logger, +) error { + drpcIsInAdminNamespace := drpcInAdminNamespace(drpc, ramenConfig) + otherDRPCIsInAdminNamespace := drpcInAdminNamespace(otherDRPC, ramenConfig) + + // we don't check for conflicts between drpcs in non-admin namespace + if !drpcIsInAdminNamespace && !otherDRPCIsInAdminNamespace { + return nil + } + + // If the drpcs don't have common clusters, they definitely don't conflict + common, err := r.drpcHaveCommonClusters(ctx, drpc, otherDRPC, log) + if err != nil { + return fmt.Errorf("failed to check if drpcs have common clusters (%w)", err) + } + + if !common { + return nil + } + + drpcProtectedNamespaces, err := r.getProtectedNamespaces(drpc, log) + if err != nil { + return fmt.Errorf("failed to get protected namespaces for drpc: %v, %w", drpc.Name, err) + } + + otherDRPCProtectedNamespaces, err := r.getProtectedNamespaces(otherDRPC, log) + if err != nil { + return fmt.Errorf("failed to get protected namespaces for drpc: %v, %w", otherDRPC.Name, err) + 
} + + conflict := drpcsProtectCommonNamespace(drpcProtectedNamespaces, otherDRPCProtectedNamespaces) + if conflict { + return fmt.Errorf("drpc: %s and drpc: %s protect the same namespace", + drpc.Name, otherDRPC.Name) + } + + return nil +} + +func drpcInAdminNamespace(drpc *rmn.DRPlacementControl, ramenConfig *rmn.RamenConfig) bool { + adminNamespace := drpcAdminNamespaceName(*ramenConfig) + + return adminNamespace == drpc.Namespace +} + +func (r *DRPlacementControlReconciler) drpcHaveCommonClusters(ctx context.Context, + drpc, otherDRPC *rmn.DRPlacementControl, log logr.Logger, +) (bool, error) { + drpolicy, err := r.getDRPolicy(ctx, drpc, log) + if err != nil { + return false, fmt.Errorf("failed to get DRPolicy %w", err) + } + + otherDrpolicy, err := r.getDRPolicy(ctx, otherDRPC, log) + if err != nil { + return false, fmt.Errorf("failed to get DRPolicy %w", err) + } + + drpolicyClusters := rmnutil.DRPolicyClusterNamesAsASet(drpolicy) + otherDrpolicyClusters := rmnutil.DRPolicyClusterNamesAsASet(otherDrpolicy) + + return drpolicyClusters.Intersection(otherDrpolicyClusters).Len() > 0, nil +} diff --git a/controllers/drplacementcontrolvolsync.go b/controllers/drplacementcontrolvolsync.go index 9ef2c3e59..507d95a8a 100644 --- a/controllers/drplacementcontrolvolsync.go +++ b/controllers/drplacementcontrolvolsync.go @@ -44,7 +44,7 @@ func (d *DRPCInstance) ensureVolSyncReplicationCommon(srcCluster string) error { // Make sure we have Source and Destination VRGs - Source should already have been created at this point d.setProgression(rmn.ProgressionEnsuringVolSyncSetup) - vrgMWCount := d.mwu.GetVRGManifestWorkCount(rmnutil.DrpolicyClusterNames(d.drPolicy)) + vrgMWCount := d.mwu.GetVRGManifestWorkCount(rmnutil.DRPolicyClusterNames(d.drPolicy)) const maxNumberOfVRGs = 2 if len(d.vrgs) != maxNumberOfVRGs || vrgMWCount != maxNumberOfVRGs { @@ -291,7 +291,7 @@ func (d *DRPCInstance) createVolSyncDestManifestWork(clusterToSkip string) error "Last State:", d.getLastDRState(), "homeCluster", clusterToSkip) // Create or update ManifestWork for all the peers - for _, dstCluster := range rmnutil.DrpolicyClusterNames(d.drPolicy) { + for _, dstCluster := range rmnutil.DRPolicyClusterNames(d.drPolicy) { if dstCluster == clusterToSkip { // skip source cluster continue diff --git a/controllers/drpolicy.go b/controllers/drpolicy.go index c8deccadc..d6e8697a9 100644 --- a/controllers/drpolicy.go +++ b/controllers/drpolicy.go @@ -25,7 +25,7 @@ func propagateS3Secret( drClustersMutex.Lock() defer drClustersMutex.Unlock() - for _, clusterName := range util.DrpolicyClusterNames(drpolicy) { + for _, clusterName := range util.DRPolicyClusterNames(drpolicy) { if err := drClusterSecretsDeploy(clusterName, drpolicy, drclusters, secretsUtil, hubOperatorRamenConfig, log); err != nil { return err @@ -107,7 +107,7 @@ func drClustersUndeploySecrets( mustHaveS3Secrets := map[string]sets.String{} // Determine S3 secrets that must continue to exist per cluster in the policy being deleted - for _, clusterName := range util.DrpolicyClusterNames(drpolicy) { + for _, clusterName := range util.DRPolicyClusterNames(drpolicy) { mustHaveS3Secrets[clusterName] = drClusterListMustHaveSecrets(drpolicies, drclusters, clusterName, drpolicy, ramenConfig) } @@ -175,7 +175,7 @@ func drClusterListMustHaveS3Profiles(drpolicies rmn.DRPolicyList, continue } - for _, cluster := range util.DrpolicyClusterNames(&drpolicies.Items[idx]) { + for _, cluster := range util.DRPolicyClusterNames(&drpolicies.Items[idx]) { // Skip if not the current cluster if 
cluster != clusterName { continue @@ -199,7 +199,7 @@ func drPolicySecretNames(drpolicy *rmn.DRPolicy, var err error - for _, managedCluster := range util.DrpolicyClusterNames(drpolicy) { + for _, managedCluster := range util.DRPolicyClusterNames(drpolicy) { mcProfileFound := false s3ProfileName := "" diff --git a/controllers/drpolicy_controller.go b/controllers/drpolicy_controller.go index fa05a8c0e..be03659f9 100644 --- a/controllers/drpolicy_controller.go +++ b/controllers/drpolicy_controller.go @@ -276,9 +276,9 @@ func hasConflictingDRPolicy(match *ramen.DRPolicy, drclusters *ramen.DRClusterLi } func haveOverlappingMetroZones(d1 *ramen.DRPolicy, d2 *ramen.DRPolicy, drclusters *ramen.DRClusterList) bool { - d1ClusterNames := sets.NewString(util.DrpolicyClusterNames(d1)...) + d1ClusterNames := sets.NewString(util.DRPolicyClusterNames(d1)...) d1SupportsMetro, d1MetroRegions := dRPolicySupportsMetro(d1, drclusters.Items) - d2ClusterNames := sets.NewString(util.DrpolicyClusterNames(d2)...) + d2ClusterNames := sets.NewString(util.DRPolicyClusterNames(d2)...) d2SupportsMetro, d2MetroRegions := dRPolicySupportsMetro(d2, drclusters.Items) commonClusters := d1ClusterNames.Intersection(d2ClusterNames) diff --git a/controllers/kubeobjects/velero/requests.go b/controllers/kubeobjects/velero/requests.go index a43c7e19b..3283488e1 100644 --- a/controllers/kubeobjects/velero/requests.go +++ b/controllers/kubeobjects/velero/requests.go @@ -370,7 +370,8 @@ func getBackupSpecFromObjectsSpec(objectsSpec kubeobjects.Spec) velero.BackupSpe IncludedNamespaces: objectsSpec.IncludedNamespaces, IncludedResources: objectsSpec.IncludedResources, // exclude VRs from Backup so VRG can create them: see https://github.com/RamenDR/ramen/issues/884 - ExcludedResources: append(objectsSpec.ExcludedResources, "volumereplications.replication.storage.openshift.io"), + ExcludedResources: append(objectsSpec.ExcludedResources, "volumereplications.replication.storage.openshift.io", + "replicationsources.volsync.backube", "replicationdestinations.volsync.backube"), LabelSelector: objectsSpec.LabelSelector, OrLabelSelectors: objectsSpec.OrLabelSelectors, TTL: metav1.Duration{}, // TODO: set default here diff --git a/controllers/util/drpolicy_util.go b/controllers/util/drpolicy_util.go index 021138a0b..cc062ccd4 100644 --- a/controllers/util/drpolicy_util.go +++ b/controllers/util/drpolicy_util.go @@ -18,14 +18,18 @@ import ( rmn "github.com/ramendr/ramen/api/v1alpha1" ) -func DrpolicyClusterNames(drpolicy *rmn.DRPolicy) []string { +func DRPolicyClusterNames(drpolicy *rmn.DRPolicy) []string { return drpolicy.Spec.DRClusters } +func DRPolicyClusterNamesAsASet(drpolicy *rmn.DRPolicy) sets.String { + return sets.NewString(DRPolicyClusterNames(drpolicy)...) 
+} + func DrpolicyRegionNames(drpolicy *rmn.DRPolicy, drClusters []rmn.DRCluster) []string { - regionNames := make([]string, len(DrpolicyClusterNames(drpolicy))) + regionNames := make([]string, len(DRPolicyClusterNames(drpolicy))) - for i, v := range DrpolicyClusterNames(drpolicy) { + for i, v := range DRPolicyClusterNames(drpolicy) { regionName := "" for _, drCluster := range drClusters { @@ -71,7 +75,7 @@ func GetAllDRPolicies(ctx context.Context, client client.Reader) (rmn.DRPolicyLi func DRPolicyS3Profiles(drpolicy *rmn.DRPolicy, drclusters []rmn.DRCluster) sets.String { mustHaveS3Profiles := sets.String{} - for _, managedCluster := range DrpolicyClusterNames(drpolicy) { + for _, managedCluster := range DRPolicyClusterNames(drpolicy) { s3ProfileName := "" for i := range drclusters { diff --git a/controllers/volumereplicationgroup_controller.go b/controllers/volumereplicationgroup_controller.go index 4acdbbed1..c27be224b 100644 --- a/controllers/volumereplicationgroup_controller.go +++ b/controllers/volumereplicationgroup_controller.go @@ -69,7 +69,7 @@ func (r *VolumeReplicationGroupReconciler) SetupWithManager( &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, ) objectToReconcileRequestsMapper := objectToReconcileRequestsMapper{reader: r.Client, log: ctrl.Log} - builder := ctrl.NewControllerManagedBy(mgr). + ctrlBuilder := ctrl.NewControllerManagedBy(mgr). WithOptions(ctrlcontroller.Options{ MaxConcurrentReconciles: getMaxConcurrentReconciles(r.Log), RateLimiter: rateLimiter, @@ -87,12 +87,16 @@ func (r *VolumeReplicationGroupReconciler) SetupWithManager( handler.EnqueueRequestsFromMapFunc(r.pvcMapFunc), builder.WithPredicates(pvcPredicateFunc()), ). + Watches(&volrep.VolumeReplication{}, + handler.EnqueueRequestsFromMapFunc(r.VRMapFunc), + builder.WithPredicates(rmnutil.CreateOrDeleteOrResourceVersionUpdatePredicate{}), + ). Watches(&corev1.ConfigMap{}, handler.EnqueueRequestsFromMapFunc(r.configMapFun)). Owns(&volrep.VolumeReplication{}) if !ramenConfig.VolSync.Disabled { - builder.Owns(&volsyncv1alpha1.ReplicationDestination{}). 
- Owns(&volsyncv1alpha1.ReplicationSource{}) + r.Log.Info("VolSync enabled; adding owns and watches") + ctrlBuilder = r.addVolsyncOwnsAndWatches(ctrlBuilder) } else { r.Log.Info("VolSync disabled; don't own volsync resources") } @@ -100,13 +104,13 @@ func (r *VolumeReplicationGroupReconciler) SetupWithManager( r.kubeObjects = velero.RequestsManager{} if !ramenConfig.KubeObjectProtection.Disabled { r.Log.Info("Kube object protection enabled; watch kube objects requests") - recipesWatch(builder, objectToReconcileRequestsMapper) - kubeObjectsRequestsWatch(builder, r.Scheme, r.kubeObjects) + recipesWatch(ctrlBuilder, objectToReconcileRequestsMapper) + kubeObjectsRequestsWatch(ctrlBuilder, r.Scheme, r.kubeObjects) } else { r.Log.Info("Kube object protection disabled; don't watch kube objects requests") } - return builder.Complete(r) + return ctrlBuilder.Complete(r) } type objectToReconcileRequestsMapper struct { @@ -496,6 +500,7 @@ func (v *VRGInstance) requeue() { v.result.Requeue = true } +// nolint: cyclop func (v *VRGInstance) processVRG() ctrl.Result { if err := v.validateVRGState(); err != nil { // No requeue, as there is no reconcile till user changes desired spec to a valid value @@ -509,6 +514,13 @@ func (v *VRGInstance) processVRG() ctrl.Result { return v.invalid(err, "VolumeReplicationGroup mode is invalid", false) } + if v.instance.Spec.ProtectedNamespaces != nil && len(*v.instance.Spec.ProtectedNamespaces) > 0 { + if v.instance.Namespace != RamenOperandsNamespace(*v.ramenConfig) { + return v.invalid(fmt.Errorf("VolumeReplicationGroup is not allowed to protect namespaces"), + "VolumeReplicationGroup is not in the admin namespace", false) + } + } + if err := RecipeElementsGet( v.ctx, v.reconciler.Client, *v.instance, *v.ramenConfig, v.log, &v.recipeElements, ); err != nil { @@ -1284,3 +1296,101 @@ func removeString(values []string, s string) []string { return result } + +func vrgInAdminNamespace(vrg *ramendrv1alpha1.VolumeReplicationGroup, ramenConfig *ramendrv1alpha1.RamenConfig) bool { + vrgAdminNamespaceNames := vrgAdminNamespaceNames(*ramenConfig) + + return slices.Contains(vrgAdminNamespaceNames, vrg.Namespace) +} + +func filterVRGDependentObjects(reader client.Reader, obj client.Object, log logr.Logger) []reconcile.Request { + req := []reconcile.Request{} + + var vrgs ramendrv1alpha1.VolumeReplicationGroupList + + err := reader.List(context.TODO(), &vrgs) + if err != nil { + log.Error(err, "Failed to get list of VolumeReplicationGroup resources") + + return req + } + + for _, vrg := range vrgs.Items { + log1 := log.WithValues("vrg", vrg.Name) + + if vrg.Spec.ProtectedNamespaces == nil || len(*vrg.Spec.ProtectedNamespaces) == 0 { + continue + } + + if slices.Contains(*vrg.Spec.ProtectedNamespaces, obj.GetNamespace()) { + log1.Info("Found VolumeReplicationGroup with matching namespace", + "vrg", vrg.Name, "namespace", obj.GetNamespace()) + + req = append(req, reconcile.Request{NamespacedName: types.NamespacedName{ + Name: vrg.Name, + Namespace: vrg.Namespace, + }}) + + break + } + } + + return req +} + +func (r *VolumeReplicationGroupReconciler) VRMapFunc(ctx context.Context, obj client.Object) []reconcile.Request { + log := ctrl.Log.WithName("vrmap").WithName("VolumeReplicationGroup") + + vr, ok := obj.(*volrep.VolumeReplication) + if !ok { + log.Info("map function received non-vr resource") + + return []reconcile.Request{} + } + + return filterVRGDependentObjects(r.Client, obj, + log.WithValues("vr", types.NamespacedName{Name: vr.Name, Namespace: vr.Namespace})) +} + +func (r 
*VolumeReplicationGroupReconciler) RDMapFunc(ctx context.Context, obj client.Object) []reconcile.Request {
+	log := ctrl.Log.WithName("rdmap").WithName("VolumeReplicationGroup")
+
+	rd, ok := obj.(*volsyncv1alpha1.ReplicationDestination)
+	if !ok {
+		log.Info("map function received a non-ReplicationDestination resource")
+
+		return []reconcile.Request{}
+	}
+
+	return filterVRGDependentObjects(r.Client, obj,
+		log.WithValues("rd", types.NamespacedName{Name: rd.Name, Namespace: rd.Namespace}))
+}
+
+func (r *VolumeReplicationGroupReconciler) RSMapFunc(ctx context.Context, obj client.Object) []reconcile.Request {
+	log := ctrl.Log.WithName("rsmap").WithName("VolumeReplicationGroup")
+
+	rs, ok := obj.(*volsyncv1alpha1.ReplicationSource)
+	if !ok {
+		log.Info("map function received a non-ReplicationSource resource")
+
+		return []reconcile.Request{}
+	}
+
+	return filterVRGDependentObjects(r.Client, obj,
+		log.WithValues("rs", types.NamespacedName{Name: rs.Name, Namespace: rs.Namespace}))
+}
+
+func (r *VolumeReplicationGroupReconciler) addVolsyncOwnsAndWatches(ctrlBuilder *builder.Builder) *builder.Builder {
+	ctrlBuilder.Owns(&volsyncv1alpha1.ReplicationDestination{}).
+		Owns(&volsyncv1alpha1.ReplicationSource{}).
+		Watches(&volsyncv1alpha1.ReplicationDestination{},
+			handler.EnqueueRequestsFromMapFunc(r.RDMapFunc),
+			builder.WithPredicates(rmnutil.CreateOrDeleteOrResourceVersionUpdatePredicate{}),
+		).
+		Watches(&volsyncv1alpha1.ReplicationSource{},
+			handler.EnqueueRequestsFromMapFunc(r.RSMapFunc),
+			builder.WithPredicates(rmnutil.CreateOrDeleteOrResourceVersionUpdatePredicate{}),
+		)
+
+	return ctrlBuilder
+}
diff --git a/controllers/vrg_pvc_selector.go b/controllers/vrg_pvc_selector.go
index ed4d1242e..dc76dbc70 100644
--- a/controllers/vrg_pvc_selector.go
+++ b/controllers/vrg_pvc_selector.go
@@ -13,7 +13,14 @@ type PvcSelector struct {
 	NamespaceNames []string
 }
 
+// pvcNamespaceNamesDefault returns the default PVC namespaces for the VRG.
+// If the VRG namespace is the Ramen operands namespace, then the protected namespaces are used.
+// Otherwise (VRG in the application namespace or the Ramen operator namespace), the VRG namespace is used.
func pvcNamespaceNamesDefault(vrg ramen.VolumeReplicationGroup, ramenConfig ramen.RamenConfig) []string { + if vrg.Namespace == RamenOperandsNamespace(ramenConfig) { + return *vrg.Spec.ProtectedNamespaces + } + return []string{vrg.Namespace} } diff --git a/controllers/vrg_recipe.go b/controllers/vrg_recipe.go index 3f7125656..271dcdf24 100644 --- a/controllers/vrg_recipe.go +++ b/controllers/vrg_recipe.go @@ -30,16 +30,28 @@ type RecipeElements struct { RecoverWorkflow []kubeobjects.RecoverSpec } -func captureWorkflowDefault(vrg ramen.VolumeReplicationGroup) []kubeobjects.CaptureSpec { - return []kubeobjects.CaptureSpec{ +func captureWorkflowDefault(vrg ramen.VolumeReplicationGroup, ramenConfig ramen.RamenConfig) []kubeobjects.CaptureSpec { + namespaces := []string{vrg.Namespace} + + if vrg.Namespace == RamenOperandsNamespace(ramenConfig) { + namespaces = *vrg.Spec.ProtectedNamespaces + } + + captureSpecs := []kubeobjects.CaptureSpec{ { Spec: kubeobjects.Spec{ KubeResourcesSpec: kubeobjects.KubeResourcesSpec{ - IncludedNamespaces: []string{vrg.Namespace}, + IncludedNamespaces: namespaces, }, }, }, } + + if vrg.Spec.KubeObjectProtection.KubeObjectSelector != nil { + captureSpecs[0].Spec.LabelSelector = vrg.Spec.KubeObjectProtection.KubeObjectSelector + } + + return captureSpecs } func recoverWorkflowDefault() []kubeobjects.RecoverSpec { return []kubeobjects.RecoverSpec{{}} } @@ -52,7 +64,9 @@ func GetPVCSelector(ctx context.Context, reader client.Reader, vrg ramen.VolumeR return recipeElements.PvcSelector, recipeVolumesAndOptionallyWorkflowsGet( ctx, reader, vrg, ramenConfig, log, &recipeElements, - func(recipe.Recipe, *RecipeElements, ramen.VolumeReplicationGroup) error { return nil }, + func(recipe.Recipe, *RecipeElements, ramen.VolumeReplicationGroup, ramen.RamenConfig) error { + return nil + }, ) } @@ -66,7 +80,7 @@ func RecipeElementsGet(ctx context.Context, reader client.Reader, vrg ramen.Volu func recipeVolumesAndOptionallyWorkflowsGet(ctx context.Context, reader client.Reader, vrg ramen.VolumeReplicationGroup, ramenConfig ramen.RamenConfig, log logr.Logger, recipeElements *RecipeElements, - workflowsGet func(recipe.Recipe, *RecipeElements, ramen.VolumeReplicationGroup) error, + workflowsGet func(recipe.Recipe, *RecipeElements, ramen.VolumeReplicationGroup, ramen.RamenConfig) error, ) error { if vrg.Spec.KubeObjectProtection == nil { *recipeElements = RecipeElements{ @@ -79,7 +93,7 @@ func recipeVolumesAndOptionallyWorkflowsGet(ctx context.Context, reader client.R if vrg.Spec.KubeObjectProtection.RecipeRef == nil { *recipeElements = RecipeElements{ PvcSelector: getPVCSelector(vrg, ramenConfig, nil, nil), - CaptureWorkflow: captureWorkflowDefault(vrg), + CaptureWorkflow: captureWorkflowDefault(vrg, ramenConfig), RecoverWorkflow: recoverWorkflowDefault(), } @@ -100,11 +114,19 @@ func recipeVolumesAndOptionallyWorkflowsGet(ctx context.Context, reader client.R return err } + var selector PvcSelector + if recipe.Spec.Volumes == nil { + selector = getPVCSelector(vrg, ramenConfig, nil, nil) + } else { + selector = getPVCSelector(vrg, ramenConfig, recipe.Spec.Volumes.IncludedNamespaces, + recipe.Spec.Volumes.LabelSelector) + } + *recipeElements = RecipeElements{ PvcSelector: selector, } - if err := workflowsGet(recipe, recipeElements, vrg); err != nil { + if err := workflowsGet(recipe, recipeElements, vrg, ramenConfig); err != nil { return err } @@ -142,11 +164,13 @@ func parametersExpand(s string, parameters map[string][]string) string { }) } -func recipeWorkflowsGet(recipe recipe.Recipe, 
recipeElements *RecipeElements, vrg ramen.VolumeReplicationGroup) error {
+func recipeWorkflowsGet(recipe recipe.Recipe, recipeElements *RecipeElements, vrg ramen.VolumeReplicationGroup,
+	ramenConfig ramen.RamenConfig,
+) error {
 	var err error
 
 	if recipe.Spec.CaptureWorkflow == nil {
-		recipeElements.CaptureWorkflow = captureWorkflowDefault(vrg)
+		recipeElements.CaptureWorkflow = captureWorkflowDefault(vrg, ramenConfig)
 	} else {
 		recipeElements.CaptureWorkflow, err = getCaptureGroups(recipe)
 		if err != nil {
@@ -176,22 +200,34 @@ func recipeNamespacesValidate(recipeElements RecipeElements, vrg ramen.VolumeRep
 	}
 
 	if !ramenConfig.MultiNamespace.FeatureEnabled {
-		return fmt.Errorf("extra-VRG namespaces %v require feature be enabled", extraVrgNamespaceNames)
+		return fmt.Errorf("requested protection of other namespaces %v when the MultiNamespace feature is disabled",
+			extraVrgNamespaceNames)
 	}
 
-	adminNamespaceNames := adminNamespaceNames()
-	if !slices.Contains(adminNamespaceNames, vrg.Namespace) {
-		return fmt.Errorf("extra-VRG namespaces %v require VRG's namespace, %v, be an admin one %v",
-			extraVrgNamespaceNames,
+	if !vrgInAdminNamespace(&vrg, &ramenConfig) {
+		vrgAdminNamespaceNames := vrgAdminNamespaceNames(ramenConfig)
+
+		return fmt.Errorf("vrg namespace: %v needs to be in admin namespaces: %v to protect other namespaces: %v",
 			vrg.Namespace,
-			adminNamespaceNames,
+			vrgAdminNamespaceNames,
+			extraVrgNamespaceNames,
 		)
 	}
 
-	if vrg.Spec.Async != nil {
-		return fmt.Errorf("extra-VRG namespaces %v require VRG's async mode be disabled", extraVrgNamespaceNames)
+	// We know the VRG is in one of the admin namespaces; if it is in the Ramen ops namespace,
+	// then every namespace named by the recipe must be in the protected namespaces list.
+	if vrg.Namespace == RamenOperandsNamespace(ramenConfig) {
+		for _, ns := range extraVrgNamespaceNames {
+			if !slices.Contains(*vrg.Spec.ProtectedNamespaces, ns) {
+				return fmt.Errorf("recipe mentions namespace: %v which is not in protected namespaces: %v",
+					ns,
+					vrg.Spec.ProtectedNamespaces,
+				)
+			}
+		}
 	}
 
+	// The VRG is in the Ramen operator namespace; allow it to protect any namespace.
 	return nil
 }
diff --git a/controllers/vrg_recipe_test.go b/controllers/vrg_recipe_test.go
index 9409980da..b13f92fc9 100644
--- a/controllers/vrg_recipe_test.go
+++ b/controllers/vrg_recipe_test.go
@@ -193,12 +193,6 @@ var _ = Describe("VolumeReplicationGroupRecipe", func() {
 			},
 		}
 	}
-	vrgAsyncModeEnable := func() {
-		vrg.Spec.Async = &ramen.VRGAsyncSpec{
-			SchedulingInterval: vrInterval,
-		}
-		vrg.Spec.Sync = nil
-	}
 	vrgRecipeRefDefine := func(name string) {
 		vrg.Spec.KubeObjectProtection.RecipeRef = &ramen.RecipeRef{
 			Namespace: r.Namespace,
@@ -453,22 +447,12 @@ var _ = Describe("VolumeReplicationGroupRecipe", func() {
 				})
 			})
 			Context("with Ramen's extra-VRG namespaces feature enabled", func() {
-				Context("with async mode enabled", func() {
-					BeforeEach(func() {
-						vrgAsyncModeEnable()
-					})
-					It("has an invalid PVC selector", func() {
-						Expect(err).To(HaveOccurred())
-					})
+				It("includes only them in its PVC selection", func() {
+					Expect(err).ToNot(HaveOccurred())
+					Expect(pvcSelector.NamespaceNames).To(ConsistOf(nsNamesSlice))
 				})
-				Context("with async mode disabled", func() {
-					It("includes only them in its PVC selection", func() {
-						Expect(err).ToNot(HaveOccurred())
-						Expect(pvcSelector.NamespaceNames).To(ConsistOf(nsNamesSlice))
-					})
-					It("lists only their PVCs in the VRG's status", func() {
-						vrgPvcsConsistOfEventually(pvcsSlice...)
- }) + It("lists only their PVCs in the VRG's status", func() { + vrgPvcsConsistOfEventually(pvcsSlice...) }) }) }) diff --git a/controllers/vrg_volrep.go b/controllers/vrg_volrep.go index e14922acf..55239e9d6 100644 --- a/controllers/vrg_volrep.go +++ b/controllers/vrg_volrep.go @@ -1189,6 +1189,7 @@ func (v *VRGInstance) createVR(vrNamespacedName types.NamespacedName, state volr ObjectMeta: metav1.ObjectMeta{ Name: vrNamespacedName.Name, Namespace: vrNamespacedName.Namespace, + Labels: rmnutil.OwnerLabels(v.instance), }, Spec: volrep.VolumeReplicationSpec{ DataSource: corev1.TypedLocalObjectReference{ @@ -1202,11 +1203,14 @@ func (v *VRGInstance) createVR(vrNamespacedName types.NamespacedName, state volr }, } - // Let VRG receive notification for any changes to VolumeReplication CR - // created by VRG. - if err := ctrl.SetControllerReference(v.instance, volRep, v.reconciler.Scheme); err != nil { - return fmt.Errorf("failed to set owner reference to VolumeReplication resource (%s/%s), %w", - volRep.Name, volRep.Namespace, err) + if !vrgInAdminNamespace(v.instance, v.ramenConfig) { + // This is to keep existing behavior of ramen. + // Set the owner reference only for the VRs which are in the same namespace as the VRG and + // when VRG is not in the admin namespace. + if err := ctrl.SetControllerReference(v.instance, volRep, v.reconciler.Scheme); err != nil { + return fmt.Errorf("failed to set owner reference to VolumeReplication resource (%s/%s), %w", + volRep.Name, volRep.Namespace, err) + } } v.log.Info("Creating VolumeReplication resource", "resource", volRep)