diff --git a/PROJECT b/PROJECT
index 7c02eec49..d2319b5bf 100644
--- a/PROJECT
+++ b/PROJECT
@@ -108,9 +108,12 @@ resources:
     crdVersion: v1
     namespaced: true
   controller: true
-  domain: k0rdent.mirantis.com
-  group: k0rdent.mirantis.com
-  kind: Backup
+  domain: hmc.mirantis.com
+  group: hmc.mirantis.com
+  kind: ManagementBackup
   path: github.com/K0rdent/kcm/api/v1alpha1
   version: v1alpha1
+  webhooks:
+    validation: true
+    webhookVersion: v1
 version: "3"
diff --git a/api/v1alpha1/backup_types.go b/api/v1alpha1/backup_types.go
deleted file mode 100644
index f05239a7a..000000000
--- a/api/v1alpha1/backup_types.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2024
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v1alpha1
-
-import (
-	velerov1 "github.com/zerospiel/velero/pkg/apis/velero/v1"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
-	// Name to label most of the KCM-related components.
-	// Mostly utilized by the backup feature.
-	GenericComponentLabelName = "k0rdent.mirantis.com/component"
-	// Component label value for the KCM-related components.
-	GenericComponentLabelValueKCM = "kcm"
-)
-
-// BackupSpec defines the desired state of Backup
-type BackupSpec struct {
-	// Oneshot indicates whether the Backup should not be scheduled
-	// and rather created immediately and only once.
-	Oneshot bool `json:"oneshot,omitempty"`
-}
-
-// BackupStatus defines the observed state of Backup
-type BackupStatus struct {
-	// Reference to the underlying Velero object being managed.
-	// Might be either Velero Backup or Schedule.
-	Reference *corev1.ObjectReference `json:"reference,omitempty"`
-	// Status of the Velero Schedule for the Management scheduled backups.
-	// Always absent for the Backups with the .spec.oneshot set to true.
-	Schedule *velerov1.ScheduleStatus `json:"schedule,omitempty"`
-	// NextAttempt indicates the time when the next scheduled backup will be performed.
-	// Always absent for the Backups with the .spec.oneshot set to true.
-	NextAttempt *metav1.Time `json:"nextAttempt,omitempty"`
-	// Last Velero Backup that has been created.
-	LastBackup *velerov1.BackupStatus `json:"lastBackup,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:subresource:status
-// +kubebuilder:resource:scope=Cluster
-
-// Backup is the Schema for the backups API
-type Backup struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-
-	Spec   BackupSpec   `json:"spec,omitempty"`
-	Status BackupStatus `json:"status,omitempty"`
-}
-
-// +kubebuilder:object:root=true
-
-// BackupList contains a list of Backup
-type BackupList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata,omitempty"`
-	Items           []Backup `json:"items"`
-}
-
-func init() {
-	SchemeBuilder.Register(&Backup{}, &BackupList{})
-}
diff --git a/api/v1alpha1/management_backup_types.go b/api/v1alpha1/management_backup_types.go
new file mode 100644
index 000000000..3ae21e24c
--- /dev/null
+++ b/api/v1alpha1/management_backup_types.go
@@ -0,0 +1,95 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+	velerov1 "github.com/zerospiel/velero/pkg/apis/velero/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	// Name to label most of the HMC-related components.
+	// Mostly utilized by the backup feature.
+	GenericComponentLabelName = "hmc.mirantis.com/component"
+	// Component label value for the HMC-related components.
+	GenericComponentLabelValueHMC = "hmc"
+)
+
+// ManagementBackupSpec defines the desired state of ManagementBackup
+type ManagementBackupSpec struct{}
+
+// ManagementBackupStatus defines the observed state of ManagementBackup
+type ManagementBackupStatus struct {
+	// Reference to the underlying Velero object being managed.
+	// Might be either Velero Backup or Schedule.
+	Reference *corev1.ObjectReference `json:"reference,omitempty"`
+	// NextAttempt indicates the time when the next scheduled backup will be performed.
+	// Always absent for the ManagementBackups that are not driven by a schedule.
+	NextAttempt *metav1.Time `json:"nextAttempt,omitempty"`
+	// Last Velero Backup that has been created.
+	LastBackup *velerov1.BackupStatus `json:"lastBackup,omitempty"`
+	// Status of the Velero Schedule for the Management scheduled backups.
+	// Always absent for the ManagementBackups that are not driven by a schedule.
+	Schedule *velerov1.ScheduleStatus `json:"schedule,omitempty"`
+	// SchedulePaused indicates if the Velero Schedule is paused.
+	SchedulePaused bool `json:"schedulePaused,omitempty"`
+}
+
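+// GetLastBackupCopy returns a copy of the last Velero BackupStatus,
+// or a zero value if no backup has been recorded yet.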
+func (in *ManagementBackupStatus) GetLastBackupCopy() velerov1.BackupStatus {
+	if in.LastBackup == nil {
+		return velerov1.BackupStatus{}
+	}
+	return *in.LastBackup
+}
+
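+// GetScheduleCopy returns a copy of the Velero ScheduleStatus,
+// or a zero value if the schedule has not reported status yet.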
+func (in *ManagementBackupStatus) GetScheduleCopy() velerov1.ScheduleStatus {
+	if in.Schedule == nil {
+		return velerov1.ScheduleStatus{}
+	}
+	return *in.Schedule
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:scope=Cluster,shortName=hmcbackup;mgmtbackup
+// +kubebuilder:printcolumn:name="NextBackup",type=string,JSONPath=`.status.nextAttempt`,description="Next scheduled attempt to back up",priority=0
+// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.schedule.phase`,description="Schedule phase",priority=0
+// +kubebuilder:printcolumn:name="SinceLastBackup",type=date,JSONPath=`.status.schedule.lastBackup`,description="Time elapsed since last backup run",priority=1
+// +kubebuilder:printcolumn:name="LastBackupStatus",type=string,JSONPath=`.status.lastBackup.phase`,description="Status of last backup run",priority=0
+// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`,description="Time elapsed since object creation",priority=0
+// +kubebuilder:printcolumn:name="Paused",type=boolean,JSONPath=`.status.schedulePaused`,description="Schedule is on pause",priority=1
+
+// ManagementBackup is the Schema for the backups API
+type ManagementBackup struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   ManagementBackupSpec   `json:"spec,omitempty"`
+	Status ManagementBackupStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ManagementBackupList contains a list of ManagementBackup
+type ManagementBackupList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ManagementBackup `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&ManagementBackup{}, &ManagementBackupList{})
+}
diff --git a/api/v1alpha1/management_types.go b/api/v1alpha1/management_types.go
index 24c0c2ee1..b0c0f3526 100644
--- a/api/v1alpha1/management_types.go
+++ b/api/v1alpha1/management_types.go
@@ -44,7 +44,7 @@ type ManagementSpec struct {
 	// Providers is the list of supported CAPI providers.
 	Providers []Provider `json:"providers,omitempty"`
 
-	Backup ManagementBackup `json:"backup,omitempty"`
+	Backup Backup `json:"backup,omitempty"`
 }
 
 // Core represents a structure describing core Management components.
@@ -55,15 +55,29 @@ type Core struct {
 	CAPI Component `json:"capi,omitempty"`
 }
 
-// ManagementBackup enables a feature to backup KCM objects into a cloud.
-type ManagementBackup struct {
-	// Schedule is a Cron expression defining when to run the scheduled Backup.
+// Backup enables a feature to back up HMC objects into a cloud.
+type Backup struct {
+	// +kubebuilder:example={customPlugins: {"alibabacloud": "registry.<region>.aliyuncs.com/acs/velero:1.4.2", "community.openstack.org/openstack": "lirt/velero-plugin-for-openstack:v0.6.0"}}
+
+	// CustomPlugins holds key-value pairs of [Velero] [community] and [custom] plugins, where:
+	// 	- key is the provider's name in the format [velero.io/]<plugin-name>;
+	// 	- value is the provider's plugin image;
+	//
+	// The provider name must exactly match the provider set in the corresponding [BackupStorageLocation] object.
+	//
+	// [Velero]: https://velero.io
+	// [community]: https://velero.io/docs/v1.15/supported-providers/#provider-plugins-maintained-by-the-velero-community
+	// [custom]: https://velero.io/docs/v1.15/custom-plugins/
+	// [BackupStorageLocation]: https://velero.io/docs/v1.15/api-types/backupstoragelocation/
+	CustomPlugins map[string]string `json:"customPlugins,omitempty"`
+
+	// Schedule is a Cron expression defining when to run the scheduled ManagementBackup.
 	// Default value is to backup every 6 hours.
 	Schedule string `json:"schedule,omitempty"`
 
 	// Flag to indicate whether the backup feature is enabled.
 	// If set to true, [Velero] platform will be installed.
-	// If set to false, creation or modification of Backups/Restores will be blocked.
+	// If set to false, creation or modification of ManagementBackups will be blocked.
 	//
 	// [Velero]: https://velero.io
 	Enabled bool `json:"enabled,omitempty"`
@@ -123,6 +137,15 @@ func (in *Management) Templates() []string {
 	return templates
 }
 
+// GetBackupSchedule returns the configured backup schedule; it is safe to call on a nil Management.
+func (in *Management) GetBackupSchedule() string {
+	if in == nil {
+		return ""
+	}
+
+	return in.Spec.Backup.Schedule
+}
+
 // ManagementStatus defines the observed state of Management
 type ManagementStatus struct {
 	// For each CAPI provider name holds its compatibility [contract versions]
diff --git a/api/v1alpha1/templates_common.go b/api/v1alpha1/templates_common.go
index f7c6d22a2..ee60a189c 100644
--- a/api/v1alpha1/templates_common.go
+++ b/api/v1alpha1/templates_common.go
@@ -23,13 +23,10 @@ import (
 	helmcontrollerv2 "github.com/fluxcd/helm-controller/api/v2"
 	sourcev1 "github.com/fluxcd/source-controller/api/v1"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	clusterapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
 )
 
 const (
-	// ChartAnnotationProviderName is the annotation set on components in a Template.
-	// This annotations allows to identify all the components belonging to a provider.
-	ChartAnnotationProviderName = "cluster.x-k8s.io/provider"
-
 	chartAnnoCAPIPrefix = "cluster.x-k8s.io/"
 
 	DefaultRepoName = "kcm-templates"
@@ -103,7 +100,7 @@ func getProvidersList(providers Providers, annotations map[string]string) Provid
 		return slices.Compact(res)
 	}
 
-	providersFromAnno := annotations[ChartAnnotationProviderName]
+	providersFromAnno := annotations[clusterapiv1beta1.ProviderNameLabel]
 	if len(providersFromAnno) == 0 {
 		return Providers{}
 	}
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 3bd853ce1..079248af4 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -181,10 +181,13 @@ func (in *AvailableUpgrade) DeepCopy() *AvailableUpgrade {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Backup) DeepCopyInto(out *Backup) {
 	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	out.Spec = in.Spec
-	in.Status.DeepCopyInto(&out.Status)
+	if in.CustomPlugins != nil {
+		in, out := &in.CustomPlugins, &out.CustomPlugins
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backup.
@@ -197,95 +200,6 @@ func (in *Backup) DeepCopy() *Backup {
 	return out
 }
 
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Backup) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BackupList) DeepCopyInto(out *BackupList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]Backup, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupList.
-func (in *BackupList) DeepCopy() *BackupList {
-	if in == nil {
-		return nil
-	}
-	out := new(BackupList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *BackupList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec.
-func (in *BackupSpec) DeepCopy() *BackupSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(BackupSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BackupStatus) DeepCopyInto(out *BackupStatus) {
-	*out = *in
-	if in.Reference != nil {
-		in, out := &in.Reference, &out.Reference
-		*out = new(corev1.ObjectReference)
-		**out = **in
-	}
-	if in.Schedule != nil {
-		in, out := &in.Schedule, &out.Schedule
-		*out = new(velerov1.ScheduleStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.NextAttempt != nil {
-		in, out := &in.NextAttempt, &out.NextAttempt
-		*out = (*in).DeepCopy()
-	}
-	if in.LastBackup != nil {
-		in, out := &in.LastBackup, &out.LastBackup
-		*out = new(velerov1.BackupStatus)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus.
-func (in *BackupStatus) DeepCopy() *BackupStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(BackupStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ClusterDeployment) DeepCopyInto(out *ClusterDeployment) {
 	*out = *in
@@ -817,6 +731,10 @@ func (in *Management) DeepCopyObject() runtime.Object {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ManagementBackup) DeepCopyInto(out *ManagementBackup) {
 	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	in.Status.DeepCopyInto(&out.Status)
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementBackup.
@@ -829,6 +747,95 @@ func (in *ManagementBackup) DeepCopy() *ManagementBackup {
 	return out
 }
 
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ManagementBackup) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagementBackupList) DeepCopyInto(out *ManagementBackupList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ManagementBackup, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementBackupList.
+func (in *ManagementBackupList) DeepCopy() *ManagementBackupList {
+	if in == nil {
+		return nil
+	}
+	out := new(ManagementBackupList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ManagementBackupList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagementBackupSpec) DeepCopyInto(out *ManagementBackupSpec) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementBackupSpec.
+func (in *ManagementBackupSpec) DeepCopy() *ManagementBackupSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ManagementBackupSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagementBackupStatus) DeepCopyInto(out *ManagementBackupStatus) {
+	*out = *in
+	if in.Reference != nil {
+		in, out := &in.Reference, &out.Reference
+		*out = new(corev1.ObjectReference)
+		**out = **in
+	}
+	if in.NextAttempt != nil {
+		in, out := &in.NextAttempt, &out.NextAttempt
+		*out = (*in).DeepCopy()
+	}
+	if in.LastBackup != nil {
+		in, out := &in.LastBackup, &out.LastBackup
+		*out = new(velerov1.BackupStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Schedule != nil {
+		in, out := &in.Schedule, &out.Schedule
+		*out = new(velerov1.ScheduleStatus)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementBackupStatus.
+func (in *ManagementBackupStatus) DeepCopy() *ManagementBackupStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ManagementBackupStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ManagementList) DeepCopyInto(out *ManagementList) {
 	*out = *in
@@ -876,7 +883,7 @@ func (in *ManagementSpec) DeepCopyInto(out *ManagementSpec) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	out.Backup = in.Backup
+	in.Backup.DeepCopyInto(&out.Backup)
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementSpec.
diff --git a/cmd/main.go b/cmd/main.go
index 00e652f14..1ae8bad5a 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -304,7 +304,8 @@ func main() {
 	}
 
 	if err = (&controller.CredentialReconciler{
-		Client: mgr.GetClient(),
+		SystemNamespace: currentNamespace,
+		Client:          mgr.GetClient(),
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "Credential")
 		os.Exit(1)
@@ -317,13 +318,12 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "MultiClusterService")
 		os.Exit(1)
 	}
-	// TODO (zerospiel): disabled until the #605
-	// if err = (&controller.BackupReconciler{
-	// 	Client: mgr.GetClient(),
-	// }).SetupWithManager(mgr); err != nil {
-	// 	setupLog.Error(err, "unable to create controller", "controller", "Backup")
-	// 	os.Exit(1)
-	// }
+	if err = (&controller.ManagementBackupReconciler{
+		Client: mgr.GetClient(),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "ManagementBackup")
+		os.Exit(1)
+	}
 	// +kubebuilder:scaffold:builder
 
 	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
@@ -394,5 +394,9 @@ func setupWebhooks(mgr ctrl.Manager, currentNamespace string) error {
 		setupLog.Error(err, "unable to create webhook", "webhook", "Release")
 		return err
 	}
+	if err := (&hmcwebhook.ManagementBackupValidator{}).SetupWebhookWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create webhook", "webhook", "ManagementBackup")
+		return err
+	}
 	return nil
 }
diff --git a/config/dev/aws-clusterdeployment.yaml b/config/dev/aws-clusterdeployment.yaml
index f17e24b05..346e4c2d3 100644
--- a/config/dev/aws-clusterdeployment.yaml
+++ b/config/dev/aws-clusterdeployment.yaml
@@ -3,8 +3,6 @@ kind: ClusterDeployment
 metadata:
   name: aws-dev
   namespace: ${NAMESPACE}
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   template: aws-standalone-cp-0-0-4
   credential: aws-cluster-identity-cred
diff --git a/config/dev/aws-credentials.yaml b/config/dev/aws-credentials.yaml
index e614003d3..d681d2e8c 100644
--- a/config/dev/aws-credentials.yaml
+++ b/config/dev/aws-credentials.yaml
@@ -4,8 +4,6 @@ kind: AWSClusterStaticIdentity
 metadata:
   name: aws-cluster-identity
   namespace: ${NAMESPACE}
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   secretRef: aws-cluster-identity-secret
   allowedNamespaces:
@@ -17,8 +15,6 @@ kind: Secret
 metadata:
   name: aws-cluster-identity-secret
   namespace: ${NAMESPACE}
-  labels:
-    k0rdent.mirantis.com/component: kcm
 type: Opaque
 stringData:
   AccessKeyID: ${AWS_ACCESS_KEY_ID}
@@ -30,8 +26,6 @@ kind: Credential
 metadata:
   name: aws-cluster-identity-cred
   namespace: ${NAMESPACE}
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   description: AWS credentials
   identityRef:
diff --git a/config/dev/azure-clusterdeployment.yaml b/config/dev/azure-clusterdeployment.yaml
index 94ced35df..1c3531c0e 100644
--- a/config/dev/azure-clusterdeployment.yaml
+++ b/config/dev/azure-clusterdeployment.yaml
@@ -3,8 +3,6 @@ kind: ClusterDeployment
 metadata:
   name: azure-dev
   namespace: ${NAMESPACE}
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   template: azure-standalone-cp-0-0-4
   credential: azure-cluster-identity-cred
diff --git a/config/dev/azure-credentials.yaml b/config/dev/azure-credentials.yaml
index 6ca798cbb..36f4c786b 100644
--- a/config/dev/azure-credentials.yaml
+++ b/config/dev/azure-credentials.yaml
@@ -4,7 +4,6 @@ kind: AzureClusterIdentity
 metadata:
   labels:
     clusterctl.cluster.x-k8s.io/move-hierarchy: "true"
-    k0rdent.mirantis.com/component: kcm
   name: azure-cluster-identity
   namespace: ${NAMESPACE}
 spec:
@@ -21,8 +20,6 @@ kind: Secret
 metadata:
   name: azure-cluster-identity-secret
   namespace: ${NAMESPACE}
-  labels:
-    k0rdent.mirantis.com/component: kcm
 stringData:
   clientSecret: "${AZURE_CLIENT_SECRET}"
 type: Opaque
@@ -32,8 +29,6 @@ kind: Credential
 metadata:
   name: azure-cluster-identity-cred
   namespace: ${NAMESPACE}
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   description: Azure credentials
   identityRef:
diff --git a/config/dev/eks-clusterdeployment.yaml b/config/dev/eks-clusterdeployment.yaml
index bd28ed94c..e3437df46 100644
--- a/config/dev/eks-clusterdeployment.yaml
+++ b/config/dev/eks-clusterdeployment.yaml
@@ -3,8 +3,6 @@ kind: ClusterDeployment
 metadata:
   name: eks-dev
   namespace: ${NAMESPACE}
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   template: aws-eks-0-0-2
   credential: "aws-cluster-identity-cred"
diff --git a/config/dev/vsphere-clusterdeployment.yaml b/config/dev/vsphere-clusterdeployment.yaml
index 97594fb70..1bc506d6f 100644
--- a/config/dev/vsphere-clusterdeployment.yaml
+++ b/config/dev/vsphere-clusterdeployment.yaml
@@ -3,8 +3,6 @@ kind: ClusterDeployment
 metadata:
   name: vsphere-dev
   namespace: ${NAMESPACE}
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   template: vsphere-standalone-cp-0-0-3
   credential: vsphere-cluster-identity-cred
diff --git a/config/dev/vsphere-credentials.yaml b/config/dev/vsphere-credentials.yaml
index d7c161f09..c338acb96 100644
--- a/config/dev/vsphere-credentials.yaml
+++ b/config/dev/vsphere-credentials.yaml
@@ -4,8 +4,6 @@ kind: VSphereClusterIdentity
 metadata:
   name: vsphere-cluster-identity
   namespace: ${NAMESPACE}
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   secretName: vsphere-cluster-identity-secret
   allowedNamespaces:
@@ -17,8 +15,6 @@ kind: Secret
 metadata:
   name: vsphere-cluster-identity-secret
   namespace: ${NAMESPACE}
-  labels:
-    k0rdent.mirantis.com/component: kcm
 stringData:
   username: ${VSPHERE_USER}
   password: ${VSPHERE_PASSWORD}
@@ -28,8 +24,6 @@ kind: Credential
 metadata:
   name: vsphere-cluster-identity-cred
   namespace: ${NAMESPACE}
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   description: vSphere credentials
   identityRef:
diff --git a/go.mod b/go.mod
index 91898df59..d87d9f995 100644
--- a/go.mod
+++ b/go.mod
@@ -17,6 +17,7 @@ require (
 	github.com/opencontainers/go-digest v1.0.1-0.20231025023718-d50d2fec9c98
 	github.com/projectsveltos/addon-controller v0.45.0
 	github.com/projectsveltos/libsveltos v0.45.0
+	github.com/robfig/cron/v3 v3.0.1
 	github.com/segmentio/analytics-go v3.1.0+incompatible
 	github.com/stretchr/testify v1.10.0
 	github.com/zerospiel/velero v0.0.0-20241213181215-1eaa894d12b8
diff --git a/go.sum b/go.sum
index acad716d7..168254bdd 100644
--- a/go.sum
+++ b/go.sum
@@ -436,6 +436,8 @@ github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
 github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
+github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/rubenv/sql-migrate v1.7.0 h1:HtQq1xyTN2ISmQDggnh0c9U3JlP8apWh8YO2jzlXpTI=
diff --git a/hack/templates.sh b/hack/templates.sh
index ced1e8932..cbd0abff5 100755
--- a/hack/templates.sh
+++ b/hack/templates.sh
@@ -45,8 +45,6 @@ metadata:
   name: $template_name
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/internal/controller/backup/collect.go b/internal/controller/backup/collect.go
new file mode 100644
index 000000000..8184ebab9
--- /dev/null
+++ b/internal/controller/backup/collect.go
@@ -0,0 +1,138 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backup
+
+import (
+	"context"
+	"fmt"
+	"maps"
+	"slices"
+	"strings"
+	"time"
+
+	certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1"
+	velerov1api "github.com/zerospiel/velero/pkg/apis/velero/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clusterapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	hmcv1alpha1 "github.com/K0rdent/kcm/api/v1alpha1"
+)
+
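+// getBackupTemplateSpec assembles the Velero BackupSpec used for management backups:
+// it always selects the fixed HMC, cert-manager, Flux and Cluster API components, and
+// additionally selects every ClusterDeployment (by Flux chart name and CAPI cluster-name
+// label) together with the providers of the ClusterTemplates they reference.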
+func (c *Config) getBackupTemplateSpec(ctx context.Context) (*velerov1api.BackupSpec, error) {
+	bs := &velerov1api.BackupSpec{
+		IncludedNamespaces: []string{"*"},
+		ExcludedResources:  []string{"clusters.cluster.x-k8s.io"},          // mutationwebhook
+		TTL:                metav1.Duration{Duration: 30 * 24 * time.Hour}, // velero's default, set it for the sake of UX
+	}
+
+	orSelectors := []*metav1.LabelSelector{
+		// fixed ones
+		selector(hmcv1alpha1.GenericComponentLabelName, hmcv1alpha1.GenericComponentLabelValueHMC),
+		selector(certmanagerv1.PartOfCertManagerControllerLabelKey, "true"),
+		selector(hmcv1alpha1.FluxHelmChartNameKey, hmcv1alpha1.CoreHMCName),
+		selector(clusterapiv1beta1.ProviderNameLabel, "cluster-api"),
+	}
+
+	clusterTemplates := new(hmcv1alpha1.ClusterTemplateList)
+	if err := c.cl.List(ctx, clusterTemplates); err != nil {
+		return nil, fmt.Errorf("failed to list ClusterTemplates: %w", err)
+	}
+
+	if len(clusterTemplates.Items) == 0 { // just collect child cluster names
+		cldSelectors, err := getClusterDeploymentsSelectors(ctx, c.cl, "")
+		if err != nil {
+			return nil, fmt.Errorf("failed to get selectors for all clusterdeployments: %w", err)
+		}
+
+		bs.OrLabelSelectors = sortDedup(append(orSelectors, cldSelectors...))
+
+		return bs, nil
+	}
+
+	for _, cltpl := range clusterTemplates.Items {
+		cldSelectors, err := getClusterDeploymentsSelectors(ctx, c.cl, cltpl.Name)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get selectors for clusterdeployments referencing %s clustertemplate: %w", client.ObjectKeyFromObject(&cltpl), err)
+		}
+
+		// add only enabled providers
+		if len(cldSelectors) > 0 {
+			for _, provider := range cltpl.Status.Providers {
+				orSelectors = append(orSelectors, selector(clusterapiv1beta1.ProviderNameLabel, provider))
+			}
+		}
+
+		orSelectors = append(orSelectors, cldSelectors...)
+	}
+
+	bs.OrLabelSelectors = sortDedup(orSelectors)
+
+	return bs, nil
+}
+
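+// sortDedup sorts the given selectors by their single matchLabels key/value pair
+// and removes duplicates; every selector is expected to hold exactly one pair.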
+func sortDedup(selectors []*metav1.LabelSelector) []*metav1.LabelSelector {
+	const nonKubeSep = "_"
+
+	kvs := make([]string, len(selectors))
+	for i, s := range selectors {
+		for k, v := range s.MatchLabels { // expect only one kv pair
+			kvs[i] = k + nonKubeSep + v
+		}
+	}
+	slices.Sort(kvs)
+
+	for i, kv := range kvs {
+		sepIdx := strings.Index(kv, nonKubeSep)
+		if sepIdx < 0 {
+			continue // make compiler happy
+		}
+		k := kv[:sepIdx]
+		v := kv[sepIdx+len(nonKubeSep):]
+		selectors[i] = selector(k, v)
+	}
+
+	return slices.Clip(
+		slices.CompactFunc(selectors, func(a, b *metav1.LabelSelector) bool {
+			return maps.Equal(a.MatchLabels, b.MatchLabels)
+		}),
+	)
+}
+
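+// getClusterDeploymentsSelectors returns, for each ClusterDeployment (optionally filtered
+// by the referenced ClusterTemplate name), selectors matching its Flux HelmChart name
+// and its CAPI cluster-name label.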
+func getClusterDeploymentsSelectors(ctx context.Context, cl client.Client, clusterTemplateRef string) ([]*metav1.LabelSelector, error) {
+	cldeploys := new(hmcv1alpha1.ClusterDeploymentList)
+	opts := []client.ListOption{}
+	if clusterTemplateRef != "" {
+		opts = append(opts, client.MatchingFields{hmcv1alpha1.ClusterDeploymentTemplateIndexKey: clusterTemplateRef})
+	}
+
+	if err := cl.List(ctx, cldeploys, opts...); err != nil {
+		return nil, fmt.Errorf("failed to list ClusterDeployments: %w", err)
+	}
+
+	selectors := make([]*metav1.LabelSelector, len(cldeploys.Items)*2)
+	for i, cldeploy := range cldeploys.Items {
+		selectors[i*2] = selector(hmcv1alpha1.FluxHelmChartNameKey, cldeploy.Name)
+		selectors[i*2+1] = selector(clusterapiv1beta1.ClusterNameLabel, cldeploy.Name)
+	}
+
+	return selectors, nil
+}
+
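+// selector constructs a LabelSelector with a single matchLabels pair.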
+func selector(k, v string) *metav1.LabelSelector {
+	return &metav1.LabelSelector{
+		MatchLabels: map[string]string{k: v},
+	}
+}
diff --git a/internal/controller/backup/collect_test.go b/internal/controller/backup/collect_test.go
new file mode 100644
index 000000000..c4403916e
--- /dev/null
+++ b/internal/controller/backup/collect_test.go
@@ -0,0 +1,236 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backup
+
+import (
+	"reflect"
+	"testing"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func Test_sortDedup(t *testing.T) {
+	type testInput struct {
+		name      string
+		selectors []*metav1.LabelSelector
+		expected  []*metav1.LabelSelector
+	}
+
+	tests := []testInput{
+		{
+			name: "no dups unordered",
+			selectors: []*metav1.LabelSelector{
+				{
+					MatchLabels: map[string]string{"app": "foo"},
+				},
+				{
+					MatchLabels: map[string]string{"app": "bar"},
+				},
+			},
+			expected: []*metav1.LabelSelector{
+				{
+					MatchLabels: map[string]string{"app": "bar"},
+				},
+				{
+					MatchLabels: map[string]string{"app": "foo"},
+				},
+			},
+		},
+		{
+			name: "some dups in keys",
+			selectors: []*metav1.LabelSelector{
+				{
+					MatchLabels: map[string]string{"app": "foo"},
+				},
+				{
+					MatchLabels: map[string]string{"app": "foo"},
+				},
+				{
+					MatchLabels: map[string]string{"app": "bar"},
+				},
+			},
+			expected: []*metav1.LabelSelector{
+				{
+					MatchLabels: map[string]string{"app": "bar"},
+				},
+				{
+					MatchLabels: map[string]string{"app": "foo"},
+				},
+			},
+		},
+		{
+			name: "all dups",
+			selectors: []*metav1.LabelSelector{
+				{
+					MatchLabels: map[string]string{"app": "foo"},
+				},
+				{
+					MatchLabels: map[string]string{"app": "foo"},
+				},
+				{
+					MatchLabels: map[string]string{"app": "foo"},
+				},
+			},
+			expected: []*metav1.LabelSelector{
+				{
+					MatchLabels: map[string]string{"app": "foo"},
+				},
+			},
+		},
+		{
+			name: "huge dups unordered",
+			selectors: []*metav1.LabelSelector{
+				{
+					MatchLabels: map[string]string{"hmc.mirantis.com/component": "hmc"},
+				},
+				{
+					MatchLabels: map[string]string{"controller.cert-manager.io/fao": "true"},
+				},
+				{
+					MatchLabels: map[string]string{"helm.toolkit.fluxcd.io/name": "hmc"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "cluster-api"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "bootstrap-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "control-plane-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "infrastructure-aws"},
+				},
+				{
+					MatchLabels: map[string]string{"helm.toolkit.fluxcd.io/name": "unusual-cluster-name"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/cluster-name": "unusual-cluster-name"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "bootstrap-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "control-plane-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "infrastructure-azure"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "bootstrap-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "control-plane-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "infrastructure-azure"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "bootstrap-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "control-plane-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "infrastructure-vsphere"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "infrastructure-internal"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "infrastructure-aws"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "bootstrap-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "control-plane-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "infrastructure-openstack"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "bootstrap-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "control-plane-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "infrastructure-vsphere"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "bootstrap-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "control-plane-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "infrastructure-aws"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "infrastructure-azure"},
+				},
+			},
+			expected: []*metav1.LabelSelector{
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/cluster-name": "unusual-cluster-name"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "bootstrap-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "cluster-api"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "control-plane-k0smotron"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "infrastructure-aws"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "infrastructure-azure"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "infrastructure-internal"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "infrastructure-openstack"},
+				},
+				{
+					MatchLabels: map[string]string{"cluster.x-k8s.io/provider": "infrastructure-vsphere"},
+				},
+				{
+					MatchLabels: map[string]string{"controller.cert-manager.io/fao": "true"},
+				},
+				{
+					MatchLabels: map[string]string{"helm.toolkit.fluxcd.io/name": "hmc"},
+				},
+				{
+					MatchLabels: map[string]string{"helm.toolkit.fluxcd.io/name": "unusual-cluster-name"},
+				},
+				{
+					MatchLabels: map[string]string{"hmc.mirantis.com/component": "hmc"},
+				},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		actual := sortDedup(test.selectors)
+		if !reflect.DeepEqual(actual, test.expected) {
+			t.Errorf("sortDedup(%s): \n\tactual:\n\t%v\n\n\twant:\n\t%v", test.name, actual, test.expected)
+		}
+	}
+}
diff --git a/internal/controller/backup/config.go b/internal/controller/backup/config.go
new file mode 100644
index 000000000..e3fabecc8
--- /dev/null
+++ b/internal/controller/backup/config.go
@@ -0,0 +1,145 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backup
+
+import (
+	"fmt"
+	"time"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Config holds the set of parameters required to install the Velero stack and manage it.
+type Config struct {
+	kubeRestConfig *rest.Config
+	scheme         *runtime.Scheme
+	cl             client.Client
+
+	image           string
+	systemNamespace string
+	pluginImages    []string
+	features        []string
+
+	installRequeueAfter time.Duration
+	objectsRequeueAfter time.Duration
+}
+
+// VeleroName is the base name used for the different parts of the Velero stack.
+const VeleroName = "velero"
+
+// ConfigOpt is the functional option for the Config.
+type ConfigOpt func(*Config)
+
+// NewConfig creates a new Config from the given client, REST config and scheme, applying the provided options.
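+// A typical construction (illustrative, assuming a controller-runtime manager mgr is available):
+//
+//	cfg := backup.NewConfig(mgr.GetClient(), mgr.GetConfig(), mgr.GetScheme(), backup.WithVeleroSystemNamespace("velero-system"))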
+func NewConfig(cl client.Client, kc *rest.Config, scheme *runtime.Scheme, opts ...ConfigOpt) *Config {
+	c := newWithDefaults()
+
+	for _, o := range opts {
+		o(c)
+	}
+
+	c.cl = cl
+	c.kubeRestConfig = kc
+	c.scheme = scheme
+
+	return c
+}
+
+// GetVeleroSystemNamespace returns the velero system namespace.
+func (c *Config) GetVeleroSystemNamespace() string { return c.systemNamespace }
+
+func (c *Config) getServiceAccountName() string {
+	saName := VeleroName
+	if c.systemNamespace != VeleroName {
+		saName = VeleroName + "-" + c.systemNamespace + "-sa"
+	}
+	return saName
+}
+
+// WithInstallationRequeueAfter sets the RequeueAfter period if >0 for the Velero stack installation.
+func WithInstallationRequeueAfter(d time.Duration) ConfigOpt {
+	return func(c *Config) {
+		if d == 0 {
+			return
+		}
+		c.installRequeueAfter = d
+	}
+}
+
+// WithObjectsRequeueAfter sets the RequeueAfter period if >0 for the reconciled objects.
+func WithObjectsRequeueAfter(d time.Duration) ConfigOpt {
+	return func(c *Config) {
+		if d == 0 {
+			return
+		}
+		c.objectsRequeueAfter = d
+	}
+}
+
+// WithVeleroSystemNamespace sets the SystemNamespace if non-empty.
+func WithVeleroSystemNamespace(ns string) ConfigOpt {
+	return func(c *Config) {
+		if len(ns) == 0 {
+			return
+		}
+		c.systemNamespace = ns
+	}
+}
+
+// WithPluginImages sets the list of Velero-maintained plugin images if non-empty.
+func WithPluginImages(pluginImages ...string) ConfigOpt {
+	return func(c *Config) {
+		if len(pluginImages) == 0 {
+			return
+		}
+		c.pluginImages = pluginImages
+	}
+}
+
+// WithVeleroImage sets the main image for the Velero deployment if non-empty.
+func WithVeleroImage(image string) ConfigOpt {
+	return func(c *Config) {
+		if len(image) == 0 {
+			return
+		}
+		c.image = image
+	}
+}
+
+// WithFeatures sets a list of features for the Velero deployment.
+func WithFeatures(features ...string) ConfigOpt {
+	return func(c *Config) {
+		if len(features) == 0 {
+			return
+		}
+		c.features = features
+	}
+}
+
+func newWithDefaults() *Config {
+	return &Config{
+		installRequeueAfter: 5 * time.Second,
+		objectsRequeueAfter: 5 * time.Minute,
+		systemNamespace:     VeleroName,
+		image:               fmt.Sprintf("%s/%s:%s", VeleroName, VeleroName, "v1.15.0"), // velero/velero:v1.15.0
+		pluginImages: []string{
+			"velero/velero-plugin-for-aws:v1.11.0",
+			"velero/velero-plugin-for-microsoft-azure:v1.11.0",
+			"velero/velero-plugin-for-gcp:v1.11.0",
+		},
+	}
+}
diff --git a/internal/controller/backup/install.go b/internal/controller/backup/install.go
index 69d89fc60..1e2bf7e72 100644
--- a/internal/controller/backup/install.go
+++ b/internal/controller/backup/install.go
@@ -18,9 +18,11 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"slices"
 	"time"
 
 	velerov1api "github.com/zerospiel/velero/pkg/apis/velero/v1"
+	velerobuilder "github.com/zerospiel/velero/pkg/builder"
 	veleroclient "github.com/zerospiel/velero/pkg/client"
 	veleroinstall "github.com/zerospiel/velero/pkg/install"
 	"github.com/zerospiel/velero/pkg/uploader"
@@ -33,111 +35,84 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/rest"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
-)
-
-type Config struct {
-	kubeRestConfig *rest.Config
-	cl             client.Client
-
-	image           string
-	systemNamespace string
-	features        []string
-
-	requeueAfter time.Duration
-}
 
-const veleroName = "velero"
-
-type ConfigOpt func(*Config)
+	kcmv1alpha1 "github.com/K0rdent/kcm/api/v1alpha1"
+)
 
-func NewConfig(cl client.Client, kc *rest.Config, opts ...ConfigOpt) *Config {
-	c := newWithDefaults()
+// ReconcileVeleroInstallation reconciles installation of velero stack within a management cluster.
+func (c *Config) ReconcileVeleroInstallation(ctx context.Context, mgmt *kcmv1alpha1.Management) (ctrl.Result, error) {
+	requeueResult := ctrl.Result{Requeue: true, RequeueAfter: c.installRequeueAfter}
 
-	for _, o := range opts {
-		o(c)
+	veleroDeploy, err := c.fetchVeleroDeploy(ctx)
+	if err != nil && !apierrors.IsNotFound(err) {
+		return ctrl.Result{}, fmt.Errorf("failed to get velero deploy: %w", err)
 	}
 
-	c.cl = cl
-	c.kubeRestConfig = kc
-
-	return c
-}
-
-func WithRequeueAfter(d time.Duration) ConfigOpt {
-	return func(c *Config) {
-		if d == 0 {
-			return
+	if apierrors.IsNotFound(err) {
+		if err := c.installVelero(ctx); err != nil {
+			return ctrl.Result{}, fmt.Errorf("failed to perform velero stack installation: %w", err)
 		}
-		c.requeueAfter = d
-	}
-}
 
-func WithVeleroSystemNamespace(ns string) ConfigOpt {
-	return func(c *Config) {
-		if len(ns) == 0 {
-			return
-		}
-		c.systemNamespace = ns
+		return requeueResult, nil
 	}
-}
 
-func WithVeleroImage(image string) ConfigOpt {
-	return func(c *Config) {
-		if len(image) == 0 {
-			return
-		}
-		c.image = image
+	originalDeploy := veleroDeploy.DeepCopy()
+
+	installedProperly, err := c.isDeployProperlyInstalled(ctx, veleroDeploy)
+	if err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed to check if velero deploy is properly installed: %w", err)
 	}
-}
 
-func WithFeatures(features ...string) ConfigOpt {
-	return func(c *Config) {
-		if len(features) == 0 {
-			return
+	if !installedProperly {
+		if err := c.installVelero(ctx); err != nil {
+			return ctrl.Result{}, fmt.Errorf("failed to perform velero stack installation: %w", err)
 		}
-		c.features = features
-	}
-}
 
-func newWithDefaults() *Config {
-	return &Config{
-		requeueAfter:    5 * time.Second,
-		systemNamespace: veleroName,
-		image:           fmt.Sprintf("%s/%s:%s", veleroName, veleroName, "v1.15.0"), // velero/velero:v1.15.0
+		return requeueResult, nil
 	}
-}
 
-// ReconcileVeleroInstallation reconciles installation of velero stack within a management cluster.
-func (c *Config) ReconcileVeleroInstallation(ctx context.Context) (ctrl.Result, error) {
-	deployState, err := c.checkVeleroDeployIsInstalled(ctx)
+	isPatchRequired, err := c.normalizeDeploy(ctx, veleroDeploy, mgmt)
 	if err != nil {
-		return ctrl.Result{}, fmt.Errorf("failed to determine if velero is installed: %w", err)
+		return ctrl.Result{}, fmt.Errorf("failed to check if the velero deploy requires a patch: %w", err)
 	}
 
-	if deployState.needInstallation {
-		ctrl.LoggerFrom(ctx).Info("Installing velero stack")
-		if err := c.installVelero(ctx); err != nil {
-			return ctrl.Result{}, fmt.Errorf("failed to perform velero stack installation: %w", err)
+	l := ctrl.LoggerFrom(ctx)
+	if isPatchRequired {
+		l.Info("Patching the deployment")
+		if err := c.cl.Patch(ctx, veleroDeploy, client.MergeFrom(originalDeploy)); err != nil {
+			return ctrl.Result{}, fmt.Errorf("failed to patch velero deploy: %w", err)
 		}
 
-		return ctrl.Result{}, nil
+		l.Info("Successfully patched the deploy")
 	}
 
-	if deployState.needRequeue || deployState.needInstallation {
-		return ctrl.Result{Requeue: true, RequeueAfter: c.requeueAfter}, nil // either the installation has happened or direct requeue is required
+	if !isDeploymentReady(veleroDeploy) {
+		l.Info("Deployment is not ready yet, will requeue")
+		return requeueResult, nil
 	}
 
+	l.Info("Deployment is in the expected state")
 	return ctrl.Result{}, nil
 }
 
+// InstallVeleroCRDs installs all Velero CRDs.
+func (c *Config) InstallVeleroCRDs(cl client.Client) error {
+	dc, err := dynamic.NewForConfig(c.kubeRestConfig)
+	if err != nil {
+		return fmt.Errorf("failed to construct dynamic client: %w", err)
+	}
+
+	return veleroinstall.Install(veleroclient.NewDynamicFactory(dc), cl, veleroinstall.AllCRDs(), io.Discard)
+}
+
 // installVelero installs velero stack with all the required components.
 func (c *Config) installVelero(ctx context.Context) error {
-	saName, err := c.ensureVeleroRBAC(ctx)
-	if err != nil {
+	ctrl.LoggerFrom(ctx).Info("Installing velero stack")
+
+	if err := c.ensureVeleroRBAC(ctx); err != nil {
 		return fmt.Errorf("failed to ensure velero RBAC: %w", err)
 	}
 
@@ -145,8 +120,9 @@ func (c *Config) installVelero(ctx context.Context) error {
 		Namespace: c.systemNamespace,
 		Image:     c.image,
 		Features:  c.features,
+		Plugins:   c.pluginImages,
 
-		ServiceAccountName:      saName,
+		ServiceAccountName:      c.getServiceAccountName(),
 		NoDefaultBackupLocation: true, // no need (explicit BSL)
 
 		DefaultRepoMaintenanceFrequency: time.Hour,          // default
@@ -174,7 +150,6 @@ func (c *Config) installVelero(ctx context.Context) error {
 		UseVolumeSnapshots:          false, // no need
 		BSLConfig:                   nil,   // backupstoragelocation
 		VSLConfig:                   nil,   // volumesnapshotlocation
-		Plugins:                     nil,   // should be installed on-demand (BSL object)
 		CACertData:                  nil,   // no need (explicit BSL)
 		DefaultVolumesToFsBackup:    false, // no volume backups, no need
 		DefaultSnapshotMoveData:     false, // no snapshots, no need
@@ -196,22 +171,169 @@ func (c *Config) installVelero(ctx context.Context) error {
 	return veleroinstall.Install(veleroclient.NewDynamicFactory(dc), c.cl, resources, io.Discard)
 }
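+// installCustomPlugins adds an init container to the Velero deployment for every
+// BackupStorageLocation whose provider has a custom plugin configured in the Management
+// spec, and reports whether the deployment needs to be patched as a result.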
 
+func (c *Config) installCustomPlugins(ctx context.Context, veleroDeploy *appsv1.Deployment, mgmt *kcmv1alpha1.Management) (isPatchRequired bool, _ error) {
+	if mgmt == nil || len(mgmt.Spec.Backup.CustomPlugins) == 0 {
+		return false, nil
+	}
+
+	l := ctrl.LoggerFrom(ctx)
+
+	bsls := new(velerov1api.BackupStorageLocationList)
+	if err := c.cl.List(ctx, bsls, client.InNamespace(c.systemNamespace)); err != nil {
+		return false, fmt.Errorf("failed to list velero backup storage locations: %w", err)
+	}
+
+	// NOTE: we do not care about removing init containers (plugins); they might be managed by the velero CLI directly
+	// TODO: process absent containers?
+	initContainers := slices.Clone(veleroDeploy.Spec.Template.Spec.InitContainers)
+	preLen := len(initContainers)
+	for _, bsl := range bsls.Items {
+		image, ok := mgmt.Spec.Backup.CustomPlugins[bsl.Spec.Provider]
+		if !ok {
+			l.Info("No custom plugin is configured for the BackupStorageLocation provider, skipping", "provider", bsl.Spec.Provider, "bsl_name", bsl.Name, "velero_namespace", c.systemNamespace)
+			continue
+		}
+
+		cont := *velerobuilder.ForPluginContainer(image, corev1.PullIfNotPresent).Result()
+		if !slices.ContainsFunc(initContainers, hasContainer(cont)) {
+			initContainers = append(initContainers, cont)
+		}
+	}
+
+	postLen := len(initContainers)
+
+	if preLen == postLen { // nothing to do
+		return false, nil
+	}
+
+	l.Info("Adding new plugins to the Velero deployment", "new_plugins_count", postLen-preLen)
+	veleroDeploy.Spec.Template.Spec.InitContainers = initContainers
+	return true, nil
+}
+
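+// normalizeDeploy brings the Velero deployment to the expected state: custom plugins present,
+// the expected velero image, and at least one replica; it reports whether a patch is required.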
+func (c *Config) normalizeDeploy(ctx context.Context, veleroDeploy *appsv1.Deployment, mgmt *kcmv1alpha1.Management) (bool, error) {
+	l := ctrl.LoggerFrom(ctx)
+
+	isPatchRequired, err := c.installCustomPlugins(ctx, veleroDeploy, mgmt)
+	if err != nil {
+		return false, fmt.Errorf("failed to check if custom plugins are in place: %w", err)
+	}
+
+	// process two invariants beforehand since the velero installation does not manage them once they have been changed
+	cont := veleroDeploy.Spec.Template.Spec.Containers[0]
+	if cont.Image != c.image {
+		l.Info("Deployment container has unexpected image", "current_image", cont.Image, "expected_image", c.image)
+		cont.Image = c.image
+		veleroDeploy.Spec.Template.Spec.Containers[0] = cont
+		isPatchRequired = true
+	}
+
+	if veleroDeploy.Spec.Replicas == nil || *veleroDeploy.Spec.Replicas == 0 {
+		l.Info("Deployment is scaled to 0, scaling up to 1")
+		one := int32(1) // avoid dereferencing a nil pointer when replicas is unset
+		veleroDeploy.Spec.Replicas = &one
+		isPatchRequired = true
+	}
+
+	return isPatchRequired, nil
+}
+
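+// isDeployProperlyInstalled reports whether the existing Velero deployment has all the expected
+// plugin init containers, the velero container, and the expected service account; otherwise it
+// deletes the deployment and waits for its removal so the stack can be reinstalled.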
+func (c *Config) isDeployProperlyInstalled(ctx context.Context, veleroDeploy *appsv1.Deployment) (bool, error) {
+	l := ctrl.LoggerFrom(ctx)
+
+	l.Info("Checking if Velero deployment is properly installed")
+
+	missingPlugins := []string{}
+	for _, pluginImage := range c.pluginImages {
+		if slices.ContainsFunc(veleroDeploy.Spec.Template.Spec.InitContainers, func(c corev1.Container) bool {
+			return pluginImage == c.Image
+		}) {
+			continue
+		}
+
+		missingPlugins = append(missingPlugins, pluginImage)
+	}
+
+	if len(veleroDeploy.Spec.Template.Spec.Containers) > 0 &&
+		veleroDeploy.Spec.Template.Spec.Containers[0].Name == VeleroName &&
+		veleroDeploy.Spec.Template.Spec.ServiceAccountName == c.getServiceAccountName() &&
+		len(missingPlugins) == 0 {
+		return true, nil
+	}
+
+	if len(missingPlugins) > 0 {
+		l.Info("There are missing init containers in the velero deployment", "missing_images", missingPlugins)
+	} else {
+		l.Info("Deployment has an unexpected container name or service account, the deployment will be reinstalled")
+	}
+
+	// the deploy is "corrupted", remove only it and then reinstall
+	if err := c.cl.Delete(ctx, veleroDeploy); err != nil {
+		return false, fmt.Errorf("failed to delete velero deploy: %w", err)
+	}
+
+	removalCtx, cancel := context.WithCancel(ctx)
+	var checkErr error
+	checkFn := func(ctx context.Context) {
+		key := client.ObjectKeyFromObject(veleroDeploy)
+		ll := l.V(1).WithValues("velero_deploy", key.String())
+		ll.Info("Checking if the deployment has been removed")
+		if checkErr = c.cl.Get(ctx, key, veleroDeploy); checkErr != nil {
+			if apierrors.IsNotFound(checkErr) {
+				ll.Info("Removed successfully")
+				checkErr = nil
+			}
+			cancel()
+			return
+		}
+		ll.Info("Not removed yet")
+	}
+
+	wait.UntilWithContext(removalCtx, checkFn, time.Millisecond*500)
+	if checkErr != nil {
+		return false, fmt.Errorf("failed to wait for velero deploy removal: %w", checkErr)
+	}
+
+	return false, nil // require install
+}
+
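+// hasContainer returns a predicate that reports whether a container matches the given one
+// by name, image, and volume mounts.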
+func hasContainer(container corev1.Container) func(c corev1.Container) bool {
+	return func(c corev1.Container) bool {
+		// if container names, images, or volume mounts (name/mount path) differ,
+		// then consider that the container does not match the given one
+		if c.Name != container.Name ||
+			c.Image != container.Image ||
+			len(c.VolumeMounts) != len(container.VolumeMounts) {
+			return false
+		}
+
+		for i := range c.VolumeMounts {
+			if c.VolumeMounts[i].Name != container.VolumeMounts[i].Name ||
+				c.VolumeMounts[i].MountPath != container.VolumeMounts[i].MountPath {
+				return false
+			}
+		}
+
+		return true
+	}
+}
+
 // ensureVeleroRBAC creates required RBAC objects for velero to be functional
 // with the minimal required set of permissions.
 // Returns the name of created ServiceAccount referenced by created bindings.
-func (c *Config) ensureVeleroRBAC(ctx context.Context) (string, error) {
-	crbName, clusterRoleName, rbName, roleName, saName := veleroName, veleroName, veleroName, veleroName, veleroName
-	if c.systemNamespace != veleroName {
-		vns := veleroName + "-" + c.systemNamespace
-		crbName, clusterRoleName, saName = vns+"-clusterrolebinding", vns+"-clusterrole", crbName+"-sa"
+func (c *Config) ensureVeleroRBAC(ctx context.Context) error {
+	crbName, clusterRoleName, rbName, roleName := VeleroName, VeleroName, VeleroName, VeleroName
+	if c.systemNamespace != VeleroName {
+		vns := VeleroName + "-" + c.systemNamespace
+		crbName, clusterRoleName = vns+"-clusterrolebinding", vns+"-clusterrole"
 		rbName, roleName = vns+"-rolebinding", vns+"-role"
 	}
+	saName := c.getServiceAccountName()
 
 	systemNS := new(corev1.Namespace)
 	if err := c.cl.Get(ctx, client.ObjectKey{Name: c.systemNamespace}, systemNS); apierrors.IsNotFound(err) {
 		systemNS.Name = c.systemNamespace
 		if err := c.cl.Create(ctx, systemNS); err != nil {
-			return "", fmt.Errorf("failed to create %s namespace for velero: %w", c.systemNamespace, err)
+			return fmt.Errorf("failed to create %s namespace for velero: %w", c.systemNamespace, err)
 		}
 	}
 
@@ -220,7 +342,7 @@ func (c *Config) ensureVeleroRBAC(ctx context.Context) (string, error) {
 		sa.Labels = veleroinstall.Labels()
 		return nil
 	}); err != nil {
-		return "", fmt.Errorf("failed to create or update velero service account: %w", err)
+		return fmt.Errorf("failed to create or update velero service account: %w", err)
 	}
 
 	role := &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: roleName, Namespace: c.systemNamespace}}
@@ -240,16 +362,18 @@ func (c *Config) ensureVeleroRBAC(ctx context.Context) (string, error) {
 		}
 		return nil
 	}); err != nil {
-		return "", fmt.Errorf("failed to create or update velero role: %w", err)
+		return fmt.Errorf("failed to create or update velero role: %w", err)
 	}
 
 	roleBinding := &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Name: rbName, Namespace: c.systemNamespace}}
 	if _, err := controllerutil.CreateOrUpdate(ctx, c.cl, roleBinding, func() error {
 		roleBinding.Labels = veleroinstall.Labels()
-		roleBinding.RoleRef = rbacv1.RoleRef{
-			APIGroup: rbacv1.GroupName,
-			Kind:     "Role",
-			Name:     roleName,
+		if roleBinding.ObjectMeta.CreationTimestamp.IsZero() {
+			roleBinding.RoleRef = rbacv1.RoleRef{
+				APIGroup: rbacv1.GroupName,
+				Kind:     "Role",
+				Name:     roleName,
+			}
 		}
 		roleBinding.Subjects = []rbacv1.Subject{
 			{
@@ -260,7 +384,7 @@ func (c *Config) ensureVeleroRBAC(ctx context.Context) (string, error) {
 		}
 		return nil
 	}); err != nil {
-		return "", fmt.Errorf("failed to create or update velero role binding: %w", err)
+		return fmt.Errorf("failed to create or update velero role binding: %w", err)
 	}
 
 	cr := &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName}}
@@ -285,16 +409,18 @@ func (c *Config) ensureVeleroRBAC(ctx context.Context) (string, error) {
 		}
 		return nil
 	}); err != nil {
-		return "", fmt.Errorf("failed to create or update velero cluster role: %w", err)
+		return fmt.Errorf("failed to create or update velero cluster role: %w", err)
 	}
 
 	crb := &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: crbName}}
 	if _, err := controllerutil.CreateOrUpdate(ctx, c.cl, crb, func() error {
 		crb.Labels = veleroinstall.Labels()
-		crb.RoleRef = rbacv1.RoleRef{
-			APIGroup: rbacv1.GroupName,
-			Kind:     "ClusterRole",
-			Name:     clusterRoleName,
+		if crb.ObjectMeta.CreationTimestamp.IsZero() {
+			crb.RoleRef = rbacv1.RoleRef{
+				APIGroup: rbacv1.GroupName,
+				Kind:     "ClusterRole",
+				Name:     clusterRoleName,
+			}
 		}
 		crb.Subjects = []rbacv1.Subject{
 			{
@@ -305,107 +431,15 @@ func (c *Config) ensureVeleroRBAC(ctx context.Context) (string, error) {
 		}
 		return nil
 	}); err != nil {
-		return "", fmt.Errorf("failed to create or update velero cluster role binding: %w", err)
+		return fmt.Errorf("failed to create or update velero cluster role binding: %w", err)
 	}
 
-	return saName, nil
+	return nil
 }
 
-type deployState struct {
-	needRequeue      bool
-	needInstallation bool
-}
-
-// checkVeleroDeployIsInstalled check whether the velero deploy is already installed:
-//   - the deployment is presented;
-//   - is in ready state;
-//   - the only container has the expected image and replicas.
-//
-// If image or replica count are not expected, the deploy will be patched regardingly.
-// If the deploy has unexpected container name, the deploy will be deleted.
-func (c *Config) checkVeleroDeployIsInstalled(ctx context.Context) (deployState, error) {
-	l := ctrl.LoggerFrom(ctx).WithName("velero-deploy-checker")
-
-	l.Info("Checking if Velero deployment is already installed")
-
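+// fetchVeleroDeploy gets the velero Deployment from the configured system namespace.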
+func (c *Config) fetchVeleroDeploy(ctx context.Context) (*appsv1.Deployment, error) {
 	veleroDeploy := new(appsv1.Deployment)
-	err := c.cl.Get(ctx, client.ObjectKey{Namespace: c.systemNamespace, Name: veleroName}, veleroDeploy)
-	if err != nil && !apierrors.IsNotFound(err) {
-		return deployState{}, fmt.Errorf("failed to get velero deploy: %w", err)
-	}
-
-	if apierrors.IsNotFound(err) {
-		l.Info("Deployment is not found, considering the stack has not been (yet) installed")
-		return deployState{needInstallation: true}, nil
-	}
-
-	if len(veleroDeploy.Spec.Template.Spec.Containers) == 0 ||
-		veleroDeploy.Spec.Template.Spec.Containers[0].Name != veleroName {
-		l.Info("Deployment has unexpected container name, considering to reinstall the deployment again")
-		// the deploy is "corrupted", remove only it and then reinstall
-		if err := c.cl.Delete(ctx, veleroDeploy); err != nil {
-			return deployState{}, fmt.Errorf("failed to delete velero deploy: %w", err)
-		}
-
-		removalCtx, cancel := context.WithCancel(ctx)
-		var checkErr error
-		checkFn := func(ctx context.Context) {
-			key := client.ObjectKeyFromObject(veleroDeploy)
-			ll := l.V(1).WithValues("velero_deploy", key.String())
-			ll.Info("Checking if the deployment has been removed")
-			if checkErr = c.cl.Get(ctx, client.ObjectKeyFromObject(veleroDeploy), veleroDeploy); checkErr != nil {
-				if apierrors.IsNotFound(checkErr) {
-					ll.Info("Removed successfully")
-					checkErr = nil
-				}
-				cancel()
-				return
-			}
-			ll.Info("Not removed yet")
-		}
-
-		wait.UntilWithContext(removalCtx, checkFn, time.Millisecond*500)
-		if checkErr != nil {
-			return deployState{}, fmt.Errorf("failed to wait for velero deploy removal: %w", checkErr)
-		}
-
-		return deployState{needInstallation: true}, nil
-	}
-
-	isPatchRequired := false
-	// process 2 invariants beforehand
-	cont := veleroDeploy.Spec.Template.Spec.Containers[0]
-	if cont.Image != c.image {
-		l.Info("Deployment container has unexpected image", "current_image", cont.Image, "expected_image", c.image)
-		cont.Image = c.image
-		veleroDeploy.Spec.Template.Spec.Containers[0] = cont
-		isPatchRequired = true
-	}
-
-	if veleroDeploy.Spec.Replicas == nil || *veleroDeploy.Spec.Replicas == 0 {
-		l.Info("Deployment is scaled to 0, scaling up to 1")
-		*veleroDeploy.Spec.Replicas = 1
-		isPatchRequired = true
-	}
-
-	if isPatchRequired {
-		l.Info("Patching the deployment")
-		if err := c.cl.Patch(ctx, veleroDeploy, client.Merge); err != nil {
-			return deployState{}, fmt.Errorf("failed to patch velero deploy: %w", err)
-		}
-
-		l.Info("Need to requeue after the successful patch")
-		return deployState{needRequeue: true}, nil
-	}
-
-	r := isDeploymentReady(veleroDeploy) // if no invariants then just check the readiness
-	if !r {
-		l.Info("Deployment is not ready yet, will requeue")
-		return deployState{needRequeue: true}, nil
-	}
-
-	l.Info("Deployment is in the expected state")
-	return deployState{}, nil
+	return veleroDeploy, c.cl.Get(ctx, client.ObjectKey{Namespace: c.systemNamespace, Name: VeleroName}, veleroDeploy)
 }
 
 // https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollout_status.go#L76-L89
diff --git a/internal/controller/backup/oneshot.go b/internal/controller/backup/oneshot.go
index edcc4a3a5..a137b135b 100644
--- a/internal/controller/backup/oneshot.go
+++ b/internal/controller/backup/oneshot.go
@@ -16,15 +16,89 @@ package backup
 
 import (
 	"context"
+	"fmt"
 
-	kcmv1 "github.com/K0rdent/kcm/api/v1alpha1"
+	velerov1api "github.com/zerospiel/velero/pkg/apis/velero/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+
+	kcmv1alpha1 "github.com/K0rdent/kcm/api/v1alpha1"
 )
 
-func (*Config) ReconcileBackup(ctx context.Context, backup *kcmv1.Backup) error {
+func (c *Config) ReconcileBackup(ctx context.Context, backup *kcmv1alpha1.ManagementBackup) (ctrl.Result, error) {
+	zeroResult := ctrl.Result{}
+
 	if backup == nil {
-		return nil
+		return zeroResult, nil
+	}
+
+	l := ctrl.LoggerFrom(ctx).WithName("single-reconciler")
+
+	if backup.Status.Reference == nil { // backup is not yet created
+		l.Info("Creating a single ManagementBackup")
+
+		templateSpec, err := c.getBackupTemplateSpec(ctx)
+		if err != nil {
+			return zeroResult, fmt.Errorf("failed to construct velero backup spec: %w", err)
+		}
+
+		veleroBackup := &velerov1api.Backup{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      backup.Name,
+				Namespace: c.systemNamespace,
+			},
+			Spec: *templateSpec,
+		}
+
+		_ = controllerutil.SetControllerReference(backup, veleroBackup, c.scheme, controllerutil.WithBlockOwnerDeletion(false))
+
+		if err := c.cl.Create(ctx, veleroBackup); client.IgnoreAlreadyExists(err) != nil { // avoid err-loop on status update error
+			return zeroResult, fmt.Errorf("failed to create velero Backup: %w", err)
+		}
+
+		l.Info("Initial backup has been created")
+
+		backup.Status.Reference = &corev1.ObjectReference{
+			APIVersion: velerov1api.SchemeGroupVersion.String(),
+			Kind:       "Backup",
+			Namespace:  veleroBackup.Namespace,
+			Name:       veleroBackup.Name,
+		}
+
+		if err := c.cl.Status().Update(ctx, backup); err != nil {
+			return zeroResult, fmt.Errorf("failed to update backup status with updated reference: %w", err)
+		}
+
+		// velero schedule has been created, nothing yet to update here
+		return zeroResult, nil
+	}
+
+	l.Info("Collecting onetime backup status")
+
+	// fetch the velero Backup created above to sync its status
+	veleroBackup := new(velerov1api.Backup)
+	if err := c.cl.Get(ctx, client.ObjectKey{
+		Name:      backup.Name,
+		Namespace: c.systemNamespace,
+	}, veleroBackup); err != nil {
+		return zeroResult, fmt.Errorf("failed to get velero Backup: %w", err)
+	}
+
+	// decrease API calls
+	if equality.Semantic.DeepEqual(backup.Status.GetLastBackupCopy(), veleroBackup.Status) {
+		l.V(1).Info("No new changes to show in the onetime ManagementBackup")
+		return ctrl.Result{RequeueAfter: c.objectsRequeueAfter}, nil
+	}
+
+	l.Info("Updating onetime backup status")
+	backup.Status.LastBackup = &veleroBackup.Status
+	if err := c.cl.Status().Update(ctx, backup); err != nil {
+		return zeroResult, fmt.Errorf("failed to update ManagementBackup %s status: %w", backup.Name, err)
 	}
 
-	_ = ctx
-	return nil
+	return zeroResult, nil
 }
diff --git a/internal/controller/backup/schedule.go b/internal/controller/backup/schedule.go
index 83305c8a4..2a97c810d 100644
--- a/internal/controller/backup/schedule.go
+++ b/internal/controller/backup/schedule.go
@@ -16,15 +16,202 @@ package backup
 
 import (
 	"context"
+	"fmt"
 
-	kcmv1 "github.com/K0rdent/kcm/api/v1alpha1"
+	cron "github.com/robfig/cron/v3"
+	velerov1api "github.com/zerospiel/velero/pkg/apis/velero/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+
+	kcmv1alpha1 "github.com/K0rdent/kcm/api/v1alpha1"
 )
 
-func (*Config) ReconcileScheduledBackup(ctx context.Context, schedule *kcmv1.Backup) error {
-	if schedule == nil {
+func (c *Config) ReconcileScheduledBackup(ctx context.Context, scheduledBackup *kcmv1alpha1.ManagementBackup, cronRaw string) (ctrl.Result, error) {
+	zeroResult := ctrl.Result{}
+
+	if scheduledBackup == nil {
+		return zeroResult, nil
+	}
+
+	l := ctrl.LoggerFrom(ctx).WithName("schedule-reconciler")
+
+	templateSpec, err := c.getBackupTemplateSpec(ctx)
+	if err != nil {
+		return zeroResult, fmt.Errorf("failed to construct velero backup template spec: %w", err)
+	}
+
+	if scheduledBackup.Status.Reference == nil {
+		if scheduledBackup.CreationTimestamp.IsZero() || scheduledBackup.UID == "" {
+			l.Info("Creating scheduled ManagementBackup")
+			if err := c.cl.Create(ctx, scheduledBackup); err != nil {
+				return zeroResult, fmt.Errorf("failed to create scheduled ManagementBackup: %w", err)
+			}
+		}
+
+		veleroSchedule := &velerov1api.Schedule{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      scheduledBackup.Name,
+				Namespace: c.systemNamespace,
+			},
+			Spec: velerov1api.ScheduleSpec{
+				Template:                   *templateSpec,
+				Schedule:                   cronRaw,
+				UseOwnerReferencesInBackup: ref(true),
+				SkipImmediately:            ref(false),
+			},
+		}
+
+		_ = ctrl.SetControllerReference(scheduledBackup, veleroSchedule, c.scheme, controllerutil.WithBlockOwnerDeletion(false))
+
+		createErr := c.cl.Create(ctx, veleroSchedule)
+		isAlreadyExistsErr := apierrors.IsAlreadyExists(createErr)
+		if createErr != nil && !isAlreadyExistsErr {
+			return zeroResult, fmt.Errorf("failed to create velero Schedule: %w", createErr)
+		}
+
+		scheduledBackup.Status.Reference = &corev1.ObjectReference{
+			APIVersion: velerov1api.SchemeGroupVersion.String(),
+			Kind:       "Schedule",
+			Namespace:  veleroSchedule.Namespace,
+			Name:       veleroSchedule.Name,
+		}
+
+		if !isAlreadyExistsErr {
+			l.Info("Initial schedule has been created")
+			if err := c.cl.Status().Update(ctx, scheduledBackup); err != nil {
+				return zeroResult, fmt.Errorf("failed to update scheduled backup status with updated reference: %w", err)
+			}
+			// velero schedule has been created, nothing yet to update here
+			return zeroResult, nil
+		}
+
+		// velero schedule already exists, the scheduled backup has been "restored", update its status
+	}
+
+	l.Info("Collecting scheduled backup status")
+
+	veleroSchedule := new(velerov1api.Schedule)
+	if err := c.cl.Get(ctx, client.ObjectKey{
+		Name:      scheduledBackup.Status.Reference.Name,
+		Namespace: scheduledBackup.Status.Reference.Namespace,
+	}, veleroSchedule); err != nil {
+		return zeroResult, fmt.Errorf("failed to get velero Schedule: %w", err)
+	}
+
+	if cronRaw != "" && veleroSchedule.Spec.Schedule != cronRaw {
+		l.Info("Velero Schedule has outdated crontab, updating", "current_crontab", veleroSchedule.Spec.Schedule, "expected_crontab", cronRaw)
+		originalSchedule := veleroSchedule.DeepCopy()
+		veleroSchedule.Spec.Schedule = cronRaw
+		if err := c.cl.Patch(ctx, veleroSchedule, client.MergeFrom(originalSchedule)); err != nil {
+			return zeroResult, fmt.Errorf("failed to update velero schedule %s with a new crontab '%s': %w", client.ObjectKeyFromObject(veleroSchedule), cronRaw, err)
+		}
+
+		return zeroResult, nil
+	}
+
+	if !equality.Semantic.DeepEqual(templateSpec.OrLabelSelectors, veleroSchedule.Spec.Template.OrLabelSelectors) {
+		l.Info("Velero Schedule has outdated template spec selectors, updating")
+		originalSchedule := veleroSchedule.DeepCopy()
+		veleroSchedule.Spec.Template = *templateSpec
+		if err := c.cl.Patch(ctx, veleroSchedule, client.MergeFrom(originalSchedule)); err != nil {
+			return zeroResult, fmt.Errorf("failed to update velero schedule %s with a new template selectors: %w", client.ObjectKeyFromObject(veleroSchedule), err)
+		}
+
+		return zeroResult, nil
+	}
+
+	// if backup does not exist then it has not been run yet
+	veleroBackup := new(velerov1api.Backup)
+	if !veleroSchedule.Status.LastBackup.IsZero() {
+		l.V(1).Info("Fetching velero Backup to sync its status")
+		if err := c.cl.Get(ctx, client.ObjectKey{
+			Name:      veleroSchedule.TimestampedName(veleroSchedule.Status.LastBackup.Time),
+			Namespace: scheduledBackup.Status.Reference.Namespace,
+		}, veleroBackup); client.IgnoreNotFound(err) != nil {
+			return zeroResult, fmt.Errorf("failed to get velero Backup: %w", err)
+		}
+	}
+
+	var nextAttempt *metav1.Time
+	if !veleroSchedule.Spec.Paused {
+		l.V(1).Info("Parsing crontab schedule", "crontab", cronRaw)
+		cronSchedule, err := cron.ParseStandard(cronRaw)
+		if err != nil {
+			return zeroResult, fmt.Errorf("failed to parse cron schedule %s: %w", cronRaw, err)
+		}
+
+		nextAttempt = getNextAttemptTime(veleroSchedule, cronSchedule)
+	}
+
+	// decrease API calls; right after .status.reference is first set the status itself is still empty, so there is nothing to compare
+	{
+		if scheduledBackup.Status.NextAttempt.Equal(nextAttempt) &&
+			scheduledBackup.Status.SchedulePaused == veleroSchedule.Spec.Paused &&
+			equality.Semantic.DeepEqual(scheduledBackup.Status.GetScheduleCopy(), veleroSchedule.Status) &&
+			equality.Semantic.DeepEqual(scheduledBackup.Status.GetLastBackupCopy(), veleroBackup.Status) {
+			l.V(1).Info("No new changes to show in the scheduled ManagementBackup")
+			return ctrl.Result{RequeueAfter: c.objectsRequeueAfter}, nil
+		}
+	}
+
+	scheduledBackup.Status.Schedule = &veleroSchedule.Status
+	scheduledBackup.Status.NextAttempt = nextAttempt
+	scheduledBackup.Status.SchedulePaused = veleroSchedule.Spec.Paused
+	if !veleroBackup.CreationTimestamp.IsZero() { // exists
+		scheduledBackup.Status.LastBackup = &veleroBackup.Status
+	}
+
+	l.Info("Updating scheduled backup status")
+	if err := c.cl.Status().Update(ctx, scheduledBackup); err != nil {
+		return zeroResult, fmt.Errorf("failed to update status of the scheduled ManagementBackup %s: %w", scheduledBackup.Name, err)
+	}
+
+	return zeroResult, nil
+}
+
+// DisableSchedule pauses the referenced velero Schedule.
+// Does nothing if the ManagementBackup is already marked as paused.
+func (c *Config) DisableSchedule(ctx context.Context, scheduledBackup *kcmv1alpha1.ManagementBackup) error {
+	if scheduledBackup.Status.Reference == nil || scheduledBackup.Status.SchedulePaused { // sanity
 		return nil
 	}
-	_ = ctx
+
+	veleroSchedule := new(velerov1api.Schedule)
+	if err := c.cl.Get(ctx, client.ObjectKey{
+		Name:      scheduledBackup.Status.Reference.Name,
+		Namespace: scheduledBackup.Status.Reference.Namespace,
+	}, veleroSchedule); err != nil {
+		return fmt.Errorf("failed to get velero Schedule: %w", err)
+	}
+
+	original := veleroSchedule.DeepCopy()
+
+	veleroSchedule.Spec.Paused = true
+	if err := c.cl.Patch(ctx, veleroSchedule, client.MergeFrom(original)); err != nil {
+		return fmt.Errorf("failed to disable velero schedule: %w", err)
+	}
+
+	ctrl.LoggerFrom(ctx).Info("Disabled Velero Schedule")
 
 	return nil
 }
+
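+// getNextAttemptTime returns the next run time computed by the cron schedule from the most
+// recent of the Schedule creation time, the last backup time, and the last skipped time.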
+func getNextAttemptTime(schedule *velerov1api.Schedule, cronSchedule cron.Schedule) *metav1.Time {
+	lastBackupTime := schedule.CreationTimestamp.Time
+	if schedule.Status.LastBackup != nil {
+		lastBackupTime = schedule.Status.LastBackup.Time
+	}
+
+	if schedule.Status.LastSkipped != nil && schedule.Status.LastSkipped.After(lastBackupTime) {
+		lastBackupTime = schedule.Status.LastSkipped.Time
+	}
+
+	return &metav1.Time{Time: cronSchedule.Next(lastBackupTime)}
+}
+
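+// ref returns a pointer to the given value.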
+func ref[T any](v T) *T { return &v }
diff --git a/internal/controller/backup/type.go b/internal/controller/backup/type.go
index c3d51bc88..2644f0c4c 100644
--- a/internal/controller/backup/type.go
+++ b/internal/controller/backup/type.go
@@ -16,6 +16,7 @@ package backup
 
 import (
 	"context"
+	"errors"
 	"fmt"
 
 	velerov1api "github.com/zerospiel/velero/pkg/apis/velero/v1"
@@ -24,39 +25,48 @@ import (
 	kcmv1 "github.com/K0rdent/kcm/api/v1alpha1"
 )
 
+// Typ indicates the type of a ManagementBackup object.
 type Typ uint
 
 const (
+	// TypeNone indicates unknown type.
 	TypeNone Typ = iota
+	// TypeSchedule indicates Schedule type.
 	TypeSchedule
+	// TypeBackup indicates the oneshot Backup type.
 	TypeBackup
 )
 
-func (c *Config) GetBackupType(ctx context.Context, instance *kcmv1.Backup, reqName string) (Typ, error) {
-	if instance.Status.Reference != nil {
-		gv := velerov1api.SchemeGroupVersion
-		switch instance.Status.Reference.GroupVersionKind() {
-		case gv.WithKind("Schedule"):
-			return TypeSchedule, nil
-		case gv.WithKind("Backup"):
-			return TypeBackup, nil
-		default:
-			return TypeNone, fmt.Errorf("unexpected kind %s in the backup reference", instance.Status.Reference.Kind)
-		}
+// GetType returns the type of the ManagementBackup; returns TypeNone if undefined.
+func GetType(instance *kcmv1.ManagementBackup) Typ {
+	if instance.Status.Reference == nil {
+		return TypeNone
 	}
 
-	mgmts := new(kcmv1.ManagementList)
-	if err := c.cl.List(ctx, mgmts, client.Limit(1)); err != nil {
-		return TypeNone, fmt.Errorf("failed to list Management: %w", err)
+	gv := velerov1api.SchemeGroupVersion
+	switch instance.Status.Reference.GroupVersionKind() {
+	case gv.WithKind("Schedule"):
+		return TypeSchedule
+	case gv.WithKind("Backup"):
+		return TypeBackup
+	default:
+		return TypeNone
 	}
+}
 
-	if len(mgmts.Items) == 0 { // nothing to do in such case for both scheduled/non-scheduled backups
-		return TypeNone, nil
+// ErrNoManagementExists is a sentinel error indicating no Management object exists.
+var ErrNoManagementExists = errors.New("no Management object exists")
+
+// GetManagement fetches a Management object.
+func (c *Config) GetManagement(ctx context.Context) (*kcmv1.Management, error) {
+	mgmts := new(kcmv1.ManagementList)
+	if err := c.cl.List(ctx, mgmts, client.Limit(1)); err != nil {
+		return nil, fmt.Errorf("failed to list Management: %w", err)
 	}
 
-	if reqName == mgmts.Items[0].Name { // mgmt name == scheduled-backup
-		return TypeSchedule, nil
+	if len(mgmts.Items) == 0 {
+		return nil, ErrNoManagementExists
 	}
 
-	return TypeBackup, nil
+	return &mgmts.Items[0], nil
 }
diff --git a/internal/controller/backup_controller.go b/internal/controller/backup_controller.go
deleted file mode 100644
index ccb734205..000000000
--- a/internal/controller/backup_controller.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2024
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controller
-
-import (
-	"context"
-	"fmt"
-	"os"
-	"strings"
-	"time"
-
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/client-go/rest"
-	ctrl "sigs.k8s.io/controller-runtime"
-	"sigs.k8s.io/controller-runtime/pkg/builder"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/event"
-	"sigs.k8s.io/controller-runtime/pkg/handler"
-	"sigs.k8s.io/controller-runtime/pkg/predicate"
-
-	kcmv1 "github.com/K0rdent/kcm/api/v1alpha1"
-	"github.com/K0rdent/kcm/internal/controller/backup"
-)
-
-// BackupReconciler reconciles a Backup object
-type BackupReconciler struct {
-	client.Client
-
-	kc *rest.Config
-
-	image           string
-	systemNamespace string
-	features        []string
-
-	requeueAfter time.Duration
-}
-
-func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
-	l := ctrl.LoggerFrom(ctx)
-
-	backupInstance := new(kcmv1.Backup)
-	err := r.Client.Get(ctx, req.NamespacedName, backupInstance)
-	if ierr := client.IgnoreNotFound(err); ierr != nil {
-		l.Error(ierr, "unable to fetch Backup")
-		return ctrl.Result{}, ierr
-	}
-
-	bcfg := backup.NewConfig(r.Client, r.kc,
-		backup.WithFeatures(r.features...),
-		backup.WithRequeueAfter(r.requeueAfter),
-		backup.WithVeleroImage(r.image),
-		backup.WithVeleroSystemNamespace(r.systemNamespace),
-	)
-
-	if apierrors.IsNotFound(err) {
-		// if non-scheduled backup is not found(deleted), then just skip the error
-		// if scheduled backup is not found, then it either does not exist yet
-		// and we should create it, or it has been removed;
-		// if the latter is the case, we either should re-create it once again
-		// or do nothing if mgmt backup is disabled
-		mgmt := new(kcmv1.Management)
-		if err := r.Client.Get(ctx, req.NamespacedName, mgmt); err != nil {
-			l.Error(err, "unable to fetch Management")
-			return ctrl.Result{}, client.IgnoreNotFound(err)
-		}
-
-		if !mgmt.Spec.Backup.Enabled {
-			l.Info("Management backup is disabled, nothing to do")
-			return ctrl.Result{}, nil
-		}
-
-		l.Info("Reconciling velero stack")
-		installRes, err := bcfg.ReconcileVeleroInstallation(ctx)
-		if err != nil {
-			l.Error(err, "velero installation")
-			return ctrl.Result{}, err
-		}
-		if installRes.Requeue || installRes.RequeueAfter > 0 {
-			return installRes, nil
-		}
-
-		// required during creation
-		backupInstance.Name = req.Name
-		backupInstance.Namespace = req.Namespace
-	}
-
-	btype, err := bcfg.GetBackupType(ctx, backupInstance, req.Name)
-	if err != nil {
-		l.Error(err, "failed to determine backup type")
-		return ctrl.Result{}, err
-	}
-
-	switch btype {
-	case backup.TypeNone:
-		l.Info("There are nothing to reconcile, management does not exists")
-		// TODO: do we need to reconcile/delete/pause schedules in this case?
-		return ctrl.Result{}, nil
-	case backup.TypeBackup:
-		return ctrl.Result{}, bcfg.ReconcileBackup(ctx, backupInstance)
-	case backup.TypeSchedule:
-		return ctrl.Result{}, bcfg.ReconcileScheduledBackup(ctx, backupInstance)
-	}
-
-	return ctrl.Result{}, nil
-}
-
-// SetupWithManager sets up the controller with the Manager.
-func (r *BackupReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	r.kc = mgr.GetConfig()
-
-	const reqDuration = "BACKUP_CTRL_REQUEUE_DURATION"
-	r.features = strings.Split(strings.ReplaceAll(os.Getenv("BACKUP_FEATURES"), ", ", ","), ",")
-	r.systemNamespace = os.Getenv("BACKUP_SYSTEM_NAMESPACE")
-	r.image = os.Getenv("BACKUP_BASIC_IMAGE")
-	d, err := time.ParseDuration(os.Getenv(reqDuration))
-	if err != nil {
-		return fmt.Errorf("failed to parse env %s duration: %w", reqDuration, err)
-	}
-	r.requeueAfter = d
-
-	return ctrl.NewControllerManagedBy(mgr).
-		For(&kcmv1.Backup{}).
-		Watches(&kcmv1.Management{}, handler.EnqueueRequestsFromMapFunc(func(_ context.Context, o client.Object) []ctrl.Request {
-			return []ctrl.Request{{NamespacedName: client.ObjectKeyFromObject(o)}}
-		}), builder.WithPredicates( // watch mgmt.spec.backup to manage the (only) scheduled Backup
-			predicate.Funcs{
-				GenericFunc: func(event.TypedGenericEvent[client.Object]) bool { return false },
-				DeleteFunc:  func(event.TypedDeleteEvent[client.Object]) bool { return false },
-				CreateFunc: func(tce event.TypedCreateEvent[client.Object]) bool {
-					mgmt, ok := tce.Object.(*kcmv1.Management)
-					if !ok {
-						return false
-					}
-
-					return mgmt.Spec.Backup.Enabled
-				},
-				UpdateFunc: func(tue event.TypedUpdateEvent[client.Object]) bool {
-					oldMgmt, ok := tue.ObjectOld.(*kcmv1.Management)
-					if !ok {
-						return false
-					}
-
-					newMgmt, ok := tue.ObjectNew.(*kcmv1.Management)
-					if !ok {
-						return false
-					}
-
-					return (newMgmt.Spec.Backup.Enabled != oldMgmt.Spec.Backup.Enabled ||
-						newMgmt.Spec.Backup.Schedule != oldMgmt.Spec.Backup.Schedule)
-				},
-			},
-		)).
-		Complete(r)
-}
diff --git a/internal/controller/credential_controller.go b/internal/controller/credential_controller.go
index 7faf56d9e..0bd21f355 100644
--- a/internal/controller/credential_controller.go
+++ b/internal/controller/credential_controller.go
@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"time"
 
+	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	apimeta "k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -31,11 +32,11 @@ import (
 	"github.com/K0rdent/kcm/internal/utils"
 )
 
-const defaultSyncPeriod = 15 * time.Minute
-
 // CredentialReconciler reconciles a Credential object
 type CredentialReconciler struct {
 	client.Client
+	SystemNamespace string
+	syncPeriod      time.Duration
 }
 
 func (r *CredentialReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, err error) {
@@ -81,6 +82,16 @@ func (r *CredentialReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 		return ctrl.Result{}, err
 	}
 
+	if err := utils.AddHMCComponentLabel(ctx, r.Client, clIdty); err != nil {
+		l.Error(err, "adding component label to the ClusterIdentity")
+		return ctrl.Result{}, err
+	}
+
+	if err := r.updateAWSReferencedSecret(ctx, cred.Spec.IdentityRef); err != nil {
+		l.Error(err, "adding component label to the Secret from the Identity Reference")
+		return ctrl.Result{}, err
+	}
+
 	apimeta.SetStatusCondition(cred.GetConditions(), metav1.Condition{
 		Type:    kcm.CredentialReadyCondition,
 		Status:  metav1.ConditionTrue,
@@ -88,9 +99,7 @@ func (r *CredentialReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 		Message: "Credential is ready",
 	})
 
-	return ctrl.Result{
-		RequeueAfter: defaultSyncPeriod,
-	}, nil
+	return ctrl.Result{RequeueAfter: r.syncPeriod}, nil
 }
 
 func (r *CredentialReconciler) updateStatus(ctx context.Context, cred *kcm.Credential) error {
@@ -109,8 +118,118 @@ func (r *CredentialReconciler) updateStatus(ctx context.Context, cred *kcm.Crede
 	return nil
 }
 
+// updateAWSReferencedSecret updates the referenced AWS*Identity Secret with the component label.
+// The component label on such a Secret is required for a proper management backup.
+func (r *CredentialReconciler) updateAWSReferencedSecret(ctx context.Context, idRef *corev1.ObjectReference) error {
+	// avoid "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" dependency
+	const (
+		awsClusterControllerIdentityKind = "AWSClusterControllerIdentity"
+		awsClusterStaticIdentityKind     = "AWSClusterStaticIdentity"
+		awsClusterRoleIdentityKind       = "AWSClusterRoleIdentity"
+	)
+
+	switch idRef.Kind {
+	case awsClusterControllerIdentityKind: // no secret refs
+		return nil
+	case awsClusterRoleIdentityKind:
+		return r.updateAWSSecretFromClusterRoleIdentity(ctx, idRef)
+	case awsClusterStaticIdentityKind:
+		return r.updateAWSSecretFromClusterStaticIdentity(ctx, idRef)
+	}
+
+	return nil
+}
+
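+// updateAWSSecretFromClusterRoleIdentity follows the nested sourceIdentityRef chain of an AWSClusterRoleIdentity
+// (up to limitRefs levels) and labels the Secret referenced by the resolved AWSClusterStaticIdentity;
+// AWSClusterControllerIdentity references carry no Secret and are skipped.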
+func (r *CredentialReconciler) updateAWSSecretFromClusterRoleIdentity(ctx context.Context, initialReference *corev1.ObjectReference) error {
+	const (
+		awsClusterControllerIdentityKind = "AWSClusterControllerIdentity"
+		awsClusterStaticIdentityKind     = "AWSClusterStaticIdentity"
+	)
+
+	kind, name := initialReference.Kind, initialReference.Name
+
+	const limitRefs = 3 // consider an error if more nested refs
+	for range limitRefs {
+		clRoleIdentity := new(unstructured.Unstructured)
+		clRoleIdentity.SetAPIVersion(initialReference.APIVersion)
+
+		clRoleIdentity.SetKind(kind)
+		clRoleIdentity.SetName(name)
+
+		if err := r.Client.Get(ctx, client.ObjectKey{Name: name}, clRoleIdentity); err != nil { // cluster-scoped
+			return fmt.Errorf("failed to get %s identity reference: %w", kind, err)
+		}
+
+		srcIdentityRefKind, ok, err := unstructured.NestedString(clRoleIdentity.Object, "spec", "sourceIdentityRef", "kind")
+		if err != nil {
+			return fmt.Errorf("failed to extract .spec.sourceIdentityRef.kind from %s %s: %w", clRoleIdentity.GetKind(), clRoleIdentity.GetName(), err)
+		}
+		if !ok {
+			return nil // sanity
+		}
+
+		srcIdentityRefName, ok, err := unstructured.NestedString(clRoleIdentity.Object, "spec", "sourceIdentityRef", "name")
+		if err != nil {
+			return fmt.Errorf("failed to extract .spec.sourceIdentityRef.name from %s %s: %w", clRoleIdentity.GetKind(), clRoleIdentity.GetName(), err)
+		}
+		if !ok {
+			return nil // sanity
+		}
+
+		kind, name = srcIdentityRefKind, srcIdentityRefName
+
+		switch srcIdentityRefKind {
+		case awsClusterControllerIdentityKind: // no secret refs
+			return nil
+		case awsClusterStaticIdentityKind:
+			newReference := &corev1.ObjectReference{
+				APIVersion: initialReference.APIVersion,
+				Kind:       kind,
+				Name:       name,
+			}
+			return r.updateAWSSecretFromClusterStaticIdentity(ctx, newReference)
+		}
+		// nested refs case, continue
+	}
+
+	return fmt.Errorf("failed to determine the secrets data from the %s %s identity reference", initialReference.Kind, initialReference.Name)
+}
+
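+// updateAWSSecretFromClusterStaticIdentity labels the Secret named in .spec.secretRef
+// of the given AWSClusterStaticIdentity with the component label.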
+func (r *CredentialReconciler) updateAWSSecretFromClusterStaticIdentity(ctx context.Context, reference *corev1.ObjectReference) error {
+	clStaticIdentity := new(unstructured.Unstructured)
+	clStaticIdentity.SetAPIVersion(reference.APIVersion)
+	clStaticIdentity.SetKind(reference.Kind)
+	clStaticIdentity.SetName(reference.Name)
+
+	if err := r.Client.Get(ctx, client.ObjectKey{Name: reference.Name}, clStaticIdentity); err != nil { // cluster-scoped
+		return fmt.Errorf("failed to get %s identity reference: %w", reference.Kind, err)
+	}
+
+	secretName, ok, err := unstructured.NestedString(clStaticIdentity.Object, "spec", "secretRef")
+	if err != nil {
+		return fmt.Errorf("failed to extract .spec.secretRef from %s %s: %w", clStaticIdentity.GetKind(), clStaticIdentity.GetName(), err)
+	}
+	if !ok {
+		return nil // nothing to do
+	}
+
+	key := client.ObjectKey{Name: secretName, Namespace: r.SystemNamespace}
+	secret := new(corev1.Secret)
+	if err := r.Client.Get(ctx, key, secret); err != nil {
+		return fmt.Errorf("failed to get Secret %s referenced in %s %s: %w", key, clStaticIdentity.GetKind(), clStaticIdentity.GetName(), err)
+	}
+
+	if err := utils.AddHMCComponentLabel(ctx, r.Client, secret); err != nil {
+		return fmt.Errorf("failed to add component label: %w", err)
+	}
+
+	return nil
+}
+
 // SetupWithManager sets up the controller with the Manager.
 func (r *CredentialReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	r.syncPeriod = 15 * time.Minute
+
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&kcm.Credential{}).
 		Complete(r)
diff --git a/internal/controller/management_backup_controller.go b/internal/controller/management_backup_controller.go
new file mode 100644
index 000000000..8a369b09d
--- /dev/null
+++ b/internal/controller/management_backup_controller.go
@@ -0,0 +1,324 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"slices"
+	"strings"
+	"time"
+
+	velerov1api "github.com/zerospiel/velero/pkg/apis/velero/v1"
+	appsv1 "k8s.io/api/apps/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/util/workqueue"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+
+	hmcv1alpha1 "github.com/K0rdent/kcm/api/v1alpha1"
+	"github.com/K0rdent/kcm/internal/controller/backup"
+)
+
+// ManagementBackupReconciler reconciles a ManagementBackup object
+type ManagementBackupReconciler struct {
+	client.Client
+
+	config *backup.Config
+}
+
+func (r *ManagementBackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	l := ctrl.LoggerFrom(ctx)
+
+	backupInstance := new(hmcv1alpha1.ManagementBackup)
+	err := r.Client.Get(ctx, req.NamespacedName, backupInstance)
+	if ierr := client.IgnoreNotFound(err); ierr != nil {
+		l.Error(ierr, "unable to fetch ManagementBackup")
+		return ctrl.Result{}, ierr
+	}
+
+	instanceIsNotFound := apierrors.IsNotFound(err)
+
+	mgmt, err := r.config.GetManagement(ctx)
+	if err != nil && !errors.Is(err, backup.ErrNoManagementExists) { // error during list
+		return ctrl.Result{}, err
+	}
+
+	btype := backup.GetType(backupInstance)
+	if errors.Is(err, backup.ErrNoManagementExists) {
+		// no mgmt, if backup is not found then nothing to do
+		if instanceIsNotFound {
+			l.Info("No Management object exists, ManagementBackup object has not been found, nothing to do")
+			return ctrl.Result{}, nil
+		}
+
+		// backup exists, disable if schedule and active, otherwise proceed with reconciliation (status updates)
+		if btype == backup.TypeSchedule {
+			if err := r.config.DisableSchedule(ctx, backupInstance); err != nil {
+				l.Error(err, "failed to disable scheduled ManagementBackup")
+				return ctrl.Result{}, err
+			}
+		}
+	}
+
+	requestEqualsMgmt := mgmt != nil && req.Name == mgmt.Name && req.Namespace == mgmt.Namespace
+	if instanceIsNotFound { // mgmt exists
+		if !requestEqualsMgmt { // oneshot backup
+			l.Info("ManagementBackup object has not been found, nothing to do")
+			return ctrl.Result{}, nil
+		}
+
+		btype = backup.TypeSchedule
+
+		// required during creation
+		backupInstance.Name = req.Name
+		backupInstance.Namespace = req.Namespace
+	}
+
+	if requestEqualsMgmt {
+		l.Info("Reconciling velero stack parts")
+		installRes, err := r.config.ReconcileVeleroInstallation(ctx, mgmt)
+		if err != nil {
+			l.Error(err, "velero stack installation")
+			return ctrl.Result{}, err
+		}
+
+		if !installRes.IsZero() {
+			return installRes, nil
+		}
+	}
+
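+	// no reference yet: the request for the Management object maps to the scheduled backup, any other request is a oneshot backup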
+	if btype == backup.TypeNone {
+		if requestEqualsMgmt {
+			btype = backup.TypeSchedule
+		} else {
+			btype = backup.TypeBackup
+		}
+	}
+
+	switch btype {
+	case backup.TypeBackup:
+		return r.config.ReconcileBackup(ctx, backupInstance)
+	case backup.TypeSchedule:
+		return r.config.ReconcileScheduledBackup(ctx, backupInstance, mgmt.GetBackupSchedule())
+	case backup.TypeNone:
+		fallthrough
+	default:
+		return ctrl.Result{}, nil
+	}
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *ManagementBackupReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	var err error
+	r.config, err = parseEnvsToConfig(r.Client, mgr)
+	if err != nil {
+		return fmt.Errorf("failed to parse envs: %w", err)
+	}
+
+	// NOTE: without installed CRDs it is impossible to initialize informers,
+	// and the uncached client is required because at this point the manager
+	// has not started the cache yet
+	uncachedCl, err := client.New(mgr.GetConfig(), client.Options{Cache: nil})
+	if err != nil {
+		return fmt.Errorf("failed to create uncached client: %w", err)
+	}
+
+	if err := r.config.InstallVeleroCRDs(uncachedCl); err != nil {
+		return fmt.Errorf("failed to install velero CRDs: %w", err)
+	}
+
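+	// getManagementNameIfEnabled maps events to the Management object only when management
+	// backup is enabled; an empty request means there is nothing to enqueue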
+	getManagementNameIfEnabled := func(ctx context.Context) ctrl.Request {
+		mgmt, err := r.config.GetManagement(ctx)
+		if err != nil || !mgmt.Spec.Backup.Enabled {
+			return ctrl.Request{}
+		}
+
+		return ctrl.Request{NamespacedName: client.ObjectKeyFromObject(mgmt)}
+	}
+
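+	// enqueueIfManagementEnabled drops the empty requests produced when management backup is disabled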
+	enqueueIfManagementEnabled := func(req ctrl.Request) []ctrl.Request {
+		if req.Name == "" && req.Namespace == "" {
+			return nil
+		}
+		return []ctrl.Request{req}
+	}
+
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&hmcv1alpha1.ManagementBackup{}).
+		Owns(&velerov1api.Backup{},
+			builder.WithPredicates(
+				predicate.Funcs{
+					GenericFunc: func(event.TypedGenericEvent[client.Object]) bool { return false },
+					DeleteFunc:  func(event.TypedDeleteEvent[client.Object]) bool { return false },
+				},
+			),
+			builder.MatchEveryOwner,
+		).
+		Owns(&velerov1api.Schedule{}, builder.WithPredicates(
+			predicate.Funcs{
+				GenericFunc: func(event.TypedGenericEvent[client.Object]) bool { return false },
+				DeleteFunc:  func(event.TypedDeleteEvent[client.Object]) bool { return false },
+			},
+		)).
+		Watches(&velerov1api.BackupStorageLocation{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []ctrl.Request {
+			return enqueueIfManagementEnabled(getManagementNameIfEnabled(ctx))
+		}), builder.WithPredicates(
+			predicate.Funcs{
+				GenericFunc: func(event.TypedGenericEvent[client.Object]) bool { return false },
+				DeleteFunc:  func(event.TypedDeleteEvent[client.Object]) bool { return false },
+				CreateFunc:  func(event.TypedCreateEvent[client.Object]) bool { return true },
+				UpdateFunc: func(tue event.TypedUpdateEvent[client.Object]) bool {
+					oldBSL, ok := tue.ObjectOld.(*velerov1api.BackupStorageLocation)
+					if !ok {
+						return false
+					}
+
+					newBSL, ok := tue.ObjectNew.(*velerov1api.BackupStorageLocation)
+					if !ok {
+						return false
+					}
+
+					return newBSL.Spec.Provider != oldBSL.Spec.Provider
+				},
+			},
+		)).
+		Watches(&hmcv1alpha1.Management{}, handler.Funcs{
+			GenericFunc: nil,
+			DeleteFunc: func(_ context.Context, tde event.TypedDeleteEvent[client.Object], q workqueue.TypedRateLimitingInterface[ctrl.Request]) {
+				q.Add(ctrl.Request{NamespacedName: client.ObjectKeyFromObject(tde.Object)}) // disable schedule on mgmt absence
+			},
+			CreateFunc: func(_ context.Context, tce event.TypedCreateEvent[client.Object], q workqueue.TypedRateLimitingInterface[ctrl.Request]) {
+				mgmt, ok := tce.Object.(*hmcv1alpha1.Management)
+				if !ok || !mgmt.Spec.Backup.Enabled {
+					return
+				}
+
+				q.Add(ctrl.Request{NamespacedName: client.ObjectKeyFromObject(tce.Object)})
+			},
+			UpdateFunc: func(_ context.Context, tue event.TypedUpdateEvent[client.Object], q workqueue.TypedRateLimitingInterface[ctrl.Request]) {
+				oldMgmt, ok := tue.ObjectOld.(*hmcv1alpha1.Management)
+				if !ok {
+					return
+				}
+
+				newMgmt, ok := tue.ObjectNew.(*hmcv1alpha1.Management)
+				if !ok {
+					return
+				}
+
+				if newMgmt.Spec.Backup.Enabled == oldMgmt.Spec.Backup.Enabled &&
+					newMgmt.Spec.Backup.Schedule == oldMgmt.Spec.Backup.Schedule {
+					return
+				}
+
+				q.Add(ctrl.Request{NamespacedName: client.ObjectKeyFromObject(tue.ObjectNew)})
+			},
+		}).
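+		// requeue the Management request when the velero Deployment in the system namespace is updated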
+		Watches(&appsv1.Deployment{}, handler.Funcs{
+			GenericFunc: nil,
+			DeleteFunc:  nil,
+			CreateFunc:  nil,
+			UpdateFunc: func(ctx context.Context, tue event.TypedUpdateEvent[client.Object], q workqueue.TypedRateLimitingInterface[ctrl.Request]) {
+				if tue.ObjectNew.GetNamespace() != r.config.GetVeleroSystemNamespace() || tue.ObjectNew.GetName() != backup.VeleroName {
+					return
+				}
+
+				q.Add(getManagementNameIfEnabled(ctx))
+			},
+		}).
+		Watches(&hmcv1alpha1.ClusterDeployment{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []ctrl.Request {
+			return enqueueIfManagementEnabled(getManagementNameIfEnabled(ctx))
+		}), builder.WithPredicates(
+			predicate.Funcs{
+				GenericFunc: func(event.TypedGenericEvent[client.Object]) bool { return false },
+				CreateFunc:  func(event.TypedCreateEvent[client.Object]) bool { return true },
+				DeleteFunc:  func(event.TypedDeleteEvent[client.Object]) bool { return true },
+				UpdateFunc: func(tue event.TypedUpdateEvent[client.Object]) bool {
+					oldObj, ok := tue.ObjectOld.(*hmcv1alpha1.ClusterDeployment)
+					if !ok {
+						return false
+					}
+
+					newObj, ok := tue.ObjectNew.(*hmcv1alpha1.ClusterDeployment)
+					if !ok {
+						return false
+					}
+
+					return newObj.Spec.Template != oldObj.Spec.Template
+				},
+			},
+		)).
+		Watches(&hmcv1alpha1.ClusterTemplate{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, _ client.Object) []ctrl.Request {
+			return enqueueIfManagementEnabled(getManagementNameIfEnabled(ctx))
+		}), builder.WithPredicates(
+			predicate.Funcs{
+				GenericFunc: func(event.TypedGenericEvent[client.Object]) bool { return false },
+				CreateFunc:  func(event.TypedCreateEvent[client.Object]) bool { return true },
+				DeleteFunc:  func(event.TypedDeleteEvent[client.Object]) bool { return true },
+				UpdateFunc: func(tue event.TypedUpdateEvent[client.Object]) bool {
+					oldObj, ok := tue.ObjectOld.(*hmcv1alpha1.ClusterTemplate)
+					if !ok {
+						return false
+					}
+
+					newObj, ok := tue.ObjectNew.(*hmcv1alpha1.ClusterTemplate)
+					if !ok {
+						return false
+					}
+
+					return !slices.Equal(oldObj.Status.Providers, newObj.Status.Providers)
+				},
+			},
+		)).
+		Complete(r)
+}
+
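+// parseEnvsToConfig assembles a backup.Config from controller environment variables:
+// requeue durations, velero and plugin images, the velero system namespace, and feature flags.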
+func parseEnvsToConfig(cl client.Client, mgr interface {
+	GetScheme() *runtime.Scheme
+	GetConfig() *rest.Config
+},
+) (*backup.Config, error) {
+	const (
+		installationReqDurationEnv = "BACKUP_CTRL_INSTALL_READINESS_REQUEUE_DURATION"
+		reqDurationEnv             = "BACKUP_CTRL_REQUEUE_DURATION"
+	)
+	installationRequeueAfter, err := time.ParseDuration(os.Getenv(installationReqDurationEnv))
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse env %s duration: %w", installationReqDurationEnv, err)
+	}
+
+	objectsRequeueAfter, err := time.ParseDuration(os.Getenv(reqDurationEnv))
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse env %s duration: %w", reqDurationEnv, err)
+	}
+
+	return backup.NewConfig(cl, mgr.GetConfig(), mgr.GetScheme(),
+		backup.WithFeatures(strings.Split(strings.ReplaceAll(os.Getenv("BACKUP_FEATURES"), ", ", ","), ",")...),
+		backup.WithInstallationRequeueAfter(installationRequeueAfter),
+		backup.WithObjectsRequeueAfter(objectsRequeueAfter),
+		backup.WithVeleroImage(os.Getenv("BACKUP_BASIC_IMAGE")),
+		backup.WithVeleroSystemNamespace(os.Getenv("BACKUP_SYSTEM_NAMESPACE")),
+		backup.WithPluginImages(strings.Split(strings.ReplaceAll(os.Getenv("BACKUP_PLUGIN_IMAGES"), ", ", ","), ",")...),
+	), nil
+}
diff --git a/internal/controller/backup_controller_test.go b/internal/controller/management_backup_controller_test.go
similarity index 87%
rename from internal/controller/backup_controller_test.go
rename to internal/controller/management_backup_controller_test.go
index f224ada89..7eb14495f 100644
--- a/internal/controller/backup_controller_test.go
+++ b/internal/controller/management_backup_controller_test.go
@@ -15,34 +15,30 @@
 package controller
 
 import (
-	"context"
-
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 
-	kcmv1 "github.com/K0rdent/kcm/api/v1alpha1"
+	hmcmirantiscomv1alpha1 "github.com/K0rdent/kcm/api/v1alpha1"
 )
 
 var _ = Describe("Backup Controller", func() {
 	Context("When reconciling a resource", func() {
 		const resourceName = "test-resource"
 
-		ctx := context.Background()
-
 		typeNamespacedName := types.NamespacedName{
 			Name:      resourceName,
 			Namespace: metav1.NamespaceAll,
 		}
-		backup := &kcmv1.Backup{}
+		backup := &hmcmirantiscomv1alpha1.ManagementBackup{}
 
 		BeforeEach(func() {
 			By("creating the custom resource for the Kind Backup")
 			err := k8sClient.Get(ctx, typeNamespacedName, backup)
 			if err != nil && errors.IsNotFound(err) {
-				resource := &kcmv1.Backup{
+				resource := &hmcmirantiscomv1alpha1.ManagementBackup{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      resourceName,
 						Namespace: metav1.NamespaceAll,
@@ -53,7 +49,7 @@ var _ = Describe("Backup Controller", func() {
 		})
 
 		AfterEach(func() {
-			resource := &kcmv1.Backup{}
+			resource := &hmcmirantiscomv1alpha1.ManagementBackup{}
 			err := k8sClient.Get(ctx, typeNamespacedName, resource)
 			Expect(err).NotTo(HaveOccurred())
 
@@ -63,7 +59,7 @@ var _ = Describe("Backup Controller", func() {
 
 		It("should successfully reconcile the resource", func() {
 			By("Reconciling the created resource")
-			controllerReconciler := &BackupReconciler{
+			controllerReconciler := &ManagementBackupReconciler{
 				Client: k8sClient,
 			}
 			_ = controllerReconciler
diff --git a/internal/credspropagation/common.go b/internal/credspropagation/common.go
index c61273c15..7eeb4e54c 100644
--- a/internal/credspropagation/common.go
+++ b/internal/credspropagation/common.go
@@ -59,6 +59,9 @@ func makeSecret(name string, data map[string][]byte) *corev1.Secret {
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
 			Namespace: metav1.NamespaceSystem,
+			Labels: map[string]string{
+				hmc.GenericComponentLabelName: hmc.GenericComponentLabelValueHMC,
+			},
 		},
 		Data: data,
 	}
@@ -71,6 +74,9 @@ func makeConfigMap(name string, data map[string]string) *corev1.ConfigMap {
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      name,
 			Namespace: metav1.NamespaceSystem,
+			Labels: map[string]string{
+				hmc.GenericComponentLabelName: hmc.GenericComponentLabelValueHMC,
+			},
 		},
 		Data: data,
 	}
diff --git a/internal/utils/status/status.go b/internal/utils/status/status.go
index 28ca98e51..71b3c164d 100644
--- a/internal/utils/status/status.go
+++ b/internal/utils/status/status.go
@@ -36,12 +36,12 @@ func ConditionsFromUnstructured(unstrObj *unstructured.Unstructured) ([]metav1.C
 	// Iterate the status conditions and ensure each condition reports a "Ready"
 	// status.
 	unstrConditions, found, err := unstructured.NestedSlice(unstrObj.Object, "status", "conditions")
-	if !found {
-		return nil, fmt.Errorf("no status conditions found for %s: %s", objKind, objName)
-	}
 	if err != nil {
 		return nil, fmt.Errorf("failed to get status conditions for %s: %s: %w", objKind, objName, err)
 	}
+	if !found {
+		return nil, fmt.Errorf("no status conditions found for %s: %s", objKind, objName)
+	}
 
 	conditions := make([]metav1.Condition, 0, len(unstrConditions))
 
diff --git a/internal/webhook/clusterdeployment_webhook.go b/internal/webhook/clusterdeployment_webhook.go
index 601fd43f9..66fefd052 100644
--- a/internal/webhook/clusterdeployment_webhook.go
+++ b/internal/webhook/clusterdeployment_webhook.go
@@ -25,6 +25,8 @@ import (
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
+	capz "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
+	capv "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
@@ -268,6 +270,8 @@ func isCredMatchTemplate(cred *kcmv1.Credential, template *kcmv1.ClusterTemplate
 		return fmt.Errorf("wrong kind of the ClusterIdentity %q for provider %q", idtyKind, provider)
 	}
 
+	const secretKind = "Secret"
+
 	for _, provider := range template.Status.Providers {
 		switch provider {
 		case "infrastructure-aws":
@@ -277,16 +281,16 @@ func isCredMatchTemplate(cred *kcmv1.Credential, template *kcmv1.ClusterTemplate
 				return errMsg(provider)
 			}
 		case "infrastructure-azure":
-			if idtyKind != "AzureClusterIdentity" &&
-				idtyKind != "Secret" {
+			if idtyKind != capz.AzureClusterIdentityKind &&
+				idtyKind != secretKind {
 				return errMsg(provider)
 			}
 		case "infrastructure-vsphere":
-			if idtyKind != "VSphereClusterIdentity" {
+			if idtyKind != string(capv.VSphereClusterIdentityKind) {
 				return errMsg(provider)
 			}
 		case "infrastructure-openstack", "infrastructure-internal":
-			if idtyKind != "Secret" {
+			if idtyKind != secretKind {
 				return errMsg(provider)
 			}
 		default:
diff --git a/internal/webhook/management_webhook_test.go b/internal/webhook/management_webhook_test.go
index dd46eb4b1..f23455bf1 100644
--- a/internal/webhook/management_webhook_test.go
+++ b/internal/webhook/management_webhook_test.go
@@ -446,13 +446,13 @@ func TestManagementDefault(t *testing.T) {
 	}{
 		{
 			name:     "should not set default backup schedule if already set",
-			input:    management.NewManagement(management.WithBackup(v1alpha1.ManagementBackup{Enabled: true, Schedule: "0"})),
-			expected: management.NewManagement(management.WithBackup(v1alpha1.ManagementBackup{Enabled: true, Schedule: "0"})),
+			input:    management.NewManagement(management.WithBackup(v1alpha1.Backup{Enabled: true, Schedule: "0"})),
+			expected: management.NewManagement(management.WithBackup(v1alpha1.Backup{Enabled: true, Schedule: "0"})),
 		},
 		{
 			name:     "should set every six hours default backup schedule if backup is enabled but not set",
-			input:    management.NewManagement(management.WithBackup(v1alpha1.ManagementBackup{Enabled: true})),
-			expected: management.NewManagement(management.WithBackup(v1alpha1.ManagementBackup{Enabled: true, Schedule: "0 */6 * * *"})),
+			input:    management.NewManagement(management.WithBackup(v1alpha1.Backup{Enabled: true})),
+			expected: management.NewManagement(management.WithBackup(v1alpha1.Backup{Enabled: true, Schedule: "0 */6 * * *"})),
 		},
 		{
 			name:     "should not set schedule if backup is disabled",
diff --git a/internal/webhook/managementbackup_webhook.go b/internal/webhook/managementbackup_webhook.go
new file mode 100644
index 000000000..02d4ef88b
--- /dev/null
+++ b/internal/webhook/managementbackup_webhook.go
@@ -0,0 +1,72 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package webhook
+
+import (
+	"context"
+	"fmt"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/webhook"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	hmcv1alpha1 "github.com/K0rdent/kcm/api/v1alpha1"
+)
+
+type ManagementBackupValidator struct {
+	client.Client
+}
+
+// SetupWebhookWithManager will setup the manager to manage the webhooks
+func (v *ManagementBackupValidator) SetupWebhookWithManager(mgr ctrl.Manager) error {
+	v.Client = mgr.GetClient()
+	return ctrl.NewWebhookManagedBy(mgr).
+		For(&hmcv1alpha1.ManagementBackup{}).
+		WithValidator(v).
+		Complete()
+}
+
+var _ webhook.CustomValidator = &ManagementBackupValidator{}
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
+func (v *ManagementBackupValidator) ValidateCreate(ctx context.Context, _ runtime.Object) (admission.Warnings, error) {
+	return v.validateBackupEnabled(ctx)
+}
+
+func (v *ManagementBackupValidator) validateBackupEnabled(ctx context.Context) (admission.Warnings, error) {
+	mgmt, err := getManagement(ctx, v.Client)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get Management: %w", err)
+	}
+
+	if !mgmt.Spec.Backup.Enabled {
+		return admission.Warnings{"Management backup feature is disabled"}, apierrors.NewBadRequest("management backup is disabled, create or update of ManagementBackup objects disabled")
+	}
+
+	return nil, nil
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
+func (v *ManagementBackupValidator) ValidateUpdate(ctx context.Context, _, _ runtime.Object) (admission.Warnings, error) {
+	return v.validateBackupEnabled(ctx)
+}
+
+// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
+func (*ManagementBackupValidator) ValidateDelete(context.Context, runtime.Object) (admission.Warnings, error) {
+	return nil, nil
+}
diff --git a/internal/webhook/managementbackup_webhook_test.go b/internal/webhook/managementbackup_webhook_test.go
new file mode 100644
index 000000000..4a59aef01
--- /dev/null
+++ b/internal/webhook/managementbackup_webhook_test.go
@@ -0,0 +1,74 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package webhook
+
+import (
+	"context"
+	"testing"
+
+	. "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	hmcv1alpha1 "github.com/K0rdent/kcm/api/v1alpha1"
+	"github.com/K0rdent/kcm/test/objects/management"
+	"github.com/K0rdent/kcm/test/scheme"
+)
+
+func TestManagementBackup_validateBackupEnabled(t *testing.T) {
+	g := NewWithT(t)
+
+	tests := []struct {
+		name            string
+		existingObjects []runtime.Object
+		err             string
+	}{
+		{
+			name:            "should fail if > 1 Management",
+			existingObjects: []runtime.Object{management.NewManagement(), management.NewManagement(management.WithName("second"))},
+			err:             "failed to get Management: expected 1 Management object, got 2",
+		},
+		{
+			name: "should fail if no Management",
+			err:  "failed to get Management: " + errManagementIsNotFound.Error(),
+		},
+		{
+			name:            "should fail if backup is disabled",
+			existingObjects: []runtime.Object{management.NewManagement()},
+			err:             "management backup is disabled, create or update of ManagementBackup objects disabled",
+		},
+		{
+			name:            "should succeed if backup is enabled",
+			existingObjects: []runtime.Object{management.NewManagement(management.WithBackup(hmcv1alpha1.Backup{Enabled: true}))},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)
+			c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build()
+			validator := &ManagementBackupValidator{Client: c}
+
+			ctx := context.Background()
+
+			_, err := validator.validateBackupEnabled(ctx)
+			if tt.err != "" {
+				g.Expect(err).To(HaveOccurred())
+				g.Expect(err).To(MatchError(tt.err))
+			} else {
+				g.Expect(err).To(Succeed())
+			}
+		})
+	}
+}
diff --git a/templates/cluster/aws-hosted-cp/Chart.yaml b/templates/cluster/aws-hosted-cp/Chart.yaml
index e0c81494d..82629980d 100644
--- a/templates/cluster/aws-hosted-cp/Chart.yaml
+++ b/templates/cluster/aws-hosted-cp/Chart.yaml
@@ -14,7 +14,7 @@ version: 0.0.4
 # It is recommended to use it with quotes.
 appVersion: "v1.31.1+k0s.1"
 annotations:
-  cluster.x-k8s.io/provider: infrastructure-aws, control-plane-k0smotron, bootstrap-k0smotron
-  cluster.x-k8s.io/bootstrap-k0smotron: v1beta1
-  cluster.x-k8s.io/control-plane-k0smotron: v1beta1
+  cluster.x-k8s.io/provider: infrastructure-aws, control-plane-k0sproject-k0smotron, bootstrap-k0sproject-k0smotron
+  cluster.x-k8s.io/bootstrap-k0sproject-k0smotron: v1beta1
+  cluster.x-k8s.io/control-plane-k0sproject-k0smotron: v1beta1
   cluster.x-k8s.io/infrastructure-aws: v1beta2
diff --git a/templates/cluster/aws-standalone-cp/Chart.yaml b/templates/cluster/aws-standalone-cp/Chart.yaml
index 1d8e9342e..2d0670299 100644
--- a/templates/cluster/aws-standalone-cp/Chart.yaml
+++ b/templates/cluster/aws-standalone-cp/Chart.yaml
@@ -13,7 +13,7 @@ version: 0.0.5
 # It is recommended to use it with quotes.
 appVersion: "v1.31.1+k0s.1"
 annotations:
-  cluster.x-k8s.io/provider: infrastructure-aws, control-plane-k0smotron, bootstrap-k0smotron
-  cluster.x-k8s.io/bootstrap-k0smotron: v1beta1
-  cluster.x-k8s.io/control-plane-k0smotron: v1beta1
+  cluster.x-k8s.io/provider: infrastructure-aws, control-plane-k0sproject-k0smotron, bootstrap-k0sproject-k0smotron
+  cluster.x-k8s.io/bootstrap-k0sproject-k0smotron: v1beta1
+  cluster.x-k8s.io/control-plane-k0sproject-k0smotron: v1beta1
   cluster.x-k8s.io/infrastructure-aws: v1beta2
diff --git a/templates/cluster/azure-hosted-cp/Chart.yaml b/templates/cluster/azure-hosted-cp/Chart.yaml
index c716e4a95..71416bcbe 100644
--- a/templates/cluster/azure-hosted-cp/Chart.yaml
+++ b/templates/cluster/azure-hosted-cp/Chart.yaml
@@ -14,7 +14,7 @@ version: 0.0.4
 # It is recommended to use it with quotes.
 appVersion: "v1.31.1+k0s.1"
 annotations:
-  cluster.x-k8s.io/provider: infrastructure-azure, control-plane-k0smotron, bootstrap-k0smotron
-  cluster.x-k8s.io/bootstrap-k0smotron: v1beta1
-  cluster.x-k8s.io/control-plane-k0smotron: v1beta1
+  cluster.x-k8s.io/provider: infrastructure-azure, control-plane-k0sproject-k0smotron, bootstrap-k0sproject-k0smotron
+  cluster.x-k8s.io/bootstrap-k0sproject-k0smotron: v1beta1
+  cluster.x-k8s.io/control-plane-k0sproject-k0smotron: v1beta1
   cluster.x-k8s.io/infrastructure-azure: v1beta1
diff --git a/templates/cluster/azure-standalone-cp/Chart.yaml b/templates/cluster/azure-standalone-cp/Chart.yaml
index 1071f954a..b91d40cc4 100644
--- a/templates/cluster/azure-standalone-cp/Chart.yaml
+++ b/templates/cluster/azure-standalone-cp/Chart.yaml
@@ -13,7 +13,7 @@ version: 0.0.5
 # It is recommended to use it with quotes.
 appVersion: "1.31.1+k0s.1"
 annotations:
-  cluster.x-k8s.io/provider: infrastructure-azure, control-plane-k0smotron, bootstrap-k0smotron
-  cluster.x-k8s.io/bootstrap-k0smotron: v1beta1
-  cluster.x-k8s.io/control-plane-k0smotron: v1beta1
+  cluster.x-k8s.io/provider: infrastructure-azure, control-plane-k0sproject-k0smotron, bootstrap-k0sproject-k0smotron
+  cluster.x-k8s.io/bootstrap-k0sproject-k0smotron: v1beta1
+  cluster.x-k8s.io/control-plane-k0sproject-k0smotron: v1beta1
   cluster.x-k8s.io/infrastructure-azure: v1beta1
diff --git a/templates/cluster/openstack-standalone-cp/Chart.yaml b/templates/cluster/openstack-standalone-cp/Chart.yaml
index 3e857f554..88e3b90be 100644
--- a/templates/cluster/openstack-standalone-cp/Chart.yaml
+++ b/templates/cluster/openstack-standalone-cp/Chart.yaml
@@ -13,7 +13,7 @@ version: 0.0.2
 # It is recommended to use it with quotes.
 appVersion: "1.31.1+k0s.1"
 annotations:
-  cluster.x-k8s.io/provider: infrastructure-openstack, control-plane-k0smotron, bootstrap-k0smotron
-  cluster.x-k8s.io/bootstrap-k0smotron: v1beta1
-  cluster.x-k8s.io/control-plane-k0smotron: v1beta1
+  cluster.x-k8s.io/provider: infrastructure-openstack, control-plane-k0sproject-k0smotron, bootstrap-k0sproject-k0smotron
+  cluster.x-k8s.io/bootstrap-k0sproject-k0smotron: v1beta1
+  cluster.x-k8s.io/control-plane-k0sproject-k0smotron: v1beta1
   cluster.x-k8s.io/infrastructure-openstack: v1beta1
diff --git a/templates/cluster/vsphere-hosted-cp/Chart.yaml b/templates/cluster/vsphere-hosted-cp/Chart.yaml
index c73e25efb..cd74a0778 100644
--- a/templates/cluster/vsphere-hosted-cp/Chart.yaml
+++ b/templates/cluster/vsphere-hosted-cp/Chart.yaml
@@ -14,8 +14,8 @@ version: 0.0.5
 # It is recommended to use it with quotes.
 appVersion: "v1.31.1+k0s.1"
 annotations:
-  cluster.x-k8s.io/provider: infrastructure-vsphere, control-plane-k0smotron, bootstrap-k0smotron
-  k0rdent.mirantis.com/type: deployment
-  cluster.x-k8s.io/bootstrap-k0smotron: v1beta1
-  cluster.x-k8s.io/control-plane-k0smotron: v1beta1
+  cluster.x-k8s.io/provider: infrastructure-vsphere, control-plane-k0sproject-k0smotron, bootstrap-k0sproject-k0smotron
+  hmc.mirantis.com/type: deployment
+  cluster.x-k8s.io/bootstrap-k0sproject-k0smotron: v1beta1
+  cluster.x-k8s.io/control-plane-k0sproject-k0smotron: v1beta1
   cluster.x-k8s.io/infrastructure-vsphere: v1beta1
diff --git a/templates/cluster/vsphere-standalone-cp/Chart.yaml b/templates/cluster/vsphere-standalone-cp/Chart.yaml
index e803d4097..60b89b18b 100644
--- a/templates/cluster/vsphere-standalone-cp/Chart.yaml
+++ b/templates/cluster/vsphere-standalone-cp/Chart.yaml
@@ -13,8 +13,8 @@ version: 0.0.5
 # It is recommended to use it with quotes.
 appVersion: "v1.31.1+k0s.1"
 annotations:
-  cluster.x-k8s.io/provider: infrastructure-vsphere, control-plane-k0smotron, bootstrap-k0smotron
-  k0rdent.mirantis.com/type: deployment
-  cluster.x-k8s.io/bootstrap-k0smotron: v1beta1
-  cluster.x-k8s.io/control-plane-k0smotron: v1beta1
+  cluster.x-k8s.io/provider: infrastructure-vsphere, control-plane-k0sproject-k0smotron, bootstrap-k0sproject-k0smotron
+  hmc.mirantis.com/type: deployment
+  cluster.x-k8s.io/bootstrap-k0sproject-k0smotron: v1beta1
+  cluster.x-k8s.io/control-plane-k0sproject-k0smotron: v1beta1
   cluster.x-k8s.io/infrastructure-vsphere: v1beta1
diff --git a/templates/provider/k0smotron/Chart.yaml b/templates/provider/k0smotron/Chart.yaml
index 1624a0694..662a30b68 100644
--- a/templates/provider/k0smotron/Chart.yaml
+++ b/templates/provider/k0smotron/Chart.yaml
@@ -20,5 +20,5 @@ version: 0.0.6
 # It is recommended to use it with quotes.
 appVersion: "1.3.0"
 annotations:
-  cluster.x-k8s.io/provider: infrastructure-k0smotron, bootstrap-k0smotron, control-plane-k0smotron
+  cluster.x-k8s.io/provider: infrastructure-k0sproject-k0smotron, bootstrap-k0sproject-k0smotron, control-plane-k0sproject-k0smotron
   cluster.x-k8s.io/v1beta1: v1beta1
diff --git a/templates/provider/kcm-templates/files/release.yaml b/templates/provider/kcm-templates/files/release.yaml
index 6389c37b8..1495e3c96 100644
--- a/templates/provider/kcm-templates/files/release.yaml
+++ b/templates/provider/kcm-templates/files/release.yaml
@@ -4,8 +4,6 @@ metadata:
   name: kcm-0-0-6
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   version: 0.0.6
   kcm:
diff --git a/templates/provider/kcm-templates/files/templates/adopted-cluster-0-0-2.yaml b/templates/provider/kcm-templates/files/templates/adopted-cluster-0-0-2.yaml
index 2baa558df..0172b4fdf 100644
--- a/templates/provider/kcm-templates/files/templates/adopted-cluster-0-0-2.yaml
+++ b/templates/provider/kcm-templates/files/templates/adopted-cluster-0-0-2.yaml
@@ -4,8 +4,6 @@ metadata:
   name: adopted-cluster-0-0-2
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/aws-eks-0-0-3.yaml b/templates/provider/kcm-templates/files/templates/aws-eks-0-0-3.yaml
index 958c28264..5e81711ae 100644
--- a/templates/provider/kcm-templates/files/templates/aws-eks-0-0-3.yaml
+++ b/templates/provider/kcm-templates/files/templates/aws-eks-0-0-3.yaml
@@ -4,8 +4,6 @@ metadata:
   name: aws-eks-0-0-3
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/aws-hosted-cp-0-0-4.yaml b/templates/provider/kcm-templates/files/templates/aws-hosted-cp-0-0-4.yaml
index 48696d9a7..9ed64c548 100644
--- a/templates/provider/kcm-templates/files/templates/aws-hosted-cp-0-0-4.yaml
+++ b/templates/provider/kcm-templates/files/templates/aws-hosted-cp-0-0-4.yaml
@@ -4,8 +4,6 @@ metadata:
   name: aws-hosted-cp-0-0-4
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/aws-standalone-cp-0-0-5.yaml b/templates/provider/kcm-templates/files/templates/aws-standalone-cp-0-0-5.yaml
index 377ea219b..671bf0b57 100644
--- a/templates/provider/kcm-templates/files/templates/aws-standalone-cp-0-0-5.yaml
+++ b/templates/provider/kcm-templates/files/templates/aws-standalone-cp-0-0-5.yaml
@@ -4,8 +4,6 @@ metadata:
   name: aws-standalone-cp-0-0-5
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/azure-aks-0-0-2.yaml b/templates/provider/kcm-templates/files/templates/azure-aks-0-0-2.yaml
index d7a36e0f4..5959a60e6 100644
--- a/templates/provider/kcm-templates/files/templates/azure-aks-0-0-2.yaml
+++ b/templates/provider/kcm-templates/files/templates/azure-aks-0-0-2.yaml
@@ -4,8 +4,6 @@ metadata:
   name: azure-aks-0-0-2
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/azure-hosted-cp-0-0-4.yaml b/templates/provider/kcm-templates/files/templates/azure-hosted-cp-0-0-4.yaml
index 0a16c935f..1ff1bdd50 100644
--- a/templates/provider/kcm-templates/files/templates/azure-hosted-cp-0-0-4.yaml
+++ b/templates/provider/kcm-templates/files/templates/azure-hosted-cp-0-0-4.yaml
@@ -4,8 +4,6 @@ metadata:
   name: azure-hosted-cp-0-0-4
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/azure-standalone-cp-0-0-5.yaml b/templates/provider/kcm-templates/files/templates/azure-standalone-cp-0-0-5.yaml
index 8d7977625..8e703f691 100644
--- a/templates/provider/kcm-templates/files/templates/azure-standalone-cp-0-0-5.yaml
+++ b/templates/provider/kcm-templates/files/templates/azure-standalone-cp-0-0-5.yaml
@@ -4,8 +4,6 @@ metadata:
   name: azure-standalone-cp-0-0-5
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/cert-manager-1-16-2.yaml b/templates/provider/kcm-templates/files/templates/cert-manager-1-16-2.yaml
index 0aed1e3d2..7c1954556 100644
--- a/templates/provider/kcm-templates/files/templates/cert-manager-1-16-2.yaml
+++ b/templates/provider/kcm-templates/files/templates/cert-manager-1-16-2.yaml
@@ -4,8 +4,6 @@ metadata:
   name: cert-manager-1-16-2
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/cluster-api-provider-aws.yaml b/templates/provider/kcm-templates/files/templates/cluster-api-provider-aws.yaml
index c2b9a3c26..987106f87 100644
--- a/templates/provider/kcm-templates/files/templates/cluster-api-provider-aws.yaml
+++ b/templates/provider/kcm-templates/files/templates/cluster-api-provider-aws.yaml
@@ -4,8 +4,6 @@ metadata:
   name: cluster-api-provider-aws-0-0-4
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/cluster-api-provider-azure.yaml b/templates/provider/kcm-templates/files/templates/cluster-api-provider-azure.yaml
index 310df433e..10a57e7fa 100644
--- a/templates/provider/kcm-templates/files/templates/cluster-api-provider-azure.yaml
+++ b/templates/provider/kcm-templates/files/templates/cluster-api-provider-azure.yaml
@@ -4,8 +4,6 @@ metadata:
   name: cluster-api-provider-azure-0-0-4
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/cluster-api-provider-openstack.yaml b/templates/provider/kcm-templates/files/templates/cluster-api-provider-openstack.yaml
index e665d4b63..675c9d837 100644
--- a/templates/provider/kcm-templates/files/templates/cluster-api-provider-openstack.yaml
+++ b/templates/provider/kcm-templates/files/templates/cluster-api-provider-openstack.yaml
@@ -4,8 +4,6 @@ metadata:
   name: cluster-api-provider-openstack-0-0-1
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/cluster-api-provider-vsphere.yaml b/templates/provider/kcm-templates/files/templates/cluster-api-provider-vsphere.yaml
index f97131803..137a1a51e 100644
--- a/templates/provider/kcm-templates/files/templates/cluster-api-provider-vsphere.yaml
+++ b/templates/provider/kcm-templates/files/templates/cluster-api-provider-vsphere.yaml
@@ -4,8 +4,6 @@ metadata:
   name: cluster-api-provider-vsphere-0-0-5
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/cluster-api.yaml b/templates/provider/kcm-templates/files/templates/cluster-api.yaml
index a2674fa72..7f9e7b696 100644
--- a/templates/provider/kcm-templates/files/templates/cluster-api.yaml
+++ b/templates/provider/kcm-templates/files/templates/cluster-api.yaml
@@ -4,8 +4,6 @@ metadata:
   name: cluster-api-0-0-6
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/dex-0-19-1.yaml b/templates/provider/kcm-templates/files/templates/dex-0-19-1.yaml
index 3f5189886..a22ec9d91 100644
--- a/templates/provider/kcm-templates/files/templates/dex-0-19-1.yaml
+++ b/templates/provider/kcm-templates/files/templates/dex-0-19-1.yaml
@@ -4,8 +4,6 @@ metadata:
   name: dex-0-19-1
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/external-secrets-0-11-0.yaml b/templates/provider/kcm-templates/files/templates/external-secrets-0-11-0.yaml
index 992cccb08..4bfe2638d 100644
--- a/templates/provider/kcm-templates/files/templates/external-secrets-0-11-0.yaml
+++ b/templates/provider/kcm-templates/files/templates/external-secrets-0-11-0.yaml
@@ -4,8 +4,6 @@ metadata:
   name: external-secrets-0-11-0
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/ingress-nginx-4-11-0.yaml b/templates/provider/kcm-templates/files/templates/ingress-nginx-4-11-0.yaml
index a12f9e945..98b093354 100644
--- a/templates/provider/kcm-templates/files/templates/ingress-nginx-4-11-0.yaml
+++ b/templates/provider/kcm-templates/files/templates/ingress-nginx-4-11-0.yaml
@@ -4,8 +4,6 @@ metadata:
   name: ingress-nginx-4-11-0
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/ingress-nginx-4-11-3.yaml b/templates/provider/kcm-templates/files/templates/ingress-nginx-4-11-3.yaml
index 7e2149825..39c6c5ea6 100644
--- a/templates/provider/kcm-templates/files/templates/ingress-nginx-4-11-3.yaml
+++ b/templates/provider/kcm-templates/files/templates/ingress-nginx-4-11-3.yaml
@@ -4,8 +4,6 @@ metadata:
   name: ingress-nginx-4-11-3
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/k0smotron.yaml b/templates/provider/kcm-templates/files/templates/k0smotron.yaml
index 8d8aa2499..f17e95f8b 100644
--- a/templates/provider/kcm-templates/files/templates/k0smotron.yaml
+++ b/templates/provider/kcm-templates/files/templates/k0smotron.yaml
@@ -4,8 +4,6 @@ metadata:
   name: k0smotron-0-0-6
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/kcm.yaml b/templates/provider/kcm-templates/files/templates/kcm.yaml
index e6e9f251f..7f76a2ab1 100644
--- a/templates/provider/kcm-templates/files/templates/kcm.yaml
+++ b/templates/provider/kcm-templates/files/templates/kcm.yaml
@@ -4,8 +4,6 @@ metadata:
   name: kcm-0-0-6
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/kyverno-3-2-6.yaml b/templates/provider/kcm-templates/files/templates/kyverno-3-2-6.yaml
index 3a54aee25..bc9d5b441 100644
--- a/templates/provider/kcm-templates/files/templates/kyverno-3-2-6.yaml
+++ b/templates/provider/kcm-templates/files/templates/kyverno-3-2-6.yaml
@@ -4,8 +4,6 @@ metadata:
   name: kyverno-3-2-6
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/openstack-standalone-cp-0-0-2.yaml b/templates/provider/kcm-templates/files/templates/openstack-standalone-cp-0-0-2.yaml
index 9837a5cfb..d5015703b 100644
--- a/templates/provider/kcm-templates/files/templates/openstack-standalone-cp-0-0-2.yaml
+++ b/templates/provider/kcm-templates/files/templates/openstack-standalone-cp-0-0-2.yaml
@@ -4,8 +4,6 @@ metadata:
   name: openstack-standalone-cp-0-0-2
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/projectsveltos.yaml b/templates/provider/kcm-templates/files/templates/projectsveltos.yaml
index 0238d1150..8432bf14a 100644
--- a/templates/provider/kcm-templates/files/templates/projectsveltos.yaml
+++ b/templates/provider/kcm-templates/files/templates/projectsveltos.yaml
@@ -4,8 +4,6 @@ metadata:
   name: projectsveltos-0-45-0
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/velero-8-1-0.yaml b/templates/provider/kcm-templates/files/templates/velero-8-1-0.yaml
index 0148a310e..7f031ec98 100644
--- a/templates/provider/kcm-templates/files/templates/velero-8-1-0.yaml
+++ b/templates/provider/kcm-templates/files/templates/velero-8-1-0.yaml
@@ -4,8 +4,6 @@ metadata:
   name: velero-8-1-0
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/vsphere-hosted-cp-0-0-5.yaml b/templates/provider/kcm-templates/files/templates/vsphere-hosted-cp-0-0-5.yaml
index 504e68ccc..31bd034d0 100644
--- a/templates/provider/kcm-templates/files/templates/vsphere-hosted-cp-0-0-5.yaml
+++ b/templates/provider/kcm-templates/files/templates/vsphere-hosted-cp-0-0-5.yaml
@@ -4,8 +4,6 @@ metadata:
   name: vsphere-hosted-cp-0-0-5
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm-templates/files/templates/vsphere-standalone-cp-0-0-5.yaml b/templates/provider/kcm-templates/files/templates/vsphere-standalone-cp-0-0-5.yaml
index 981e37e25..8cf21bf2e 100644
--- a/templates/provider/kcm-templates/files/templates/vsphere-standalone-cp-0-0-5.yaml
+++ b/templates/provider/kcm-templates/files/templates/vsphere-standalone-cp-0-0-5.yaml
@@ -4,8 +4,6 @@ metadata:
   name: vsphere-standalone-cp-0-0-5
   annotations:
     helm.sh/resource-policy: keep
-  labels:
-    k0rdent.mirantis.com/component: kcm
 spec:
   helm:
     chartSpec:
diff --git a/templates/provider/kcm/templates/crds/hmc.mirantis.com_managementbackups.yaml b/templates/provider/kcm/templates/crds/hmc.mirantis.com_managementbackups.yaml
new file mode 100644
index 000000000..b3f9d083a
--- /dev/null
+++ b/templates/provider/kcm/templates/crds/hmc.mirantis.com_managementbackups.yaml
@@ -0,0 +1,316 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.16.3
+  name: managementbackups.hmc.mirantis.com
+spec:
+  group: hmc.mirantis.com
+  names:
+    kind: ManagementBackup
+    listKind: ManagementBackupList
+    plural: managementbackups
+    shortNames:
+    - hmcbackup
+    - mgmtbackup
+    singular: managementbackup
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - description: Next scheduled attempt to back up
+      jsonPath: .status.nextAttempt
+      name: NextBackup
+      type: string
+    - description: Schedule phase
+      jsonPath: .status.schedule.phase
+      name: Status
+      type: string
+    - description: Time elapsed since last backup run
+      jsonPath: .status.schedule.lastBackup
+      name: SinceLastBackup
+      priority: 1
+      type: date
+    - description: Status of last backup run
+      jsonPath: .status.lastBackup.phase
+      name: LastBackupStatus
+      type: string
+    - description: Time elapsed since object creation
+      jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    - description: Schedule is on pause
+      jsonPath: .status.schedulePaused
+      name: Paused
+      priority: 1
+      type: boolean
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: ManagementBackup is the Schema for the backups API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: ManagementBackupSpec defines the desired state of ManagementBackup
+            type: object
+          status:
+            description: ManagementBackupStatus defines the observed state of ManagementBackup
+            properties:
+              lastBackup:
+                description: Last Velero Backup that has been created.
+                properties:
+                  backupItemOperationsAttempted:
+                    description: |-
+                      BackupItemOperationsAttempted is the total number of attempted
+                      async BackupItemAction operations for this backup.
+                    type: integer
+                  backupItemOperationsCompleted:
+                    description: |-
+                      BackupItemOperationsCompleted is the total number of successfully completed
+                      async BackupItemAction operations for this backup.
+                    type: integer
+                  backupItemOperationsFailed:
+                    description: |-
+                      BackupItemOperationsFailed is the total number of async
+                      BackupItemAction operations for this backup which ended with an error.
+                    type: integer
+                  completionTimestamp:
+                    description: |-
+                      CompletionTimestamp records the time a backup was completed.
+                      Completion time is recorded even on failed backups.
+                      Completion time is recorded before uploading the backup object.
+                      The server's time is used for CompletionTimestamps
+                    format: date-time
+                    nullable: true
+                    type: string
+                  csiVolumeSnapshotsAttempted:
+                    description: |-
+                      CSIVolumeSnapshotsAttempted is the total number of attempted
+                      CSI VolumeSnapshots for this backup.
+                    type: integer
+                  csiVolumeSnapshotsCompleted:
+                    description: |-
+                      CSIVolumeSnapshotsCompleted is the total number of successfully
+                      completed CSI VolumeSnapshots for this backup.
+                    type: integer
+                  errors:
+                    description: |-
+                      Errors is a count of all error messages that were generated during
+                      execution of the backup.  The actual errors are in the backup's log
+                      file in object storage.
+                    type: integer
+                  expiration:
+                    description: Expiration is when this Backup is eligible for garbage-collection.
+                    format: date-time
+                    nullable: true
+                    type: string
+                  failureReason:
+                    description: FailureReason is an error that caused the entire
+                      backup to fail.
+                    type: string
+                  formatVersion:
+                    description: FormatVersion is the backup format version, including
+                      major, minor, and patch version.
+                    type: string
+                  hookStatus:
+                    description: HookStatus contains information about the status
+                      of the hooks.
+                    nullable: true
+                    properties:
+                      hooksAttempted:
+                        description: |-
+                          HooksAttempted is the total number of attempted hooks
+                          Specifically, HooksAttempted represents the number of hooks that failed to execute
+                          and the number of hooks that executed successfully.
+                        type: integer
+                      hooksFailed:
+                        description: HooksFailed is the total number of hooks which
+                          ended with an error
+                        type: integer
+                    type: object
+                  phase:
+                    description: Phase is the current state of the Backup.
+                    enum:
+                    - New
+                    - FailedValidation
+                    - InProgress
+                    - WaitingForPluginOperations
+                    - WaitingForPluginOperationsPartiallyFailed
+                    - Finalizing
+                    - FinalizingPartiallyFailed
+                    - Completed
+                    - PartiallyFailed
+                    - Failed
+                    - Deleting
+                    type: string
+                  progress:
+                    description: |-
+                      Progress contains information about the backup's execution progress. Note
+                      that this information is best-effort only -- if Velero fails to update it
+                      during a backup for any reason, it may be inaccurate/stale.
+                    nullable: true
+                    properties:
+                      itemsBackedUp:
+                        description: |-
+                          ItemsBackedUp is the number of items that have actually been written to the
+                          backup tarball so far.
+                        type: integer
+                      totalItems:
+                        description: |-
+                          TotalItems is the total number of items to be backed up. This number may change
+                          throughout the execution of the backup due to plugins that return additional related
+                          items to back up, the velero.io/exclude-from-backup label, and various other
+                          filters that happen as items are processed.
+                        type: integer
+                    type: object
+                  startTimestamp:
+                    description: |-
+                      StartTimestamp records the time a backup was started.
+                      Separate from CreationTimestamp, since that value changes
+                      on restores.
+                      The server's time is used for StartTimestamps
+                    format: date-time
+                    nullable: true
+                    type: string
+                  validationErrors:
+                    description: |-
+                      ValidationErrors is a slice of all validation errors (if
+                      applicable).
+                    items:
+                      type: string
+                    nullable: true
+                    type: array
+                  version:
+                    description: |-
+                      Version is the backup format major version.
+                      Deprecated: Please see FormatVersion
+                    type: integer
+                  volumeSnapshotsAttempted:
+                    description: |-
+                      VolumeSnapshotsAttempted is the total number of attempted
+                      volume snapshots for this backup.
+                    type: integer
+                  volumeSnapshotsCompleted:
+                    description: |-
+                      VolumeSnapshotsCompleted is the total number of successfully
+                      completed volume snapshots for this backup.
+                    type: integer
+                  warnings:
+                    description: |-
+                      Warnings is a count of all warning messages that were generated during
+                      execution of the backup. The actual warnings are in the backup's log
+                      file in object storage.
+                    type: integer
+                type: object
+              nextAttempt:
+                description: |-
+                  NextAttempt indicates the time when the next scheduled backup will be performed.
+                  Always absent for the ManagementBackups without a schedule.
+                format: date-time
+                type: string
+              reference:
+                description: |-
+                  Reference to the underlying Velero object being managed.
+                  Might be either Velero Backup or Schedule.
+                properties:
+                  apiVersion:
+                    description: API version of the referent.
+                    type: string
+                  fieldPath:
+                    description: |-
+                      If referring to a piece of an object instead of an entire object, this string
+                      should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+                      For example, if the object reference is to a container within a pod, this would take on a value like:
+                      "spec.containers{name}" (where "name" refers to the name of the container that triggered
+                      the event) or if no container name is specified "spec.containers[2]" (container with
+                      index 2 in this pod). This syntax is chosen only to have some well-defined way of
+                      referencing a part of an object.
+                    type: string
+                  kind:
+                    description: |-
+                      Kind of the referent.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+                    type: string
+                  name:
+                    description: |-
+                      Name of the referent.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                    type: string
+                  namespace:
+                    description: |-
+                      Namespace of the referent.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+                    type: string
+                  resourceVersion:
+                    description: |-
+                      Specific resourceVersion to which this reference is made, if any.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+                    type: string
+                  uid:
+                    description: |-
+                      UID of the referent.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+                    type: string
+                type: object
+                x-kubernetes-map-type: atomic
+              schedule:
+                description: |-
+                  Status of the Velero Schedule for the Management scheduled backups.
+                  Always absent for the ManagementBackups without a schedule.
+                properties:
+                  lastBackup:
+                    description: |-
+                      LastBackup is the last time a Backup was run for this
+                      Schedule schedule
+                    format: date-time
+                    nullable: true
+                    type: string
+                  lastSkipped:
+                    description: LastSkipped is the last time a Schedule was skipped
+                    format: date-time
+                    nullable: true
+                    type: string
+                  phase:
+                    description: Phase is the current phase of the Schedule
+                    enum:
+                    - New
+                    - Enabled
+                    - FailedValidation
+                    type: string
+                  validationErrors:
+                    description: |-
+                      ValidationErrors is a slice of all validation errors (if
+                      applicable)
+                    items:
+                      type: string
+                    type: array
+                type: object
+              schedulePaused:
+                description: SchedulePaused indicates if the Velero Schedule is paused.
+                type: boolean
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/templates/provider/kcm/templates/crds/k0rdent.mirantis.com_backups.yaml b/templates/provider/kcm/templates/crds/k0rdent.mirantis.com_backups.yaml
index cdd191a28..b3f9d083a 100644
--- a/templates/provider/kcm/templates/crds/k0rdent.mirantis.com_backups.yaml
+++ b/templates/provider/kcm/templates/crds/k0rdent.mirantis.com_backups.yaml
@@ -4,20 +4,54 @@ kind: CustomResourceDefinition
 metadata:
   annotations:
     controller-gen.kubebuilder.io/version: v0.16.3
-  name: backups.k0rdent.mirantis.com
+  name: managementbackups.k0rdent.mirantis.com
 spec:
   group: k0rdent.mirantis.com
   names:
-    kind: Backup
-    listKind: BackupList
-    plural: backups
-    singular: backup
+    kind: ManagementBackup
+    listKind: ManagementBackupList
+    plural: managementbackups
+    shortNames:
+    - hmcbackup
+    - mgmtbackup
+    singular: managementbackup
   scope: Cluster
   versions:
-  - name: v1alpha1
+  - additionalPrinterColumns:
+    - description: Next scheduled attempt to back up
+      jsonPath: .status.nextAttempt
+      name: NextBackup
+      type: string
+    - description: Schedule phase
+      jsonPath: .status.schedule.phase
+      name: Status
+      type: string
+    - description: Time elapsed since last backup run
+      jsonPath: .status.schedule.lastBackup
+      name: SinceLastBackup
+      priority: 1
+      type: date
+    - description: Status of last backup run
+      jsonPath: .status.lastBackup.phase
+      name: LastBackupStatus
+      type: string
+    - description: Time elapsed since object creation
+      jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    - description: Schedule is on pause
+      jsonPath: .status.schedulePaused
+      name: Paused
+      priority: 1
+      type: boolean
+    name: v1alpha1
     schema:
       openAPIV3Schema:
-        description: Backup is the Schema for the backups API
+        description: ManagementBackup is the Schema for the backups API
         properties:
           apiVersion:
             description: |-
@@ -37,16 +71,10 @@ spec:
           metadata:
             type: object
           spec:
-            description: BackupSpec defines the desired state of Backup
-            properties:
-              oneshot:
-                description: |-
-                  Oneshot indicates whether the Backup should not be scheduled
-                  and rather created immediately and only once.
-                type: boolean
+            description: ManagementBackupSpec defines the desired state of ManagementBackup
             type: object
           status:
-            description: BackupStatus defines the observed state of Backup
+            description: ManagementBackupStatus defines the observed state of ManagementBackup
             properties:
               lastBackup:
                 description: Last Velero Backup that has been created.
@@ -197,7 +225,7 @@ spec:
               nextAttempt:
                 description: |-
                   NextAttempt indicates the time when the next scheduled backup will be performed.
-                  Always absent for the Backups with the .spec.oneshot set to true.
+                  Always absent for the ManagementBackups without a schedule.
                 format: date-time
                 type: string
               reference:
@@ -248,7 +276,7 @@ spec:
               schedule:
                 description: |-
                   Status of the Velero Schedule for the Management scheduled backups.
-                  Always absent for the Backups with the .spec.oneshot set to true.
+                  Always absent for the ManagementBackups without a schedule.
                 properties:
                   lastBackup:
                     description: |-
@@ -277,6 +305,9 @@ spec:
                       type: string
                     type: array
                 type: object
+              schedulePaused:
+                description: SchedulePaused indicates if the Velero Schedule is paused.
+                type: boolean
             type: object
         type: object
     served: true
diff --git a/templates/provider/kcm/templates/crds/k0rdent.mirantis.com_managements.yaml b/templates/provider/kcm/templates/crds/k0rdent.mirantis.com_managements.yaml
index 4dcb51e3c..8c531bc77 100644
--- a/templates/provider/kcm/templates/crds/k0rdent.mirantis.com_managements.yaml
+++ b/templates/provider/kcm/templates/crds/k0rdent.mirantis.com_managements.yaml
@@ -43,20 +43,36 @@ spec:
             description: ManagementSpec defines the desired state of Management
             properties:
               backup:
-                description: ManagementBackup enables a feature to backup KCM objects
-                  into a cloud.
+                description: Backup enables a feature to back up HMC objects into a
+                  cloud.
                 properties:
+                  customPlugins:
+                    additionalProperties:
+                      type: string
+                    description: "CustomPlugins holds key value pairs with [Velero]
+                      [community] and [custom] plugins, where:\n\t- key represents
+                      the provider's name in the format [velero.io/]<plugin-name>;\n\t-
+                      value represents the provider's plugin name;\n\nProvider name
+                      must be exactly the same as in a [BackupStorageLocation] object.\n\n[Velero]:
+                      https://velero.io\n[community]: https://velero.io/docs/v1.15/supported-providers/#provider-plugins-maintained-by-the-velero-community\n[custom]:
+                      https://velero.io/docs/v1.15/custom-plugins/\n[BackupStorageLocation]:
+                      https://velero.io/docs/v1.15/api-types/backupstoragelocation/"
+                    example:
+                      customPlugins:
+                        alibabacloud: registry.<region>.aliyuncs.com/acs/velero:1.4.2
+                        community.openstack.org/openstack: lirt/velero-plugin-for-openstack:v0.6.0
+                    type: object
                   enabled:
                     description: |-
                       Flag to indicate whether the backup feature is enabled.
                       If set to true, [Velero] platform will be installed.
-                      If set to false, creation or modification of Backups/Restores will be blocked.
+                      If set to false, creation or modification of ManagementBackups will be blocked.
 
                       [Velero]: https://velero.io
                     type: boolean
                   schedule:
                     description: |-
-                      Schedule is a Cron expression defining when to run the scheduled Backup.
+                      Schedule is a Cron expression defining when to run the scheduled ManagementBackup.
                       Default value is to backup every 6 hours.
                     type: string
                 type: object
diff --git a/templates/provider/kcm/templates/deployment.yaml b/templates/provider/kcm/templates/deployment.yaml
index 2d92dc77b..d32803d90 100644
--- a/templates/provider/kcm/templates/deployment.yaml
+++ b/templates/provider/kcm/templates/deployment.yaml
@@ -45,8 +45,12 @@ spec:
           value: {{ .Values.controller.backup.features }}
         - name: BACKUP_SYSTEM_NAMESPACE
           value: {{ .Values.controller.backup.namespace }}
+        - name: BACKUP_CTRL_INSTALL_READINESS_REQUEUE_DURATION
+          value: {{ .Values.controller.backup.installReadinessRequeuePeriod }}
         - name: BACKUP_CTRL_REQUEUE_DURATION
-          value: {{ .Values.controller.backup.requeue }}
+          value: {{ .Values.controller.backup.requeuePeriod }}
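+        # comma-separated list of Velero plugin images, rendered from .Values.controller.backup.veleroPluginImages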
+        - name: BACKUP_PLUGIN_IMAGES
+          value: {{ join "," .Values.controller.backup.veleroPluginImages | quote }}
         image: {{ .Values.image.repository }}:{{ .Values.image.tag
           | default .Chart.AppVersion }}
         imagePullPolicy: {{ .Values.image.pullPolicy }}
diff --git a/templates/provider/kcm/templates/rbac/controller/roles.yaml b/templates/provider/kcm/templates/rbac/controller/roles.yaml
index 9786ace17..cbc8dbfda 100644
--- a/templates/provider/kcm/templates/rbac/controller/roles.yaml
+++ b/templates/provider/kcm/templates/rbac/controller/roles.yaml
@@ -184,7 +184,8 @@ rules:
   - awsclusterroleidentities
   - azureclusteridentities
   - vsphereclusteridentities
-  verbs: {{ include "rbac.viewerVerbs" . | nindent 4 }}
+  verbs: {{ include "rbac.viewerVerbs" . | nindent 2 }}
+  - update # required for the managementbackups-ctrl
 - apiGroups:
   - config.projectsveltos.io
   resources:
@@ -216,23 +217,24 @@ rules:
   resources:
   - secrets
   verbs: {{ include "rbac.viewerVerbs" . | nindent 2 }}
-  - create
-# backup-ctrl
+  - create # required for the managementbackups-ctrl
+  - update # required for the managementbackups-ctrl
+# managementbackups-ctrl
 - apiGroups:
   - k0rdent.mirantis.com
   resources:
-  - backups
+  - managementbackups
   verbs: {{ include "rbac.editorVerbs" . | nindent 4 }}
 - apiGroups:
   - k0rdent.mirantis.com
   resources:
-  - backups/finalizers
+  - managementbackups/finalizers
   verbs:
   - update
 - apiGroups:
   - k0rdent.mirantis.com
   resources:
-  - backups/status
+  - managementbackups/status
   verbs:
   - get
   - patch
@@ -244,6 +246,7 @@ rules:
   - namespaces
   verbs: {{ include "rbac.viewerVerbs" . | nindent 2 }}
   - create
+  - update
 - apiGroups:
   - apps
   resources:
@@ -261,6 +264,7 @@ rules:
   - roles
   verbs: {{ include "rbac.viewerVerbs" . | nindent 2 }}
   - create
+  - update
 - apiGroups:
   - apiextensions.k8s.io
   resources:
@@ -280,7 +284,7 @@ rules:
   verbs:
   - list
   - get
-# backup-ctrl
+# managementbackups-ctrl
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
diff --git a/templates/provider/kcm/templates/rbac/user-facing/backup-editor.yaml b/templates/provider/kcm/templates/rbac/user-facing/backup-editor.yaml
index ef787a669..892147a20 100644
--- a/templates/provider/kcm/templates/rbac/user-facing/backup-editor.yaml
+++ b/templates/provider/kcm/templates/rbac/user-facing/backup-editor.yaml
@@ -1,4 +1,4 @@
-# permissions for end users to edit backups.
+# permissions for end users to edit managementbackups.
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
@@ -9,8 +9,8 @@ rules:
 - apiGroups:
   - k0rdent.mirantis.com
   resources:
-  - backups
-  - backups/status
+  - managementbackups
+  - managementbackups/status
   verbs: {{ include "rbac.editorVerbs" . | nindent 6 }}
 - apiGroups:
   - velero.io
diff --git a/templates/provider/kcm/templates/rbac/user-facing/backup-viewer.yaml b/templates/provider/kcm/templates/rbac/user-facing/backup-viewer.yaml
index 9783440bb..e03945645 100644
--- a/templates/provider/kcm/templates/rbac/user-facing/backup-viewer.yaml
+++ b/templates/provider/kcm/templates/rbac/user-facing/backup-viewer.yaml
@@ -1,4 +1,4 @@
-# permissions for end users to view backups.
+# permissions for end users to view managementbackups.
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
@@ -9,8 +9,8 @@ rules:
 - apiGroups:
   - k0rdent.mirantis.com
   resources:
-  - backups
-  - backups/status
+  - managementbackups
+  - managementbackups/status
   verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }}
 - apiGroups:
   - velero.io
diff --git a/templates/provider/kcm/templates/rbac/user-facing/managementbackup-editor.yaml b/templates/provider/kcm/templates/rbac/user-facing/managementbackup-editor.yaml
new file mode 100644
index 000000000..892147a20
--- /dev/null
+++ b/templates/provider/kcm/templates/rbac/user-facing/managementbackup-editor.yaml
@@ -0,0 +1,19 @@
+# permissions for end users to edit managementbackups.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    k0rdent.mirantis.com/aggregate-to-global-admin: "true"
+  name: {{ include "kcm.fullname" . }}-backup-editor-role
+rules:
+- apiGroups:
+  - k0rdent.mirantis.com
+  resources:
+  - managementbackups
+  - managementbackups/status
+  verbs: {{ include "rbac.editorVerbs" . | nindent 6 }}
+- apiGroups:
+  - velero.io
+  resources:
+  - '*'
+  verbs: {{ include "rbac.editorVerbs" . | nindent 6 }}
diff --git a/templates/provider/kcm/templates/rbac/user-facing/managementbackup-viewer.yaml b/templates/provider/kcm/templates/rbac/user-facing/managementbackup-viewer.yaml
new file mode 100644
index 000000000..e03945645
--- /dev/null
+++ b/templates/provider/kcm/templates/rbac/user-facing/managementbackup-viewer.yaml
@@ -0,0 +1,19 @@
+# permissions for end users to view managementbackups.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    k0rdent.mirantis.com/aggregate-to-global-viewer: "true"
+  name: {{ include "kcm.fullname" . }}-backup-viewer-role
+rules:
+- apiGroups:
+  - k0rdent.mirantis.com
+  resources:
+  - managementbackups
+  - managementbackups/status
+  verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }}
+- apiGroups:
+  - velero.io
+  resources:
+  - '*'
+  verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }}
diff --git a/templates/provider/kcm/templates/webhooks.yaml b/templates/provider/kcm/templates/webhooks.yaml
index 4668ac293..31841ef65 100644
--- a/templates/provider/kcm/templates/webhooks.yaml
+++ b/templates/provider/kcm/templates/webhooks.yaml
@@ -341,4 +341,25 @@ webhooks:
         resources:
           - releases
     sideEffects: None
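+  # ManagementBackup validation; create and update requests are rejected while the Management backup feature is disabled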
+  - admissionReviewVersions:
+      - v1
+    clientConfig:
+      service:
+        name: {{ include "hmc.webhook.serviceName" . }}
+        namespace: {{ include "hmc.webhook.serviceNamespace" . }}
+        path: /validate-hmc-mirantis-com-v1alpha1-managementbackup
+    failurePolicy: Fail
+    matchPolicy: Equivalent
+    name: validation.managementbackup.hmc.mirantis.com
+    rules:
+      - apiGroups:
+          - hmc.mirantis.com
+        apiVersions:
+          - v1alpha1
+        operations:
+          - CREATE
+          - UPDATE
+        resources:
+          - managementbackups
+    sideEffects: None
 {{- end }}
diff --git a/templates/provider/kcm/values.yaml b/templates/provider/kcm/values.yaml
index a009cc1dd..49af52dde 100644
--- a/templates/provider/kcm/values.yaml
+++ b/templates/provider/kcm/values.yaml
@@ -22,7 +22,12 @@ controller:
       repository: velero
       name: velero
       tag: v1.15.0
-    requeue: 5s
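+    # requeue period used while waiting for the backup components to report install readiness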
+    installReadinessRequeuePeriod: 5s
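+    # requeue period for the backup controller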
+    requeuePeriod: 5m
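+    # Velero plugin images passed to the controller via the BACKUP_PLUGIN_IMAGES environment variable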
+    veleroPluginImages:
+      - velero/velero-plugin-for-aws:v1.11.0
+      - velero/velero-plugin-for-microsoft-azure:v1.11.0
+      - velero/velero-plugin-for-gcp:v1.11.0
 
 containerSecurityContext:
   allowPrivilegeEscalation: false
diff --git a/test/objects/management/management.go b/test/objects/management/management.go
index c103bc981..03c295250 100644
--- a/test/objects/management/management.go
+++ b/test/objects/management/management.go
@@ -90,7 +90,7 @@ func WithRelease(v string) Opt {
 	}
 }
 
-func WithBackup(v v1alpha1.ManagementBackup) Opt {
+func WithBackup(v v1alpha1.Backup) Opt {
 	return func(management *v1alpha1.Management) {
 		management.Spec.Backup = v
 	}