From 01e32f01b943b5a5a18805399cc900788ac9c045 Mon Sep 17 00:00:00 2001 From: "L.Dongming" Date: Sun, 8 Oct 2023 18:07:35 +0800 Subject: [PATCH 1/8] feat: data protection support gc controller to delete expired backups --- cmd/dataprotection/main.go | 2 + controllers/dataprotection/gc_controller.go | 80 +++++++++++ internal/dataprotection/types/constant.go | 12 ++ .../utils/periodical_enqueue_source.go | 128 ++++++++++++++++++ 4 files changed, 222 insertions(+) create mode 100644 controllers/dataprotection/gc_controller.go create mode 100644 internal/dataprotection/utils/periodical_enqueue_source.go diff --git a/cmd/dataprotection/main.go b/cmd/dataprotection/main.go index 67f7fffbca3..152b1ec4ce1 100644 --- a/cmd/dataprotection/main.go +++ b/cmd/dataprotection/main.go @@ -50,6 +50,7 @@ import ( dpcontrollers "github.com/apecloud/kubeblocks/controllers/dataprotection" "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" viper "github.com/apecloud/kubeblocks/internal/viperx" ) @@ -98,6 +99,7 @@ func init() { viper.SetDefault("KUBEBLOCKS_SERVICEACCOUNT_NAME", "kubeblocks") viper.SetDefault(constant.CfgKeyCtrlrMgrNS, "default") viper.SetDefault(constant.KubernetesClusterDomainEnv, constant.DefaultDNSDomain) + viper.SetDefault(dptypes.CfgKeyGCFrequencySeconds, dptypes.DefaultGCFrequencySeconds) } func main() { diff --git a/controllers/dataprotection/gc_controller.go b/controllers/dataprotection/gc_controller.go new file mode 100644 index 00000000000..bec9a6c8170 --- /dev/null +++ b/controllers/dataprotection/gc_controller.go @@ -0,0 +1,80 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package dataprotection + +import ( + "context" + "time" + + "k8s.io/client-go/tools/record" + "k8s.io/utils/clock" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + ctrlutil "github.com/apecloud/kubeblocks/internal/controllerutil" + dptypes "github.com/apecloud/kubeblocks/internal/dataprotection/types" + dputils "github.com/apecloud/kubeblocks/internal/dataprotection/utils" + viper "github.com/apecloud/kubeblocks/internal/viperx" +) + +// GCReconciler deletes expired backups. +type GCReconciler struct { + client.Client + Recorder record.EventRecorder + clock clock.WithTickerAndDelayedExecution +} + +// SetupWithManager sets up the GCReconciler using the supplied manager. +// GCController only watches on CreateEvent for ensuring every new backup will be +// taken care of. Other events will be filtered to decrease the load on the controller. 
+func (r *GCReconciler) SetupWithManager(mgr ctrl.Manager) error { + s := dputils.NewPeriodicalEnqueueSource(mgr.GetClient(), &dpv1alpha1.BackupList{}, + getGCFrequency(), dputils.PeriodicalEnqueueSourceOption{}) + return ctrl.NewControllerManagedBy(mgr). + For(&dpv1alpha1.Backup{}, builder.WithPredicates(predicate.Funcs{ + UpdateFunc: func(_ event.UpdateEvent) bool { + return false + }, + DeleteFunc: func(_ event.DeleteEvent) bool { + return false + }, + GenericFunc: func(_ event.GenericEvent) bool { + return false + }, + })). + WatchesRawSource(s, nil). + Complete(r) +} + +func (r *GCReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + return ctrlutil.Reconciled() +} + +func getGCFrequency() time.Duration { + gcFrequencySeconds := viper.GetInt(dptypes.CfgKeyGCFrequencySeconds) + if gcFrequencySeconds > 0 { + return time.Duration(gcFrequencySeconds) * time.Second + } + return dptypes.DefaultGCFrequencySeconds +} diff --git a/internal/dataprotection/types/constant.go b/internal/dataprotection/types/constant.go index 3254e3feb61..d102d5cffe4 100644 --- a/internal/dataprotection/types/constant.go +++ b/internal/dataprotection/types/constant.go @@ -19,6 +19,18 @@ along with this program. If not, see . package types +// config keys used in viper +const ( + // CfgKeyGCFrequencySeconds is the key of gc frequency, its unit is second + CfgKeyGCFrequencySeconds = "GC_FREQUENCY_SECONDS" +) + +// config default values +const ( + // DefaultGCFrequencySeconds is the default gc frequency, its unit is second + DefaultGCFrequencySeconds = 60 * 60 +) + const ( // DataProtectionFinalizerName is the name of our custom finalizer DataProtectionFinalizerName = "dataprotection.kubeblocks.io/finalizer" diff --git a/internal/dataprotection/utils/periodical_enqueue_source.go b/internal/dataprotection/utils/periodical_enqueue_source.go new file mode 100644 index 00000000000..a76c63b12aa --- /dev/null +++ b/internal/dataprotection/utils/periodical_enqueue_source.go @@ -0,0 +1,128 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package utils + +import ( + "context" + "fmt" + "reflect" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// PeriodicalEnqueueSource is an implementation of interface sigs.k8s.io/controller-runtime/pkg/source/Source +// It reads the specific resources from cache and enqueue them into the queue to trigger +// the reconcile procedure periodically. 
+type PeriodicalEnqueueSource struct { + client.Client + log logr.Logger + objList client.ObjectList + period time.Duration + option PeriodicalEnqueueSourceOption +} + +type PeriodicalEnqueueSourceOption struct { + OrderFunc func(objList client.ObjectList) client.ObjectList +} + +func NewPeriodicalEnqueueSource( + client client.Client, + objList client.ObjectList, + period time.Duration, + option PeriodicalEnqueueSourceOption) *PeriodicalEnqueueSource { + return &PeriodicalEnqueueSource{ + log: log.Log.WithValues("resource", reflect.TypeOf(objList).String()), + Client: client, + objList: objList, + period: period, + option: option, + } +} + +func (p *PeriodicalEnqueueSource) Start( + ctx context.Context, + h handler.EventHandler, + q workqueue.RateLimitingInterface, + predicates ...predicate.Predicate) error { + go wait.Until(func() { + p.log.V(1).Info("enqueueing resources ...") + if err := p.List(ctx, p.objList); err != nil { + p.log.Error(err, "error listing resources") + return + } + + if meta.LenList(p.objList) == 0 { + p.log.V(1).Info("no resources found, skip") + return + } + + if p.option.OrderFunc != nil { + p.objList = p.option.OrderFunc(p.objList) + } + + if err := meta.EachListItem(p.objList, func(object runtime.Object) error { + obj, ok := object.(client.Object) + if !ok { + p.log.Error(nil, "object is not a client.Object", "object", object) + return nil + } + e := event.GenericEvent{Object: obj} + for _, pred := range predicates { + if !pred.Generic(e) { + p.log.V(1).Info("skip enqueue object due to the predicate", "object", obj) + return nil + } + } + + q.Add(ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.GetName(), + }, + }) + p.log.V(1).Info("resource enqueued", "object", obj) + return nil + }); err != nil { + p.log.Error(err, "error enqueueing resources") + return + } + }, p.period, ctx.Done()) + + return nil +} + +func (p *PeriodicalEnqueueSource) String() string { + if p.objList != nil { + return fmt.Sprintf("periodical enqueue source: %T", p.objList) + } + return "periodical enqueue source: unknown type" +} From 824f921f3d14b3c620588f996a20ee87f772354f Mon Sep 17 00:00:00 2001 From: "L.Dongming" Date: Thu, 12 Oct 2023 09:19:20 +0800 Subject: [PATCH 2/8] support dp gc controller --- apis/apps/v1alpha1/zz_generated.deepcopy.go | 7 ++- .../v1alpha1/zz_generated.deepcopy.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 2 +- .../storage/v1alpha1/zz_generated.deepcopy.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 2 +- cmd/dataprotection/main.go | 12 +++- .../backupschedule_controller.go | 35 ------------ controllers/dataprotection/gc_controller.go | 55 +++++++++++++++++-- .../v1beta2/zz_generated.deepcopy.go | 2 +- internal/cli/util/flags/flags_test.go | 3 +- internal/dataprotection/utils/envvar.go | 3 +- 11 files changed, 74 insertions(+), 51 deletions(-) diff --git a/apis/apps/v1alpha1/zz_generated.deepcopy.go b/apis/apps/v1alpha1/zz_generated.deepcopy.go index 96610f5edcf..8d3add0314f 100644 --- a/apis/apps/v1alpha1/zz_generated.deepcopy.go +++ b/apis/apps/v1alpha1/zz_generated.deepcopy.go @@ -25,14 +25,15 @@ along with this program. If not, see . 
package v1alpha1 import ( - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - workloadsv1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" appsv1 "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" + + dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + workloadsv1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/apis/dataprotection/v1alpha1/zz_generated.deepcopy.go b/apis/dataprotection/v1alpha1/zz_generated.deepcopy.go index 06449f5aa3a..5f955a82a32 100644 --- a/apis/dataprotection/v1alpha1/zz_generated.deepcopy.go +++ b/apis/dataprotection/v1alpha1/zz_generated.deepcopy.go @@ -25,7 +25,7 @@ along with this program. If not, see . package v1alpha1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/apis/extensions/v1alpha1/zz_generated.deepcopy.go b/apis/extensions/v1alpha1/zz_generated.deepcopy.go index dc6dc6af55e..eeec9922d51 100644 --- a/apis/extensions/v1alpha1/zz_generated.deepcopy.go +++ b/apis/extensions/v1alpha1/zz_generated.deepcopy.go @@ -26,7 +26,7 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/apis/storage/v1alpha1/zz_generated.deepcopy.go b/apis/storage/v1alpha1/zz_generated.deepcopy.go index f22b9440108..0ab972c839a 100644 --- a/apis/storage/v1alpha1/zz_generated.deepcopy.go +++ b/apis/storage/v1alpha1/zz_generated.deepcopy.go @@ -25,7 +25,7 @@ along with this program. If not, see . 
package v1alpha1 import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/apis/workloads/v1alpha1/zz_generated.deepcopy.go b/apis/workloads/v1alpha1/zz_generated.deepcopy.go index 563b051a208..09390fb0541 100644 --- a/apis/workloads/v1alpha1/zz_generated.deepcopy.go +++ b/apis/workloads/v1alpha1/zz_generated.deepcopy.go @@ -26,7 +26,7 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) diff --git a/cmd/dataprotection/main.go b/cmd/dataprotection/main.go index ab5dfe5bcfb..0188c53dc7e 100644 --- a/cmd/dataprotection/main.go +++ b/cmd/dataprotection/main.go @@ -38,6 +38,7 @@ import ( discoverycli "k8s.io/client-go/discovery" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/utils/clock" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -97,7 +98,7 @@ func init() { viper.SetDefault("VOLUMESNAPSHOT_API_BETA", false) viper.SetDefault(constant.KBToolsImage, "apecloud/kubeblocks-tools:latest") viper.SetDefault("KUBEBLOCKS_SERVICEACCOUNT_NAME", "kubeblocks") - viper.SetDefault(constant.CfgKeyCtrlrMgrNS, "default") + viper.SetDefault(constant.CfgKeyCtrlrMgrNS, "kb-system") viper.SetDefault(constant.KubernetesClusterDomainEnv, constant.DefaultDNSDomain) viper.SetDefault(dptypes.CfgKeyGCFrequencySeconds, dptypes.DefaultGCFrequencySeconds) } @@ -263,6 +264,15 @@ func main() { os.Exit(1) } + if err = (&dpcontrollers.GCReconciler{ + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor("gc-controller"), + Clock: clock.RealClock{}, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "GarbageCollection") + os.Exit(1) + } + // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/controllers/dataprotection/backupschedule_controller.go b/controllers/dataprotection/backupschedule_controller.go index 64f48815b9a..ff07c591936 100644 --- a/controllers/dataprotection/backupschedule_controller.go +++ b/controllers/dataprotection/backupschedule_controller.go @@ -22,7 +22,6 @@ package dataprotection import ( "context" "reflect" - "strings" "time" batchv1 "k8s.io/api/batch/v1" @@ -84,12 +83,6 @@ func (r *BackupScheduleReconciler) Reconcile(ctx context.Context, req ctrl.Reque return *res, err } - // try to remove expired or oldest backups, triggered by cronjob controller - // TODO(ldm): another garbage collection controller to remove expired backups - if err = r.removeExpiredBackups(reqCtx); err != nil { - return r.patchStatusFailed(reqCtx, backupSchedule, "RemoveExpiredBackupsFailed", err) - } - if err = r.handleSchedule(reqCtx, backupSchedule); err != nil { return r.patchStatusFailed(reqCtx, backupSchedule, "HandleBackupScheduleFailed", err) } @@ -197,34 +190,6 @@ func (r *BackupScheduleReconciler) patchStatusFailed(reqCtx intctrlutil.RequestC return intctrlutil.RequeueWithError(err, reqCtx.Log, "") } -func (r *BackupScheduleReconciler) removeExpiredBackups(reqCtx intctrlutil.RequestCtx) error { - backups := dpv1alpha1.BackupList{} - if err := r.Client.List(reqCtx.Ctx, &backups, - client.InNamespace(reqCtx.Req.Namespace)); err != nil { - return err - } - - now := metav1.Now() - for _, item := range backups.Items { - // ignore retained backup. 
- if strings.EqualFold(item.GetLabels()[constant.BackupProtectionLabelKey], constant.BackupRetain) { - continue - } - - // ignore backup which is not expired. - if item.Status.Expiration == nil || !item.Status.Expiration.Before(&now) { - continue - } - - // delete expired backup. - if err := intctrlutil.BackgroundDeleteObject(r.Client, reqCtx.Ctx, &item); err != nil { - // failed delete backups, return error info. - return err - } - } - return nil -} - // handleSchedule handles backup schedules for different backup method. func (r *BackupScheduleReconciler) handleSchedule( reqCtx intctrlutil.RequestCtx, diff --git a/controllers/dataprotection/gc_controller.go b/controllers/dataprotection/gc_controller.go index bec9a6c8170..a53da6fd43c 100644 --- a/controllers/dataprotection/gc_controller.go +++ b/controllers/dataprotection/gc_controller.go @@ -23,12 +23,15 @@ import ( "context" "time" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/record" "k8s.io/utils/clock" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" @@ -38,19 +41,18 @@ import ( viper "github.com/apecloud/kubeblocks/internal/viperx" ) -// GCReconciler deletes expired backups. +// GCReconciler garbage collection reconciler, which periodically deletes expired backups. type GCReconciler struct { client.Client Recorder record.EventRecorder - clock clock.WithTickerAndDelayedExecution + Clock clock.WithTickerAndDelayedExecution } // SetupWithManager sets up the GCReconciler using the supplied manager. // GCController only watches on CreateEvent for ensuring every new backup will be // taken care of. Other events will be filtered to decrease the load on the controller. func (r *GCReconciler) SetupWithManager(mgr ctrl.Manager) error { - s := dputils.NewPeriodicalEnqueueSource(mgr.GetClient(), &dpv1alpha1.BackupList{}, - getGCFrequency(), dputils.PeriodicalEnqueueSourceOption{}) + s := dputils.NewPeriodicalEnqueueSource(mgr.GetClient(), &dpv1alpha1.BackupList{}, getGCFrequency(), dputils.PeriodicalEnqueueSourceOption{}) return ctrl.NewControllerManagedBy(mgr). For(&dpv1alpha1.Backup{}, builder.WithPredicates(predicate.Funcs{ UpdateFunc: func(_ event.UpdateEvent) bool { @@ -67,7 +69,52 @@ func (r *GCReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } +// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=backups,verbs=get;list;watch;delete +// +kubebuilder:rbac:groups=dataprotection.kubeblocks.io,resources=backups/status,verbs=get + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// delete expired backups. 
func (r *GCReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Re
+	reqCtx := ctrlutil.RequestCtx{
+		Ctx:      ctx,
+		Req:      req,
+		Log:      log.FromContext(ctx).WithValues("gc backup", req.NamespacedName),
+		Recorder: r.Recorder,
+	}
+	reqCtx.Log.V(1).Info("gcController getting backup")
+
+	backup := &dpv1alpha1.Backup{}
+	if err := r.Get(reqCtx.Ctx, reqCtx.Req.NamespacedName, backup); err != nil {
+		if apierrors.IsNotFound(err) {
+			reqCtx.Log.Error(err, "backup not found")
+			return ctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "")
+		}
+	}
+
+	// backup is being deleted, skip
+	if backup.Status.Phase == dpv1alpha1.BackupPhaseDeleting ||
+		backup.DeletionTimestamp.IsZero() {
+		reqCtx.Log.V(1).Info("backup is being deleted, skipping")
+		return ctrlutil.Reconciled()
+	}
+
+	reqCtx.Log.V(1).Info("gc reconcile", "backup", req.String(),
+		"phase", backup.Status.Phase, "expiration", backup.Status.Expiration)
+	reqCtx.Log = reqCtx.Log.WithValues("expiration", backup.Status.Expiration)
+
+	now := r.Clock.Now()
+	if backup.Status.Expiration == nil || backup.Status.Expiration.After(now) {
+		reqCtx.Log.V(1).Info("backup is not expired yet, skipping")
+		return ctrlutil.Reconciled()
+	}
+
+	reqCtx.Log.Info("backup has expired, delete it", "backup", req.String())
+	if err := ctrlutil.BackgroundDeleteObject(r.Client, reqCtx.Ctx, backup); err != nil {
+		reqCtx.Log.Error(err, "failed to delete backup")
+		r.Recorder.Event(backup, corev1.EventTypeWarning, "RemoveExpiredBackupsFailed", err.Error())
+		return ctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "")
+	}
+
 	return ctrlutil.Reconciled()
 }
 
diff --git a/externalapis/preflight/v1beta2/zz_generated.deepcopy.go b/externalapis/preflight/v1beta2/zz_generated.deepcopy.go
index e1cbcb93528..395b99094bf 100644
--- a/externalapis/preflight/v1beta2/zz_generated.deepcopy.go
+++ b/externalapis/preflight/v1beta2/zz_generated.deepcopy.go
@@ -26,7 +26,7 @@ package v1beta2
 
 import (
 	troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
diff --git a/internal/cli/util/flags/flags_test.go b/internal/cli/util/flags/flags_test.go
index 83865046029..13b8718d0c8 100644
--- a/internal/cli/util/flags/flags_test.go
+++ b/internal/cli/util/flags/flags_test.go
@@ -20,13 +20,14 @@ along with this program. If not, see .
 package flags
 
 import (
-	"github.com/apecloud/kubeblocks/internal/cli/testing"
 	. "github.com/onsi/ginkgo/v2"
 	. 
"github.com/onsi/gomega" "github.com/spf13/cobra" clientfake "k8s.io/client-go/rest/fake" "k8s.io/kube-openapi/pkg/validation/spec" cmdtesting "k8s.io/kubectl/pkg/cmd/testing" + + "github.com/apecloud/kubeblocks/internal/cli/testing" ) const singleFlags = `{ diff --git a/internal/dataprotection/utils/envvar.go b/internal/dataprotection/utils/envvar.go index f01bac58c72..f3eb3735796 100644 --- a/internal/dataprotection/utils/envvar.go +++ b/internal/dataprotection/utils/envvar.go @@ -34,8 +34,7 @@ func BuildEnvByCredential(pod *corev1.Pod, credential *dpv1alpha1.ConnectionCred } var hostEnv corev1.EnvVar if credential.HostKey == "" { - hostEnv = corev1.EnvVar{Name: dptypes.DPDBHost, - Value: intctrlutil.BuildPodHostDNS(pod)} + hostEnv = corev1.EnvVar{Name: dptypes.DPDBHost, Value: intctrlutil.BuildPodHostDNS(pod)} } else { hostEnv = buildEnvBySecretKey(dptypes.DPDBHost, credential.SecretName, credential.HostKey) } From 154ad0a76f8fae034bb37455082d24c68fd41ee4 Mon Sep 17 00:00:00 2001 From: "L.Dongming" Date: Thu, 12 Oct 2023 15:41:07 +0800 Subject: [PATCH 3/8] fix --- controllers/dataprotection/gc_controller.go | 2 +- internal/dataprotection/types/constant.go | 2 +- internal/dataprotection/utils/periodical_enqueue_source.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/controllers/dataprotection/gc_controller.go b/controllers/dataprotection/gc_controller.go index a53da6fd43c..d3c60377352 100644 --- a/controllers/dataprotection/gc_controller.go +++ b/controllers/dataprotection/gc_controller.go @@ -93,7 +93,7 @@ func (r *GCReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Re // backup is being deleted, skip if backup.Status.Phase == dpv1alpha1.BackupPhaseDeleting || - backup.DeletionTimestamp.IsZero() { + !backup.DeletionTimestamp.IsZero() { reqCtx.Log.V(1).Info("backup is being deleted, skipping") return ctrlutil.Reconciled() } diff --git a/internal/dataprotection/types/constant.go b/internal/dataprotection/types/constant.go index 96756abc1a3..9c3816a1ade 100644 --- a/internal/dataprotection/types/constant.go +++ b/internal/dataprotection/types/constant.go @@ -28,7 +28,7 @@ const ( // config default values const ( // DefaultGCFrequencySeconds is the default gc frequency, its unit is second - DefaultGCFrequencySeconds = 60 * 60 + DefaultGCFrequencySeconds = 60 ) const ( diff --git a/internal/dataprotection/utils/periodical_enqueue_source.go b/internal/dataprotection/utils/periodical_enqueue_source.go index a76c63b12aa..e2c25e3fa98 100644 --- a/internal/dataprotection/utils/periodical_enqueue_source.go +++ b/internal/dataprotection/utils/periodical_enqueue_source.go @@ -70,7 +70,7 @@ func NewPeriodicalEnqueueSource( func (p *PeriodicalEnqueueSource) Start( ctx context.Context, - h handler.EventHandler, + _ handler.EventHandler, q workqueue.RateLimitingInterface, predicates ...predicate.Predicate) error { go wait.Until(func() { From 72eca625a4dd31db875b4426c604486f6d0a5dfb Mon Sep 17 00:00:00 2001 From: "L.Dongming" Date: Fri, 13 Oct 2023 13:06:51 +0800 Subject: [PATCH 4/8] add test case --- apis/apps/v1alpha1/zz_generated.deepcopy.go | 7 +- apis/dataprotection/v1alpha1/backup_types.go | 4 +- .../v1alpha1/zz_generated.deepcopy.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 2 +- .../storage/v1alpha1/zz_generated.deepcopy.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 2 +- cmd/dataprotection/main.go | 8 +- controllers/apps/cluster_controller_test.go | 5 +- .../dataprotection/backup_controller.go | 2 +- .../backupschedule_controller_test.go | 91 
+-------- controllers/dataprotection/gc_controller.go | 18 +- .../dataprotection/gc_controller_test.go | 172 ++++++++++++++++++ controllers/dataprotection/suite_test.go | 25 ++- .../v1beta2/zz_generated.deepcopy.go | 2 +- .../dataprotection/action/action_create_vs.go | 1 - internal/dataprotection/backup/scheduler.go | 3 +- internal/dataprotection/types/constant.go | 2 +- .../utils/periodical_enqueue_source_test.go | 152 ++++++++++++++++ internal/dataprotection/utils/suit_test.go | 32 ++++ 19 files changed, 411 insertions(+), 121 deletions(-) create mode 100644 controllers/dataprotection/gc_controller_test.go create mode 100644 internal/dataprotection/utils/periodical_enqueue_source_test.go create mode 100644 internal/dataprotection/utils/suit_test.go diff --git a/apis/apps/v1alpha1/zz_generated.deepcopy.go b/apis/apps/v1alpha1/zz_generated.deepcopy.go index 8d3add0314f..96610f5edcf 100644 --- a/apis/apps/v1alpha1/zz_generated.deepcopy.go +++ b/apis/apps/v1alpha1/zz_generated.deepcopy.go @@ -25,15 +25,14 @@ along with this program. If not, see . package v1alpha1 import ( + dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + workloadsv1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" - - dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" - workloadsv1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/apis/dataprotection/v1alpha1/backup_types.go b/apis/dataprotection/v1alpha1/backup_types.go index 1698c10d0f0..7fdedd4385d 100644 --- a/apis/dataprotection/v1alpha1/backup_types.go +++ b/apis/dataprotection/v1alpha1/backup_types.go @@ -43,7 +43,7 @@ type BackupSpec struct { DeletionPolicy BackupDeletionPolicy `json:"deletionPolicy,omitempty"` // retentionPeriod determines a duration up to which the backup should be kept. - // controller will remove all backups that are older than the RetentionPeriod. + // Controller will remove all backups that are older than the RetentionPeriod. // For example, RetentionPeriod of `30d` will keep only the backups of last 30 days. // Sample duration format: // - years: 2y @@ -52,7 +52,7 @@ type BackupSpec struct { // - hours: 12h // - minutes: 30m // You can also combine the above durations. For example: 30d12h30m - // +kubebuilder:default="7d" + // If not set, the backup will be kept forever. // +optional RetentionPeriod RetentionPeriod `json:"retentionPeriod,omitempty"` diff --git a/apis/dataprotection/v1alpha1/zz_generated.deepcopy.go b/apis/dataprotection/v1alpha1/zz_generated.deepcopy.go index 5f955a82a32..06449f5aa3a 100644 --- a/apis/dataprotection/v1alpha1/zz_generated.deepcopy.go +++ b/apis/dataprotection/v1alpha1/zz_generated.deepcopy.go @@ -25,7 +25,7 @@ along with this program. If not, see . 
package v1alpha1 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/apis/extensions/v1alpha1/zz_generated.deepcopy.go b/apis/extensions/v1alpha1/zz_generated.deepcopy.go index eeec9922d51..dc6dc6af55e 100644 --- a/apis/extensions/v1alpha1/zz_generated.deepcopy.go +++ b/apis/extensions/v1alpha1/zz_generated.deepcopy.go @@ -26,7 +26,7 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/apis/storage/v1alpha1/zz_generated.deepcopy.go b/apis/storage/v1alpha1/zz_generated.deepcopy.go index 0ab972c839a..f22b9440108 100644 --- a/apis/storage/v1alpha1/zz_generated.deepcopy.go +++ b/apis/storage/v1alpha1/zz_generated.deepcopy.go @@ -25,7 +25,7 @@ along with this program. If not, see . package v1alpha1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/apis/workloads/v1alpha1/zz_generated.deepcopy.go b/apis/workloads/v1alpha1/zz_generated.deepcopy.go index 09390fb0541..563b051a208 100644 --- a/apis/workloads/v1alpha1/zz_generated.deepcopy.go +++ b/apis/workloads/v1alpha1/zz_generated.deepcopy.go @@ -26,7 +26,7 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) diff --git a/cmd/dataprotection/main.go b/cmd/dataprotection/main.go index f4f0a291edc..d1fe4d89ef1 100644 --- a/cmd/dataprotection/main.go +++ b/cmd/dataprotection/main.go @@ -38,7 +38,6 @@ import ( discoverycli "k8s.io/client-go/discovery" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" - "k8s.io/utils/clock" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -263,11 +262,8 @@ func main() { os.Exit(1) } - if err = (&dpcontrollers.GCReconciler{ - Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor("gc-controller"), - Clock: clock.RealClock{}, - }).SetupWithManager(mgr); err != nil { + if err = dpcontrollers.NewGCReconciler(mgr). + SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "GarbageCollection") os.Exit(1) } diff --git a/controllers/apps/cluster_controller_test.go b/controllers/apps/cluster_controller_test.go index 632f7eab35d..500fb9d96a9 100644 --- a/controllers/apps/cluster_controller_test.go +++ b/controllers/apps/cluster_controller_test.go @@ -388,7 +388,10 @@ var _ = Describe("Cluster Controller", func() { backup := testdp.NewBackupFactory(testCtx.DefaultNamespace, backupName). SetBackupPolicyName(backupPolicyName). SetBackupMethod(backupMethod). - SetLabels(map[string]string{constant.AppInstanceLabelKey: clusterKey.Name, constant.BackupProtectionLabelKey: constant.BackupRetain}). + SetLabels(map[string]string{ + constant.AppInstanceLabelKey: clusterKey.Name, + constant.BackupProtectionLabelKey: constant.BackupRetain, + }). WithRandomName(). 
Create(&testCtx).GetObject() backupKey := client.ObjectKeyFromObject(backup) diff --git a/controllers/dataprotection/backup_controller.go b/controllers/dataprotection/backup_controller.go index bc38a0e05e1..280029e1ffa 100644 --- a/controllers/dataprotection/backup_controller.go +++ b/controllers/dataprotection/backup_controller.go @@ -352,7 +352,7 @@ func (r *BackupReconciler) patchBackupStatus( if err != nil { return fmt.Errorf("failed to parse retention period %s, %v", original.Spec.RetentionPeriod, err) } - if original.Spec.RetentionPeriod != "" { + if duration.Seconds() > 0 { request.Status.Expiration = &metav1.Time{ Time: request.Status.StartTimestamp.Add(duration), } diff --git a/controllers/dataprotection/backupschedule_controller_test.go b/controllers/dataprotection/backupschedule_controller_test.go index c96f6cae4ba..bf35ad291d9 100644 --- a/controllers/dataprotection/backupschedule_controller_test.go +++ b/controllers/dataprotection/backupschedule_controller_test.go @@ -21,12 +21,11 @@ package dataprotection import ( "fmt" - "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + batchv1 "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" @@ -92,13 +91,6 @@ var _ = Describe("Backup Schedule Controller", func() { } } - getJobKey := func(backup *dpv1alpha1.Backup) client.ObjectKey { - return client.ObjectKey{ - Name: dpbackup.GenerateBackupJobName(backup, dpbackup.BackupDataJobNamePrefix), - Namespace: backup.Namespace, - } - } - BeforeEach(func() { By("creating an actionSet") actionSet := testdp.NewFakeActionSet(&testCtx) @@ -118,7 +110,6 @@ var _ = Describe("Backup Schedule Controller", func() { Context("creates a backup schedule", func() { var ( - backupNamePrefix = "schedule-test-backup-" backupSchedule *dpv1alpha1.BackupSchedule backupScheduleKey client.ObjectKey ) @@ -152,86 +143,6 @@ var _ = Describe("Backup Schedule Controller", func() { g.Expect(*fetched.Spec.StartingDeadlineSeconds).To(Equal(getStartingDeadlineSeconds(backupSchedule))) })).Should(Succeed()) }) - - It("delete expired backups", func() { - now := metav1.Now() - backupStatus := dpv1alpha1.BackupStatus{ - Phase: dpv1alpha1.BackupPhaseCompleted, - Expiration: &now, - StartTimestamp: &now, - CompletionTimestamp: &now, - } - - autoBackupLabel := map[string]string{ - dataProtectionLabelAutoBackupKey: "true", - dataProtectionLabelBackupPolicyKey: testdp.BackupPolicyName, - dataProtectionLabelBackupMethodKey: testdp.BackupMethodName, - } - - createBackup := func(name string) *dpv1alpha1.Backup { - return testdp.NewBackupFactory(testCtx.DefaultNamespace, name). - WithRandomName().AddLabelsInMap(autoBackupLabel). - SetBackupPolicyName(testdp.BackupPolicyName). - SetBackupMethod(testdp.BackupMethodName). 
- Create(&testCtx).GetObject() - } - - checkBackupCompleted := func(key client.ObjectKey) { - Eventually(testapps.CheckObj(&testCtx, key, - func(g Gomega, fetched *dpv1alpha1.Backup) { - g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupPhaseCompleted)) - })).Should(Succeed()) - } - - By("create an expired backup") - backupExpired := createBackup(backupNamePrefix + "expired") - - By("create 1st backup") - backupOutLimit1 := createBackup(backupNamePrefix + "1") - - By("create 2nd backup") - backupOutLimit2 := createBackup(backupNamePrefix + "2") - - By("waiting expired backup completed") - expiredKey := client.ObjectKeyFromObject(backupExpired) - testdp.PatchK8sJobStatus(&testCtx, getJobKey(backupExpired), batchv1.JobComplete) - checkBackupCompleted(expiredKey) - - By("mock update expired backup status to expire") - backupStatus.Expiration = &metav1.Time{Time: now.Add(-time.Hour * 24)} - backupStatus.StartTimestamp = backupStatus.Expiration - testdp.PatchBackupStatus(&testCtx, client.ObjectKeyFromObject(backupExpired), backupStatus) - - By("waiting 1st backup completed") - outLimit1Key := client.ObjectKeyFromObject(backupOutLimit1) - testdp.PatchK8sJobStatus(&testCtx, getJobKey(backupOutLimit1), batchv1.JobComplete) - checkBackupCompleted(outLimit1Key) - - By("mock 1st backup not to expire") - backupStatus.Expiration = &metav1.Time{Time: now.Add(time.Hour * 24)} - backupStatus.StartTimestamp = &metav1.Time{Time: now.Add(time.Hour)} - testdp.PatchBackupStatus(&testCtx, client.ObjectKeyFromObject(backupOutLimit1), backupStatus) - - By("waiting 2nd backup completed") - outLimit2Key := client.ObjectKeyFromObject(backupOutLimit2) - testdp.PatchK8sJobStatus(&testCtx, getJobKey(backupOutLimit2), batchv1.JobComplete) - checkBackupCompleted(outLimit2Key) - - By("mock 2nd backup not to expire") - backupStatus.Expiration = &metav1.Time{Time: now.Add(time.Hour * 24)} - backupStatus.StartTimestamp = &metav1.Time{Time: now.Add(time.Hour * 2)} - testdp.PatchBackupStatus(&testCtx, client.ObjectKeyFromObject(backupOutLimit2), backupStatus) - - By("patch backup schedule to trigger the controller to delete expired backup") - Eventually(testapps.GetAndChangeObj(&testCtx, backupScheduleKey, func(fetched *dpv1alpha1.BackupSchedule) { - fetched.Spec.Schedules[0].RetentionPeriod = "1d" - })).Should(Succeed()) - - By("retain the latest backup") - Eventually(testapps.List(&testCtx, generics.BackupSignature, - client.MatchingLabels(autoBackupLabel), - client.InNamespace(backupPolicy.Namespace))).Should(HaveLen(2)) - }) }) Context("creates a backup schedule with empty schedule", func() { diff --git a/controllers/dataprotection/gc_controller.go b/controllers/dataprotection/gc_controller.go index d3c60377352..2a764037aed 100644 --- a/controllers/dataprotection/gc_controller.go +++ b/controllers/dataprotection/gc_controller.go @@ -44,15 +44,25 @@ import ( // GCReconciler garbage collection reconciler, which periodically deletes expired backups. type GCReconciler struct { client.Client - Recorder record.EventRecorder - Clock clock.WithTickerAndDelayedExecution + Recorder record.EventRecorder + clock clock.WithTickerAndDelayedExecution + frequency time.Duration +} + +func NewGCReconciler(mgr ctrl.Manager) *GCReconciler { + return &GCReconciler{ + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor("gc-controller"), + clock: clock.RealClock{}, + frequency: getGCFrequency(), + } } // SetupWithManager sets up the GCReconciler using the supplied manager. 
// GCController only watches on CreateEvent for ensuring every new backup will be // taken care of. Other events will be filtered to decrease the load on the controller. func (r *GCReconciler) SetupWithManager(mgr ctrl.Manager) error { - s := dputils.NewPeriodicalEnqueueSource(mgr.GetClient(), &dpv1alpha1.BackupList{}, getGCFrequency(), dputils.PeriodicalEnqueueSourceOption{}) + s := dputils.NewPeriodicalEnqueueSource(mgr.GetClient(), &dpv1alpha1.BackupList{}, r.frequency, dputils.PeriodicalEnqueueSourceOption{}) return ctrl.NewControllerManagedBy(mgr). For(&dpv1alpha1.Backup{}, builder.WithPredicates(predicate.Funcs{ UpdateFunc: func(_ event.UpdateEvent) bool { @@ -102,7 +112,7 @@ func (r *GCReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Re "phase", backup.Status.Phase, "expiration", backup.Status.Expiration) reqCtx.Log = reqCtx.Log.WithValues("expiration", backup.Status.Expiration) - now := r.Clock.Now() + now := r.clock.Now() if backup.Status.Expiration == nil || backup.Status.Expiration.After(now) { reqCtx.Log.V(1).Info("backup is not expired yet, skipping") return ctrlutil.Reconciled() diff --git a/controllers/dataprotection/gc_controller_test.go b/controllers/dataprotection/gc_controller_test.go new file mode 100644 index 00000000000..4ff3812ea2d --- /dev/null +++ b/controllers/dataprotection/gc_controller_test.go @@ -0,0 +1,172 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package dataprotection + +import ( + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + batchv1 "k8s.io/api/batch/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" + dpbackup "github.com/apecloud/kubeblocks/internal/dataprotection/backup" + "github.com/apecloud/kubeblocks/internal/generics" + testapps "github.com/apecloud/kubeblocks/internal/testutil/apps" + testdp "github.com/apecloud/kubeblocks/internal/testutil/dataprotection" +) + +var _ = Describe("Data Protection Garbage Collection Controller", func() { + cleanEnv := func() { + // must wait till resources deleted and no longer existed before the testcases start, + // otherwise if later it needs to create some new resource objects with the same name, + // in race conditions, it will find the existence of old objects, resulting failure to + // create the new objects. 
+ By("clean resources") + // delete rest mocked objects + inNS := client.InNamespace(testCtx.DefaultNamespace) + ml := client.HasLabels{testCtx.TestObjLabelKey} + + // namespaced + testapps.ClearResources(&testCtx, generics.ClusterSignature, inNS, ml) + testapps.ClearResources(&testCtx, generics.PodSignature, inNS, ml) + testapps.ClearResources(&testCtx, generics.SecretSignature, inNS, ml) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupPolicySignature, true, inNS) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupSignature, true, inNS) + + // wait all backup to be deleted, otherwise the controller maybe create + // job to delete the backup between the ClearResources function delete + // the job and get the job list, resulting the ClearResources panic. + Eventually(testapps.List(&testCtx, generics.BackupSignature, inNS)).Should(HaveLen(0)) + + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.JobSignature, true, inNS) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeClaimSignature, true, inNS) + + // non-namespaced + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.ActionSetSignature, true, ml) + testapps.ClearResources(&testCtx, generics.StorageClassSignature, ml) + testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupRepoSignature, true, ml) + testapps.ClearResources(&testCtx, generics.StorageProviderSignature, ml) + } + + BeforeEach(func() { + cleanEnv() + _ = testdp.NewFakeCluster(&testCtx) + }) + + AfterEach(cleanEnv) + + Context("garbage collection", func() { + var ( + backupNamePrefix = "schedule-test-backup-" + backupPolicy *dpv1alpha1.BackupPolicy + ) + + getJobKey := func(backup *dpv1alpha1.Backup) client.ObjectKey { + return client.ObjectKey{ + Name: dpbackup.GenerateBackupJobName(backup, dpbackup.BackupDataJobNamePrefix), + Namespace: backup.Namespace, + } + } + + BeforeEach(func() { + By("creating an actionSet") + actionSet := testdp.NewFakeActionSet(&testCtx) + + By("creating storage provider") + _ = testdp.NewFakeStorageProvider(&testCtx, nil) + + By("creating backup repo") + _, _ = testdp.NewFakeBackupRepo(&testCtx, nil) + + By("By creating a backupPolicy from actionSet " + actionSet.Name) + backupPolicy = testdp.NewFakeBackupPolicy(&testCtx, nil) + }) + + It("delete expired backups", func() { + now := metav1.Now() + backupStatus := dpv1alpha1.BackupStatus{ + Phase: dpv1alpha1.BackupPhaseCompleted, + Expiration: &now, + StartTimestamp: &now, + CompletionTimestamp: &now, + } + + autoBackupLabel := map[string]string{ + dataProtectionLabelAutoBackupKey: "true", + dataProtectionLabelBackupPolicyKey: testdp.BackupPolicyName, + dataProtectionLabelBackupMethodKey: testdp.BackupMethodName, + } + + createBackup := func(name string) *dpv1alpha1.Backup { + return testdp.NewBackupFactory(testCtx.DefaultNamespace, name). + WithRandomName().AddLabelsInMap(autoBackupLabel). + SetBackupPolicyName(testdp.BackupPolicyName). + SetBackupMethod(testdp.BackupMethodName). 
+ Create(&testCtx).GetObject() + } + + checkBackupCompleted := func(key client.ObjectKey) { + Eventually(testapps.CheckObj(&testCtx, key, + func(g Gomega, fetched *dpv1alpha1.Backup) { + g.Expect(fetched.Status.Phase).To(Equal(dpv1alpha1.BackupPhaseCompleted)) + })).Should(Succeed()) + } + + setBackupUnexpired := func(backup *dpv1alpha1.Backup) { + backup.Status.Expiration = &metav1.Time{Time: fakeClock.Now().Add(time.Hour * 24)} + backup.Status.StartTimestamp = &metav1.Time{Time: fakeClock.Now().Add(time.Hour)} + testdp.PatchBackupStatus(&testCtx, client.ObjectKeyFromObject(backup), backup.Status) + } + + By("create an expired backup") + backupExpired := createBackup(backupNamePrefix + "expired") + + By("create an unexpired backup") + backup1 := createBackup(backupNamePrefix + "unexpired") + + By("waiting expired backup completed") + expiredKey := client.ObjectKeyFromObject(backupExpired) + testdp.PatchK8sJobStatus(&testCtx, getJobKey(backupExpired), batchv1.JobComplete) + checkBackupCompleted(expiredKey) + + By("mock backup status to expire") + backupStatus.Expiration = &metav1.Time{Time: fakeClock.Now().Add(-time.Hour * 24)} + backupStatus.StartTimestamp = backupStatus.Expiration + testdp.PatchBackupStatus(&testCtx, client.ObjectKeyFromObject(backupExpired), backupStatus) + + By("waiting backup completed") + backup1Key := client.ObjectKeyFromObject(backup1) + testdp.PatchK8sJobStatus(&testCtx, getJobKey(backup1), batchv1.JobComplete) + checkBackupCompleted(backup1Key) + + By("mock backup not to expire") + setBackupUnexpired(backup1) + + By("retain the unexpired backup") + Eventually(testapps.List(&testCtx, generics.BackupSignature, + client.MatchingLabels(autoBackupLabel), + client.InNamespace(backupPolicy.Namespace))).Should(HaveLen(1)) + }) + }) +}) diff --git a/controllers/dataprotection/suite_test.go b/controllers/dataprotection/suite_test.go index 15710eb2e8f..a5b3559e01c 100644 --- a/controllers/dataprotection/suite_test.go +++ b/controllers/dataprotection/suite_test.go @@ -36,6 +36,7 @@ import ( batchv1 "k8s.io/api/batch/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + testclocks "k8s.io/utils/clock/testing" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" @@ -59,6 +60,7 @@ var testEnv *envtest.Environment var ctx context.Context var cancel context.CancelFunc var testCtx testutil.TestContext +var fakeClock *testclocks.FakeClock func init() { viper.AutomaticEnv() @@ -71,11 +73,11 @@ func TestAPIs(t *testing.T) { } var _ = BeforeSuite(func() { - if viper.GetBool("ENABLE_DEBUG_LOG") { - logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), func(o *zap.Options) { - o.TimeEncoder = zapcore.ISO8601TimeEncoder - })) - } + // if viper.GetBool("ENABLE_DEBUG_LOG") { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), func(o *zap.Options) { + o.TimeEncoder = zapcore.ISO8601TimeEncoder + })) + // } reconcileInterval = time.Millisecond ctx, cancel = context.WithCancel(context.TODO()) @@ -186,6 +188,9 @@ var _ = BeforeSuite(func() { }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) + err = mockGCReconciler(k8sManager).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + testCtx = testutil.NewDefaultTestContext(ctx, k8sClient, testEnv) go func() { @@ -201,3 +206,13 @@ var _ = AfterSuite(func() { err := testEnv.Stop() Expect(err).NotTo(HaveOccurred()) }) + +func mockGCReconciler(mgr ctrl.Manager) *GCReconciler { + fakeClock = 
testclocks.NewFakeClock(time.Now()) + return &GCReconciler{ + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor("gc-controller"), + clock: fakeClock, + frequency: time.Duration(1) * time.Second, + } +} diff --git a/externalapis/preflight/v1beta2/zz_generated.deepcopy.go b/externalapis/preflight/v1beta2/zz_generated.deepcopy.go index 395b99094bf..e1cbcb93528 100644 --- a/externalapis/preflight/v1beta2/zz_generated.deepcopy.go +++ b/externalapis/preflight/v1beta2/zz_generated.deepcopy.go @@ -26,7 +26,7 @@ package v1beta2 import ( troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) diff --git a/internal/dataprotection/action/action_create_vs.go b/internal/dataprotection/action/action_create_vs.go index b8ed5ab7c78..7d51374ffe9 100644 --- a/internal/dataprotection/action/action_create_vs.go +++ b/internal/dataprotection/action/action_create_vs.go @@ -117,7 +117,6 @@ func (c *CreateVolumeSnapshotAction) Execute(ctx Context) (*dpv1alpha1.ActionSta // volume snapshot is ready and status is not error // TODO(ldm): now only support one volume to take snapshot, set its time, size to status return sb.phase(dpv1alpha1.ActionPhaseCompleted). - phase(dpv1alpha1.ActionPhaseCompleted). totalSize(snap.Status.RestoreSize.String()). timeRange(snap.Status.CreationTime, snap.Status.CreationTime). build(), nil diff --git a/internal/dataprotection/backup/scheduler.go b/internal/dataprotection/backup/scheduler.go index 3ac0af7f2e3..645174b170c 100644 --- a/internal/dataprotection/backup/scheduler.go +++ b/internal/dataprotection/backup/scheduler.go @@ -269,6 +269,7 @@ kind: Backup metadata: labels: dataprotection.kubeblocks.io/autobackup: "true" + dataprotection.kubeblocks.io/backup-schedule: "%s" name: %s namespace: %s spec: @@ -276,7 +277,7 @@ spec: backupMethod: %s retentionPeriod: %s EOF -`, s.generateBackupName(), s.BackupSchedule.Namespace, +`, s.BackupSchedule.Name, s.generateBackupName(), s.BackupSchedule.Namespace, s.BackupPolicy.Name, schedulePolicy.BackupMethod, schedulePolicy.RetentionPeriod) diff --git a/internal/dataprotection/types/constant.go b/internal/dataprotection/types/constant.go index 9c3816a1ade..96756abc1a3 100644 --- a/internal/dataprotection/types/constant.go +++ b/internal/dataprotection/types/constant.go @@ -28,7 +28,7 @@ const ( // config default values const ( // DefaultGCFrequencySeconds is the default gc frequency, its unit is second - DefaultGCFrequencySeconds = 60 + DefaultGCFrequencySeconds = 60 * 60 ) const ( diff --git a/internal/dataprotection/utils/periodical_enqueue_source_test.go b/internal/dataprotection/utils/periodical_enqueue_source_test.go new file mode 100644 index 00000000000..cd0c0b5f52c --- /dev/null +++ b/internal/dataprotection/utils/periodical_enqueue_source_test.go @@ -0,0 +1,152 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package utils + +import ( + "context" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" +) + +var _ = Describe("Periodical Enqueue Source", func() { + const ( + backupName = "test-backup" + ) + + var ( + ctx context.Context + cancelFunc context.CancelFunc + cli client.Client + queue workqueue.RateLimitingInterface + ) + + createBackup := func(name string) { + Expect(cli.Create(ctx, &dpv1alpha1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + })).Should(Succeed()) + } + + BeforeEach(func() { + Expect(dpv1alpha1.AddToScheme(scheme.Scheme)).Should(Succeed()) + ctx, cancelFunc = context.WithCancel(context.TODO()) + cli = (&fake.ClientBuilder{}).Build() + queue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) + }) + + Context("source", func() { + var source *PeriodicalEnqueueSource + + BeforeEach(func() { + By("create source") + source = NewPeriodicalEnqueueSource(cli, &dpv1alpha1.BackupList{}, 1*time.Second, PeriodicalEnqueueSourceOption{}) + }) + + It("should start success", func() { + By("start source") + Expect(source.Start(ctx, nil, queue)).Should(Succeed()) + + By("wait and there is no resources") + time.Sleep(1 * time.Second) + Expect(queue.Len()).Should(Equal(0)) + + By("create a resource") + createBackup(backupName) + + By("wait and there is one resource") + time.Sleep(2 * time.Second) + Expect(queue.Len()).Should(Equal(1)) + + By("cancel context, the queue source shouldn't run anymore") + item, _ := queue.Get() + queue.Forget(item) + Expect(queue.Len()).Should(Equal(0)) + cancelFunc() + time.Sleep(2 * time.Second) + Expect(queue.Len()).Should(Equal(0)) + }) + + It("predicate should work", func() { + By("start source") + Expect(source.Start(ctx, nil, queue, predicate.Funcs{ + GenericFunc: func(event event.GenericEvent) bool { + return event.Object.GetName() == backupName + }, + })) + + By("create a resource match predicate") + createBackup(backupName) + + By("create another resource that does not match predicate") + createBackup(backupName + "-1") + + By("wait and there is one resource") + time.Sleep(2 * time.Second) + Expect(queue.Len()).Should(Equal(1)) + + cancelFunc() + }) + + It("order function should work", func() { + By("set source order func") + source.option.OrderFunc = func(objList client.ObjectList) client.ObjectList { + backupList := &dpv1alpha1.BackupList{} + objArray := make([]runtime.Object, 0) + backups, _ := meta.ExtractList(objList) + objArray = append(objArray, backups[1], backups[0]) + _ = meta.SetList(backupList, objArray) + return backupList + } + + By("start source") + Expect(source.Start(ctx, nil, queue)).Should(Succeed()) + + By("create a resource") + createBackup(backupName + "-1") + + By("create another resource") + createBackup(backupName + "-2") + + time.Sleep(2 * time.Second) + Expect(queue.Len()).Should(Equal(2)) + first, _ := queue.Get() + 
Expect(first.(reconcile.Request).Name).Should(Equal(backupName + "-2")) + + cancelFunc() + }) + }) +}) diff --git a/internal/dataprotection/utils/suit_test.go b/internal/dataprotection/utils/suit_test.go new file mode 100644 index 00000000000..940a611baa8 --- /dev/null +++ b/internal/dataprotection/utils/suit_test.go @@ -0,0 +1,32 @@ +/* +Copyright (C) 2022-2023 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package utils + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestDPUtils(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "DataProtection Utils Suite") +} From 650df67bbf7a123dadc03993ceb07b158bbeeea0 Mon Sep 17 00:00:00 2001 From: "L.Dongming" Date: Fri, 13 Oct 2023 15:38:58 +0800 Subject: [PATCH 5/8] make manifests --- config/crd/bases/dataprotection.kubeblocks.io_backups.yaml | 5 ++--- deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml b/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml index 58cfa9f3a50..784efca7533 100644 --- a/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml +++ b/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml @@ -94,14 +94,13 @@ spec: incremental or differential backup. type: string retentionPeriod: - default: 7d description: "retentionPeriod determines a duration up to which the - backup should be kept. controller will remove all backups that are + backup should be kept. Controller will remove all backups that are older than the RetentionPeriod. For example, RetentionPeriod of `30d` will keep only the backups of last 30 days. Sample duration format: - years: \t2y - months: \t6mo - days: \t\t30d - hours: \t12h - minutes: \t30m You can also combine the above durations. For example: - 30d12h30m" + 30d12h30m If not set, the backup will be kept forever." type: string required: - backupMethod diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml index 58cfa9f3a50..784efca7533 100644 --- a/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml +++ b/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml @@ -94,14 +94,13 @@ spec: incremental or differential backup. type: string retentionPeriod: - default: 7d description: "retentionPeriod determines a duration up to which the - backup should be kept. controller will remove all backups that are + backup should be kept. Controller will remove all backups that are older than the RetentionPeriod. For example, RetentionPeriod of `30d` will keep only the backups of last 30 days. Sample duration format: - years: \t2y - months: \t6mo - days: \t\t30d - hours: \t12h - minutes: \t30m You can also combine the above durations. 
For example: - 30d12h30m" + 30d12h30m If not set, the backup will be kept forever." type: string required: - backupMethod From 1b6827504156073378776bddead34fc93db16a29 Mon Sep 17 00:00:00 2001 From: "L.Dongming" Date: Fri, 13 Oct 2023 15:55:55 +0800 Subject: [PATCH 6/8] fix test case --- cmd/dataprotection/main.go | 5 ++--- controllers/dataprotection/gc_controller_test.go | 2 ++ 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cmd/dataprotection/main.go b/cmd/dataprotection/main.go index d1fe4d89ef1..c2de3e4c3d2 100644 --- a/cmd/dataprotection/main.go +++ b/cmd/dataprotection/main.go @@ -96,7 +96,7 @@ func init() { viper.SetDefault("VOLUMESNAPSHOT_API_BETA", false) viper.SetDefault(constant.KBToolsImage, "apecloud/kubeblocks-tools:latest") viper.SetDefault("KUBEBLOCKS_SERVICEACCOUNT_NAME", "kubeblocks") - viper.SetDefault(constant.CfgKeyCtrlrMgrNS, "kb-system") + viper.SetDefault(constant.CfgKeyCtrlrMgrNS, "default") viper.SetDefault(constant.KubernetesClusterDomainEnv, constant.DefaultDNSDomain) viper.SetDefault(dptypes.CfgKeyGCFrequencySeconds, dptypes.DefaultGCFrequencySeconds) } @@ -262,8 +262,7 @@ func main() { os.Exit(1) } - if err = dpcontrollers.NewGCReconciler(mgr). - SetupWithManager(mgr); err != nil { + if err = dpcontrollers.NewGCReconciler(mgr).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "GarbageCollection") os.Exit(1) } diff --git a/controllers/dataprotection/gc_controller_test.go b/controllers/dataprotection/gc_controller_test.go index 4ff3812ea2d..e4245e7891f 100644 --- a/controllers/dataprotection/gc_controller_test.go +++ b/controllers/dataprotection/gc_controller_test.go @@ -167,6 +167,8 @@ var _ = Describe("Data Protection Garbage Collection Controller", func() { Eventually(testapps.List(&testCtx, generics.BackupSignature, client.MatchingLabels(autoBackupLabel), client.InNamespace(backupPolicy.Namespace))).Should(HaveLen(1)) + Eventually(testapps.CheckObjExists(&testCtx, backup1Key, &dpv1alpha1.Backup{}, true)).Should(Succeed()) + Eventually(testapps.CheckObjExists(&testCtx, expiredKey, &dpv1alpha1.Backup{}, false)).Should(Succeed()) }) }) }) From b7bb13fb281336fac1997cad4938e6c31aa99636 Mon Sep 17 00:00:00 2001 From: "L.Dongming" Date: Fri, 13 Oct 2023 16:31:02 +0800 Subject: [PATCH 7/8] add validation --- apis/dataprotection/v1alpha1/backup_types.go | 5 ++++- .../bases/dataprotection.kubeblocks.io_backups.yaml | 11 ++++++++++- controllers/dataprotection/gc_controller.go | 3 +-- controllers/dataprotection/suite_test.go | 10 +++++----- .../crds/dataprotection.kubeblocks.io_backups.yaml | 11 ++++++++++- 5 files changed, 30 insertions(+), 10 deletions(-) diff --git a/apis/dataprotection/v1alpha1/backup_types.go b/apis/dataprotection/v1alpha1/backup_types.go index 7fdedd4385d..d2705dcbda6 100644 --- a/apis/dataprotection/v1alpha1/backup_types.go +++ b/apis/dataprotection/v1alpha1/backup_types.go @@ -26,10 +26,12 @@ type BackupSpec struct { // Which backupPolicy is applied to perform this backup. // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.backupPolicyName" BackupPolicyName string `json:"backupPolicyName"` // backupMethod specifies the backup method name that is defined in backupPolicy. 
// +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.backupMethod" BackupMethod string `json:"backupMethod"` // deletionPolicy determines whether the backup contents stored in backup repository @@ -51,7 +53,7 @@ type BackupSpec struct { // - days: 30d // - hours: 12h // - minutes: 30m - // You can also combine the above durations. For example: 30d12h30m + // You can also combine the above durations. For example: 30d12h30m. // If not set, the backup will be kept forever. // +optional RetentionPeriod RetentionPeriod `json:"retentionPeriod,omitempty"` @@ -59,6 +61,7 @@ type BackupSpec struct { // parentBackupName determines the parent backup name for incremental or // differential backup. // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.parentBackupName" ParentBackupName string `json:"parentBackupName,omitempty"` } diff --git a/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml b/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml index 784efca7533..868c8bc7d37 100644 --- a/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml +++ b/config/crd/bases/dataprotection.kubeblocks.io_backups.yaml @@ -69,10 +69,16 @@ spec: description: backupMethod specifies the backup method name that is defined in backupPolicy. type: string + x-kubernetes-validations: + - message: forbidden to update spec.backupMethod + rule: self == oldSelf backupPolicyName: description: Which backupPolicy is applied to perform this backup. pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ type: string + x-kubernetes-validations: + - message: forbidden to update spec.backupPolicyName + rule: self == oldSelf deletionPolicy: allOf: - enum: @@ -93,6 +99,9 @@ spec: description: parentBackupName determines the parent backup name for incremental or differential backup. type: string + x-kubernetes-validations: + - message: forbidden to update spec.parentBackupName + rule: self == oldSelf retentionPeriod: description: "retentionPeriod determines a duration up to which the backup should be kept. Controller will remove all backups that are @@ -100,7 +109,7 @@ spec: `30d` will keep only the backups of last 30 days. Sample duration format: - years: \t2y - months: \t6mo - days: \t\t30d - hours: \t12h - minutes: \t30m You can also combine the above durations. For example: - 30d12h30m If not set, the backup will be kept forever." + 30d12h30m. If not set, the backup will be kept forever." 
type: string required: - backupMethod diff --git a/controllers/dataprotection/gc_controller.go b/controllers/dataprotection/gc_controller.go index 2a764037aed..bc2cd828848 100644 --- a/controllers/dataprotection/gc_controller.go +++ b/controllers/dataprotection/gc_controller.go @@ -102,8 +102,7 @@ func (r *GCReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Re } // backup is being deleted, skip - if backup.Status.Phase == dpv1alpha1.BackupPhaseDeleting || - !backup.DeletionTimestamp.IsZero() { + if !backup.DeletionTimestamp.IsZero() { reqCtx.Log.V(1).Info("backup is being deleted, skipping") return ctrlutil.Reconciled() } diff --git a/controllers/dataprotection/suite_test.go b/controllers/dataprotection/suite_test.go index a5b3559e01c..895b75bbc9f 100644 --- a/controllers/dataprotection/suite_test.go +++ b/controllers/dataprotection/suite_test.go @@ -73,11 +73,11 @@ func TestAPIs(t *testing.T) { } var _ = BeforeSuite(func() { - // if viper.GetBool("ENABLE_DEBUG_LOG") { - logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), func(o *zap.Options) { - o.TimeEncoder = zapcore.ISO8601TimeEncoder - })) - // } + if viper.GetBool("ENABLE_DEBUG_LOG") { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), func(o *zap.Options) { + o.TimeEncoder = zapcore.ISO8601TimeEncoder + })) + } reconcileInterval = time.Millisecond ctx, cancel = context.WithCancel(context.TODO()) diff --git a/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml b/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml index 784efca7533..868c8bc7d37 100644 --- a/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml +++ b/deploy/helm/crds/dataprotection.kubeblocks.io_backups.yaml @@ -69,10 +69,16 @@ spec: description: backupMethod specifies the backup method name that is defined in backupPolicy. type: string + x-kubernetes-validations: + - message: forbidden to update spec.backupMethod + rule: self == oldSelf backupPolicyName: description: Which backupPolicy is applied to perform this backup. pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ type: string + x-kubernetes-validations: + - message: forbidden to update spec.backupPolicyName + rule: self == oldSelf deletionPolicy: allOf: - enum: @@ -93,6 +99,9 @@ spec: description: parentBackupName determines the parent backup name for incremental or differential backup. type: string + x-kubernetes-validations: + - message: forbidden to update spec.parentBackupName + rule: self == oldSelf retentionPeriod: description: "retentionPeriod determines a duration up to which the backup should be kept. Controller will remove all backups that are @@ -100,7 +109,7 @@ spec: `30d` will keep only the backups of last 30 days. Sample duration format: - years: \t2y - months: \t6mo - days: \t\t30d - hours: \t12h - minutes: \t30m You can also combine the above durations. For example: - 30d12h30m If not set, the backup will be kept forever." + 30d12h30m. If not set, the backup will be kept forever." 
type: string required: - backupMethod From 3b26822764911d511070523c2fc10b7f6025e5e2 Mon Sep 17 00:00:00 2001 From: "L.Dongming" Date: Fri, 13 Oct 2023 17:46:35 +0800 Subject: [PATCH 8/8] do not watch create event --- controllers/dataprotection/gc_controller.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/controllers/dataprotection/gc_controller.go b/controllers/dataprotection/gc_controller.go index bc2cd828848..ba2c6a133c5 100644 --- a/controllers/dataprotection/gc_controller.go +++ b/controllers/dataprotection/gc_controller.go @@ -65,6 +65,9 @@ func (r *GCReconciler) SetupWithManager(mgr ctrl.Manager) error { s := dputils.NewPeriodicalEnqueueSource(mgr.GetClient(), &dpv1alpha1.BackupList{}, r.frequency, dputils.PeriodicalEnqueueSourceOption{}) return ctrl.NewControllerManagedBy(mgr). For(&dpv1alpha1.Backup{}, builder.WithPredicates(predicate.Funcs{ + CreateFunc: func(_ event.CreateEvent) bool { + return false + }, UpdateFunc: func(_ event.UpdateEvent) bool { return false },
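
Taken together, the patches above wire a periodic re-list of Backup objects into the GC controller and lean on spec.retentionPeriod to decide which backups to delete. For readers skimming the series, the expiry decision such a controller makes can be illustrated with a small, self-contained Go sketch. This is not code from the patches: the helper name isExpired and the use of a plain time.Duration are assumptions for illustration only; the real controller works with the CRD's RetentionPeriod string (e.g. "30d12h30m") and its own parsing of that format.

// Illustrative sketch only -- not part of the patch series above.
package main

import (
	"fmt"
	"time"
)

// isExpired reports whether a backup that completed at completedAt has
// outlived its retention window at the reference time now. A zero or
// negative retention means "keep forever", mirroring the CRD description
// for an unset retentionPeriod.
func isExpired(completedAt time.Time, retention time.Duration, now time.Time) bool {
	if retention <= 0 {
		return false
	}
	return now.After(completedAt.Add(retention))
}

func main() {
	now := time.Now()
	completed := now.Add(-31 * 24 * time.Hour) // completed 31 days ago

	fmt.Println(isExpired(completed, 30*24*time.Hour, now)) // true: older than a 30d retention
	fmt.Println(isExpired(completed, 0, now))               // false: retention not set, keep forever
}

A GC reconcile loop would apply a check like this to each Backup it is enqueued for and delete the object when the check returns true; the periodical enqueue source ensures every backup is re-evaluated at least once per configured GC interval even though update, delete, and (after the last patch) create events are filtered out.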