chore: support to rebuild instance from backup for sharding cluster #8778

Merged
3 commits merged on Jan 10, 2025
4 changes: 4 additions & 0 deletions apis/operations/v1alpha1/opsrequest_types.go
@@ -270,6 +270,10 @@ type RebuildInstance struct {
// +optional
BackupName string `json:"backupName,omitempty"`

// When multiple source targets exist in the backup, you must specify the source target to restore.
// +optional
SourceBackupTargetName string `json:"sourceBackupTargetName,omitempty"`

// Defines container environment variables for the restore process.
// merged with the ones specified in the Backup and ActionSet resources.
//
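For reference, a rebuild OpsRequest that uses the new field might look roughly like the sketch below: it targets one shard of a sharding cluster and picks a specific source target from a multi-target backup. The cluster, component, backup, instance, and target names are placeholders, and the fields outside rebuildFrom (clusterName, type) follow the usual OpsRequest layout, which may differ slightly between API versions.

apiVersion: operations.kubeblocks.io/v1alpha1
kind: OpsRequest
metadata:
  name: rebuild-shard-instance
  namespace: default
spec:
  clusterName: my-sharding-cluster          # placeholder cluster name
  type: RebuildInstance
  rebuildFrom:
  - componentName: shard-5fk                # placeholder shard component name
    backupName: my-full-backup              # must reference a completed Full backup
    # new field added in this PR: selects which backup target to restore from
    # when the backup was taken from multiple source targets (e.g. one per shard)
    sourceBackupTargetName: shard-5fk-target
    instances:
    - name: my-sharding-cluster-shard-5fk-0 # placeholder pod name to rebuild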
4 changes: 4 additions & 0 deletions config/crd/bases/operations.kubeblocks.io_opsrequests.yaml
@@ -4200,6 +4200,10 @@ spec:
type: object
type: array
x-kubernetes-preserve-unknown-fields: true
sourceBackupTargetName:
description: When multiple source targets exist in the backup,
you must specify the source target to restore.
type: string
required:
- componentName
- instances
4 changes: 4 additions & 0 deletions deploy/helm/crds/operations.kubeblocks.io_opsrequests.yaml
@@ -4200,6 +4200,10 @@ spec:
type: object
type: array
x-kubernetes-preserve-unknown-fields: true
sourceBackupTargetName:
description: When multiple source targets exist in the backup,
you must specify the source target to restore.
type: string
required:
- componentName
- instances
18 changes: 18 additions & 0 deletions examples/redis/cluster-cmpd.yaml
@@ -3,6 +3,8 @@ kind: Cluster
metadata:
name: redis-cluster
namespace: default
annotations:
kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1alpha1
spec:
# Specifies the behavior when a Cluster is deleted.
# - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact.
@@ -14,10 +16,26 @@ spec:
componentSpecs:
# Specifies the name of the Component. This name is also part of the Service DNS name and must comply with the IANA service naming rule. When ClusterComponentSpec is referenced as a template, the name is optional. Otherwise, it is required.
- name: redis
instances:
- annotations:
xxx: yyy
labels: {}
name: tpl-0
replicas: 2
resources:
limits:
cpu: '0.5'
memory: 0.5Gi
requests:
cpu: 100m
memory: 500M
# References the name of a ComponentDefinition. The ComponentDefinition specifies the behavior and characteristics of the Component. If both `componentDefRef` and `componentDef` are provided, the `componentDef` will take precedence over `componentDefRef`.
componentDef: redis-7
# Determines whether the metrics exporter needs to be published to the service endpoint.
disableExporter: true
serviceVersion: "7.2.4"
labels: {}
# podUpdatePolicy: StrictInPlace
# Specifies which types of logs should be collected for the Cluster.
enabledLogs:
- running
48 changes: 24 additions & 24 deletions pkg/operations/rebuild_instance.go
@@ -275,8 +275,7 @@ func (r rebuildInstanceOpsHandler) rebuildInstanceInPlace(reqCtx intctrlutil.Req
rebuildFrom opsv1alpha1.RebuildInstance,
instance opsv1alpha1.Instance,
index int) (bool, error) {
inPlaceHelper, err := r.prepareInplaceRebuildHelper(reqCtx, cli, opsRes, rebuildFrom.RestoreEnv,
instance, rebuildFrom.BackupName, index)
inPlaceHelper, err := r.prepareInplaceRebuildHelper(reqCtx, cli, opsRes, rebuildFrom, instance, index)
if err != nil {
return false, err
}
@@ -444,10 +443,6 @@ func (r rebuildInstanceOpsHandler) checkProgressForScalingOutPods(reqCtx intctrl
failedCount int
completedCount int
)
synthesizedComp, err := r.buildSynthesizedComponent(reqCtx.Ctx, cli, opsRes.Cluster, rebuildInstance.ComponentName)
if err != nil {
return 0, 0, nil, err
}
currPodSet, _ := component.GenerateAllPodNamesToSet(compSpec.Replicas, compSpec.Instances, compSpec.OfflineInstances,
opsRes.Cluster.Name, compSpec.Name)
for _, instance := range rebuildInstance.Instances {
Expand All @@ -464,6 +459,11 @@ func (r rebuildInstanceOpsHandler) checkProgressForScalingOutPods(reqCtx intctrl
reqCtx.Log.Info(fmt.Sprintf("waiting to create the pod %s", scalingOutPodName))
continue
}

synthesizedComp, err := r.buildSynthesizedComponent(reqCtx.Ctx, cli, opsRes.Cluster, pod.Labels[constant.KBAppComponentLabelKey])
if err != nil {
return 0, 0, nil, err
}
isAvailable, err := instanceIsAvailable(synthesizedComp, pod, opsRes.OpsRequest.Annotations[ignoreRoleCheckAnnotationKey])
if err != nil {
// set progress status to failed when new pod is failed
@@ -551,30 +551,29 @@ func (r rebuildInstanceOpsHandler) buildSynthesizedComponent(ctx context.Context
func (r rebuildInstanceOpsHandler) prepareInplaceRebuildHelper(reqCtx intctrlutil.RequestCtx,
cli client.Client,
opsRes *OpsResource,
envForRestore []corev1.EnvVar,
rebuildInstance opsv1alpha1.RebuildInstance,
instance opsv1alpha1.Instance,
backupName string,
index int) (*inplaceRebuildHelper, error) {
var (
backup *dpv1alpha1.Backup
actionSet *dpv1alpha1.ActionSet
synthesizedComp *component.SynthesizedComponent
err error
)
if backupName != "" {
if rebuildInstance.BackupName != "" {
// prepare backup infos
backup = &dpv1alpha1.Backup{}
if err = cli.Get(reqCtx.Ctx, client.ObjectKey{Name: backupName, Namespace: opsRes.Cluster.Namespace}, backup); err != nil {
if err = cli.Get(reqCtx.Ctx, client.ObjectKey{Name: rebuildInstance.BackupName, Namespace: opsRes.Cluster.Namespace}, backup); err != nil {
return nil, err
}
if backup.Labels[dptypes.BackupTypeLabelKey] != string(dpv1alpha1.BackupTypeFull) {
return nil, intctrlutil.NewFatalError(fmt.Sprintf(`the backup "%s" is not a Full backup`, backupName))
return nil, intctrlutil.NewFatalError(fmt.Sprintf(`the backup "%s" is not a Full backup`, rebuildInstance.BackupName))
}
if backup.Status.Phase != dpv1alpha1.BackupPhaseCompleted {
return nil, intctrlutil.NewFatalError(fmt.Sprintf(`the backup "%s" phase is not Completed`, backupName))
return nil, intctrlutil.NewFatalError(fmt.Sprintf(`the backup "%s" phase is not Completed`, rebuildInstance.BackupName))
}
if backup.Status.BackupMethod == nil {
return nil, intctrlutil.NewFatalError(fmt.Sprintf(`the backupMethod of the backup "%s" can not be empty`, backupName))
return nil, intctrlutil.NewFatalError(fmt.Sprintf(`the backupMethod of the backup "%s" can not be empty`, rebuildInstance.BackupName))
}
actionSet, err = dputils.GetActionSetByName(reqCtx, cli, backup.Status.BackupMethod.ActionSetName)
if err != nil {
@@ -595,17 +594,18 @@ func (r rebuildInstanceOpsHandler) prepareInplaceRebuildHelper(reqCtx intctrluti
return nil, err
}
return &inplaceRebuildHelper{
index: index,
backup: backup,
instance: instance,
actionSet: actionSet,
synthesizedComp: synthesizedComp,
pvcMap: pvcMap,
volumes: volumes,
targetPod: targetPod,
volumeMounts: volumeMounts,
rebuildPrefix: rebuildPrefix,
envForRestore: envForRestore,
index: index,
backup: backup,
instance: instance,
actionSet: actionSet,
synthesizedComp: synthesizedComp,
sourceBackupTargetName: rebuildInstance.SourceBackupTargetName,
pvcMap: pvcMap,
volumes: volumes,
targetPod: targetPod,
volumeMounts: volumeMounts,
rebuildPrefix: rebuildPrefix,
envForRestore: rebuildInstance.RestoreEnv,
}, nil
}

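A note on the checkProgressForScalingOutPods change above: the synthesized component is now resolved per pod, from the pod's component-name label, instead of once from rebuildInstance.ComponentName. In a sharding cluster the shard components are generated, so rebuilt pods may belong to a different component than the name given in the OpsRequest. Assuming constant.KBAppComponentLabelKey maps to the usual KubeBlocks component-name label, a rebuilt shard pod would carry labels roughly like:

metadata:
  labels:
    app.kubernetes.io/instance: my-sharding-cluster   # illustrative cluster name
    apps.kubeblocks.io/component-name: shard-5fk      # generated shard component; assumed label key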
25 changes: 14 additions & 11 deletions pkg/operations/rebuild_instance_inplace.go
@@ -60,13 +60,14 @@ type inplaceRebuildHelper struct {
instance opsv1alpha1.Instance
actionSet *dpv1alpha1.ActionSet
// key: source pvc name, value: the temporary pvc used to rebuild
pvcMap map[string]*corev1.PersistentVolumeClaim
synthesizedComp *component.SynthesizedComponent
volumes []corev1.Volume
volumeMounts []corev1.VolumeMount
envForRestore []corev1.EnvVar
rebuildPrefix string
index int
pvcMap map[string]*corev1.PersistentVolumeClaim
synthesizedComp *component.SynthesizedComponent
volumes []corev1.Volume
volumeMounts []corev1.VolumeMount
envForRestore []corev1.EnvVar
sourceBackupTargetName string
rebuildPrefix string
index int
}

// rebuildPodWithNoBackup rebuilds the instance with no backup.
@@ -219,8 +220,9 @@ func (inPlaceHelper *inplaceRebuildHelper) createPrepareDataRestore(reqCtx intct
ObjectMeta: inPlaceHelper.buildRestoreMetaObject(opsRequest, restoreName),
Spec: dpv1alpha1.RestoreSpec{
Backup: dpv1alpha1.BackupRef{
Name: inPlaceHelper.backup.Name,
Namespace: opsRequest.Namespace,
Name: inPlaceHelper.backup.Name,
Namespace: opsRequest.Namespace,
SourceTargetName: inPlaceHelper.sourceBackupTargetName,
},
Env: inPlaceHelper.envForRestore,
PrepareDataConfig: &dpv1alpha1.PrepareDataConfig{
@@ -262,8 +264,9 @@ func (inPlaceHelper *inplaceRebuildHelper) createPostReadyRestore(reqCtx intctrl
ObjectMeta: inPlaceHelper.buildRestoreMetaObject(opsRequest, restoreName),
Spec: dpv1alpha1.RestoreSpec{
Backup: dpv1alpha1.BackupRef{
Name: inPlaceHelper.backup.Name,
Namespace: inPlaceHelper.backup.Namespace,
Name: inPlaceHelper.backup.Name,
Namespace: inPlaceHelper.backup.Namespace,
SourceTargetName: inPlaceHelper.sourceBackupTargetName,
},
Env: inPlaceHelper.envForRestore,
ReadyConfig: &dpv1alpha1.ReadyConfig{
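With the two changes above, the selected source target is propagated into the Restore objects created during the in-place rebuild. Assuming the dataprotection BackupRef serializes the new field as sourceTargetName, the generated Restore would carry it roughly like the sketch below; all names are illustrative, and the metadata name is produced by buildRestoreMetaObject.

apiVersion: dataprotection.kubeblocks.io/v1alpha1
kind: Restore
metadata:
  name: <generated-by-buildRestoreMetaObject>
  namespace: default
spec:
  backup:
    name: my-full-backup               # inPlaceHelper.backup.Name
    namespace: default                 # OpsRequest or backup namespace, depending on the restore stage
    sourceTargetName: shard-5fk-target # from rebuildFrom[].sourceBackupTargetName (assumed JSON tag)
  env: []                              # restoreEnv from the OpsRequest, if any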
10 changes: 5 additions & 5 deletions pkg/operations/rebuild_instance_test.go
@@ -71,12 +71,12 @@ var _ = Describe("OpsUtil functions", func() {
inNS := client.InNamespace(testCtx.DefaultNamespace)
ml := client.HasLabels{testCtx.TestObjLabelKey}
// namespaced
testapps.ClearResources(&testCtx, generics.OpsRequestSignature, inNS, ml)
testapps.ClearResources(&testCtx, generics.BackupSignature, inNS, ml)
testapps.ClearResources(&testCtx, generics.RestoreSignature, inNS, ml)
testapps.ClearResources(&testCtx, generics.InstanceSetSignature, inNS, ml)
testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.OpsRequestSignature, true, inNS, ml)
testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupSignature, true, inNS, ml)
testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.RestoreSignature, true, inNS, ml)
testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.InstanceSetSignature, true, inNS, ml)
// default GracePeriod is 30s
testapps.ClearResources(&testCtx, generics.PodSignature, inNS, ml, client.GracePeriodSeconds(0))
testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PodSignature, true, inNS, ml, client.GracePeriodSeconds(0))
testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeClaimSignature, true, inNS, ml)
testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeSignature, true, ml)
testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.ActionSetSignature, true, ml)