fix(AwsNfsVolumeBackup): Short circuit reconciliation when state is Ready or Failed
ravi-shankar-sap committed Feb 19, 2025
1 parent a95ee82 commit f98431e
Showing 7 changed files with 482 additions and 0 deletions.
1 change: 1 addition & 0 deletions api/cloud-resources/v1beta1/state.go
@@ -10,6 +10,7 @@ const (
    StateDeleting = "Deleting"
    StateUpdating = "Updating"
    StateDeleted  = "Deleted"
    StateFailed   = "Failed"
)

const (
76 changes: 76 additions & 0 deletions pkg/skr/awsnfsvolumebackup/markFailed.go
@@ -0,0 +1,76 @@
package awsnfsvolumebackup

import (
    "context"
    "fmt"
    "github.com/kyma-project/cloud-manager/api/cloud-resources/v1beta1"
    "github.com/kyma-project/cloud-manager/pkg/composed"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

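// markFailed marks a schedule-created backup that is stuck in the Error state
// as Failed once a newer backup exists for the same schedule; in every other
// case it passes control to the next action in the composition.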
func markFailed(ctx context.Context, st composed.State) (error, context.Context) {
    state := st.(*State)

    // If the object is marked for deletion, continue.
    if composed.MarkedForDeletionPredicate(ctx, st) {
        return nil, nil
    }

    backup := state.ObjAsAwsNfsVolumeBackup()
    backupState := backup.Status.State

    // If the backup is not in the Error state, continue.
    if backupState != v1beta1.StateError {
        return nil, ctx
    }

    // If this backup doesn't belong to a schedule, continue.
    scheduleName, exists := backup.GetLabels()[v1beta1.LabelScheduleName]
    if !exists {
        return nil, ctx
    }

    list := &v1beta1.AwsNfsVolumeBackupList{}

    // List the backups created for this schedule.
    err := state.Cluster().K8sClient().List(
        ctx,
        list,
        client.MatchingLabels{
            v1beta1.LabelScheduleName:      scheduleName,
            v1beta1.LabelScheduleNamespace: backup.GetNamespace(),
        },
        client.InNamespace(backup.GetNamespace()),
    )

    if err != nil {
        return composed.PatchStatus(backup).
            SetExclusiveConditions(metav1.Condition{
                Type:    v1beta1.ConditionTypeError,
                Status:  metav1.ConditionTrue,
                Reason:  v1beta1.ReasonBackupListFailed,
                Message: fmt.Sprintf("Error listing subsequent backup(s): %s", err.Error()),
            }).
            SuccessError(composed.StopWithRequeue).
            Run(ctx, state)
    }

    // If a subsequent backup exists for this schedule,
    // mark this backup object's state as Failed.
    for _, item := range list.Items {
        if item.CreationTimestamp.Time.After(backup.CreationTimestamp.Time) {
            backup.Status.State = v1beta1.StateFailed
            return composed.PatchStatus(backup).
                SuccessLogMsg("AwsNfsVolumeBackup status updated with Failed state.").
                SuccessError(composed.StopAndForget).
                Run(ctx, state)
        }
    }

    // Continue if no subsequent backups exist.
    return nil, ctx
}
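A note on the two stop sentinels used above, inferred from their names and usage in this file rather than from documentation: composed.StopWithRequeue ends the current pass but schedules another reconcile, which suits the transient list failure, while composed.StopAndForget ends reconciliation of the object altogether, which suits the terminal Failed state.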
212 changes: 212 additions & 0 deletions pkg/skr/awsnfsvolumebackup/markFailed_test.go
@@ -0,0 +1,212 @@
package awsnfsvolumebackup

import (
    "context"
    "github.com/go-logr/logr"
    "github.com/kyma-project/cloud-manager/api/cloud-resources/v1beta1"
    "github.com/kyma-project/cloud-manager/pkg/composed"
    "github.com/stretchr/testify/suite"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/log"
    "testing"
    "time"
)

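// markFailedSuite exercises markFailed across the backup lifecycle states.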
type markFailedSuite struct {
    suite.Suite
    ctx context.Context
}

func (suite *markFailedSuite) SetupTest() {
    suite.ctx = log.IntoContext(context.Background(), logr.Discard())
}

func (suite *markFailedSuite) TestWhenBackupIsDeleting() {
    obj := deletingAwsNfsVolumeBackup.DeepCopy()
    factory, err := newStateFactoryWithObj(obj)
    suite.Nil(err)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Get the state object with the AwsNfsVolumeBackup.
    state, err := factory.newStateWith(obj)
    suite.Nil(err)

    err, _ctx := markFailed(ctx, state)

    // Validate the expected return values.
    suite.Nil(err)
    suite.Nil(_ctx)
}

func (suite *markFailedSuite) TestWhenBackupIsReady() {
    obj := awsNfsVolumeBackup.DeepCopy()
    obj.Status.State = v1beta1.StateReady
    factory, err := newStateFactoryWithObj(obj)
    suite.Nil(err)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Get the state object with the AwsNfsVolumeBackup.
    state, err := factory.newStateWith(obj)
    suite.Nil(err)

    err, _ctx := markFailed(ctx, state)

    // Validate the expected return values.
    suite.Nil(err)
    suite.Equal(ctx, _ctx)

    fromK8s := &v1beta1.AwsNfsVolumeBackup{}
    err = factory.skrCluster.K8sClient().Get(ctx,
        types.NamespacedName{Name: obj.Name,
            Namespace: obj.Namespace},
        fromK8s)
    suite.Nil(err)

    suite.Equal(v1beta1.StateReady, fromK8s.Status.State)
}

func (suite *markFailedSuite) TestWhenBackupIsFailed() {
    obj := awsNfsVolumeBackup.DeepCopy()
    obj.Status.State = v1beta1.StateFailed
    factory, err := newStateFactoryWithObj(obj)
    suite.Nil(err)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Get the state object with the AwsNfsVolumeBackup.
    state, err := factory.newStateWith(obj)
    suite.Nil(err)

    err, _ctx := markFailed(ctx, state)

    // Validate the expected return values.
    suite.Nil(err)
    suite.Equal(ctx, _ctx)

    fromK8s := &v1beta1.AwsNfsVolumeBackup{}
    err = factory.skrCluster.K8sClient().Get(ctx,
        types.NamespacedName{Name: obj.Name,
            Namespace: obj.Namespace},
        fromK8s)
    suite.Nil(err)

    suite.Equal(v1beta1.StateFailed, fromK8s.Status.State)
}

func (suite *markFailedSuite) TestWhenBackupIsCreating() {
    obj := awsNfsVolumeBackup.DeepCopy()
    obj.Status.State = v1beta1.StateCreating
    factory, err := newStateFactoryWithObj(obj)
    suite.Nil(err)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Get the state object with the AwsNfsVolumeBackup.
    state, err := factory.newStateWith(obj)
    suite.Nil(err)

    err, _ctx := markFailed(ctx, state)

    // Validate the expected return values.
    suite.Nil(err)
    suite.Equal(ctx, _ctx)

    fromK8s := &v1beta1.AwsNfsVolumeBackup{}
    err = factory.skrCluster.K8sClient().Get(ctx,
        types.NamespacedName{Name: obj.Name,
            Namespace: obj.Namespace},
        fromK8s)
    suite.Nil(err)

    suite.Equal(v1beta1.StateCreating, fromK8s.Status.State)
}

func (suite *markFailedSuite) TestWhenBackupIsLatestAndInError() {
    obj := awsNfsVolumeBackup.DeepCopy()
    obj.Status.State = v1beta1.StateError
    factory, err := newStateFactoryWithObj(obj)
    suite.Nil(err)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Get the state object with the AwsNfsVolumeBackup.
    state, err := factory.newStateWith(obj)
    suite.Nil(err)

    err, _ctx := markFailed(ctx, state)

    // Validate the expected return values.
    suite.Nil(err)
    suite.Equal(ctx, _ctx)

    fromK8s := &v1beta1.AwsNfsVolumeBackup{}
    err = factory.skrCluster.K8sClient().Get(ctx,
        types.NamespacedName{Name: obj.Name,
            Namespace: obj.Namespace},
        fromK8s)
    suite.Nil(err)

    suite.Equal(v1beta1.StateError, fromK8s.Status.State)
}

func (suite *markFailedSuite) TestWhenBackupIsNotLatestAndInError() {
    labels := map[string]string{
        v1beta1.LabelScheduleName:      "test-schedule",
        v1beta1.LabelScheduleNamespace: "test",
    }

    obj := awsNfsVolumeBackup.DeepCopy()
    obj.CreationTimestamp = v1.Time{Time: time.Now().Add(-1 * time.Minute)}
    obj.Labels = labels
    factory, err := newStateFactoryWithObj(obj)
    suite.Nil(err)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Get the state object with the AwsNfsVolumeBackup.
    state, err := factory.newStateWith(obj)
    suite.Nil(err)

    obj.Status.State = v1beta1.StateError
    err = state.Cluster().K8sClient().Status().Update(ctx, obj)
    suite.Nil(err)

    // Create another backup object for the same schedule.
    obj2 := awsNfsVolumeBackup.DeepCopy()
    obj2.Name = "test-backup-02"
    obj2.Namespace = "test"
    obj2.CreationTimestamp = v1.Time{Time: time.Now()}
    obj2.Labels = labels
    obj2.Status.State = v1beta1.StateReady
    err = factory.skrCluster.K8sClient().Create(ctx, obj2)
    suite.Nil(err)

    err, _ctx := markFailed(ctx, state)

    // Validate the expected return values.
    suite.Equal(composed.StopAndForget, err)
    suite.Equal(ctx, _ctx)

    fromK8s := &v1beta1.AwsNfsVolumeBackup{}
    err = factory.skrCluster.K8sClient().Get(ctx,
        types.NamespacedName{Name: obj.Name,
            Namespace: obj.Namespace},
        fromK8s)
    suite.Nil(err)

    suite.Equal(v1beta1.StateFailed, fromK8s.Status.State)
}

func TestMarkFailed(t *testing.T) {
    suite.Run(t, new(markFailedSuite))
}
2 changes: 2 additions & 0 deletions pkg/skr/awsnfsvolumebackup/reconciler.go
@@ -63,6 +63,8 @@ func (r *reconciler) newAction() composed.Action {
        "AwsNfsVolumeBackupMain",
        feature.LoadFeatureContextFromObj(&cloudresourcesv1beta1.AwsNfsVolumeBackup{}),
        commonScope.New(),
        shortCircuitCompleted,
        markFailed,
        addFinalizer,
        loadSkrAwsNfsVolume,
        stopIfVolumeNotReady,
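Both new steps implement the composed.Action contract visible in the signatures above: an action receives the context and the shared state, returns (nil, ctx) to hand off to the next action in the composition, and returns a sentinel such as composed.StopAndForget to end reconciliation. Since shortCircuitCompleted is ordered before markFailed, a backup already in a terminal state exits before the failure check runs. A minimal sketch of the contract, using a hypothetical action built only from names that appear in this diff:

func stopWhenTerminal(ctx context.Context, st composed.State) (error, context.Context) {
    // Illustration only: stop reconciling once the backup reports a
    // terminal state, otherwise hand control to the next composed action.
    backup := st.(*State).ObjAsAwsNfsVolumeBackup()
    switch backup.Status.State {
    case v1beta1.StateReady, v1beta1.StateFailed:
        return composed.StopAndForget, nil // terminal: stop and forget
    default:
        return nil, ctx // not terminal: continue the pipeline
    }
}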
25 changes: 25 additions & 0 deletions pkg/skr/awsnfsvolumebackup/shortCircuit.go
@@ -0,0 +1,25 @@
package awsnfsvolumebackup

import (
    "context"
    "github.com/kyma-project/cloud-manager/api/cloud-resources/v1beta1"
    "github.com/kyma-project/cloud-manager/pkg/composed"
)

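// shortCircuitCompleted stops reconciliation once the backup has reached a
// terminal state (Ready or Failed).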
func shortCircuitCompleted(ctx context.Context, st composed.State) (error, context.Context) {
    state := st.(*State)

    // If the object is marked for deletion, continue.
    if composed.MarkedForDeletionPredicate(ctx, st) {
        return nil, nil
    }

    backup := state.ObjAsAwsNfsVolumeBackup()
    backupState := backup.Status.State
    if backupState == v1beta1.StateReady || backupState == v1beta1.StateFailed {
        composed.LoggerFromCtx(ctx).Info("NfsVolumeBackup is complete, short-circuiting into StopAndForget")
        return composed.StopAndForget, nil
    }

    return nil, ctx
}
