From f4144de9bc0437051e8c0a7168c14f4c098afa57 Mon Sep 17 00:00:00 2001
From: subhamkrai
Date: Mon, 8 Jan 2024 15:07:11 +0530
Subject: [PATCH] restore-crd: use dynamic api to get k8s resource

Currently, we shell out to the `kubectl` command to get CRDs, the CR
name, and for other operations. Since we now have code that gets
Kubernetes resources through the dynamic API, let's use that instead of
the `kubectl` command.

One thing to note: the command is now stricter about the Ceph CRD type
that is passed. For example, `cephcluster` used to work; now the plural
resource name `cephclusters` must be given.

Signed-off-by: subhamkrai
---
 .github/workflows/go-test.yaml | 10 ++--
 cmd/commands/root.go           |  3 +-
 docs/crd.md                    | 91 ++++++----------------------------
 pkg/crds/crds.go               | 25 +++++-----
 pkg/k8sutil/dynamic.go         | 25 ++++++++++
 pkg/k8sutil/interface.go       |  2 +
 pkg/k8sutil/mocks.go           | 15 ++++++
 pkg/restore/crd.go             | 80 ++++++++++++++--------------
 8 files changed, 121 insertions(+), 130 deletions(-)

diff --git a/.github/workflows/go-test.yaml b/.github/workflows/go-test.yaml
index 3b69a9de..928e5084 100644
--- a/.github/workflows/go-test.yaml
+++ b/.github/workflows/go-test.yaml
@@ -130,7 +130,7 @@ jobs:
           # First let's delete the cephCluster
           kubectl -n rook-ceph delete cephcluster my-cluster --timeout 3s --wait=false

-          kubectl rook-ceph -n rook-ceph restore-deleted cephcluster
+          kubectl rook-ceph -n rook-ceph restore-deleted cephclusters
           tests/github-action-helper.sh wait_for_crd_to_be_ready_default

       - name: Restore CRD with CRName
@@ -138,7 +138,7 @@ jobs:
           # First let's delete the cephCluster
           kubectl -n rook-ceph delete cephcluster my-cluster --timeout 3s --wait=false

-          kubectl rook-ceph -n rook-ceph restore-deleted cephcluster my-cluster
+          kubectl rook-ceph -n rook-ceph restore-deleted cephclusters my-cluster
           tests/github-action-helper.sh wait_for_crd_to_be_ready_default

       - name: Show Cluster State
@@ -153,7 +153,7 @@ jobs:
           set -ex
           kubectl rook-ceph destroy-cluster
           sleep 1
-          kubectl get deployments -n rook-ceph --no-headers| wc -l | (read n && [ $n -le 1 ] || { echo "the crs could not be deleted"; exit 1;})
+          kubectl get deployments -n rook-ceph --no-headers| wc -l | (read n && [ $n -le 1 ] || { echo "the crs could not be deleted"; exit 1;})

       - name: collect common logs
         if: always()
@@ -286,7 +286,7 @@ jobs:
           # First let's delete the cephCluster
           kubectl -n test-cluster delete cephcluster my-cluster --timeout 3s --wait=false

-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster restore-deleted cephcluster
+          kubectl rook-ceph --operator-namespace test-operator -n test-cluster restore-deleted cephclusters
           tests/github-action-helper.sh wait_for_crd_to_be_ready_custom

       - name: Restore CRD with CRName
@@ -294,7 +294,7 @@ jobs:
           # First let's delete the cephCluster
           kubectl -n test-cluster delete cephcluster my-cluster --timeout 3s --wait=false

-          kubectl rook-ceph --operator-namespace test-operator -n test-cluster restore-deleted cephcluster my-cluster
+          kubectl rook-ceph --operator-namespace test-operator -n test-cluster restore-deleted cephclusters my-cluster
           tests/github-action-helper.sh wait_for_crd_to_be_ready_custom

       - name: Show Cluster State
diff --git a/cmd/commands/root.go b/cmd/commands/root.go
index 580fb457..15e963a3 100644
--- a/cmd/commands/root.go
+++ b/cmd/commands/root.go
@@ -21,8 +21,6 @@ import (
 	"regexp"
 	"strings"

-	"k8s.io/client-go/dynamic"
-
 	"github.com/rook/kubectl-rook-ceph/pkg/exec"
 	"github.com/rook/kubectl-rook-ceph/pkg/k8sutil"
 	"github.com/rook/kubectl-rook-ceph/pkg/logging"
"github.com/spf13/cobra" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/dynamic" k8s "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/client-go/tools/clientcmd" diff --git a/docs/crd.md b/docs/crd.md index fc9c365a..c52c1a9d 100644 --- a/docs/crd.md +++ b/docs/crd.md @@ -11,102 +11,43 @@ While the underlying Ceph data and daemons continue to be available, the CRs wil The `restore-deleted` command has one required and one optional parameter: -- ``: The CRD type that is to be restored, such as CephCluster, CephFilesystem, CephBlockPool and so on. -- `[CRName]`: The name of the specific CR which you want to restore since there can be multiple instances under the same CRD. For example, if there are multiple CephFilesystems stuck in deleting state, a specific filesystem can be restored: `restore-deleted cephfilesystem filesystem-2`. +- ``: The CRD type that is to be restored, such as CephClusters, CephFilesystems, CephBlockPools and so on. +- `[CRName]`: The name of the specific CR which you want to restore since there can be multiple instances under the same CRD. For example, if there are multiple CephFilesystems stuck in deleting state, a specific filesystem can be restored: `restore-deleted cephfilesystems filesystem-2`. ```bash kubectl rook-ceph restore-deleted [CRName] ``` -## CephCluster Restore Example +## CephClusters Restore Example ```bash -kubectl rook-ceph restore-deleted cephcluster +kubectl rook-ceph restore-deleted cephclusters + +Info: Detecting which resources to restore for crd "cephclusters" -Info: Detecting which resources to restore for crd "cephcluster" Info: Restoring CR my-cluster Warning: The resource my-cluster was found deleted. Do you want to restore it? yes | no Info: skipped prompt since ROOK_PLUGIN_SKIP_PROMPTS=true -Info: Scaling down the operator to 0 -Info: Backing up kubernetes and crd resources -Info: Backed up crd cephcluster/my-cluster in file cephcluster-my-cluster.yaml +Info: Proceeding with restoring deleting CR +Info: Scaling down the operator Info: Deleting validating webhook rook-ceph-webhook if present -Info: Fetching the UID for cephcluster/my-cluster -Info: Successfully fetched uid 8366f79a-ae1f-4679-a62b-8abc6e1528fa from cephcluster/my-cluster -Info: Removing ownerreferences from resources with matching uid 8366f79a-ae1f-4679-a62b-8abc6e1528fa +Info: Removing ownerreferences from resources with matching uid 92c0e549-44fd-43db-80ba-5473db996208 Info: Removing owner references for secret cluster-peer-token-my-cluster Info: Removed ownerReference for Secret: cluster-peer-token-my-cluster Info: Removing owner references for secret rook-ceph-admin-keyring Info: Removed ownerReference for Secret: rook-ceph-admin-keyring -Info: Removing owner references for secret rook-ceph-config -Info: Removed ownerReference for Secret: rook-ceph-config - -Info: Removing owner references for secret rook-ceph-crash-collector-keyring -Info: Removed ownerReference for Secret: rook-ceph-crash-collector-keyring - -Info: Removing owner references for secret rook-ceph-mgr-a-keyring -Info: Removed ownerReference for Secret: rook-ceph-mgr-a-keyring - -Info: Removing owner references for secret rook-ceph-mons-keyring -Info: Removed ownerReference for Secret: rook-ceph-mons-keyring - -Info: Removing owner references for secret rook-csi-cephfs-node -Info: Removed ownerReference for Secret: rook-csi-cephfs-node - -Info: Removing owner references for secret rook-csi-cephfs-provisioner -Info: Removed ownerReference for Secret: 
rook-csi-cephfs-provisioner - -Info: Removing owner references for secret rook-csi-rbd-node -Info: Removed ownerReference for Secret: rook-csi-rbd-node - -Info: Removing owner references for secret rook-csi-rbd-provisioner -Info: Removed ownerReference for Secret: rook-csi-rbd-provisioner - -Info: Removing owner references for configmaps rook-ceph-mon-endpoints -Info: Removed ownerReference for configmap: rook-ceph-mon-endpoints - -Info: Removing owner references for service rook-ceph-exporter -Info: Removed ownerReference for service: rook-ceph-exporter - -Info: Removing owner references for service rook-ceph-mgr -Info: Removed ownerReference for service: rook-ceph-mgr +--- +--- +--- -Info: Removing owner references for service rook-ceph-mgr-dashboard -Info: Removed ownerReference for service: rook-ceph-mgr-dashboard - -Info: Removing owner references for service rook-ceph-mon-a -Info: Removed ownerReference for service: rook-ceph-mon-a - -Info: Removing owner references for service rook-ceph-mon-d -Info: Removed ownerReference for service: rook-ceph-mon-d - -Info: Removing owner references for service rook-ceph-mon-e -Info: Removed ownerReference for service: rook-ceph-mon-e - -Info: Removing owner references for deployemt rook-ceph-mgr-a -Info: Removed ownerReference for deployment: rook-ceph-mgr-a - -Info: Removing owner references for deployemt rook-ceph-mon-a -Info: Removed ownerReference for deployment: rook-ceph-mon-a - -Info: Removing owner references for deployemt rook-ceph-mon-d -Info: Removed ownerReference for deployment: rook-ceph-mon-d - -Info: Removing owner references for deployemt rook-ceph-mon-e -Info: Removed ownerReference for deployment: rook-ceph-mon-e - -Info: Removing owner references for deployemt rook-ceph-osd-0 +Info: Removing owner references for deployment rook-ceph-osd-0 Info: Removed ownerReference for deployment: rook-ceph-osd-0 -Info: Removing finalizers from cephcluster/my-cluster -Info: cephcluster.ceph.rook.io/my-cluster patched - -Info: Re-creating the CR cephcluster from file cephcluster-my-cluster.yaml created above -Info: cephcluster.ceph.rook.io/my-cluster created - -Info: Scaling up the operator to 1 +Info: Removing finalizers from cephclusters/my-cluster +Info: Re-creating the CR cephclusters from dynamic resource +Info: Scaling up the operator Info: CR is successfully restored. 
Please watch the operator logs and check the crd ``` diff --git a/pkg/crds/crds.go b/pkg/crds/crds.go index de342cc2..0440ffad 100644 --- a/pkg/crds/crds.go +++ b/pkg/crds/crds.go @@ -20,6 +20,8 @@ import ( "context" "encoding/json" "fmt" + "time" + "github.com/rook/kubectl-rook-ceph/pkg/k8sutil" "github.com/rook/kubectl-rook-ceph/pkg/logging" corev1 "k8s.io/api/core/v1" @@ -28,7 +30,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" - "time" ) var cephResources = []string{ @@ -52,8 +53,8 @@ var cephResources = []string{ } const ( - cephRookIoGroup = "ceph.rook.io" - cephRookResourcesVersion = "v1" + CephRookIoGroup = "ceph.rook.io" + CephRookResourcesVersion = "v1" ) const ( @@ -73,7 +74,7 @@ var ( }, } - defaultResourceRemoveFinalizers = map[string]interface{}{ + DefaultResourceRemoveFinalizers = map[string]interface{}{ "metadata": map[string]interface{}{ "finalizers": nil, }, @@ -93,7 +94,7 @@ func DeleteCustomResources(ctx context.Context, clientsets k8sutil.ClientsetsInt func deleteCustomResources(ctx context.Context, clientsets k8sutil.ClientsetsInterface, clusterNamespace string) error { for _, resource := range cephResources { logging.Info("getting resource kind %s", resource) - items, err := clientsets.ListResourcesDynamically(ctx, cephRookIoGroup, cephRookResourcesVersion, resource, clusterNamespace) + items, err := clientsets.ListResourcesDynamically(ctx, CephRookIoGroup, CephRookResourcesVersion, resource, clusterNamespace) if err != nil { if k8sErrors.IsNotFound(err) { logging.Info("the server could not find the requested resource: %s", resource) @@ -109,7 +110,7 @@ func deleteCustomResources(ctx context.Context, clientsets k8sutil.ClientsetsInt for _, item := range items { logging.Info(fmt.Sprintf("removing resource %s: %s", resource, item.GetName())) - err = clientsets.DeleteResourcesDynamically(ctx, cephRookIoGroup, cephRookResourcesVersion, resource, clusterNamespace, item.GetName()) + err = clientsets.DeleteResourcesDynamically(ctx, CephRookIoGroup, CephRookResourcesVersion, resource, clusterNamespace, item.GetName()) if err != nil { if k8sErrors.IsNotFound(err) { logging.Info(err.Error()) @@ -118,7 +119,7 @@ func deleteCustomResources(ctx context.Context, clientsets k8sutil.ClientsetsInt return err } - itemResource, err := clientsets.GetResourcesDynamically(ctx, cephRookIoGroup, cephRookResourcesVersion, resource, item.GetName(), clusterNamespace) + itemResource, err := clientsets.GetResourcesDynamically(ctx, CephRookIoGroup, CephRookResourcesVersion, resource, item.GetName(), clusterNamespace) if err != nil { if !k8sErrors.IsNotFound(err) { return err @@ -136,7 +137,7 @@ func deleteCustomResources(ctx context.Context, clientsets k8sutil.ClientsetsInt return err } - err = clientsets.DeleteResourcesDynamically(ctx, cephRookIoGroup, cephRookResourcesVersion, resource, clusterNamespace, item.GetName()) + err = clientsets.DeleteResourcesDynamically(ctx, CephRookIoGroup, CephRookResourcesVersion, resource, clusterNamespace, item.GetName()) if err != nil { if !k8sErrors.IsNotFound(err) { return err @@ -144,7 +145,7 @@ func deleteCustomResources(ctx context.Context, clientsets k8sutil.ClientsetsInt } } - itemResource, err = clientsets.GetResourcesDynamically(ctx, cephRookIoGroup, cephRookResourcesVersion, resource, item.GetName(), clusterNamespace) + itemResource, err = clientsets.GetResourcesDynamically(ctx, CephRookIoGroup, CephRookResourcesVersion, resource, item.GetName(), clusterNamespace) if err 
!= nil { if !k8sErrors.IsNotFound(err) { return err @@ -160,14 +161,14 @@ func deleteCustomResources(ctx context.Context, clientsets k8sutil.ClientsetsInt func updatingFinalizers(ctx context.Context, clientsets k8sutil.ClientsetsInterface, itemResource *unstructured.Unstructured, resource, clusterNamespace string) error { if resource == CephResourceCephClusters { jsonPatchData, _ := json.Marshal(clusterResourcePatchFinalizer) - err := clientsets.PatchResourcesDynamically(ctx, cephRookIoGroup, cephRookResourcesVersion, resource, clusterNamespace, itemResource.GetName(), types.MergePatchType, jsonPatchData) + err := clientsets.PatchResourcesDynamically(ctx, CephRookIoGroup, CephRookResourcesVersion, resource, clusterNamespace, itemResource.GetName(), types.MergePatchType, jsonPatchData) if err != nil { return err } } - jsonPatchData, _ := json.Marshal(defaultResourceRemoveFinalizers) - err := clientsets.PatchResourcesDynamically(ctx, cephRookIoGroup, cephRookResourcesVersion, resource, clusterNamespace, itemResource.GetName(), types.MergePatchType, jsonPatchData) + jsonPatchData, _ := json.Marshal(DefaultResourceRemoveFinalizers) + err := clientsets.PatchResourcesDynamically(ctx, CephRookIoGroup, CephRookResourcesVersion, resource, clusterNamespace, itemResource.GetName(), types.MergePatchType, jsonPatchData) if err != nil { return err } diff --git a/pkg/k8sutil/dynamic.go b/pkg/k8sutil/dynamic.go index 3e4e09ce..3acd7e07 100644 --- a/pkg/k8sutil/dynamic.go +++ b/pkg/k8sutil/dynamic.go @@ -18,6 +18,7 @@ package k8sutil import ( "context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -119,3 +120,27 @@ func (c *Clientsets) GetResourcesDynamically( return item, nil } + +func (c *Clientsets) CreateResourcesDynamically( + ctx context.Context, + group string, + version string, + resource string, + name *unstructured.Unstructured, + namespace string, +) (*unstructured.Unstructured, error) { + resourceId := schema.GroupVersionResource{ + Group: group, + Version: version, + Resource: resource, + } + + item, err := c.Dynamic.Resource(resourceId).Namespace(namespace). 
+ Create(ctx, name, metav1.CreateOptions{}) + + if err != nil { + return nil, err + } + + return item, nil +} diff --git a/pkg/k8sutil/interface.go b/pkg/k8sutil/interface.go index 47596f19..9e7ecccc 100644 --- a/pkg/k8sutil/interface.go +++ b/pkg/k8sutil/interface.go @@ -18,12 +18,14 @@ package k8sutil import ( "context" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" ) //go:generate mockgen -package=k8sutil --build_flags=--mod=mod -destination=mocks.go github.com/rook/kubectl-rook-ceph/pkg/k8sutil ClientsetsInterface type ClientsetsInterface interface { + CreateResourcesDynamically(ctx context.Context, group string, version string, resource string, name *unstructured.Unstructured, namespace string) (*unstructured.Unstructured, error) ListResourcesDynamically(ctx context.Context, group string, version string, resource string, namespace string) ([]unstructured.Unstructured, error) GetResourcesDynamically(ctx context.Context, group string, version string, resource string, name string, namespace string) (*unstructured.Unstructured, error) DeleteResourcesDynamically(ctx context.Context, group string, version string, resource string, namespace string, resourceName string) error diff --git a/pkg/k8sutil/mocks.go b/pkg/k8sutil/mocks.go index b6b55964..8fa25132 100644 --- a/pkg/k8sutil/mocks.go +++ b/pkg/k8sutil/mocks.go @@ -36,6 +36,21 @@ func (m *MockClientsetsInterface) EXPECT() *MockClientsetsInterfaceMockRecorder return m.recorder } +// CreateResourcesDynamically mocks base method. +func (m *MockClientsetsInterface) CreateResourcesDynamically(arg0 context.Context, arg1, arg2, arg3 string, arg4 *unstructured.Unstructured, arg5 string) (*unstructured.Unstructured, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateResourcesDynamically", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(*unstructured.Unstructured) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateResourcesDynamically indicates an expected call of CreateResourcesDynamically. +func (mr *MockClientsetsInterfaceMockRecorder) CreateResourcesDynamically(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateResourcesDynamically", reflect.TypeOf((*MockClientsetsInterface)(nil).CreateResourcesDynamically), arg0, arg1, arg2, arg3, arg4, arg5) +} + // DeleteResourcesDynamically mocks base method. 
func (m *MockClientsetsInterface) DeleteResourcesDynamically(arg0 context.Context, arg1, arg2, arg3, arg4, arg5 string) error {
 	m.ctrl.T.Helper()
diff --git a/pkg/restore/crd.go b/pkg/restore/crd.go
index 9b8c5d8f..efefd5fe 100644
--- a/pkg/restore/crd.go
+++ b/pkg/restore/crd.go
@@ -18,40 +18,57 @@ package restore

 import (
 	"context"
+	"encoding/json"
 	"fmt"
-	"os"
-	"strings"

 	"github.com/pkg/errors"
-	"github.com/rook/kubectl-rook-ceph/pkg/exec"
+	"github.com/rook/kubectl-rook-ceph/pkg/crds"
 	"github.com/rook/kubectl-rook-ceph/pkg/k8sutil"
 	"github.com/rook/kubectl-rook-ceph/pkg/logging"
 	"github.com/rook/kubectl-rook-ceph/pkg/mons"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/types"
 )

 func RestoreCrd(ctx context.Context, k8sclientset *k8sutil.Clientsets, operatorNamespace, clusterNamespace string, args []string) {
 	crd := args[0]
+
+	var crName string
+	var crdResource unstructured.Unstructured
+	if len(args) == 2 {
+		crName = args[1]
+	}
+
 	logging.Info("Detecting which resources to restore for crd %q", crd)

-	getCrName := `kubectl -n %s get %s -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.deletionGracePeriodSeconds}{"\n"}{end}' | awk '$2=="0" {print $1}' | head -n 1`
-	command := fmt.Sprintf(getCrName, clusterNamespace, crd)
-	crName := strings.TrimSpace(exec.ExecuteBashCommand(command))
+	crdList, err := k8sclientset.ListResourcesDynamically(ctx, crds.CephRookIoGroup, crds.CephRookResourcesVersion, crd, clusterNamespace)
+	if err != nil {
+		logging.Fatal(fmt.Errorf("failed to list resources for crd %q: %v", crd, err))
+	}
+	if len(crdList) == 0 {
+		logging.Info("No Ceph CRDs found to restore")
+		return
+	}
+
+	for _, cr := range crdList {
+		if cr.GetDeletionTimestamp() != nil && (crName == "" || crName == cr.GetName()) {
+			crName = cr.GetName()
+			crdResource = *cr.DeepCopy()
+			break
+		}
+	}

 	if crName == "" {
 		logging.Info("Nothing to do here, no %q resources in deleted state", crd)
 		return
 	}

-	if len(args) == 2 {
-		crName = args[1]
-	}
-
 	logging.Info("Restoring CR %s", crName)
 	var answer string
 	logging.Warning("The resource %s was found deleted. Do you want to restore it? yes | no\n", crName)
 	fmt.Scanf("%s", &answer)
-	err := mons.PromptToContinueOrCancel("restore-deleted", "yes", answer)
+	err = mons.PromptToContinueOrCancel("restore-deleted", "yes", answer)
 	if err != nil {
 		logging.Fatal(fmt.Errorf("Restoring the resource %s cancelled", crName))
 	}
@@ -69,13 +86,6 @@ func RestoreCrd(ctx context.Context, k8sclientset *k8sutil.Clientsets, operatorN
 		}
 	}

-	logging.Info("Backing up kubernetes and crd resources")
-	crFileName := crd + "-" + crName + ".yaml"
-	getCrYamlContent := `kubectl -n %s get %s %s -oyaml > %s`
-	command = fmt.Sprintf(getCrYamlContent, clusterNamespace, crd, crName, crFileName)
-	exec.ExecuteBashCommand(command)
-	logging.Info("Backed up crd %s/%s in file %s", crd, crName, crFileName)
-
 	webhookConfigName := "rook-ceph-webhook"
 	logging.Info("Deleting validating webhook %s if present", webhookConfigName)
 	err = k8sclientset.Kube.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(ctx, webhookConfigName, v1.DeleteOptions{})
@@ -83,23 +93,25 @@ func RestoreCrd(ctx context.Context, k8sclientset *k8sutil.Clientsets, operatorN
 	if err != nil && !apierrors.IsNotFound(err) {
 		logging.Fatal(fmt.Errorf("failed to delete validating webhook %s. %v", webhookConfigName, err))
 	}

-	logging.Info("Fetching the UID for %s/%s", crd, crName)
-	getCrUID := `kubectl -n %s get %s %s -o 'jsonpath={.metadata.uid}'`
-	command = fmt.Sprintf(getCrUID, clusterNamespace, crd, crName)
-	uid := exec.ExecuteBashCommand(command)
-	logging.Info("Successfully fetched uid %s from %s/%s", uid, crd, crName)
-
-	removeOwnerRefOfUID(ctx, k8sclientset, operatorNamespace, clusterNamespace, uid)
+	removeOwnerRefOfUID(ctx, k8sclientset, operatorNamespace, clusterNamespace, string(crdResource.GetUID()))

 	logging.Info("Removing finalizers from %s/%s", crd, crName)
-	removeFinalizers := `kubectl -n %s patch %s/%s --type json --patch='[ { "op": "remove", "path": "/metadata/finalizers" } ]'`
-	command = fmt.Sprintf(removeFinalizers, clusterNamespace, crd, crName)
-	logging.Info(exec.ExecuteBashCommand(command))

-	logging.Info("Re-creating the CR %s from file %s created above", crd, crFileName)
-	recreateCR := `kubectl create -f %s`
-	command = fmt.Sprintf(recreateCR, crFileName)
-	logging.Info(exec.ExecuteBashCommand(command))
+	jsonPatchData, _ := json.Marshal(crds.DefaultResourceRemoveFinalizers)
+	err = k8sclientset.PatchResourcesDynamically(ctx, crds.CephRookIoGroup, crds.CephRookResourcesVersion, crd, clusterNamespace, crName, types.MergePatchType, jsonPatchData)
+	if err != nil {
+		logging.Fatal(fmt.Errorf("Failed to update resource %q for crd. %v", crName, err))
+	}
+
+	crdResource.SetResourceVersion("")
+	crdResource.SetUID("")
+	crdResource.SetSelfLink("")
+	crdResource.SetCreationTimestamp(v1.Time{})
+	logging.Info("Re-creating the CR %s from dynamic resource", crd)
+	_, err = k8sclientset.CreateResourcesDynamically(ctx, crds.CephRookIoGroup, crds.CephRookResourcesVersion, crd, &crdResource, clusterNamespace)
+	if err != nil {
+		logging.Fatal(fmt.Errorf("Failed to create updated resource %q for crd. %v", crName, err))
+	}

 	logging.Info("Scaling up the operator")
 	err = k8sutil.SetDeploymentScale(ctx, k8sclientset.Kube, operatorNamespace, "rook-ceph-operator", 1)
@@ -107,10 +119,6 @@ func RestoreCrd(ctx context.Context, k8sclientset *k8sutil.Clientsets, operatorN
 		logging.Fatal(errors.Wrapf(err, "Operator pod still being scaled up"))
 	}

-	if err = os.Remove(crFileName); err != nil {
-		logging.Warning("Unable to remove. Please remove the file %s manually.%v", crFileName, err)
-	}
-
 	logging.Info("CR is successfully restored. Please watch the operator logs and check the crd")
 }
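Note on the pattern this patch standardizes on: the client-go dynamic client addresses resources by their plural GroupVersionResource, which is why `restore-deleted` now requires `cephclusters` rather than `cephcluster`. The sketch below is a minimal, self-contained illustration of that pattern, listing CRs and spotting any stuck in deletion the way the new `RestoreCrd` does. The kubeconfig loading and the hard-coded `rook-ceph` namespace are illustrative assumptions; the plugin wires up its own `Clientsets` instead.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: kubeconfig at the default location; the plugin builds
	// its REST config from CLI flags instead.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	dyn, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// The GVR takes the plural resource name ("cephclusters", not
	// "cephcluster"), which is the reason for the stricter CLI argument.
	gvr := schema.GroupVersionResource{
		Group:    "ceph.rook.io",
		Version:  "v1",
		Resource: "cephclusters",
	}

	// List the CRs in the cluster namespace (assumed "rook-ceph" here).
	list, err := dyn.Resource(gvr).Namespace("rook-ceph").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, cr := range list.Items {
		// A non-nil deletionTimestamp marks a CR that is stuck
		// terminating; this is the condition RestoreCrd checks for.
		if cr.GetDeletionTimestamp() != nil {
			fmt.Printf("CR pending deletion: %s\n", cr.GetName())
		}
	}
}
```

Run against a live cluster, this prints the name of any CephCluster that carries a deletion timestamp but is still held back by finalizers, which is the state `restore-deleted` is designed to recover from.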