diff --git a/util/csaupgrade/upgrade.go b/util/csaupgrade/upgrade.go index a35d759690..9caee7b153 100644 --- a/util/csaupgrade/upgrade.go +++ b/util/csaupgrade/upgrade.go @@ -18,11 +18,15 @@ package csaupgrade import ( "bytes" + "encoding/json" + "errors" "fmt" + "reflect" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" ) @@ -46,21 +50,107 @@ import ( // have changed before sending a patch. // // obj - Target of the operation which has been managed with CSA in the past -// csaManagerName - Name of FieldManager formerly used for `Update` operations -// ssaManagerName - Name of FieldManager formerly used for `Apply` operations +// csaManagerNames - Names of FieldManagers to merge into ssaManagerName +// ssaManagerName - Name of FieldManager to be used for `Apply` operations func UpgradeManagedFields( obj runtime.Object, - csaManagerName string, + csaManagerNames sets.Set[string], ssaManagerName string, ) error { accessor, err := meta.Accessor(obj) if err != nil { - return fmt.Errorf("error accessing object metadata: %w", err) + return err + } + + filteredManagers := accessor.GetManagedFields() + + for csaManagerName := range csaManagerNames { + filteredManagers, err = upgradedManagedFields( + filteredManagers, csaManagerName, ssaManagerName) + + if err != nil { + return err + } + } + + // Commit changes to object + accessor.SetManagedFields(filteredManagers) + return nil +} + +// Calculates a minimal JSON Patch to send to upgrade managed fields +// See `UpgradeManagedFields` for more information. +// +// obj - Target of the operation which has been managed with CSA in the past +// csaManagerNames - Names of FieldManagers to merge into ssaManagerName +// ssaManagerName - Name of FieldManager to be used for `Apply` operations +// +// Returns non-nil error if there was an error, a JSON patch, or nil bytes if +// there is no work to be done. +func UpgradeManagedFieldsPatch( + obj runtime.Object, + csaManagerNames sets.Set[string], + ssaManagerName string) ([]byte, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + + managedFields := accessor.GetManagedFields() + filteredManagers := accessor.GetManagedFields() + for csaManagerName := range csaManagerNames { + filteredManagers, err = upgradedManagedFields( + filteredManagers, csaManagerName, ssaManagerName) + if err != nil { + return nil, err + } + } + + if reflect.DeepEqual(managedFields, filteredManagers) { + // If the managed fields have not changed from the transformed version, + // there is no patch to perform + return nil, nil + } + + // Create a patch with a diff between old and new objects. 
+ // Just include all managed fields since that is only thing that will change + // + // Also include test for RV to avoid race condition + jsonPatch := []map[string]interface{}{ + { + "op": "replace", + "path": "/metadata/managedFields", + "value": filteredManagers, + }, + { + // Use "replace" instead of "test" operation so that etcd rejects with + // 409 conflict instead of apiserver with an invalid request + "op": "replace", + "path": "/metadata/resourceVersion", + "value": accessor.GetResourceVersion(), + }, + } + + return json.Marshal(jsonPatch) +} + +// Returns a copy of the provided managed fields that has been migrated from +// client-side-apply to server-side-apply, or an error if there was an issue +func upgradedManagedFields( + managedFields []metav1.ManagedFieldsEntry, + csaManagerName string, + ssaManagerName string, +) ([]metav1.ManagedFieldsEntry, error) { + if managedFields == nil { + return nil, nil } // Create managed fields clone since we modify the values - var managedFields []metav1.ManagedFieldsEntry - managedFields = append(managedFields, accessor.GetManagedFields()...) + managedFieldsCopy := make([]metav1.ManagedFieldsEntry, len(managedFields)) + if copy(managedFieldsCopy, managedFields) != len(managedFields) { + return nil, errors.New("failed to copy managed fields") + } + managedFields = managedFieldsCopy // Locate SSA manager replaceIndex, managerExists := findFirstIndex(managedFields, @@ -86,16 +176,16 @@ func UpgradeManagedFields( if !managerExists { // There are no CSA managers that need to be converted. Nothing to do // Return early - return nil + return managedFields, nil } // Convert CSA manager into SSA manager managedFields[replaceIndex].Operation = metav1.ManagedFieldsOperationApply managedFields[replaceIndex].Manager = ssaManagerName } - err = unionManagerIntoIndex(managedFields, replaceIndex, csaManagerName) + err := unionManagerIntoIndex(managedFields, replaceIndex, csaManagerName) if err != nil { - return err + return nil, err } // Create version of managed fields which has no CSA managers with the given name @@ -105,17 +195,17 @@ func UpgradeManagedFields( entry.Subresource == "") }) - - // Commit changes to object - accessor.SetManagedFields(filteredManagers) - - return nil + return filteredManagers, nil } // Locates an Update manager entry named `csaManagerName` with the same APIVersion // as the manager at the targetIndex. Unions both manager's fields together // into the manager specified by `targetIndex`. No other managers are modified. -func unionManagerIntoIndex(entries []metav1.ManagedFieldsEntry, targetIndex int, csaManagerName string) error { +func unionManagerIntoIndex( + entries []metav1.ManagedFieldsEntry, + targetIndex int, + csaManagerName string, +) error { ssaManager := entries[targetIndex] // find Update manager of same APIVersion, union ssa fields with it. @@ -124,6 +214,8 @@ func unionManagerIntoIndex(entries []metav1.ManagedFieldsEntry, targetIndex int, func(entry metav1.ManagedFieldsEntry) bool { return entry.Manager == csaManagerName && entry.Operation == metav1.ManagedFieldsOperationUpdate && + //!TODO: some users may want to migrate subresources. + // should thread through the args at some point. entry.Subresource == "" && entry.APIVersion == ssaManager.APIVersion }) diff --git a/util/csaupgrade/upgrade_test.go b/util/csaupgrade/upgrade_test.go index e333fed04a..04f64ca640 100644 --- a/util/csaupgrade/upgrade_test.go +++ b/util/csaupgrade/upgrade_test.go @@ -17,11 +17,16 @@ limitations under the License. 
package csaupgrade_test import ( + "encoding/json" "reflect" "testing" + jsonpatch "github.com/evanphx/json-patch" "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/client-go/util/csaupgrade" ) @@ -30,7 +35,7 @@ func TestUpgradeCSA(t *testing.T) { cases := []struct { Name string - CSAManager string + CSAManagers []string SSAManager string OriginalObject []byte ExpectedObject []byte @@ -39,14 +44,15 @@ func TestUpgradeCSA(t *testing.T) { // Case where there is a CSA entry with the given name, but no SSA entry // is found. Expect that the CSA entry is converted to an SSA entry // and renamed. - Name: "csa-basic-direct-conversion", - CSAManager: "kubectl-client-side-apply", - SSAManager: "kubectl", + Name: "csa-basic-direct-conversion", + CSAManagers: []string{"kubectl-client-side-apply"}, + SSAManager: "kubectl", OriginalObject: []byte(` apiVersion: v1 data: {} kind: ConfigMap metadata: + resourceVersion: "1" annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} @@ -74,7 +80,10 @@ apiVersion: v1 data: {} kind: ConfigMap metadata: - annotations: {} + resourceVersion: "1" + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} creationTimestamp: "2022-08-22T23:08:23Z" managedFields: - apiVersion: v1 @@ -85,12 +94,14 @@ metadata: f:key: {} f:legacy: {} f:metadata: - f:annotations: {} + f:annotations: + .: {} + f:kubectl.kubernetes.io/last-applied-configuration: {} manager: kubectl operation: Apply time: "2022-08-22T23:08:23Z" name: test - namespace: default + namespace: default `), }, { @@ -98,14 +109,15 @@ metadata: // Server creates duplicate managed fields entry - one for Update and another // for Apply. Expect entries to be merged into one entry, which is unchanged // from initial SSA. 
- Name: "csa-combine-with-ssa-duplicate-keys", - CSAManager: "kubectl-client-side-apply", - SSAManager: "kubectl", + Name: "csa-combine-with-ssa-duplicate-keys", + CSAManagers: []string{"kubectl-client-side-apply"}, + SSAManager: "kubectl", OriginalObject: []byte(` apiVersion: v1 data: {} kind: ConfigMap metadata: + resourceVersion: "1" annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} @@ -147,7 +159,10 @@ apiVersion: v1 data: {} kind: ConfigMap metadata: - annotations: {} + resourceVersion: "1" + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} creationTimestamp: "2022-08-22T23:08:23Z" managedFields: - apiVersion: v1 @@ -158,12 +173,14 @@ metadata: f:key: {} f:legacy: {} f:metadata: - f:annotations: {} + f:annotations: + .: {} + f:kubectl.kubernetes.io/last-applied-configuration: {} manager: kubectl operation: Apply time: "2022-08-23T23:08:23Z" name: test - namespace: default + namespace: default `), }, { @@ -173,14 +190,15 @@ metadata: // This shows that upgrading such an object results in correct behavior next // time SSA applier // Expect final object to have unioned keys from both entries - Name: "csa-combine-with-ssa-additional-keys", - CSAManager: "kubectl-client-side-apply", - SSAManager: "kubectl", + Name: "csa-combine-with-ssa-additional-keys", + CSAManagers: []string{"kubectl-client-side-apply"}, + SSAManager: "kubectl", OriginalObject: []byte(` apiVersion: v1 data: {} kind: ConfigMap metadata: + resourceVersion: "1" annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} @@ -221,7 +239,10 @@ apiVersion: v1 data: {} kind: ConfigMap metadata: - annotations: {} + resourceVersion: "1" + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} creationTimestamp: "2022-08-22T23:08:23Z" managedFields: - apiVersion: v1 @@ -232,26 +253,29 @@ metadata: f:key: {} f:legacy: {} f:metadata: - f:annotations: {} + f:annotations: + .: {} + f:kubectl.kubernetes.io/last-applied-configuration: {} manager: kubectl operation: Apply time: "2022-08-23T23:08:23Z" name: test - namespace: default + namespace: default `), }, { // Case when there are multiple CSA versions on the object which do not // match the version from the apply entry. Shows they are tossed away // without being merged. 
- Name: "csa-no-applicable-version", - CSAManager: "kubectl-client-side-apply", - SSAManager: "kubectl", + Name: "csa-no-applicable-version", + CSAManagers: []string{"kubectl-client-side-apply"}, + SSAManager: "kubectl", OriginalObject: []byte(` apiVersion: v1 data: {} kind: ConfigMap metadata: + resourceVersion: "1" annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} @@ -323,7 +347,10 @@ apiVersion: v1 data: {} kind: ConfigMap metadata: - annotations: {} + resourceVersion: "1" + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} creationTimestamp: "2022-08-22T23:08:23Z" managedFields: - apiVersion: v5 @@ -334,26 +361,29 @@ metadata: f:key: {} f:legacy: {} f:metadata: - f:annotations: {} + f:annotations: + .: {} + f:kubectl.kubernetes.io/last-applied-configuration: {} manager: kubectl operation: Apply time: "2022-08-23T23:08:23Z" name: test - namespace: default + namespace: default `), }, { // Case when there are multiple CSA versions on the object which do not // match the version from the apply entry, and one which does. // Shows that CSA entry with matching version is unioned into the SSA entry. - Name: "csa-single-applicable-version", - CSAManager: "kubectl-client-side-apply", - SSAManager: "kubectl", + Name: "csa-single-applicable-version", + CSAManagers: []string{"kubectl-client-side-apply"}, + SSAManager: "kubectl", OriginalObject: []byte(` apiVersion: v1 data: {} kind: ConfigMap metadata: + resourceVersion: "1" annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} @@ -425,7 +455,10 @@ apiVersion: v1 data: {} kind: ConfigMap metadata: - annotations: {} + resourceVersion: "1" + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} creationTimestamp: "2022-08-22T23:08:23Z" managedFields: - apiVersion: v5 @@ -440,11 +473,355 @@ metadata: f:annotations: .: {} f:hello2: {} + f:kubectl.kubernetes.io/last-applied-configuration: {} manager: kubectl operation: Apply time: "2022-08-23T23:08:23Z" name: test - namespace: default + namespace: default +`), + }, + { + // Do nothing to object with nothing to migrate and no existing SSA manager + Name: "noop", + CSAManagers: []string{"kubectl-client-side-apply"}, + SSAManager: "not-already-in-object", + OriginalObject: []byte(` +apiVersion: v1 +data: {} +kind: ConfigMap +metadata: + resourceVersion: "1" + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} + creationTimestamp: "2022-08-22T23:08:23Z" + managedFields: + - apiVersion: v5 + fieldsType: FieldsV1 + fieldsV1: + f:data: + .: {} + f:key: {} + f:legacy: {} + f:metadata: + f:annotations: + .: {} + f:kubectl.kubernetes.io/last-applied-configuration: {} + manager: kubectl + operation: Apply + time: "2022-08-23T23:08:23Z" + name: test + namespace: default +`), + ExpectedObject: []byte(` +apiVersion: v1 
+data: {} +kind: ConfigMap +metadata: + resourceVersion: "1" + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} + creationTimestamp: "2022-08-22T23:08:23Z" + managedFields: + - apiVersion: v5 + fieldsType: FieldsV1 + fieldsV1: + f:data: + .: {} + f:key: {} + f:legacy: {} + f:metadata: + f:annotations: + .: {} + f:kubectl.kubernetes.io/last-applied-configuration: {} + manager: kubectl + operation: Apply + time: "2022-08-23T23:08:23Z" + name: test + namespace: default +`), + }, + { + // Expect multiple targets to be merged into existing ssa manager + Name: "multipleTargetsExisting", + CSAManagers: []string{"kube-scheduler", "kubectl-client-side-apply"}, + SSAManager: "kubectl", + OriginalObject: []byte(` +apiVersion: v1 +data: {} +kind: ConfigMap +metadata: + resourceVersion: "1" + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} + creationTimestamp: "2022-08-22T23:08:23Z" + managedFields: + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:labels: + f:name: {} + f:spec: + f:containers: + k:{"name":"kubernetes-pause"}: + .: {} + f:image: {} + f:name: {} + manager: kubectl + operation: Apply + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"PodScheduled"}: + .: {} + f:lastProbeTime: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + manager: kube-scheduler + operation: Update + time: "2022-11-03T23:22:40Z" + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:kubectl.kubernetes.io/last-applied-configuration: {} + f:labels: + .: {} + f:name: {} + f:spec: + f:containers: + k:{"name":"kubernetes-pause"}: + .: {} + f:image: {} + f:imagePullPolicy: {} + f:name: {} + f:resources: {} + f:terminationMessagePath: {} + f:terminationMessagePolicy: {} + f:dnsPolicy: {} + f:enableServiceLinks: {} + f:restartPolicy: {} + f:schedulerName: {} + f:securityContext: {} + f:terminationGracePeriodSeconds: {} + manager: kubectl-client-side-apply + operation: Update + time: "2022-11-03T23:22:40Z" + name: test + namespace: default +`), + ExpectedObject: []byte(` +apiVersion: v1 +data: {} +kind: ConfigMap +metadata: + resourceVersion: "1" + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} + creationTimestamp: "2022-08-22T23:08:23Z" + managedFields: + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"PodScheduled"}: + .: {} + f:lastProbeTime: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + f:metadata: + f:annotations: + .: {} + f:kubectl.kubernetes.io/last-applied-configuration: {} + f:labels: + .: {} + f:name: {} + f:spec: + f:containers: + k:{"name":"kubernetes-pause"}: + .: {} + f:image: {} + f:imagePullPolicy: {} + f:name: {} + f:resources: {} + f:terminationMessagePath: {} + f:terminationMessagePolicy: {} + f:dnsPolicy: {} + f:enableServiceLinks: {} + f:restartPolicy: {} + f:schedulerName: {} + f:securityContext: {} + f:terminationGracePeriodSeconds: {} + manager: kubectl + 
operation: Apply + name: test + namespace: default +`), + }, + { + // Expect multiple targets to be merged into a new ssa manager + Name: "multipleTargetsNewInsertion", + CSAManagers: []string{"kubectl-client-side-apply", "kube-scheduler"}, + SSAManager: "newly-inserted-manager", + OriginalObject: []byte(` +apiVersion: v1 +data: {} +kind: ConfigMap +metadata: + resourceVersion: "1" + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} + creationTimestamp: "2022-08-22T23:08:23Z" + managedFields: + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:labels: + f:name: {} + f:spec: + f:containers: + k:{"name":"kubernetes-pause"}: + .: {} + f:image: {} + f:name: {} + manager: kubectl + operation: Apply + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:status: + f:conditions: + .: {} + k:{"type":"PodScheduled"}: + .: {} + f:lastProbeTime: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + manager: kube-scheduler + operation: Update + time: "2022-11-03T23:22:40Z" + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:kubectl.kubernetes.io/last-applied-configuration: {} + f:labels: + .: {} + f:name: {} + f:spec: + f:containers: + k:{"name":"kubernetes-pause"}: + .: {} + f:image: {} + f:imagePullPolicy: {} + f:name: {} + f:resources: {} + f:terminationMessagePath: {} + f:terminationMessagePolicy: {} + f:dnsPolicy: {} + f:enableServiceLinks: {} + f:restartPolicy: {} + f:schedulerName: {} + f:securityContext: {} + f:terminationGracePeriodSeconds: {} + manager: kubectl-client-side-apply + operation: Update + time: "2022-11-03T23:22:40Z" + name: test + namespace: default +`), + ExpectedObject: []byte(` + apiVersion: v1 + data: {} + kind: ConfigMap + metadata: + resourceVersion: "1" + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","data":{"key":"value","legacy":"unused"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"test","namespace":"default"}} + creationTimestamp: "2022-08-22T23:08:23Z" + managedFields: + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:labels: + f:name: {} + f:spec: + f:containers: + k:{"name":"kubernetes-pause"}: + .: {} + f:image: {} + f:name: {} + manager: kubectl + operation: Apply + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: {} + f:kubectl.kubernetes.io/last-applied-configuration: {} + f:labels: + .: {} + f:name: {} + f:spec: + f:containers: + k:{"name":"kubernetes-pause"}: + .: {} + f:image: {} + f:imagePullPolicy: {} + f:name: {} + f:resources: {} + f:terminationMessagePath: {} + f:terminationMessagePolicy: {} + f:dnsPolicy: {} + f:enableServiceLinks: {} + f:restartPolicy: {} + f:schedulerName: {} + f:securityContext: {} + f:terminationGracePeriodSeconds: {} + f:status: + f:conditions: + .: {} + k:{"type":"PodScheduled"}: + .: {} + f:lastProbeTime: {} + f:lastTransitionTime: {} + f:message: {} + f:reason: {} + f:status: {} + f:type: {} + manager: newly-inserted-manager + operation: Apply + time: "2022-11-03T23:22:40Z" + name: test + namespace: default `), }, } @@ -452,7 +829,7 @@ metadata: for _, testCase := range cases { t.Run(testCase.Name, func(t *testing.T) { initialObject := unstructured.Unstructured{} - err := yaml.Unmarshal(testCase.OriginalObject, &initialObject) + err := 
yaml.Unmarshal(testCase.OriginalObject, &initialObject.Object) if err != nil { t.Fatal(err) } @@ -460,7 +837,7 @@ metadata: upgraded := initialObject.DeepCopy() err = csaupgrade.UpgradeManagedFields( upgraded, - testCase.CSAManager, + sets.New(testCase.CSAManagers...), testCase.SSAManager, ) @@ -469,7 +846,7 @@ metadata: } expectedObject := unstructured.Unstructured{} - err = yaml.Unmarshal(testCase.ExpectedObject, &expectedObject) + err = yaml.Unmarshal(testCase.ExpectedObject, &expectedObject.Object) if err != nil { t.Fatal(err) } @@ -477,6 +854,42 @@ metadata: if !reflect.DeepEqual(&expectedObject, upgraded) { t.Fatal(cmp.Diff(&expectedObject, upgraded)) } + + // Show that the UpgradeManagedFieldsPatch yields a patch that does + // nothing more and nothing less than make the object equal to output + // of UpgradeManagedFields + + initialCopy := initialObject.DeepCopyObject() + patchBytes, err := csaupgrade.UpgradeManagedFieldsPatch( + initialCopy, sets.New(testCase.CSAManagers...), testCase.SSAManager) + + if err != nil { + t.Fatal(err) + } else if patchBytes != nil { + patch, err := jsonpatch.DecodePatch(patchBytes) + if err != nil { + t.Fatal(err) + } + + initialJSON, err := json.Marshal(initialObject.Object) + if err != nil { + t.Fatal(err) + } + + patchedBytes, err := patch.Apply(initialJSON) + if err != nil { + t.Fatal(err) + } + + var patched unstructured.Unstructured + if err := json.Unmarshal(patchedBytes, &patched.Object); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(&patched, upgraded) { + t.Fatalf("expected patch to produce an upgraded object: %v", cmp.Diff(patched, upgraded)) + } + } }) } }
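
A rough usage sketch of the new helper introduced above, not part of the patch itself: it shows how a caller might drive UpgradeManagedFieldsPatch with a dynamic client. The function name migrateFieldOwnership and the client, gvr, namespace, and name parameters are assumed for illustration; only the csaupgrade and sets calls come from the change above.

package csaupgradeexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/util/csaupgrade"
)

// migrateFieldOwnership is an illustrative sketch: it moves fields owned by the
// kubectl client-side-apply manager onto the "kubectl" Apply manager for a
// single object, sending a patch only when there is something to migrate.
func migrateFieldOwnership(ctx context.Context, client dynamic.Interface, gvr schema.GroupVersionResource, namespace, name string) error {
	obj, err := client.Resource(gvr).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	patch, err := csaupgrade.UpgradeManagedFieldsPatch(
		obj,
		sets.New("kubectl-client-side-apply"),
		"kubectl",
	)
	if err != nil {
		return err
	}
	if patch == nil {
		// Nothing to migrate; the managed fields are already in the desired shape.
		return nil
	}

	// The generated patch also replaces metadata.resourceVersion, so a concurrent
	// write surfaces as a conflict; callers can re-fetch and retry in that case.
	_, err = client.Resource(gvr).Namespace(namespace).Patch(
		ctx, name, types.JSONPatchType, patch, metav1.PatchOptions{})
	return err
}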