-
Notifications
You must be signed in to change notification settings - Fork 1.4k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
✨ Clusterctl alpha rollout history #7994
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,134 @@ | ||
/* | ||
Copyright 2023 The Kubernetes Authors. | ||
|
||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
|
||
http://www.apache.org/licenses/LICENSE-2.0 | ||
|
||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. | ||
*/ | ||
|
||
package alpha | ||
|
||
import ( | ||
"context" | ||
"fmt" | ||
"os" | ||
"sort" | ||
"strconv" | ||
|
||
"github.com/olekukonko/tablewriter" | ||
"github.com/pkg/errors" | ||
"gopkg.in/yaml.v3" | ||
corev1 "k8s.io/api/core/v1" | ||
|
||
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" | ||
"sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" | ||
logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" | ||
"sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil" | ||
) | ||
|
||
// historyInfo groups the printable history data for a single MachineSet:
// the comma-separated list of revision numbers it has represented and the
// change cause recorded in its annotations.
type historyInfo struct {
	revisions   string
	changeCause string
}
|
||
// ObjectViewer will issue a view on the specified cluster-api resource. | ||
func (r *rollout) ObjectViewer(ctx context.Context, proxy cluster.Proxy, ref corev1.ObjectReference, revision int64) error { | ||
switch ref.Kind { | ||
case MachineDeployment: | ||
deployment, err := getMachineDeployment(ctx, proxy, ref.Name, ref.Namespace) | ||
if err != nil || deployment == nil { | ||
return errors.Wrapf(err, "failed to get %v/%v", ref.Kind, ref.Name) | ||
} | ||
if err := viewMachineDeployment(ctx, proxy, deployment, revision); err != nil { | ||
return err | ||
} | ||
default: | ||
return errors.Errorf("invalid resource type %q, valid values are %v", ref.Kind, validHistoryResourceTypes) | ||
} | ||
return nil | ||
} | ||
|
||
func viewMachineDeployment(ctx context.Context, proxy cluster.Proxy, d *clusterv1.MachineDeployment, revision int64) error { | ||
log := logf.Log | ||
msList, err := getMachineSetsForDeployment(ctx, proxy, d) | ||
if err != nil { | ||
return err | ||
} | ||
|
||
if revision < 0 { | ||
return errors.Errorf("revision number cannot be negative: %v", revision) | ||
} | ||
|
||
// Print details of a specific revision | ||
if revision > 0 { | ||
ms, err := findMachineDeploymentRevision(revision, msList) | ||
if err != nil { | ||
return errors.Errorf("unable to find the specified revision %d for MachineDeployment %s", revision, d.Name) | ||
} | ||
output, err := yaml.Marshal(ms.Spec.Template) | ||
if err != nil { | ||
return err | ||
} | ||
fmt.Fprint(os.Stdout, string(output)) | ||
return nil | ||
} | ||
|
||
// Print an overview of all revisions | ||
// Create a revisionToChangeCause map | ||
histInfo := make(map[int64]historyInfo) | ||
for _, ms := range msList { | ||
v, err := mdutil.Revision(ms) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I just stumbled into machinedeployment.clusters.x-k8s.io/revision-history and I'm wondering if we should keep this as well into account while building the revision history There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Thank you for your comments. FYI, I found that Kubernetes also has this annotation, but it's not used in the actual logic. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. In CAPI it is used in https://github.com/kubernetes-sigs/cluster-api/blob/29618402670e19c77833edbede6d9e[…]7b4d1036a/internal/controllers/machinedeployment/mdutil/util.go TL;DR each machineset keeps track of the revision it currently is, and if “reused”, it keeps also track of the revision it was…
I would prefer to get this PR merged with support for revision-history and then eventually open the discussion deprecation vs keeping this PR around on hold till the disussion is completed and revision-history is actually deprecated There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I would prefer not showing the revision numbers that are listed under revision history annotation, at least as separate entries in the output. If we want really want to show those numbers in the output for the sake of having a full history then how about adding a third column called "matching revisions"/"duplicate reivisions" or some better name where we list these revision numbers against target revision numbers? Thoughts? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why can't we rollback to revision number in revision history? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I basically agree with @ykakarap 's comment.
Do you have any ideas for this use case? I thought, maybe, users want to store a specific revision number and want to rollback there always, but such needs can be satisfied by storing manifests for that revision. I think rollback is just a rollback for temporal error handling, and thus there's no need to rollback to a revision number in the history. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Following up from the discussion in the office hours. I think it's very user unfriendly if we do not support revision-history (as long as Cluster API is using it). Today Cluster API only tracks MachineSets that are actually different via separate MachineSet objects. As soon a MachineSet is the same as another one in the history revision-history is used automatically. If we don't support revision-history from a user point-of-view there will be arbritrary holes in the history (e.g. if 1, 4, 5, 7 are the same the history shown by clusterctl would be: 2, 3, 6, 7). I think this is very hard to understand for users. I would suggest:
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Thank you for following up and my apologies for the delayed reply. I changed PR following your suggestion. I'll deal with your second suggestion in a different PR as it should be an enhancement of
This makes sense. BTW, I made output of the history command like this. If I am a user, I want to group revisions associated to the same MachineSet with the same change cause. REVISIONS CHANGE-CAUSE
2 <none>
4,6,8,10 add label
1,3,5,7,9,11 update to the latest k8s version The above is more useful than this when you are looking for a revision that you want to rollback. REVISION CHANGE-CAUSE
1 <none>
2 <none>
3 <none>
4 <none>
5 <none>
6 <none>
7 <none>
8 <none>
9 <none>
10 add label
11 update to the latest k8s version As an alternative, we can extend the change cause to record its history but I think that is too much. |
||
if err != nil { | ||
log.Error(err, fmt.Sprintf("unable to get revision from machineset %s for machinedeployment %s in namespace %s", ms.Name, d.Name, d.Namespace)) | ||
continue | ||
} | ||
revisions := strconv.FormatInt(v, 10) | ||
if revHistory := ms.Annotations[clusterv1.RevisionHistoryAnnotation]; revHistory != "" { | ||
revisions = revHistory + "," + revisions | ||
} | ||
histInfo[v] = historyInfo{ | ||
revisions, | ||
ms.Annotations[clusterv1.ChangeCauseAnnotation], | ||
} | ||
} | ||
|
||
// Sort the revisions | ||
revisions := make([]int64, 0, len(histInfo)) | ||
for r := range histInfo { | ||
revisions = append(revisions, r) | ||
} | ||
sort.Slice(revisions, func(i, j int) bool { return revisions[i] < revisions[j] }) | ||
|
||
// Output the revisionToChangeCause map | ||
table := tablewriter.NewWriter(os.Stdout) | ||
table.SetHeader([]string{"REVISIONS", "CHANGE-CAUSE"}) | ||
table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) | ||
table.SetAlignment(tablewriter.ALIGN_LEFT) | ||
table.SetCenterSeparator("") | ||
table.SetColumnSeparator("") | ||
table.SetRowSeparator("") | ||
table.SetHeaderLine(false) | ||
table.SetBorder(false) | ||
|
||
for _, r := range revisions { | ||
changeCause := histInfo[r].changeCause | ||
if changeCause == "" { | ||
changeCause = "<none>" | ||
} | ||
table.Append([]string{ | ||
histInfo[r].revisions, | ||
changeCause, | ||
}) | ||
} | ||
table.Render() | ||
|
||
return nil | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This annotation is never updated, right? So the cause is always "". Is there a follow-up planned to update this annotation?
If so, FYI, that work could potentially overlap with the in-place propagation work (#7731).
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I don't have a plan to update this specific annotation directly from the code. I assume that this annotation is set by users, as in Kubernetes.
Also, I'm okay with propagating this annotation set in a MachineDeployment to MachineSets within the scope of label propagation, as Kubernetes behaves similarly (as described below), but I guess it shouldn't be specific to this annotation.