diff --git a/cmd/kops/create_cluster.go b/cmd/kops/create_cluster.go
index bf85f7501b95e..054e5a60e50fa 100644
--- a/cmd/kops/create_cluster.go
+++ b/cmd/kops/create_cluster.go
@@ -802,9 +802,10 @@ func RunCreateCluster(ctx context.Context, f *util.Factory, out io.Writer, c *Cr
 	updateClusterOptions.Yes = c.Yes
 	updateClusterOptions.Target = c.Target
 	updateClusterOptions.OutDir = c.OutDir
-	updateClusterOptions.admin = kubeconfig.DefaultKubecfgAdminLifetime
 	updateClusterOptions.ClusterName = cluster.Name
-	updateClusterOptions.CreateKubecfg = true
+
+	updateClusterOptions.CreateKubecfgOptions.Admin = kubeconfig.DefaultKubecfgAdminLifetime
+	updateClusterOptions.CreateKubecfgOptions.CreateKubecfg = true
 
 	// SSHPublicKey has already been mapped
 	updateClusterOptions.SSHPublicKey = ""
diff --git a/cmd/kops/export_kubeconfig.go b/cmd/kops/export_kubeconfig.go
index da2b95ef3d004..b04ac128624a8 100644
--- a/cmd/kops/export_kubeconfig.go
+++ b/cmd/kops/export_kubeconfig.go
@@ -20,7 +20,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"time"
 
 	"github.com/spf13/cobra"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -58,12 +57,7 @@ type ExportKubeconfigOptions struct {
 	ClusterName    string
 	KubeConfigPath string
 	all            bool
-	admin          time.Duration
-	user           string
-	internal       bool
-
-	// UseKopsAuthenticationPlugin controls whether we should use the kOps auth helper instead of a static credential
-	UseKopsAuthenticationPlugin bool
+	kubeconfig.CreateKubecfgOptions
 }
 
 func NewCmdExportKubeconfig(f *util.Factory, out io.Writer) *cobra.Command {
@@ -76,7 +70,7 @@ func NewCmdExportKubeconfig(f *util.Factory, out io.Writer) *cobra.Command {
 		Long:    exportKubeconfigLong,
 		Example: exportKubeconfigExample,
 		Args: func(cmd *cobra.Command, args []string) error {
-			if options.admin != 0 && options.user != "" {
+			if options.Admin != 0 && options.User != "" {
 				return fmt.Errorf("cannot use both --admin and --user")
 			}
 			if options.all {
@@ -96,11 +90,11 @@ func NewCmdExportKubeconfig(f *util.Factory, out io.Writer) *cobra.Command {
 	cmd.Flags().StringVar(&options.KubeConfigPath, "kubeconfig", options.KubeConfigPath, "Filename of the kubeconfig to create")
 	cmd.Flags().BoolVar(&options.all, "all", options.all, "Export all clusters from the kOps state store")
-	cmd.Flags().DurationVar(&options.admin, "admin", options.admin, "Also export a cluster admin user credential with the specified lifetime and add it to the cluster context")
+	cmd.Flags().DurationVar(&options.Admin, "admin", options.Admin, "Also export a cluster admin user credential with the specified lifetime and add it to the cluster context")
 	cmd.Flags().Lookup("admin").NoOptDefVal = kubeconfig.DefaultKubecfgAdminLifetime.String()
-	cmd.Flags().StringVar(&options.user, "user", options.user, "Existing user in kubeconfig file to use")
+	cmd.Flags().StringVar(&options.User, "user", options.User, "Existing user in kubeconfig file to use")
 	cmd.RegisterFlagCompletionFunc("user", completeKubecfgUser)
-	cmd.Flags().BoolVar(&options.internal, "internal", options.internal, "Use the cluster's internal DNS name")
+	cmd.Flags().BoolVar(&options.Internal, "internal", options.Internal, "Use the cluster's internal DNS name")
 	cmd.Flags().BoolVar(&options.UseKopsAuthenticationPlugin, "auth-plugin", options.UseKopsAuthenticationPlugin, "Use the kOps authentication plugin")
 
 	return cmd
@@ -150,11 +144,8 @@ func RunExportKubeconfig(ctx context.Context, f *util.Factory, out io.Writer, op
 		keyStore,
 		secretStore,
 		cloud,
-		options.admin,
-		options.user,
-		options.internal,
-		f.KopsStateStore(),
-		options.UseKopsAuthenticationPlugin)
+		options.CreateKubecfgOptions,
+		f.KopsStateStore())
 	if err != nil {
 		return err
 	}
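This refactor replaces the individual admin/user/internal fields with an embedded kubeconfig.CreateKubecfgOptions; it works because Go promotes an embedded struct's fields, so references like options.Admin and options.User resolve without further changes at the call sites. A minimal standalone sketch of that mechanism, using illustrative stand-in types rather than the kops definitions:

```go
package main

import (
	"fmt"
	"time"
)

// Stand-in for kubeconfig.CreateKubecfgOptions.
type CreateKubecfgOptions struct {
	Admin    time.Duration
	User     string
	Internal bool
}

// Stand-in for ExportKubeconfigOptions: embedding promotes the fields.
type ExportKubeconfigOptions struct {
	ClusterName string
	CreateKubecfgOptions
}

func main() {
	var options ExportKubeconfigOptions
	options.Admin = 18 * time.Hour // writes the embedded struct's field
	// The promoted name and the explicit path refer to the same storage.
	fmt.Println(options.Admin == options.CreateKubecfgOptions.Admin) // true
}
```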
diff --git a/cmd/kops/get_assets.go b/cmd/kops/get_assets.go
index 7d86cda2516db..6afd21fee5745 100644
--- a/cmd/kops/get_assets.go
+++ b/cmd/kops/get_assets.go
@@ -102,9 +102,11 @@ func NewCmdGetAssets(f *util.Factory, out io.Writer, getOptions *GetOptions) *co
 func RunGetAssets(ctx context.Context, f *util.Factory, out io.Writer, options *GetAssetsOptions) error {
 	updateClusterResults, err := RunUpdateCluster(ctx, f, out, &UpdateClusterOptions{
-		Target:      cloudup.TargetDryRun,
-		GetAssets:   true,
-		ClusterName: options.ClusterName,
+		CoreUpdateClusterOptions: CoreUpdateClusterOptions{
+			Target:      cloudup.TargetDryRun,
+			GetAssets:   true,
+			ClusterName: options.ClusterName,
+		},
 	})
 	if err != nil {
 		return err
diff --git a/cmd/kops/reconcile.go b/cmd/kops/reconcile.go
new file mode 100644
index 0000000000000..fdcf76bf80dd0
--- /dev/null
+++ b/cmd/kops/reconcile.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"io"
+
+	"github.com/spf13/cobra"
+	"k8s.io/kops/cmd/kops/util"
+	"k8s.io/kubectl/pkg/util/i18n"
+)
+
+var reconcileShort = i18n.T("Reconcile a cluster.")
+
+func NewCmdReconcile(f *util.Factory, out io.Writer) *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "reconcile",
+		Short: reconcileShort,
+	}
+
+	// subcommands
+	cmd.AddCommand(NewCmdReconcileCluster(f, out))
+
+	return cmd
+}
diff --git a/cmd/kops/reconcile_cluster.go b/cmd/kops/reconcile_cluster.go
new file mode 100644
index 0000000000000..7ec9db74450a5
--- /dev/null
+++ b/cmd/kops/reconcile_cluster.go
@@ -0,0 +1,187 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/spf13/cobra"
+	"k8s.io/kops/cmd/kops/util"
+	"k8s.io/kops/pkg/apis/kops"
+	"k8s.io/kops/pkg/commands/commandutils"
+	"k8s.io/kops/upup/pkg/fi/cloudup"
+	"k8s.io/kubectl/pkg/util/i18n"
+	"k8s.io/kubectl/pkg/util/templates"
+)
+
+var (
+	reconcileClusterLong = templates.LongDesc(i18n.T(`
+	Reconcile the cluster by updating and rolling the control plane and nodes sequentially.
+	`))
+
+	reconcileClusterExample = templates.Examples(i18n.T(`
+	# After the cluster has been edited or upgraded, update the cloud resources with:
+	kops reconcile cluster k8s-cluster.example.com --state=s3://my-state-store --yes
+	`))
+
+	reconcileClusterShort = i18n.T("Reconcile a cluster.")
+)
+
+type ReconcileClusterOptions struct {
+	CoreUpdateClusterOptions
+}
+
+func NewCmdReconcileCluster(f *util.Factory, out io.Writer) *cobra.Command {
+	options := &ReconcileClusterOptions{}
+	options.InitDefaults()
+
+	cmd := &cobra.Command{
+		Use:               "cluster [CLUSTER]",
+		Short:             reconcileClusterShort,
+		Long:              reconcileClusterLong,
+		Example:           reconcileClusterExample,
+		Args:              rootCommand.clusterNameArgs(&options.ClusterName),
+		ValidArgsFunction: commandutils.CompleteClusterName(f, true, false),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			err := RunReconcileCluster(cmd.Context(), f, out, &options.CoreUpdateClusterOptions)
+			return err
+		},
+	}
+
+	cmd.Flags().BoolVarP(&options.Yes, "yes", "y", options.Yes, "Create cloud resources, without --yes reconcile is in dry run mode")
+
+	// These flags from the update command are not obviously needed by reconcile, though we can add them if needed:
+	//
+	// cmd.Flags().StringVar(&options.Target, "target", options.Target, "Target - direct")
+	// cmd.RegisterFlagCompletionFunc("target", completeUpdateClusterTarget(f, &options.CoreUpdateClusterOptions))
+	// cmd.Flags().StringVar(&options.SSHPublicKey, "ssh-public-key", options.SSHPublicKey, "SSH public key to use (deprecated: use kops create secret instead)")
+	// cmd.Flags().StringVar(&options.OutDir, "out", options.OutDir, "Path to write any local output")
+	// cmd.MarkFlagDirname("out")
+
+	// These flags from the update command are specific to kubeconfig creation:
+	//
+	// cmd.Flags().BoolVar(&options.CreateKubecfg, "create-kube-config", options.CreateKubecfg, "Will control automatically creating the kube config file on your local filesystem")
+	// cmd.Flags().DurationVar(&options.Admin, "admin", options.Admin, "Also export a cluster admin user credential with the specified lifetime and add it to the cluster context")
+	// cmd.Flags().Lookup("admin").NoOptDefVal = kubeconfig.DefaultKubecfgAdminLifetime.String()
+	// cmd.Flags().StringVar(&options.User, "user", options.User, "Existing user in kubeconfig file to use. Implies --create-kube-config")
+	// cmd.RegisterFlagCompletionFunc("user", completeKubecfgUser)
+	// cmd.Flags().BoolVar(&options.Internal, "internal", options.Internal, "Use the cluster's internal DNS name. Implies --create-kube-config")
+
+	cmd.Flags().BoolVar(&options.AllowKopsDowngrade, "allow-kops-downgrade", options.AllowKopsDowngrade, "Allow an older version of kOps to update the cluster than last used")
+
+	// These flags from the update command are not obviously needed by reconcile, though we can add them if needed:
+	//
+	// cmd.Flags().StringSliceVar(&options.InstanceGroups, "instance-group", options.InstanceGroups, "Instance groups to update (defaults to all if not specified)")
+	// cmd.RegisterFlagCompletionFunc("instance-group", completeInstanceGroup(f, &options.InstanceGroups, &options.InstanceGroupRoles))
+	// cmd.Flags().StringSliceVar(&options.InstanceGroupRoles, "instance-group-roles", options.InstanceGroupRoles, "Instance group roles to update ("+strings.Join(allRoles, ",")+")")
+	// cmd.RegisterFlagCompletionFunc("instance-group-roles", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	//	return sets.NewString(allRoles...).Delete(options.InstanceGroupRoles...).List(), cobra.ShellCompDirectiveNoFileComp
+	// })
+	// cmd.Flags().StringVar(&options.Phase, "phase", options.Phase, "Subset of tasks to run: "+strings.Join(cloudup.Phases.List(), ", "))
+	// cmd.RegisterFlagCompletionFunc("phase", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	//	return cloudup.Phases.List(), cobra.ShellCompDirectiveNoFileComp
+	// })
+	// cmd.Flags().StringSliceVar(&options.LifecycleOverrides, "lifecycle-overrides", options.LifecycleOverrides, "comma separated list of phase overrides, example: SecurityGroups=Ignore,InternetGateway=ExistsAndWarnIfChanges")
+	// viper.BindPFlag("lifecycle-overrides", cmd.Flags().Lookup("lifecycle-overrides"))
+	// viper.BindEnv("lifecycle-overrides", "KOPS_LIFECYCLE_OVERRIDES")
+	// cmd.RegisterFlagCompletionFunc("lifecycle-overrides", completeLifecycleOverrides)
+	// cmd.Flags().BoolVar(&options.Prune, "prune", options.Prune, "Delete old revisions of cloud resources that were needed during an upgrade")
+	// cmd.Flags().BoolVar(&options.IgnoreKubeletVersionSkew, "ignore-kubelet-version-skew", options.IgnoreKubeletVersionSkew, "Setting this to true will force updating the kubernetes version on all instance groups, regardless of which control plane version is running")
+
+	// cmd.Flags().BoolVar(&options.Reconcile, "reconcile", options.Reconcile, "Reconcile the cluster by rolling the control plane and nodes sequentially")
+
+	return cmd
+}
+
+// RunReconcileCluster updates the cluster to the desired state, including rolling updates where necessary.
+// To respect skew policy, it updates the control plane first, then updates the nodes.
+// "update" is probably now smart enough to automatically not update the control plane if it is already at the desired version,
+// but we do it explicitly here to be clearer / safer.
+func RunReconcileCluster(ctx context.Context, f *util.Factory, out io.Writer, c *CoreUpdateClusterOptions) error {
+	if !c.Yes {
+		return fmt.Errorf("reconcile is only supported with --yes")
+	}
+	if c.Target == cloudup.TargetTerraform {
+		return fmt.Errorf("reconcile is not supported with terraform")
+	}
+
+	fmt.Fprintf(out, "Updating control plane configuration\n")
+	{
+		opt := *c
+		opt.InstanceGroupRoles = []string{
+			string(kops.InstanceGroupRoleAPIServer),
+			string(kops.InstanceGroupRoleControlPlane),
+		}
+		opt.Prune = false // Do not prune until after the last rolling update
+		if _, err := RunCoreUpdateCluster(ctx, f, out, &opt); err != nil {
+			return err
+		}
+	}
+
+	fmt.Fprintf(out, "Doing rolling-update for control plane\n")
+	{
+		opt := &RollingUpdateOptions{}
+		opt.InitDefaults()
+		opt.ClusterName = c.ClusterName
+		opt.InstanceGroupRoles = []string{
+			string(kops.InstanceGroupRoleAPIServer),
+			string(kops.InstanceGroupRoleControlPlane),
+		}
+		opt.Yes = c.Yes
+		if err := RunRollingUpdateCluster(ctx, f, out, opt); err != nil {
+			return err
+		}
+	}
+
+	fmt.Fprintf(out, "Updating node configuration\n")
+	{
+		opt := *c
+		// Do all roles this time, though we only expect changes to node & bastion roles
+		opt.InstanceGroupRoles = nil
+		opt.Prune = false // Do not prune until after the last rolling update
+		if _, err := RunCoreUpdateCluster(ctx, f, out, &opt); err != nil {
+			return err
+		}
+	}
+
+	fmt.Fprintf(out, "Doing rolling-update for nodes\n")
+	{
+		opt := &RollingUpdateOptions{}
+		opt.InitDefaults()
+		opt.ClusterName = c.ClusterName
+		// Do all roles this time, though we only expect changes to node & bastion roles
+		opt.InstanceGroupRoles = nil
+		opt.Yes = c.Yes
+		if err := RunRollingUpdateCluster(ctx, f, out, opt); err != nil {
+			return err
+		}
+	}
+
+	fmt.Fprintf(out, "Pruning old resources that are no longer used\n")
+	{
+		opt := *c
+		opt.InstanceGroupRoles = nil
+		opt.Prune = true
+		if _, err := RunCoreUpdateCluster(ctx, f, out, &opt); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
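The function above hard-codes a fixed five-phase sequence. As a reading aid only, the same ordering can be expressed as data; the phase type and role names below are illustrative stand-ins, not symbols from the kops codebase:

```go
package main

import "fmt"

// phase is a hypothetical summary of one step in RunReconcileCluster above.
type phase struct {
	name  string
	roles []string // nil means every instance-group role
	prune bool     // pruning is deferred until all rolling updates are done
}

func main() {
	controlPlane := []string{"APIServer", "ControlPlane"}
	sequence := []phase{
		{name: "update control plane configuration", roles: controlPlane},
		{name: "rolling-update control plane", roles: controlPlane},
		{name: "update node configuration", roles: nil},
		{name: "rolling-update nodes", roles: nil},
		{name: "prune unused resources", roles: nil, prune: true},
	}
	for i, p := range sequence {
		fmt.Printf("%d. %s (roles=%v prune=%v)\n", i+1, p.name, p.roles, p.prune)
	}
}
```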
diff --git a/cmd/kops/root.go b/cmd/kops/root.go
index 59ba034849fee..d8ee4347944d6 100644
--- a/cmd/kops/root.go
+++ b/cmd/kops/root.go
@@ -171,6 +171,7 @@ func NewCmdRoot(f *util.Factory, out io.Writer) *cobra.Command {
 	cmd.AddCommand(NewCmdGet(f, out))
 	cmd.AddCommand(commands.NewCmdHelpers(f, out))
 	cmd.AddCommand(NewCmdPromote(f, out))
+	cmd.AddCommand(NewCmdReconcile(f, out))
 	cmd.AddCommand(NewCmdReplace(f, out))
 	cmd.AddCommand(NewCmdRollingUpdate(f, out))
 	cmd.AddCommand(NewCmdToolbox(f, out))
diff --git a/cmd/kops/update_cluster.go b/cmd/kops/update_cluster.go
index 9b938f93920c7..d2e3423e7f148 100644
--- a/cmd/kops/update_cluster.go
+++ b/cmd/kops/update_cluster.go
@@ -23,7 +23,6 @@ import (
 	"io"
 	"os"
 	"strings"
-	"time"
 
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
@@ -57,13 +56,26 @@ var (
 	updateClusterExample = templates.Examples(i18n.T(`
 	# After the cluster has been edited or upgraded, update the cloud resources with:
-	kops update cluster k8s-cluster.example.com --yes --state=s3://my-state-store --yes
+	kops update cluster k8s-cluster.example.com --state=s3://my-state-store --yes
 	`))
 
 	updateClusterShort = i18n.T("Update a cluster.")
 )
 
+// UpdateClusterOptions holds the options for the update cluster command.
+// The update cluster command combines several areas of functionality, so it builds up the options for each of those areas.
 type UpdateClusterOptions struct {
+	// Reconcile is true if we should reconcile the cluster by rolling the control plane and nodes sequentially
+	Reconcile bool
+
+	kubeconfig.CreateKubecfgOptions
+	CoreUpdateClusterOptions
+}
+
+// CoreUpdateClusterOptions holds the core options for the update cluster command,
+// which are shared with the reconcile cluster command.
+// The fields _not_ shared with the reconcile cluster command are the ones in CreateKubecfgOptions.
+type CoreUpdateClusterOptions struct {
 	Yes    bool
 	Target string
 	OutDir string
@@ -79,11 +91,6 @@ type UpdateClusterOptions struct {
 
 	ClusterName string
 
-	CreateKubecfg bool
-	admin         time.Duration
-	user          string
-	internal      bool
-
 	// InstanceGroups is the list of instance groups to update;
 	// if not specified, all instance groups will be updated
 	InstanceGroups []string
@@ -106,6 +113,15 @@ type UpdateClusterOptions struct {
 }
 
 func (o *UpdateClusterOptions) InitDefaults() {
+	o.CoreUpdateClusterOptions.InitDefaults()
+
+	o.Reconcile = false
+
+	// By default we export a kubecfg, but it doesn't have a static/eternal credential in it any more.
+	o.CreateKubecfg = true
+}
+
+func (o *CoreUpdateClusterOptions) InitDefaults() {
 	o.Yes = false
 	o.Target = "direct"
 	o.SSHPublicKey = ""
@@ -113,9 +129,6 @@ func (o *UpdateClusterOptions) InitDefaults() {
 	// By default we enforce the version skew between control plane and worker nodes
 	o.IgnoreKubeletVersionSkew = false
 
-	// By default we export a kubecfg, but it doesn't have a static/eternal credential in it any more.
-	o.CreateKubecfg = true
-
 	o.Prune = false
 
 	o.RunTasksOptions.InitDefaults()
@@ -145,16 +158,16 @@ func NewCmdUpdateCluster(f *util.Factory, out io.Writer) *cobra.Command {
 	cmd.Flags().BoolVarP(&options.Yes, "yes", "y", options.Yes, "Create cloud resources, without --yes update is in dry run mode")
 	cmd.Flags().StringVar(&options.Target, "target", options.Target, "Target - direct, terraform")
-	cmd.RegisterFlagCompletionFunc("target", completeUpdateClusterTarget(f, options))
+	cmd.RegisterFlagCompletionFunc("target", completeUpdateClusterTarget(f, &options.CoreUpdateClusterOptions))
 	cmd.Flags().StringVar(&options.SSHPublicKey, "ssh-public-key", options.SSHPublicKey, "SSH public key to use (deprecated: use kops create secret instead)")
 	cmd.Flags().StringVar(&options.OutDir, "out", options.OutDir, "Path to write any local output")
 	cmd.MarkFlagDirname("out")
 	cmd.Flags().BoolVar(&options.CreateKubecfg, "create-kube-config", options.CreateKubecfg, "Will control automatically creating the kube config file on your local filesystem")
-	cmd.Flags().DurationVar(&options.admin, "admin", options.admin, "Also export a cluster admin user credential with the specified lifetime and add it to the cluster context")
+	cmd.Flags().DurationVar(&options.Admin, "admin", options.Admin, "Also export a cluster admin user credential with the specified lifetime and add it to the cluster context")
 	cmd.Flags().Lookup("admin").NoOptDefVal = kubeconfig.DefaultKubecfgAdminLifetime.String()
-	cmd.Flags().StringVar(&options.user, "user", options.user, "Existing user in kubeconfig file to use. Implies --create-kube-config")
+	cmd.Flags().StringVar(&options.User, "user", options.User, "Existing user in kubeconfig file to use. Implies --create-kube-config")
 	cmd.RegisterFlagCompletionFunc("user", completeKubecfgUser)
-	cmd.Flags().BoolVar(&options.internal, "internal", options.internal, "Use the cluster's internal DNS name. Implies --create-kube-config")
+	cmd.Flags().BoolVar(&options.Internal, "internal", options.Internal, "Use the cluster's internal DNS name. Implies --create-kube-config")
 	cmd.Flags().BoolVar(&options.AllowKopsDowngrade, "allow-kops-downgrade", options.AllowKopsDowngrade, "Allow an older version of kOps to update the cluster than last used")
 	cmd.Flags().StringSliceVar(&options.InstanceGroups, "instance-group", options.InstanceGroups, "Instance groups to update (defaults to all if not specified)")
 	cmd.RegisterFlagCompletionFunc("instance-group", completeInstanceGroup(f, &options.InstanceGroups, &options.InstanceGroupRoles))
@@ -174,6 +187,8 @@ func NewCmdUpdateCluster(f *util.Factory, out io.Writer) *cobra.Command {
 	cmd.Flags().BoolVar(&options.Prune, "prune", options.Prune, "Delete old revisions of cloud resources that were needed during an upgrade")
 	cmd.Flags().BoolVar(&options.IgnoreKubeletVersionSkew, "ignore-kubelet-version-skew", options.IgnoreKubeletVersionSkew, "Setting this to true will force updating the kubernetes version on all instance groups, regardless of which control plane version is running")
 
+	cmd.Flags().BoolVar(&options.Reconcile, "reconcile", options.Reconcile, "Reconcile the cluster by rolling the control plane and nodes sequentially")
+
 	return cmd
 }
@@ -192,27 +207,39 @@ type UpdateClusterResults struct {
 	Cluster *kops.Cluster
 }
 
+func RunCoreUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer, c *CoreUpdateClusterOptions) (*UpdateClusterResults, error) {
+	opt := &UpdateClusterOptions{}
+	opt.CoreUpdateClusterOptions = *c
+	opt.Reconcile = false
+	opt.CreateKubecfgOptions.CreateKubecfg = false
+	return RunUpdateCluster(ctx, f, out, opt)
+}
+
 func RunUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer, c *UpdateClusterOptions) (*UpdateClusterResults, error) {
+	if c.Reconcile {
+		return nil, RunReconcileCluster(ctx, f, out, &c.CoreUpdateClusterOptions)
+	}
+
 	results := &UpdateClusterResults{}
 
 	isDryrun := false
 	targetName := c.Target
 
-	if c.admin != 0 && c.user != "" {
+	if c.Admin != 0 && c.User != "" {
 		return nil, fmt.Errorf("cannot use both --admin and --user")
 	}
-	if c.admin != 0 && !c.CreateKubecfg {
+	if c.Admin != 0 && !c.CreateKubecfg {
 		klog.Info("--admin implies --create-kube-config")
 		c.CreateKubecfg = true
 	}
-	if c.user != "" && !c.CreateKubecfg {
+	if c.User != "" && !c.CreateKubecfg {
 		klog.Info("--user implies --create-kube-config")
 		c.CreateKubecfg = true
 	}
-	if c.internal && !c.CreateKubecfg {
+	if c.Internal && !c.CreateKubecfg {
 		klog.Info("--internal implies --create-kube-config")
 		c.CreateKubecfg = true
 	}
@@ -387,19 +414,14 @@ func RunUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer, c *Up
 
 		klog.Infof("Exporting kubeconfig for cluster")
 
-		// TODO: Another flag?
-		useKopsAuthenticationPlugin := false
 		conf, err := kubeconfig.BuildKubecfg(
 			ctx,
 			cluster,
 			keyStore,
 			secretStore,
 			cloud,
-			c.admin,
-			c.user,
-			c.internal,
-			f.KopsStateStore(),
-			useKopsAuthenticationPlugin)
+			c.CreateKubecfgOptions,
+			f.KopsStateStore())
 		if err != nil {
 			return nil, err
 		}
@@ -409,7 +431,7 @@ func RunUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer, c *Up
 			return nil, err
 		}
 
-		if c.admin == 0 && c.user == "" {
+		if c.Admin == 0 && c.User == "" {
 			klog.Warningf("Exported kubeconfig with no user authentication; use --admin, --user or --auth-plugin flags with `kops export kubeconfig`")
 		}
 	}
@@ -522,7 +544,7 @@ func clusterIsInKubeConfig(contextName string) (bool, error) {
 	return false, nil
 }
 
-func completeUpdateClusterTarget(f commandutils.Factory, options *UpdateClusterOptions) func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+func completeUpdateClusterTarget(f commandutils.Factory, options *CoreUpdateClusterOptions) func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
 	return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
 		ctx := cmd.Context()
diff --git a/docs/cli/kops.md b/docs/cli/kops.md
index 5f0f828481316..63a9c814bc1af 100644
--- a/docs/cli/kops.md
+++ b/docs/cli/kops.md
@@ -33,6 +33,7 @@ kOps is Kubernetes Operations.
 * [kops export](kops_export.md) - Export configuration.
 * [kops get](kops_get.md) - Get one or many resources.
 * [kops promote](kops_promote.md) - Promote a resource.
+* [kops reconcile](kops_reconcile.md) - Reconcile a cluster.
 * [kops replace](kops_replace.md) - Replace cluster resources.
 * [kops rolling-update](kops_rolling-update.md) - Rolling update a cluster.
 * [kops toolbox](kops_toolbox.md) - Miscellaneous, experimental, or infrequently used commands.
diff --git a/docs/cli/kops_reconcile.md b/docs/cli/kops_reconcile.md
new file mode 100644
index 0000000000000..23ae209b9672a
--- /dev/null
+++ b/docs/cli/kops_reconcile.md
@@ -0,0 +1,27 @@
+
+
+## kops reconcile
+
+Reconcile a cluster.
+
+### Options
+
+```
+  -h, --help   help for reconcile
+```
+
+### Options inherited from parent commands
+
+```
+      --config string   yaml config file (default is $HOME/.kops.yaml)
+      --name string     Name of cluster. Overrides KOPS_CLUSTER_NAME environment variable
+      --state string    Location of state storage (kops 'config' file). Overrides KOPS_STATE_STORE environment variable
+  -v, --v Level         number for the log level verbosity
+```
+
+### SEE ALSO
+
+* [kops](kops.md) - kOps is Kubernetes Operations.
+* [kops reconcile cluster](kops_reconcile_cluster.md) - Reconcile a cluster.
+
diff --git a/docs/cli/kops_reconcile_cluster.md b/docs/cli/kops_reconcile_cluster.md
new file mode 100644
index 0000000000000..0cfda2156be01
--- /dev/null
+++ b/docs/cli/kops_reconcile_cluster.md
@@ -0,0 +1,43 @@
+
+
+## kops reconcile cluster
+
+Reconcile a cluster.
+
+### Synopsis
+
+Reconcile the cluster by updating and rolling the control plane and nodes sequentially.
+
+```
+kops reconcile cluster [CLUSTER] [flags]
+```
+
+### Examples
+
+```
+  # After the cluster has been edited or upgraded, update the cloud resources with:
+  kops reconcile cluster k8s-cluster.example.com --state=s3://my-state-store --yes
+```
+
+### Options
+
+```
+      --allow-kops-downgrade   Allow an older version of kOps to update the cluster than last used
+  -h, --help                   help for cluster
+  -y, --yes                    Create cloud resources, without --yes reconcile is in dry run mode
+```
+
+### Options inherited from parent commands
+
+```
+      --config string   yaml config file (default is $HOME/.kops.yaml)
+      --name string     Name of cluster. Overrides KOPS_CLUSTER_NAME environment variable
+      --state string    Location of state storage (kops 'config' file). Overrides KOPS_STATE_STORE environment variable
+  -v, --v Level         number for the log level verbosity
+```
+
+### SEE ALSO
+
+* [kops reconcile](kops_reconcile.md) - Reconcile a cluster.
+
diff --git a/docs/cli/kops_update_cluster.md b/docs/cli/kops_update_cluster.md
index 332fe909d050d..ab62d7862a93e 100644
--- a/docs/cli/kops_update_cluster.md
+++ b/docs/cli/kops_update_cluster.md
@@ -19,7 +19,7 @@ kops update cluster [CLUSTER] [flags]
 ```
   # After the cluster has been edited or upgraded, update the cloud resources with:
-  kops update cluster k8s-cluster.example.com --yes --state=s3://my-state-store --yes
+  kops update cluster k8s-cluster.example.com --state=s3://my-state-store --yes
 ```
 
 ### Options
@@ -37,6 +37,7 @@ kops update cluster [CLUSTER] [flags]
       --out string                  Path to write any local output
       --phase string                Subset of tasks to run: cluster, network, security
       --prune                       Delete old revisions of cloud resources that were needed during an upgrade
+      --reconcile                   Reconcile the cluster by rolling the control plane and nodes sequentially
       --ssh-public-key string       SSH public key to use (deprecated: use kops create secret instead)
       --target string               Target - direct, terraform (default "direct")
       --user string                 Existing user in kubeconfig file to use. Implies --create-kube-config
diff --git a/pkg/kubeconfig/create_kubecfg.go b/pkg/kubeconfig/create_kubecfg.go
index 862a021f38f9f..cd0b002f2c5a6 100644
--- a/pkg/kubeconfig/create_kubecfg.go
+++ b/pkg/kubeconfig/create_kubecfg.go
@@ -33,11 +33,21 @@ import (
 
 const DefaultKubecfgAdminLifetime = 18 * time.Hour
 
-func BuildKubecfg(ctx context.Context, cluster *kops.Cluster, keyStore fi.KeystoreReader, secretStore fi.SecretStore, cloud fi.Cloud, admin time.Duration, configUser string, internal bool, kopsStateStore string, useKopsAuthenticationPlugin bool) (*KubeconfigBuilder, error) {
+type CreateKubecfgOptions struct {
+	CreateKubecfg bool
+	Admin         time.Duration
+	User          string
+	Internal      bool
+
+	// UseKopsAuthenticationPlugin controls whether we should use the kOps auth helper instead of a static credential
+	UseKopsAuthenticationPlugin bool
+}
+
+func BuildKubecfg(ctx context.Context, cluster *kops.Cluster, keyStore fi.KeystoreReader, secretStore fi.SecretStore, cloud fi.Cloud, options CreateKubecfgOptions, kopsStateStore string) (*KubeconfigBuilder, error) {
 	clusterName := cluster.ObjectMeta.Name
 
 	var server string
-	if internal {
+	if options.Internal {
 		server = "https://" + cluster.APIInternalName()
 	} else {
 		if cluster.Spec.API.PublicName != "" {
@@ -48,7 +58,7 @@ func BuildKubecfg(ctx context.Context, cluster *kops.Cluster, keyStore fi.Keysto
 
 		// If a load balancer exists we use it, except for when an SSL certificate is set.
 		// This should avoid a lot of pain with DNS pre-creation.
-		if cluster.Spec.API.LoadBalancer != nil && (cluster.Spec.API.LoadBalancer.SSLCertificate == "" || admin != 0) {
+		if cluster.Spec.API.LoadBalancer != nil && (cluster.Spec.API.LoadBalancer.SSLCertificate == "" || options.Admin != 0) {
 			ingresses, err := cloud.GetApiIngressStatus(cluster)
 			if err != nil {
 				return nil, fmt.Errorf("error getting ingress status: %v", err)
@@ -90,7 +100,7 @@ func BuildKubecfg(ctx context.Context, cluster *kops.Cluster, keyStore fi.Keysto
 	b := NewKubeconfigBuilder()
 
 	// Use the secondary load balancer port if a certificate is on the primary listener
-	if admin != 0 && cluster.Spec.API.LoadBalancer != nil && cluster.Spec.API.LoadBalancer.SSLCertificate != "" && cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassNetwork {
+	if options.Admin != 0 && cluster.Spec.API.LoadBalancer != nil && cluster.Spec.API.LoadBalancer.SSLCertificate != "" && cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassNetwork {
 		server = server + ":8443"
 	}
 
@@ -100,7 +110,7 @@ func BuildKubecfg(ctx context.Context, cluster *kops.Cluster, keyStore fi.Keysto
 
 	// add the CA Cert to the kubeconfig only if we didn't specify a certificate for the LB
 	//  or if we're using admin credentials and the secondary port
-	if cluster.Spec.API.LoadBalancer == nil || cluster.Spec.API.LoadBalancer.SSLCertificate == "" || cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassNetwork || internal {
+	if cluster.Spec.API.LoadBalancer == nil || cluster.Spec.API.LoadBalancer.SSLCertificate == "" || cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassNetwork || options.Internal {
 		keySet, err := keyStore.FindKeyset(ctx, fi.CertificateIDCA)
 		if err != nil {
 			return nil, fmt.Errorf("error fetching CA keypair: %v", err)
@@ -115,7 +125,7 @@ func BuildKubecfg(ctx context.Context, cluster *kops.Cluster, keyStore fi.Keysto
 		}
 	}
 
-	if admin != 0 {
+	if options.Admin != 0 {
 		cn := "kubecfg"
 		user, err := user.Current()
 		if err != nil || user == nil {
@@ -131,7 +141,7 @@ func BuildKubecfg(ctx context.Context, cluster *kops.Cluster, keyStore fi.Keysto
 				CommonName:   cn,
 				Organization: []string{rbac.SystemPrivilegedGroup},
 			},
-			Validity: admin,
+			Validity: options.Admin,
 		}
 		cert, privateKey, _, err := pki.IssueCert(ctx, &req, fi.NewPKIKeystoreAdapter(keyStore))
 		if err != nil {
@@ -147,7 +157,7 @@ func BuildKubecfg(ctx context.Context, cluster *kops.Cluster, keyStore fi.Keysto
 		}
 	}
 
-	if useKopsAuthenticationPlugin {
+	if options.UseKopsAuthenticationPlugin {
 		b.AuthenticationExec = []string{
 			"kops",
 			"helpers",
@@ -163,10 +173,10 @@ func BuildKubecfg(ctx context.Context, cluster *kops.Cluster, keyStore fi.Keysto
 
 	b.Server = server
 
-	if configUser == "" {
+	if options.User == "" {
 		b.User = cluster.ObjectMeta.Name
 	} else {
-		b.User = configUser
+		b.User = options.User
 	}
 
 	return b, nil
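With the positional parameters folded into CreateKubecfgOptions, call sites construct one options value and pass it through. A sketch of the new call shape under the diff's types; the helper itself is hypothetical, and the imports from the kops tree (k8s.io/kops/pkg/apis/kops, k8s.io/kops/pkg/kubeconfig, k8s.io/kops/upup/pkg/fi) are omitted:

```go
// exportAdminKubeconfig is a hypothetical wrapper showing the refactored
// BuildKubecfg signature; ctx, cluster, the stores, and cloud come from the
// caller, as they do in RunUpdateCluster and RunExportKubeconfig.
func exportAdminKubeconfig(ctx context.Context, cluster *kops.Cluster, keyStore fi.KeystoreReader, secretStore fi.SecretStore, cloud fi.Cloud, kopsStateStore string) (*kubeconfig.KubeconfigBuilder, error) {
	options := kubeconfig.CreateKubecfgOptions{
		Admin: kubeconfig.DefaultKubecfgAdminLifetime, // short-lived admin credential
		// User, Internal, and UseKopsAuthenticationPlugin keep their zero values.
	}
	return kubeconfig.BuildKubecfg(ctx, cluster, keyStore, secretStore, cloud, options, kopsStateStore)
}
```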
diff --git a/pkg/kubeconfig/create_kubecfg_test.go b/pkg/kubeconfig/create_kubecfg_test.go
index 1e87931982fea..e063d9213e559 100644
--- a/pkg/kubeconfig/create_kubecfg_test.go
+++ b/pkg/kubeconfig/create_kubecfg_test.go
@@ -19,7 +19,6 @@ package kubeconfig
 import (
 	"context"
 	"testing"
-	"time"
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/kops/dnsprovider/pkg/dnsprovider"
@@ -149,13 +148,10 @@ func TestBuildKubecfg(t *testing.T) {
 	}()
 
 	type args struct {
-		cluster                     *kops.Cluster
-		secretStore                 fi.SecretStore
-		status                      fakeStatusCloud
-		admin                       time.Duration
-		user                        string
-		internal                    bool
-		useKopsAuthenticationPlugin bool
+		CreateKubecfgOptions
+		cluster     *kops.Cluster
+		secretStore fi.SecretStore
+		status      fakeStatusCloud
 	}
 	publicCluster := buildMinimalCluster("testcluster", "testcluster.test.com", false, false)
@@ -187,8 +183,10 @@ func TestBuildKubecfg(t *testing.T) {
 			args: args{
 				cluster: publicCluster,
 				status:  fakeStatus,
-				admin:   DefaultKubecfgAdminLifetime,
-				user:    "",
+				CreateKubecfgOptions: CreateKubecfgOptions{
+					Admin: DefaultKubecfgAdminLifetime,
+					User:  "",
+				},
 			},
 			want: &KubeconfigBuilder{
 				Context: "testcluster",
@@ -204,7 +202,9 @@ func TestBuildKubecfg(t *testing.T) {
 			args: args{
 				cluster: certNLBCluster,
 				status:  fakeStatus,
-				admin:   DefaultKubecfgAdminLifetime,
+				CreateKubecfgOptions: CreateKubecfgOptions{
+					Admin: DefaultKubecfgAdminLifetime,
+				},
 			},
 			want: &KubeconfigBuilder{
 				Context: "testcluster",
@@ -220,7 +220,9 @@ func TestBuildKubecfg(t *testing.T) {
 			args: args{
 				cluster: certCluster,
 				status:  fakeStatus,
-				admin:   DefaultKubecfgAdminLifetime,
+				CreateKubecfgOptions: CreateKubecfgOptions{
+					Admin: DefaultKubecfgAdminLifetime,
+				},
 			},
 			want: &KubeconfigBuilder{
 				Context: "testcluster",
@@ -236,7 +238,9 @@ func TestBuildKubecfg(t *testing.T) {
 			args: args{
 				cluster: certNLBCluster,
 				status:  fakeStatus,
-				admin:   0,
+				CreateKubecfgOptions: CreateKubecfgOptions{
+					Admin: 0,
+				},
 			},
 			want: &KubeconfigBuilder{
 				Context: "testcluster",
@@ -252,8 +256,10 @@ func TestBuildKubecfg(t *testing.T) {
 			args: args{
 				cluster: publicCluster,
 				status:  fakeStatus,
-				admin:   0,
-				user:    "myuser",
+				CreateKubecfgOptions: CreateKubecfgOptions{
+					Admin: 0,
+					User:  "myuser",
+				},
 			},
 			want: &KubeconfigBuilder{
 				Context: "testcluster",
@@ -269,8 +275,10 @@ func TestBuildKubecfg(t *testing.T) {
 			args: args{
 				cluster: emptyMasterPublicNameCluster,
 				status:  fakeStatus,
-				admin:   0,
-				user:    "",
+				CreateKubecfgOptions: CreateKubecfgOptions{
+					Admin: 0,
+					User:  "",
+				},
 			},
 			want: &KubeconfigBuilder{
 				Context: "emptyMasterPublicNameCluster",
@@ -299,10 +307,12 @@ func TestBuildKubecfg(t *testing.T) {
 		{
 			name: "Public DNS with kops auth plugin",
 			args: args{
-				cluster:                     publicCluster,
-				status:                      fakeStatus,
-				admin:                       0,
-				useKopsAuthenticationPlugin: true,
+				cluster: publicCluster,
+				status:  fakeStatus,
+				CreateKubecfgOptions: CreateKubecfgOptions{
+					Admin:                       0,
+					UseKopsAuthenticationPlugin: true,
+				},
 			},
 			want: &KubeconfigBuilder{
 				Context: "testcluster",
@@ -323,10 +333,12 @@ func TestBuildKubecfg(t *testing.T) {
 		{
 			name: "Test Kube Config Data For internal DNS name with admin",
 			args: args{
-				cluster:  publicCluster,
-				status:   fakeStatus,
-				admin:    DefaultKubecfgAdminLifetime,
-				internal: true,
+				cluster: publicCluster,
+				status:  fakeStatus,
+				CreateKubecfgOptions: CreateKubecfgOptions{
+					Admin:    DefaultKubecfgAdminLifetime,
+					Internal: true,
+				},
 			},
 			want: &KubeconfigBuilder{
 				Context: "testcluster",
@@ -342,7 +354,9 @@ func TestBuildKubecfg(t *testing.T) {
 			args: args{
 				cluster: certGossipNLBCluster,
 				status:  fakeStatus,
-				admin:   DefaultKubecfgAdminLifetime,
+				CreateKubecfgOptions: CreateKubecfgOptions{
+					Admin: DefaultKubecfgAdminLifetime,
+				},
 			},
 			want: &KubeconfigBuilder{
 				Context: "testgossipcluster.k8s.local",
@@ -356,10 +370,12 @@ func TestBuildKubecfg(t *testing.T) {
 		{
 			name: "Test Kube Config Data for Public cluster with admin and internal option",
 			args: args{
-				cluster:  publicCluster,
-				status:   fakeStatus,
-				admin:    DefaultKubecfgAdminLifetime,
-				internal: true,
+				cluster: publicCluster,
+				status:  fakeStatus,
+				CreateKubecfgOptions: CreateKubecfgOptions{
+					Admin:    DefaultKubecfgAdminLifetime,
+					Internal: true,
+				},
 			},
 			want: &KubeconfigBuilder{
 				Context: "testcluster",
@@ -373,10 +389,12 @@ func TestBuildKubecfg(t *testing.T) {
 		{
 			name: "Test Kube Config Data for Public cluster without admin and with internal option",
 			args: args{
-				cluster:  publicCluster,
-				status:   fakeStatus,
-				admin:    0,
-				internal: true,
+				cluster: publicCluster,
+				status:  fakeStatus,
+				CreateKubecfgOptions: CreateKubecfgOptions{
+					Admin:    0,
+					Internal: true,
+				},
 			},
 			want: &KubeconfigBuilder{
 				Context: "testcluster",
@@ -401,7 +419,7 @@ func TestBuildKubecfg(t *testing.T) {
 			},
 		}
 
-		got, err := BuildKubecfg(ctx, tt.args.cluster, keyStore, tt.args.secretStore, tt.args.status, tt.args.admin, tt.args.user, tt.args.internal, kopsStateStore, tt.args.useKopsAuthenticationPlugin)
+		got, err := BuildKubecfg(ctx, tt.args.cluster, keyStore, tt.args.secretStore, tt.args.status, tt.args.CreateKubecfgOptions, kopsStateStore)
 		if (err != nil) != tt.wantErr {
 			t.Errorf("BuildKubecfg() error = %v, wantErr %v", err, tt.wantErr)
 			return