Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

cli: refactor to separate K8s and image upgrade #2331

Closed
wants to merge 2 commits (branch names not captured)
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 17 additions & 7 deletions cli/internal/cmd/upgradeapply.go
Original file line number Diff line number Diff line change
Expand Up @@ -233,20 +233,29 @@ func (u *upgradeApplyCmd) upgradeApply(cmd *cobra.Command, upgradeDir string, fl
return fmt.Errorf("upgrading services: %w", err)
}
}

skipImageUpgrade := flags.skipPhases.contains(skipImagePhase)
skipK8sUpgrade := flags.skipPhases.contains(skipK8sPhase)
if !(skipImageUpgrade && skipK8sUpgrade) {
err = u.kubeUpgrader.UpgradeNodeVersion(cmd.Context(), conf, flags.force, skipImageUpgrade, skipK8sUpgrade)
// image upgrades need to be done before k8s upgrade because image upgrades are not allowed during another upgrade
if !flags.skipPhases.contains(skipImagePhase) {
err = u.kubeUpgrader.UpgradeImageVersion(cmd.Context(), conf, flags.force)
switch {
case errors.Is(err, kubecmd.ErrInProgress):
cmd.PrintErrln("Skipping image and Kubernetes upgrades. Another upgrade is in progress.")
cmd.PrintErrln("Skipping image upgrade. Another upgrade is in progress.")
case errors.As(err, &upgradeErr):
cmd.PrintErrln(err)
case err != nil:
return fmt.Errorf("upgrading NodeVersion: %w", err)
}
}
if !flags.skipPhases.contains(skipK8sPhase) {
err = u.kubeUpgrader.UpgradeK8sVersion(cmd.Context(), validK8sVersion, flags.force)
switch {
case errors.As(err, &upgradeErr):
cmd.PrintErrln(err)
case err == nil:
cmd.Println("Successfully upgraded Kubernetes.")
case err != nil:
return fmt.Errorf("upgrading Kubernetes: %w", err)
}
}
return nil
}

Expand Down Expand Up @@ -621,7 +630,8 @@ func (s skipPhases) contains(phase skipPhase) bool {
}

type kubernetesUpgrader interface {
UpgradeNodeVersion(ctx context.Context, conf *config.Config, force, skipImage, skipK8s bool) error
UpgradeImageVersion(ctx context.Context, conf *config.Config, force bool) error
UpgradeK8sVersion(ctx context.Context, k8sVersion versions.ValidK8sVersion, force bool) error
ExtendClusterConfigCertSANs(ctx context.Context, alternativeNames []string) error
GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error)
ApplyJoinConfig(ctx context.Context, newAttestConfig config.AttestationCfg, measurementSalt []byte) error
Expand Down
7 changes: 6 additions & 1 deletion cli/internal/cmd/upgradeapply_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,7 @@ func TestUpgradeApplyFlagsForSkipPhases(t *testing.T) {

type stubKubernetesUpgrader struct {
nodeVersionErr error
k8sErr error
currentConfig config.AttestationCfg
calledNodeUpgrade bool
}
Expand All @@ -192,11 +193,15 @@ func (u *stubKubernetesUpgrader) BackupCRs(_ context.Context, _ []apiextensionsv
return nil
}

func (u *stubKubernetesUpgrader) UpgradeNodeVersion(_ context.Context, _ *config.Config, _, _, _ bool) error {
// UpgradeImageVersion is the test stub for the image-upgrade path. It records
// the call and returns the preconfigured error.
// NOTE(review): the recorded flag and error field are still named
// calledNodeUpgrade/nodeVersionErr from before the UpgradeNodeVersion split —
// presumably kept so existing test assertions keep compiling; consider renaming.
func (u *stubKubernetesUpgrader) UpgradeImageVersion(_ context.Context, _ *config.Config, _ bool) error {
u.calledNodeUpgrade = true
return u.nodeVersionErr
}

// UpgradeK8sVersion is the test stub for the Kubernetes-upgrade path. It
// returns the preconfigured k8sErr and records nothing.
// NOTE(review): unlike UpgradeImageVersion, this stub does not set a
// "called" flag — confirm no test needs to assert that this path ran.
func (u *stubKubernetesUpgrader) UpgradeK8sVersion(_ context.Context, _ versions.ValidK8sVersion, _ bool) error {
return u.k8sErr
}

// ApplyJoinConfig is a no-op stub that always reports success.
func (u *stubKubernetesUpgrader) ApplyJoinConfig(_ context.Context, _ config.AttestationCfg, _ []byte) error {
return nil
}
Expand Down
100 changes: 47 additions & 53 deletions cli/internal/kubecmd/kubecmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,10 +97,42 @@ func New(outWriter io.Writer, kubeConfigPath string, fileHandler file.Handler, l
}, nil
}

// UpgradeNodeVersion upgrades the cluster's NodeVersion object and in turn triggers image & k8s version upgrades.
// UpgradeK8sVersion upgrades the cluster's Kubernetes version.
//
// The requested version is re-validated against the currently supported
// versions: users are allowed to configure outdated (i.e. now invalid) k8s
// patch versions, and in that case an InvalidUpgradeError is returned instead
// of attempting the upgrade. On success the k8s components ConfigMap and the
// NodeVersion object are updated in the cluster; force is forwarded to
// updateK8s to bypass compatibility checks.
func (k *KubeCmd) UpgradeK8sVersion(ctx context.Context, k8sVersion versions.ValidK8sVersion, force bool) error {
	nodeVersion, err := k.getConstellationVersion(ctx)
	if err != nil {
		return err
	}
	// We have to allow users to specify outdated k8s patch versions.
	// Therefore, this code has to skip k8s updates if a user configures an outdated (i.e. invalid) k8s version.
	if _, err := versions.NewValidK8sVersion(string(k8sVersion), true); err != nil {
		innerErr := fmt.Errorf("unsupported Kubernetes version, supported versions are %s", strings.Join(versions.SupportedK8sVersions(), ", "))
		return compatibility.NewInvalidUpgradeError(nodeVersion.Spec.KubernetesClusterVersion, string(k8sVersion), innerErr)
	}
	versionConfig, ok := versions.VersionConfigs[k8sVersion]
	if !ok {
		return compatibility.NewInvalidUpgradeError(nodeVersion.Spec.KubernetesClusterVersion,
			string(k8sVersion), fmt.Errorf("no version config matching K8s %s", k8sVersion))
	}
	// updateK8s mutates nodeVersion in place and returns the components
	// ConfigMap that must exist in the cluster before the new version applies.
	components, err := k.updateK8s(&nodeVersion, versionConfig.ClusterVersion, versionConfig.KubernetesComponents, force)
	if err != nil {
		return fmt.Errorf("updating K8s: %w", err)
	}
	if err := k.applyComponentsCM(ctx, components); err != nil {
		return fmt.Errorf("applying k8s components ConfigMap: %w", err)
	}
	updatedNodeVersion, err := k.applyNodeVersion(ctx, nodeVersion)
	if err != nil {
		return fmt.Errorf("applying upgrade: %w", err)
	}
	return checkForApplyError(nodeVersion, updatedNodeVersion)
}

// UpgradeImageVersion upgrades the cluster's image.
// The versions set in the config are validated against the versions running in the cluster.
// TODO(elchead): AB#3434 Split K8s and image upgrade of UpgradeNodeVersion.
func (k *KubeCmd) UpgradeNodeVersion(ctx context.Context, conf *config.Config, force, skipImage, skipK8s bool) error {
func (k *KubeCmd) UpgradeImageVersion(ctx context.Context, conf *config.Config, force bool) error {
provider := conf.GetProvider()
attestationVariant := conf.GetAttestationConfig().GetVariant()
region := conf.GetRegion()
Expand All @@ -119,60 +151,22 @@ func (k *KubeCmd) UpgradeNodeVersion(ctx context.Context, conf *config.Config, f
return err
}

upgradeErrs := []error{}
var upgradeErr *compatibility.InvalidUpgradeError
if !skipImage {
err = k.isValidImageUpgrade(nodeVersion, imageVersion.Version(), force)
switch {
case errors.As(err, &upgradeErr):
upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping image upgrades: %w", err))
case err != nil:
return fmt.Errorf("updating image version: %w", err)
}
k.log.Debugf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, imageVersion.Version())
nodeVersion.Spec.ImageReference = imageReference
nodeVersion.Spec.ImageVersion = imageVersion.Version()
}

if !skipK8s {
// We have to allow users to specify outdated k8s patch versions.
// Therefore, this code has to skip k8s updates if a user configures an outdated (i.e. invalid) k8s version.
var components *corev1.ConfigMap
currentK8sVersion, err := versions.NewValidK8sVersion(conf.KubernetesVersion, true)
if err != nil {
innerErr := fmt.Errorf("unsupported Kubernetes version, supported versions are %s", strings.Join(versions.SupportedK8sVersions(), ", "))
err = compatibility.NewInvalidUpgradeError(nodeVersion.Spec.KubernetesClusterVersion, conf.KubernetesVersion, innerErr)
} else {
versionConfig := versions.VersionConfigs[currentK8sVersion]
components, err = k.updateK8s(&nodeVersion, versionConfig.ClusterVersion, versionConfig.KubernetesComponents, force)
}

switch {
case err == nil:
err := k.applyComponentsCM(ctx, components)
if err != nil {
return fmt.Errorf("applying k8s components ConfigMap: %w", err)
}
case errors.As(err, &upgradeErr):
upgradeErrs = append(upgradeErrs, fmt.Errorf("skipping Kubernetes upgrades: %w", err))
default:
return fmt.Errorf("updating Kubernetes version: %w", err)
}
}

if len(upgradeErrs) == 2 {
return errors.Join(upgradeErrs...)
}

err = k.isValidImageUpgrade(nodeVersion, imageVersion.Version(), force)
switch {
case errors.As(err, &upgradeErr):
return fmt.Errorf("skipping image upgrades: %w", err)
case err != nil:
return fmt.Errorf("updating image version: %w", err)
}
k.log.Debugf("Updating local copy of nodeVersion image version from %s to %s\n", nodeVersion.Spec.ImageVersion, imageVersion.Version())
nodeVersion.Spec.ImageReference = imageReference
nodeVersion.Spec.ImageVersion = imageVersion.Version()
updatedNodeVersion, err := k.applyNodeVersion(ctx, nodeVersion)
if err != nil {
return fmt.Errorf("applying upgrade: %w", err)
}

if err := checkForApplyError(nodeVersion, updatedNodeVersion); err != nil {
return err
}
return errors.Join(upgradeErrs...)
return checkForApplyError(nodeVersion, updatedNodeVersion)
}

// ClusterStatus returns a map from node name to NodeStatus.
Expand Down Expand Up @@ -432,7 +426,7 @@ func (k *KubeCmd) updateK8s(nodeVersion *updatev1alpha1.NodeVersion, newClusterV
}
}

k.log.Debugf("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)
k.log.Debugf("Updating local copy of nodeVersion Kubernetes version from %s to %s\n", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion)
nodeVersion.Spec.KubernetesComponentsReference = configMap.ObjectMeta.Name
nodeVersion.Spec.KubernetesClusterVersion = newClusterVersion

Expand Down
Loading