From 18441d7df82049c8ab5d3d6274a59d637acd8514 Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Wed, 20 Nov 2024 13:09:03 +0100 Subject: [PATCH] Add v1beta2 conditions to CABPK --- .../api/v1beta1/v1beta2_condition_consts.go | 38 ++++- .../controllers/kubeadmconfig_controller.go | 149 +++++++++++++++++- 2 files changed, 181 insertions(+), 6 deletions(-) diff --git a/bootstrap/kubeadm/api/v1beta1/v1beta2_condition_consts.go b/bootstrap/kubeadm/api/v1beta1/v1beta2_condition_consts.go index 0fa17a6f25e9..b24ca777809e 100644 --- a/bootstrap/kubeadm/api/v1beta1/v1beta2_condition_consts.go +++ b/bootstrap/kubeadm/api/v1beta1/v1beta2_condition_consts.go @@ -18,16 +18,44 @@ package v1beta1 import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -// Conditions that will be used for the KubeadmConfig object in v1Beta2 API version. +// KubeadmConfig's Ready condition and corresponding reasons that will be used in v1Beta2 API version. const ( // KubeadmConfigReadyV1Beta2Condition is true if the KubeadmConfig is not deleted, // and both DataSecretCreated, CertificatesAvailable conditions are true. KubeadmConfigReadyV1Beta2Condition = clusterv1.ReadyV1Beta2Condition - // CertificatesAvailableV1Beta2Condition documents that cluster certificates required + // KubeadmConfigReadyV1Beta2Reason surfaces when the KubeadmConfig is ready. + KubeadmConfigReadyV1Beta2Reason = clusterv1.ReadyV1Beta2Reason + + // KubeadmConfigNotReadyV1Beta2Reason surfaces when the KubeadmConfig is not ready. + KubeadmConfigNotReadyV1Beta2Reason = clusterv1.NotReadyV1Beta2Reason + + // KubeadmConfigReadyUnknownV1Beta2Reason surfaces when KubeadmConfig readiness is unknown. + KubeadmConfigReadyUnknownV1Beta2Reason = clusterv1.ReadyUnknownV1Beta2Reason +) + +// KubeadmConfig's CertificatesAvailable condition and corresponding reasons that will be used in v1Beta2 API version. 
+const ( + // KubeadmConfigCertificatesAvailableV1Beta2Condition documents that cluster certificates required // for generating the bootstrap data secret are available. - CertificatesAvailableV1Beta2Condition = "CertificatesAvailable" + KubeadmConfigCertificatesAvailableV1Beta2Condition = "CertificatesAvailable" + + // KubeadmConfigCertificatesAvailableV1Beta2Reason surfaces when certificates required for machine bootstrap are available. + KubeadmConfigCertificatesAvailableV1Beta2Reason = clusterv1.AvailableV1Beta2Reason + + // KubeadmConfigCertificatesAvailableInternalErrorV1Beta2Reason surfaces unexpected failures when reading or + // generating certificates required for machine bootstrap. + KubeadmConfigCertificatesAvailableInternalErrorV1Beta2Reason = clusterv1.InternalErrorV1Beta2Reason +) + +// KubeadmConfig's DataSecretAvailable condition and corresponding reasons that will be used in v1Beta2 API version. +const ( + // KubeadmConfigDataSecretAvailableV1Beta2Condition is true if the bootstrap secret is available. + KubeadmConfigDataSecretAvailableV1Beta2Condition = "DataSecretAvailable" + + // KubeadmConfigDataSecretAvailableV1Beta2Reason surfaces when the bootstrap secret is available. + KubeadmConfigDataSecretAvailableV1Beta2Reason = clusterv1.AvailableV1Beta2Reason - // DataSecretAvailableV1Beta2Condition is true if the bootstrap secret is available. - DataSecretAvailableV1Beta2Condition = "DataSecretAvailable" + // KubeadmConfigDataSecretNotAvailableV1Beta2Reason surfaces when the bootstrap secret is not available. 
+ KubeadmConfigDataSecretNotAvailableV1Beta2Reason = clusterv1.NotAvailableV1Beta2Reason ) diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index b9a6ea250e7b..f44295c25160 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -56,6 +56,7 @@ import ( "sigs.k8s.io/cluster-api/internal/util/taints" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" + v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" clog "sigs.k8s.io/cluster-api/util/log" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/paused" @@ -229,8 +230,39 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques bootstrapv1.CertificatesAvailableCondition, ), ) + if err := v1beta2conditions.SetSummaryCondition(config, config, bootstrapv1.KubeadmConfigReadyV1Beta2Condition, + v1beta2conditions.ForConditionTypes{ + bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, + bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, + }, + // Using a custom merge strategy to override reasons applied during merge and to ignore some + // info message so the ready condition aggregation in other resources is less noisy. + v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + // Use custom reasons. 
+ v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + bootstrapv1.KubeadmConfigNotReadyV1Beta2Reason, + bootstrapv1.KubeadmConfigReadyUnknownV1Beta2Reason, + bootstrapv1.KubeadmConfigReadyV1Beta2Reason, + )), + ), + }, + ); err != nil { + rerr = kerrors.NewAggregate([]error{rerr, err}) + } // Patch ObservedGeneration only if the reconciliation completed successfully - patchOpts := []patch.Option{} + patchOpts := []patch.Option{ + patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + clusterv1.ReadyCondition, + bootstrapv1.DataSecretAvailableCondition, + bootstrapv1.CertificatesAvailableCondition, + }}, + patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + bootstrapv1.KubeadmConfigReadyV1Beta2Condition, + bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, + bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, + }}, + } if rerr == nil { patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) } @@ -264,6 +296,12 @@ func (r *KubeadmConfigReconciler) reconcile(ctx context.Context, scope *Scope, c case !cluster.Status.InfrastructureReady: log.Info("Cluster infrastructure is not ready, waiting") conditions.MarkFalse(config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, + Message: "Waiting for Cluster status.infrastructureReady to be true", + }) return ctrl.Result{}, nil // Reconcile status for machines that already have a secret reference, but our status isn't up to date. // This case solves the pivoting scenario (or a backup restore) which doesn't preserve the status subresource on objects. 
@@ -271,6 +309,11 @@ func (r *KubeadmConfigReconciler) reconcile(ctx context.Context, scope *Scope, c config.Status.Ready = true config.Status.DataSecretName = configOwner.DataSecretName() conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Reason, + }) return ctrl.Result{}, nil // Status is ready means a config has been generated. case config.Status.Ready: @@ -402,6 +445,12 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex // using the DataSecretGeneratedFailedReason if conditions.GetReason(scope.Config, bootstrapv1.DataSecretAvailableCondition) != bootstrapv1.DataSecretGenerationFailedReason { conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, + Message: "Waiting for Cluster control plane to be initialized", + }) } // if it's NOT a control plane machine, requeue @@ -505,10 +554,21 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex } if err != nil { conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, + Status: metav1.ConditionUnknown, + Reason: bootstrapv1.KubeadmConfigCertificatesAvailableInternalErrorV1Beta2Reason, + Message: "Please check controller logs for errors", + }) return 
ctrl.Result{}, err } conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Reason, + }) verbosityFlag := "" if scope.Config.Spec.Verbosity != nil { @@ -518,12 +578,24 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex files, err := r.resolveFiles(ctx, scope.Config) if err != nil { conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, + Message: "Failed to read content from secrets for spec.files", + }) return ctrl.Result{}, err } users, err := r.resolveUsers(ctx, scope.Config) if err != nil { conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, + Message: "Failed to read password from secrets for spec.users", + }) return ctrl.Result{}, err } @@ -580,13 +652,30 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) ) if err != nil { conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: 
bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, + Status: metav1.ConditionUnknown, + Reason: bootstrapv1.KubeadmConfigCertificatesAvailableInternalErrorV1Beta2Reason, + Message: "Please check controller logs for errors", + }) return ctrl.Result{}, err } if err := certificates.EnsureAllExist(); err != nil { conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, + Status: metav1.ConditionUnknown, + Reason: bootstrapv1.KubeadmConfigCertificatesAvailableInternalErrorV1Beta2Reason, + Message: "Please check controller logs for errors", + }) return ctrl.Result{}, err } conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Reason, + }) // Ensure that joinConfiguration.Discovery is properly set for joining node on the current cluster. 
if res, err := r.reconcileDiscovery(ctx, scope.Cluster, scope.Config, certificates); err != nil { @@ -630,12 +719,24 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) files, err := r.resolveFiles(ctx, scope.Config) if err != nil { conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, + Message: "Failed to read content from secrets for spec.files", + }) return ctrl.Result{}, err } users, err := r.resolveUsers(ctx, scope.Config) if err != nil { conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, + Message: "Failed to read password from secrets for spec.users", + }) return ctrl.Result{}, err } @@ -643,6 +744,12 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) kubeconfig, err := r.resolveDiscoveryKubeConfig(discoveryFile) if err != nil { conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, + Message: "Failed to create kubeconfig for spec.joinConfiguration.discovery.file", + 
}) return ctrl.Result{}, err } files = append(files, *kubeconfig) @@ -709,14 +816,31 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S ) if err != nil { conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, + Status: metav1.ConditionUnknown, + Reason: bootstrapv1.KubeadmConfigCertificatesAvailableInternalErrorV1Beta2Reason, + Message: "Please check controller logs for errors", + }) return ctrl.Result{}, err } if err := certificates.EnsureAllExist(); err != nil { conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, + Status: metav1.ConditionUnknown, + Reason: bootstrapv1.KubeadmConfigCertificatesAvailableInternalErrorV1Beta2Reason, + Message: "Please check controller logs for errors", + }) return ctrl.Result{}, err } conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: bootstrapv1.KubeadmConfigCertificatesAvailableV1Beta2Reason, + }) // Ensure that joinConfiguration.Discovery is properly set for joining node on the current cluster. 
if res, err := r.reconcileDiscovery(ctx, scope.Cluster, scope.Config, certificates); err != nil { @@ -747,12 +871,24 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S files, err := r.resolveFiles(ctx, scope.Config) if err != nil { conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, + Message: "Failed to read content from secrets for spec.files", + }) return ctrl.Result{}, err } users, err := r.resolveUsers(ctx, scope.Config) if err != nil { conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, + Message: "Failed to read password from secrets for spec.users", + }) return ctrl.Result{}, err } @@ -760,6 +896,12 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S kubeconfig, err := r.resolveDiscoveryKubeConfig(discoveryFile) if err != nil { conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: bootstrapv1.KubeadmConfigDataSecretNotAvailableV1Beta2Reason, + Message: "Failed to create kubeconfig for spec.joinConfiguration.discovery.file", 
+ }) return ctrl.Result{}, err } files = append(files, *kubeconfig) @@ -1235,6 +1377,11 @@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope scope.Config.Status.DataSecretName = ptr.To(secret.Name) scope.Config.Status.Ready = true conditions.MarkTrue(scope.Config, bootstrapv1.DataSecretAvailableCondition) + v1beta2conditions.Set(scope.Config, metav1.Condition{ + Type: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: bootstrapv1.KubeadmConfigDataSecretAvailableV1Beta2Reason, + }) return nil }