From 314f72d46d3bd22bc1a18ddd3716d4511c452a04 Mon Sep 17 00:00:00 2001 From: Kyle Wuolle Date: Thu, 24 Oct 2024 11:37:08 -0700 Subject: [PATCH 1/5] Implemented the unmanaged cluster CRD for adopting existing k8s clusters --- Makefile | 16 +- PROJECT | 18 + api/v1alpha1/unmanagedcluster_types.go | 95 ++++ api/v1alpha1/unmanagedmachine_types.go | 69 +++ api/v1alpha1/zz_generated.deepcopy.go | 199 +++++++ cmd/main.go | 20 + go.mod | 1 + go.sum | 2 + internal/controller/suite_test.go | 3 + .../controller/unmanagedcluster_controller.go | 505 ++++++++++++++++++ .../unmanagedcluster_controller_test.go | 162 ++++++ .../controller/unmanagedmachine_controller.go | 117 ++++ .../unmanagedmachine_controller_test.go | 155 ++++++ internal/webhook/unmanagedcluster_webhook.go | 105 ++++ .../webhook/unmanagedcluster_webhook_test.go | 103 ++++ templates/provider/hmc/templates/_helpers.tpl | 1 + .../hmc.mirantis.com_unmanagedclusters.yaml | 168 ++++++ .../hmc.mirantis.com_unmanagedmachines.yaml | 127 +++++ .../rbac/controller/rolebindings.yaml | 17 + .../hmc/templates/rbac/controller/roles.yaml | 37 ++ .../user-facing/unmanagedcluster-editor.yaml | 13 + .../user-facing/unmanagedcluster-viewer.yaml | 13 + .../provider/hmc/templates/webhooks.yaml | 22 + .../unmanagedcluster/unmanagedcluster.go | 41 ++ 24 files changed, 2007 insertions(+), 2 deletions(-) create mode 100644 api/v1alpha1/unmanagedcluster_types.go create mode 100644 api/v1alpha1/unmanagedmachine_types.go create mode 100644 internal/controller/unmanagedcluster_controller.go create mode 100644 internal/controller/unmanagedcluster_controller_test.go create mode 100644 internal/controller/unmanagedmachine_controller.go create mode 100644 internal/controller/unmanagedmachine_controller_test.go create mode 100644 internal/webhook/unmanagedcluster_webhook.go create mode 100644 internal/webhook/unmanagedcluster_webhook_test.go create mode 100644 templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml create mode 100644 templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedmachines.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/unmanagedcluster-editor.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/unmanagedcluster-viewer.yaml create mode 100644 test/objects/unmanagedcluster/unmanagedcluster.go diff --git a/Makefile b/Makefile index ea0f40c1b..a51d417e5 100644 --- a/Makefile +++ b/Makefile @@ -400,8 +400,12 @@ FLUX_SOURCE_REPO_NAME ?= source-helmrepositories FLUX_SOURCE_REPO_CRD ?= $(EXTERNAL_CRD_DIR)/$(FLUX_SOURCE_REPO_NAME)-$(FLUX_SOURCE_VERSION).yaml FLUX_SOURCE_CHART_NAME ?= source-helmchart FLUX_SOURCE_CHART_CRD ?= $(EXTERNAL_CRD_DIR)/$(FLUX_SOURCE_CHART_NAME)-$(FLUX_SOURCE_VERSION).yaml - FLUX_HELM_VERSION ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/fluxcd/helm-controller/api") | .Version') +FLUX_HELM_CRD ?= $(EXTERNAL_CRD_DIR)/helm-$(FLUX_HELM_VERSION).yaml +CAPI_VERSION ?= v1.8.4 +CAPI_CRD ?= $(EXTERNAL_CRD_DIR)/capi-$(CAPI_VERSION).yaml +K0SMOTRON_VERSION ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/k0sproject/k0smotron") | .Version') +K0SMOTRON_CRD ?= $(EXTERNAL_CRD_DIR)/k0smotron-$(K0SMOTRON_VERSION).yaml FLUX_HELM_NAME ?= helm FLUX_HELM_CRD ?= $(EXTERNAL_CRD_DIR)/$(FLUX_HELM_NAME)-$(FLUX_HELM_VERSION).yaml @@ -479,8 +483,16 @@ $(SVELTOS_CRD): | yq $(EXTERNAL_CRD_DIR) rm -f $(EXTERNAL_CRD_DIR)/$(SVELTOS_NAME)* curl -s --fail 
https://raw.githubusercontent.com/projectsveltos/sveltos/$(SVELTOS_VERSION)/manifest/crds/sveltos_crds.yaml > $(SVELTOS_CRD)
 
+$(K0SMOTRON_CRD): $(EXTERNAL_CRD_DIR)
+	rm -f $(K0SMOTRON_CRD)
+	curl -s https://raw.githubusercontent.com/k0sproject/k0smotron/$(K0SMOTRON_VERSION)/config/crd/bases/infrastructure.cluster.x-k8s.io_remoteclusters.yaml > $(K0SMOTRON_CRD)
+
+$(CAPI_CRD): $(EXTERNAL_CRD_DIR)
+	rm -f $(CAPI_CRD)
+	curl -s https://raw.githubusercontent.com/kubernetes-sigs/cluster-api/$(CAPI_VERSION)/config/crd/bases/cluster.x-k8s.io_clusters.yaml > $(CAPI_CRD)
+
 .PHONY: external-crd
-external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD) $(SVELTOS_CRD)
+external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD) $(SVELTOS_CRD) $(K0SMOTRON_CRD) $(CAPI_CRD)
 
 .PHONY: kind
 kind: $(KIND) ## Download kind locally if necessary.
diff --git a/PROJECT b/PROJECT
index 1320ee237..7efe4c378 100644
--- a/PROJECT
+++ b/PROJECT
@@ -101,4 +101,22 @@ resources:
   kind: MultiClusterService
   path: github.com/Mirantis/hmc/api/v1alpha1
   version: v1alpha1
+- api:
+    crdVersion: v1
+    namespaced: true
+  controller: true
+  domain: hmc.mirantis.com
+  group: hmc.mirantis.com
+  kind: UnmanagedCluster
+  path: github.com/Mirantis/hmc/api/v1alpha1
+  version: v1alpha1
+- api:
+    crdVersion: v1
+    namespaced: true
+  controller: true
+  domain: hmc.mirantis.com
+  group: hmc.mirantis.com
+  kind: UnmanagedMachine
+  path: github.com/Mirantis/hmc/api/v1alpha1
+  version: v1alpha1
 version: "3"
diff --git a/api/v1alpha1/unmanagedcluster_types.go b/api/v1alpha1/unmanagedcluster_types.go
new file mode 100644
index 000000000..9fbe76b79
--- /dev/null
+++ b/api/v1alpha1/unmanagedcluster_types.go
@@ -0,0 +1,95 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+const (
+	UnmanagedClusterKind      = "UnmanagedCluster"
+	UnmanagedClusterFinalizer = "hmc.mirantis.com/unmanaged-cluster"
+	AllNodesCondition         = "AllNodesCondition"
+	NodeCondition             = "NodeCondition"
+	HelmChart                 = "HelmChart"
+)
+
+// UnmanagedClusterSpec defines the desired state of UnmanagedCluster
+type UnmanagedClusterSpec struct {
+	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
+	// Important: Run "make" to regenerate code after modifying this file
+
+	Name string `json:"name,omitempty"`
+	// Services is a list of services created via ServiceTemplates
+	// that could be installed on the target cluster.
+	Services []ServiceSpec `json:"services,omitempty"`
+
+	// ServicesPriority sets the priority for the services defined in this spec.
+	// Higher value means higher priority and lower means lower.
+	// In case of conflict with another object managing the service,
+	// the one with higher priority will get to deploy its services.
+ ServicesPriority int32 `json:"servicesPriority,omitempty"` + // DryRun specifies whether the template should be applied after validation or only validated. + // DryRun bool `json:"dryRun,omitempty"` + + // +kubebuilder:default:=false + + // StopOnConflict specifies what to do in case of a conflict. + // E.g. If another object is already managing a service. + // By default the remaining services will be deployed even if conflict is detected. + // If set to true, the deployment will stop after encountering the first conflict. + StopOnConflict bool `json:"stopOnConflict,omitempty"` +} + +// UnmanagedClusterStatus defines the observed state of UnmanagedCluster +type UnmanagedClusterStatus struct { + // Flag indicating whether the unmanaged cluster is in the ready state or not + Ready bool `json:"ready"` + + // Conditions contains details for the current state of the ManagedCluster. + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:metadata:labels=cluster.x-k8s.io/v1beta1=v1alpha1 +// UnmanagedCluster is the Schema for the unmanagedclusters API +type UnmanagedCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec UnmanagedClusterSpec `json:"spec,omitempty"` + Status UnmanagedClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// UnmanagedClusterList contains a list of UnmanagedCluster +type UnmanagedClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []UnmanagedCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&UnmanagedCluster{}, &UnmanagedClusterList{}) +} + +func (in *UnmanagedCluster) GetConditions() *[]metav1.Condition { + return &in.Status.Conditions +} diff --git a/api/v1alpha1/unmanagedmachine_types.go b/api/v1alpha1/unmanagedmachine_types.go new file mode 100644 index 000000000..45e2025d6 --- /dev/null +++ b/api/v1alpha1/unmanagedmachine_types.go @@ -0,0 +1,69 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
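+
+// An illustrative UnmanagedMachine manifest; the name, namespace, label value
+// and providerID below are placeholders rather than values taken from this
+// change. UnmanagedMachines are normally created by the UnmanagedCluster
+// controller from the adopted cluster's nodes:
+//
+//	apiVersion: hmc.mirantis.com/v1alpha1
+//	kind: UnmanagedMachine
+//	metadata:
+//	  name: worker-0
+//	  namespace: default
+//	  labels:
+//	    cluster.x-k8s.io/cluster-name: adopted-cluster
+//	spec:
+//	  providerID: kind://docker/adopted-cluster/worker-0
+//	  clusterName: adopted-cluster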
+ +// UnmanagedMachineSpec defines the desired state of UnmanagedMachine +type UnmanagedMachineSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + ProviderID string `json:"providerID,omitempty"` + ClusterName string `json:"clusterName,omitempty"` +} + +// UnmanagedMachineStatus defines the observed state of UnmanagedMachine +type UnmanagedMachineStatus struct { + // Flag indicating whether the machine is in the ready state or not + Ready bool `json:"ready,omitempty"` + // Conditions contains details for the current state of the ManagedCluster + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Machine ready status" +// +kubebuilder:metadata:labels=cluster.x-k8s.io/v1beta1=v1alpha1 + +// UnmanagedMachine is the Schema for the unmanagedmachines API +type UnmanagedMachine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec UnmanagedMachineSpec `json:"spec,omitempty"` + Status UnmanagedMachineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// UnmanagedMachineList contains a list of UnmanagedMachine +type UnmanagedMachineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []UnmanagedMachine `json:"items"` +} + +func init() { + SchemeBuilder.Register(&UnmanagedMachine{}, &UnmanagedMachineList{}) +} + +func (in *UnmanagedMachine) GetConditions() *[]metav1.Condition { + return &in.Status.Conditions +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 59bcaa4e2..674102e04 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -1484,3 +1484,202 @@ func (in *TemplateValidationStatus) DeepCopy() *TemplateValidationStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedCluster) DeepCopyInto(out *UnmanagedCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedCluster. +func (in *UnmanagedCluster) DeepCopy() *UnmanagedCluster { + if in == nil { + return nil + } + out := new(UnmanagedCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UnmanagedCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedClusterList) DeepCopyInto(out *UnmanagedClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]UnmanagedCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedClusterList. 
+func (in *UnmanagedClusterList) DeepCopy() *UnmanagedClusterList { + if in == nil { + return nil + } + out := new(UnmanagedClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UnmanagedClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedClusterSpec) DeepCopyInto(out *UnmanagedClusterSpec) { + *out = *in + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]ServiceSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedClusterSpec. +func (in *UnmanagedClusterSpec) DeepCopy() *UnmanagedClusterSpec { + if in == nil { + return nil + } + out := new(UnmanagedClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedClusterStatus) DeepCopyInto(out *UnmanagedClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedClusterStatus. +func (in *UnmanagedClusterStatus) DeepCopy() *UnmanagedClusterStatus { + if in == nil { + return nil + } + out := new(UnmanagedClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedMachine) DeepCopyInto(out *UnmanagedMachine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedMachine. +func (in *UnmanagedMachine) DeepCopy() *UnmanagedMachine { + if in == nil { + return nil + } + out := new(UnmanagedMachine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UnmanagedMachine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedMachineList) DeepCopyInto(out *UnmanagedMachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]UnmanagedMachine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedMachineList. +func (in *UnmanagedMachineList) DeepCopy() *UnmanagedMachineList { + if in == nil { + return nil + } + out := new(UnmanagedMachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *UnmanagedMachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedMachineSpec) DeepCopyInto(out *UnmanagedMachineSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedMachineSpec. +func (in *UnmanagedMachineSpec) DeepCopy() *UnmanagedMachineSpec { + if in == nil { + return nil + } + out := new(UnmanagedMachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedMachineStatus) DeepCopyInto(out *UnmanagedMachineStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedMachineStatus. +func (in *UnmanagedMachineStatus) DeepCopy() *UnmanagedMachineStatus { + if in == nil { + return nil + } + out := new(UnmanagedMachineStatus) + in.DeepCopyInto(out) + return out +} diff --git a/cmd/main.go b/cmd/main.go index 5ee887fb9..4e0553b42 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -302,6 +302,21 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "MultiClusterService") os.Exit(1) } + if err = (&controller.UnmanagedClusterReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "UnmanagedCluster") + os.Exit(1) + } + + if err = (&controller.UnmanagedMachineReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "UnmanagedMachine") + os.Exit(1) + } // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { @@ -364,5 +379,10 @@ func setupWebhooks(mgr ctrl.Manager, currentNamespace string) error { setupLog.Error(err, "unable to create webhook", "webhook", "ProviderTemplate") return err } + setupLog.Info("setup UnmanagedClusterValidator webhook") + if err := (&hmcwebhook.UnmanagedClusterValidator{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "UnmanagedCluster") + return err + } return nil } diff --git a/go.mod b/go.mod index 58a77e3a4..d073577ac 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/fluxcd/source-controller/api v1.4.1 github.com/google/uuid v1.6.0 github.com/hashicorp/go-retryablehttp v0.7.7 + github.com/k0sproject/k0smotron v1.1.2 github.com/onsi/ginkgo/v2 v2.21.0 github.com/onsi/gomega v1.35.1 github.com/opencontainers/go-digest v1.0.1-0.20231025023718-d50d2fec9c98 diff --git a/go.sum b/go.sum index 3f0317f23..a39032154 100644 --- a/go.sum +++ b/go.sum @@ -291,6 +291,8 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/k0sproject/k0smotron v1.1.2 
h1:5jyGugN37Yk64pd/YTcuJwfBAVUx820MGI7zEeNdlRI=
+github.com/k0sproject/k0smotron v1.1.2/go.mod h1:TZVJaCTigFGpKpUkpfIsWPSkpCLAYf73420bI9Gt6n8=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go
index e805f260c..37f7f0959 100644
--- a/internal/controller/suite_test.go
+++ b/internal/controller/suite_test.go
@@ -176,6 +176,9 @@ var _ = BeforeSuite(func() {
 	err = (&hmcwebhook.ProviderTemplateValidator{}).SetupWebhookWithManager(mgr)
 	Expect(err).NotTo(HaveOccurred())
 
+	err = (&hmcwebhook.UnmanagedClusterValidator{}).SetupWebhookWithManager(mgr)
+	Expect(err).NotTo(HaveOccurred())
+
 	go func() {
 		defer GinkgoRecover()
 		err = mgr.Start(ctx)
diff --git a/internal/controller/unmanagedcluster_controller.go b/internal/controller/unmanagedcluster_controller.go
new file mode 100644
index 000000000..e11e7172c
--- /dev/null
+++ b/internal/controller/unmanagedcluster_controller.go
@@ -0,0 +1,505 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+
+	v1beta12 "github.com/k0sproject/k0smotron/api/infrastructure/v1beta1"
+	sveltosv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1"
+	"github.com/projectsveltos/libsveltos/lib/clusterproxy"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/tools/clientcmd"
+	"sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/util/kubeconfig"
+	"sigs.k8s.io/cluster-api/util/secret"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+
+	hmc "github.com/Mirantis/hmc/api/v1alpha1"
+	"github.com/Mirantis/hmc/internal/sveltos"
+)
+
+// UnmanagedClusterReconciler reconciles an UnmanagedCluster object
+type UnmanagedClusterReconciler struct {
+	client.Client
+	Scheme *runtime.Scheme
+}
+
+// +kubebuilder:rbac:groups=hmc.mirantis.com,resources=unmanagedclusters,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=hmc.mirantis.com,resources=unmanagedclusters/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=hmc.mirantis.com,resources=unmanagedclusters/finalizers,verbs=update
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
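+//
+// For an UnmanagedCluster this currently means: ensure the finalizer is set,
+// mirror the adopted cluster as a Cluster API Cluster object, create
+// UnmanagedMachine/Machine pairs for the nodes discovered in the adopted
+// cluster, reconcile a Sveltos Profile for any requested services, and
+// update status from node readiness and Sveltos ClusterSummaries.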
+// TODO(user): Modify the Reconcile function to compare the state specified by +// the UnmanagedCluster object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile +func (r *UnmanagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + l := ctrl.LoggerFrom(ctx) + l.Info("Reconciling UnmanagedCluster") + + if err := v1beta12.AddToScheme(r.Client.Scheme()); err != nil { + return ctrl.Result{}, err + } + + if err := v1beta1.AddToScheme(r.Client.Scheme()); err != nil { + return ctrl.Result{}, err + } + + unmanagedCluster := new(hmc.UnmanagedCluster) + if err := r.Get(ctx, req.NamespacedName, unmanagedCluster); err != nil { + if apierrors.IsNotFound(err) { + l.Info("UnmanagedCluster not found, ignoring since object must be deleted") + return ctrl.Result{}, nil + } + l.Error(err, "Failed to get UnmanagedCluster") + return ctrl.Result{}, err + } + + if controllerutil.AddFinalizer(unmanagedCluster, hmc.UnmanagedClusterFinalizer) { + if err := r.Client.Update(ctx, unmanagedCluster); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update UnmanagedCluster %s with finalizer %s: %w", unmanagedCluster.Name, hmc.UnmanagedClusterFinalizer, err) + } + } + return r.reconcileUnmanagedCluster(ctx, unmanagedCluster) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *UnmanagedClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&hmc.UnmanagedCluster{}). + Complete(r) +} + +func (r *UnmanagedClusterReconciler) getControlPlaneEndpoint(ctx context.Context, unmanagedCluster *hmc.UnmanagedCluster) (v1beta1.APIEndpoint, error) { + bytes, err := kubeconfig.FromSecret(ctx, r.Client, client.ObjectKey{ + Namespace: unmanagedCluster.Namespace, + Name: unmanagedCluster.Name, + }) + if err != nil { + return v1beta1.APIEndpoint{}, fmt.Errorf("failed to get cluster kubeconfig secret: %w", err) + } + + config, err := clientcmd.RESTConfigFromKubeConfig(bytes) + if err != nil { + return v1beta1.APIEndpoint{}, fmt.Errorf("failed to get rest config from kube config secret: %w", err) + } + + hostURL, err := url.Parse(config.Host) + if err != nil { + return v1beta1.APIEndpoint{}, fmt.Errorf("kube config secret contains invalid host: %w", err) + } + + portNumber, err := strconv.Atoi(hostURL.Port()) + if err != nil { + return v1beta1.APIEndpoint{}, fmt.Errorf("kube config secret contains invalid port: %w", err) + } + return v1beta1.APIEndpoint{Host: hostURL.Hostname(), Port: int32(portNumber)}, nil +} + +func (r *UnmanagedClusterReconciler) createCluster(ctx context.Context, unmanagedCluster *hmc.UnmanagedCluster) error { + controlPlaneEndPoint, err := r.getControlPlaneEndpoint(ctx, unmanagedCluster) + if err != nil { + return fmt.Errorf("failed to get control plane endpoint: %w", err) + } + + clusterObject := &v1beta1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedCluster.Name, + Namespace: unmanagedCluster.Namespace, + Labels: map[string]string{ + "helm.toolkit.fluxcd.io/name": unmanagedCluster.Name, + "helm.toolkit.fluxcd.io/namespace": unmanagedCluster.Namespace, + }, + }, + Spec: v1beta1.ClusterSpec{ + ControlPlaneEndpoint: controlPlaneEndPoint, + InfrastructureRef: &corev1.ObjectReference{ + Kind: "UnmanagedCluster", + Namespace: unmanagedCluster.Namespace, + 
Name: unmanagedCluster.Name, + APIVersion: unmanagedCluster.APIVersion, + }, + }, + } + clusterObject.Status.SetTypedPhase(v1beta1.ClusterPhaseUnknown) + err = r.Client.Create(ctx, clusterObject) + if err != nil && !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to create unmanagedCluster object %s/%s: %s", unmanagedCluster.Namespace, unmanagedCluster.Name, err) + } + + return nil +} + +func (r *UnmanagedClusterReconciler) createMachines(ctx context.Context, unmanagedCluster *hmc.UnmanagedCluster) error { + l := ctrl.LoggerFrom(ctx) + + nodelist, err := r.getNodeList(ctx, unmanagedCluster) + if err != nil { + return err + } + + kubeConfigSecretName := secret.Name(unmanagedCluster.Name, secret.Kubeconfig) + + // find any existing unmanaged machines for the cluster to see if any need to be cleaned up because + // the underlying node was removed + existingMachines := &hmc.UnmanagedMachineList{} + if err := r.List(ctx, existingMachines, &client.ListOptions{ + Namespace: unmanagedCluster.Namespace, + LabelSelector: labels.SelectorFromSet(map[string]string{v1beta1.ClusterNameLabel: unmanagedCluster.Name}), + }); err != nil { + return fmt.Errorf("failed to list existing unmanaged machines: %w", err) + } + + existingMachinesByName := map[string]*hmc.UnmanagedMachine{} + for _, existingMachine := range existingMachines.Items { + existingMachinesByName[existingMachine.GetName()] = &existingMachine + } + + for _, node := range nodelist.Items { + delete(existingMachinesByName, node.Name) + + unmanagedMachine := hmc.UnmanagedMachine{ + TypeMeta: metav1.TypeMeta{ + Kind: "UnmanagedMachine", + APIVersion: hmc.GroupVersion.Identifier(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: node.Name, + Namespace: unmanagedCluster.Namespace, + Labels: map[string]string{ + v1beta1.ClusterNameLabel: unmanagedCluster.Name, + }, + }, + Spec: hmc.UnmanagedMachineSpec{ + ProviderID: node.Spec.ProviderID, + ClusterName: unmanagedCluster.Name, + }, + Status: hmc.UnmanagedMachineStatus{ + Ready: true, + }, + } + + err := r.Create(ctx, &unmanagedMachine) + if err != nil && !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to create machine: %w", err) + } + + ref := types.NamespacedName{Name: unmanagedMachine.Name, Namespace: unmanagedMachine.Namespace} + if err := r.Get(ctx, ref, &unmanagedMachine); err != nil { + return fmt.Errorf("failed to get unmanaged machine: %w", err) + } + unmanagedMachine.Status = hmc.UnmanagedMachineStatus{ + Ready: true, + } + if err := r.Status().Update(ctx, &unmanagedMachine); err != nil { + return fmt.Errorf("failed to update unmanaged machine status: %w", err) + } + + l.Info("Create machine", "node", node.Name) + machine := v1beta1.Machine{ + TypeMeta: metav1.TypeMeta{ + Kind: "Machine", + APIVersion: v1beta1.GroupVersion.Identifier(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: node.Name, + Namespace: unmanagedCluster.Namespace, + Labels: map[string]string{v1beta1.GroupVersion.Identifier(): hmc.GroupVersion.Version, v1beta1.ClusterNameLabel: unmanagedCluster.Name}, + }, + Spec: v1beta1.MachineSpec{ + ClusterName: unmanagedCluster.Name, + Bootstrap: v1beta1.Bootstrap{ + DataSecretName: &kubeConfigSecretName, + }, + InfrastructureRef: corev1.ObjectReference{ + Kind: "UnmanagedMachine", + Namespace: unmanagedCluster.Namespace, + Name: node.Name, + APIVersion: hmc.GroupVersion.Identifier(), + }, + ProviderID: &node.Spec.ProviderID, + }, + Status: v1beta1.MachineStatus{ + NodeRef: &corev1.ObjectReference{ + Kind: "Node", + Name: node.Name, + APIVersion: "v1", + }, + 
NodeInfo: &corev1.NodeSystemInfo{}, + CertificatesExpiryDate: nil, + BootstrapReady: true, + InfrastructureReady: true, + }, + } + + if _, ok := node.Labels[v1beta1.NodeRoleLabelPrefix+"/control-plane"]; ok { + if machine.Labels == nil { + machine.Labels = make(map[string]string) + } + machine.Labels[v1beta1.MachineControlPlaneLabel] = "true" + } + err = r.Create(ctx, &machine) + if err != nil && !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to create machine: %w", err) + } + } + + // cleanup any orphaned unmanaged machines and capi machines + for _, existingUnmanagedMachine := range existingMachinesByName { + if err := r.Delete(ctx, existingUnmanagedMachine); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete orphaned unmanaged machine: %w", err) + } + + if err := r.Delete(ctx, &v1beta1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: existingUnmanagedMachine.Name, + Namespace: unmanagedCluster.Namespace, + }, + }); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete orphaned machine: %w", err) + } + } + return nil +} + +func (r *UnmanagedClusterReconciler) getNodeList(ctx context.Context, unmanagedCluster *hmc.UnmanagedCluster) (*corev1.NodeList, error) { + l := ctrl.LoggerFrom(ctx) + clusterClient, err := clusterproxy.GetCAPIKubernetesClient(ctx, l, r.Client, r.Client.Scheme(), unmanagedCluster.Namespace, unmanagedCluster.Name) + if err != nil { + return nil, fmt.Errorf("failed to connect to remote cluster: %w", err) + } + + nodelist := &corev1.NodeList{} + if err := clusterClient.List(ctx, nodelist); err != nil { + return nil, fmt.Errorf("failed to list cluster nodes: %w", err) + } + return nodelist, nil +} + +func (r *UnmanagedClusterReconciler) reconcileUnmanagedCluster(ctx context.Context, unmanagedCluster *hmc.UnmanagedCluster) (ctrl.Result, error) { + l := ctrl.LoggerFrom(ctx) + if !unmanagedCluster.DeletionTimestamp.IsZero() { + l.Info("Deleting UnmanagedCluster") + return r.reconcileDeletion(ctx, unmanagedCluster) + } + + if err := r.createCluster(ctx, unmanagedCluster); err != nil { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err + } + + if err := r.createServices(ctx, unmanagedCluster); err != nil { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err + } + + if err := r.createMachines(ctx, unmanagedCluster); err != nil { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err + } + + requeue, err := r.updateStatus(ctx, unmanagedCluster) + if err != nil { + if requeue { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err + } + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err + } + + if requeue { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil + } + return ctrl.Result{}, nil +} + +func (r *UnmanagedClusterReconciler) createServices(ctx context.Context, mc *hmc.UnmanagedCluster) error { + opts, err := helmChartOpts(ctx, r.Client, mc.Namespace, mc.Spec.Services) + if err != nil { + return err + } + + if _, err := sveltos.ReconcileProfile(ctx, r.Client, mc.Namespace, mc.Name, + sveltos.ReconcileProfileOpts{ + OwnerReference: &metav1.OwnerReference{ + APIVersion: hmc.GroupVersion.String(), + Kind: hmc.UnmanagedClusterKind, + Name: mc.Name, + UID: mc.UID, + }, + LabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + hmc.FluxHelmChartNamespaceKey: mc.Namespace, + hmc.FluxHelmChartNameKey: mc.Name, + }, + }, + HelmChartOpts: opts, + Priority: mc.Spec.ServicesPriority, + StopOnConflict: mc.Spec.StopOnConflict, + }); 
err != nil { + return fmt.Errorf("failed to reconcile Profile: %w", err) + } + + return nil +} + +func (r *UnmanagedClusterReconciler) reconcileDeletion(ctx context.Context, unmanagedCluster *hmc.UnmanagedCluster) (ctrl.Result, error) { + clusterLabel := map[string]string{v1beta1.ClusterNameLabel: unmanagedCluster.Name} + deleteAllOpts := []client.DeleteAllOfOption{ + client.InNamespace(unmanagedCluster.Namespace), + client.MatchingLabels(clusterLabel), + } + + if err := r.DeleteAllOf( + ctx, + &hmc.UnmanagedMachine{}, + deleteAllOpts..., + ); err != nil && !apierrors.IsNotFound(err) { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete unmanaged machines: %w", err) + } + + if err := r.DeleteAllOf( + ctx, + &v1beta1.Machine{}, + deleteAllOpts..., + ); err != nil && !apierrors.IsNotFound(err) { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete unmanaged machines: %w", err) + } + + if err := r.Delete(ctx, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: unmanagedCluster.Namespace, + Name: secret.Name(unmanagedCluster.Name, secret.Kubeconfig), + Labels: map[string]string{ + v1beta1.ClusterNameLabel: unmanagedCluster.Name, + }, + }, + }); err != nil && !apierrors.IsNotFound(err) { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete cluster secret: %w", err) + } + + if err := r.Delete(ctx, &v1beta1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: unmanagedCluster.Namespace, + Name: unmanagedCluster.Name, + Labels: map[string]string{ + v1beta1.ClusterNameLabel: unmanagedCluster.Name, + }, + }, + }); err != nil && !apierrors.IsNotFound(err) { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete cluster: %w", err) + } + + if controllerutil.RemoveFinalizer(unmanagedCluster, hmc.UnmanagedClusterFinalizer) { + if err := r.Client.Update(ctx, unmanagedCluster); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to remove finalizer %s from UnmanagedCluster %s: %w", + hmc.UnmanagedClusterFinalizer, unmanagedCluster.Name, err) + } + } + return ctrl.Result{}, nil +} + +func (r *UnmanagedClusterReconciler) updateStatus(ctx context.Context, cluster *hmc.UnmanagedCluster) (bool, error) { + requeue := false + nodelist, err := r.getNodeList(ctx, cluster) + if err != nil { + return true, err + } + + allNodeCondition := metav1.Condition{ + Type: hmc.AllNodesCondition, + Status: "True", + Message: "All nodes are ready", + Reason: hmc.SucceededReason, + } + + cluster.Status.Ready = true + var nonReadyNodes []string + for _, node := range nodelist.Items { + for _, nodeCondition := range node.Status.Conditions { + if nodeCondition.Type == corev1.NodeReady { + if nodeCondition.Status != corev1.ConditionTrue { + allNodeCondition.Status = metav1.ConditionFalse + allNodeCondition.Reason = hmc.FailedReason + nonReadyNodes = append(nonReadyNodes, node.Name) + requeue = true + cluster.Status.Ready = false + } + } + } + } + + if len(nonReadyNodes) > 0 { + allNodeCondition.Message = fmt.Sprintf("Nodes %s are not ready", strings.Join(nonReadyNodes, ",")) + } + apimeta.SetStatusCondition(cluster.GetConditions(), allNodeCondition) + + if len(cluster.Spec.Services) > 0 { + sveltosClusterSummaries := &sveltosv1beta1.ClusterSummaryList{} + if err := r.List(ctx, sveltosClusterSummaries, &client.ListOptions{ + Namespace: cluster.Namespace, + LabelSelector: labels.SelectorFromSet(map[string]string{sveltosv1beta1.ClusterNameLabel: cluster.Name}), + }); err != nil { 
+ return true, fmt.Errorf("failed to list sveltos cluster summary: %w", err) + } + + if len(sveltosClusterSummaries.Items) > 0 { + var failedCharts []string + + helmCondition := metav1.Condition{ + Type: hmc.HelmChart, + Reason: hmc.SucceededReason, + Status: metav1.ConditionTrue, + } + + for _, clusterSummary := range sveltosClusterSummaries.Items { + for _, helmReleaseSummary := range clusterSummary.Status.HelmReleaseSummaries { + if helmReleaseSummary.Status != sveltosv1beta1.HelmChartStatusManaging { + helmCondition.Reason = hmc.FailedReason + helmCondition.Status = metav1.ConditionFalse + requeue = true + failedCharts = append(failedCharts, helmReleaseSummary.ReleaseName) + } + } + } + + if len(failedCharts) > 0 { + helmCondition.Message = "Charts failed to deploy " + strings.Join(failedCharts, ",") + } + apimeta.SetStatusCondition(cluster.GetConditions(), helmCondition) + } else { + requeue = true + } + } + + if err := r.Status().Update(ctx, cluster); err != nil { + return true, fmt.Errorf("failed to update unmanaged cluster status: %w", err) + } + + return requeue, nil +} diff --git a/internal/controller/unmanagedcluster_controller_test.go b/internal/controller/unmanagedcluster_controller_test.go new file mode 100644 index 000000000..1c404b59b --- /dev/null +++ b/internal/controller/unmanagedcluster_controller_test.go @@ -0,0 +1,162 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "context" + + "github.com/k0sproject/k0smotron/api/infrastructure/v1beta1" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" + capi "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/secret" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + hmc "github.com/Mirantis/hmc/api/v1alpha1" +) + +var _ = Describe("UnmanagedCluster Controller", func() { + Context("When reconciling a resource", func() { + const ( + unmanagedClusterName = "test-managed-cluster" + unmanagedClusterNamespace = "default" + ) + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: unmanagedClusterName, + Namespace: unmanagedClusterNamespace, + } + unmanagedcluster := &hmc.UnmanagedCluster{} + + BeforeEach(func() { + Expect(v1beta1.AddToScheme(k8sClient.Scheme())).To(Succeed()) + By("creating the custom resource for the Kind UnmanagedCluster") + + secretName := secret.Name(unmanagedClusterName, secret.Kubeconfig) + + secret := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: unmanagedClusterNamespace, + Labels: map[string]string{capi.ClusterNameLabel: unmanagedClusterName}, + }, + Data: map[string][]byte{secret.KubeconfigDataName: generateTestKubeConfig()}, + } + + err := k8sClient.Get(ctx, types.NamespacedName{Name: secretName, Namespace: unmanagedClusterNamespace}, secret) + if err != nil && errors.IsNotFound(err) { + Expect(k8sClient.Create(ctx, secret)).To(Succeed()) + } + + err = k8sClient.Get(ctx, typeNamespacedName, unmanagedcluster) + if err != nil && errors.IsNotFound(err) { + resource := &hmc.UnmanagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedClusterName, + Namespace: unmanagedClusterNamespace, + }, + Spec: hmc.UnmanagedClusterSpec{ + Name: unmanagedClusterName, + Services: nil, + ServicesPriority: 1, + StopOnConflict: true, + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + resource := &hmc.UnmanagedCluster{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance UnmanagedCluster") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + + secretName := secret.Name(unmanagedClusterName, secret.Kubeconfig) + Expect(k8sClient.Delete(ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: unmanagedClusterNamespace, + }})).To(Succeed()) + + Expect(k8sClient.Delete(ctx, + &capi.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedClusterName, + Namespace: unmanagedClusterNamespace, + }, + })).To(Succeed()) + }) + + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &UnmanagedClusterReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. 
+		})
+	})
+})
+
+func generateTestKubeConfig() []byte {
+	GinkgoHelper()
+	clusters := make(map[string]*api.Cluster)
+	clusters["default-cluster"] = &api.Cluster{
+		Server:                   cfg.Host,
+		CertificateAuthorityData: cfg.CAData,
+	}
+	contexts := make(map[string]*api.Context)
+	contexts["default-context"] = &api.Context{
+		Cluster:  "default-cluster",
+		AuthInfo: "default-user",
+	}
+	authinfos := make(map[string]*api.AuthInfo)
+	authinfos["default-user"] = &api.AuthInfo{
+		ClientCertificateData: cfg.CertData,
+		ClientKeyData:         cfg.KeyData,
+	}
+	clientConfig := api.Config{
+		Kind:           "Config",
+		APIVersion:     "v1",
+		Clusters:       clusters,
+		Contexts:       contexts,
+		CurrentContext: "default-context",
+		AuthInfos:      authinfos,
+	}
+
+	kubecfg, err := clientcmd.Write(clientConfig)
+	Expect(err).NotTo(HaveOccurred())
+	return kubecfg
+}
diff --git a/internal/controller/unmanagedmachine_controller.go b/internal/controller/unmanagedmachine_controller.go
new file mode 100644
index 000000000..988b046fa
--- /dev/null
+++ b/internal/controller/unmanagedmachine_controller.go
@@ -0,0 +1,117 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/projectsveltos/libsveltos/lib/clusterproxy"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+
+	hmc "github.com/Mirantis/hmc/api/v1alpha1"
+)
+
+// UnmanagedMachineReconciler reconciles an UnmanagedMachine object
+type UnmanagedMachineReconciler struct {
+	client.Client
+	Scheme *runtime.Scheme
+}
+
+// +kubebuilder:rbac:groups=hmc.mirantis.com,resources=unmanagedmachines,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=hmc.mirantis.com,resources=unmanagedmachines/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=hmc.mirantis.com,resources=unmanagedmachines/finalizers,verbs=update
+
+func (r *UnmanagedMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	l := log.FromContext(ctx)
+
+	unmanagedMachine := new(hmc.UnmanagedMachine)
+	if err := r.Get(ctx, req.NamespacedName, unmanagedMachine); err != nil {
+		if apierrors.IsNotFound(err) {
+			l.Info("UnmanagedMachine not found, ignoring since object must be deleted")
+			return ctrl.Result{}, nil
+		}
+		l.Error(err, "Failed to get UnmanagedMachine")
+		return ctrl.Result{}, err
+	}
+
+	requeue, err := r.reconcileStatus(ctx, unmanagedMachine)
+	if err != nil {
+		if requeue {
+			return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err
+		}
+		return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err
+	}
+
+	if requeue {
+		return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil
+	}
+	return
ctrl.Result{}, nil +} + +func (r *UnmanagedMachineReconciler) reconcileStatus(ctx context.Context, unmanagedMachine *hmc.UnmanagedMachine) (bool, error) { + requeue := false + + l := ctrl.LoggerFrom(ctx) + clusterClient, err := clusterproxy.GetCAPIKubernetesClient(ctx, l, r.Client, r.Client.Scheme(), unmanagedMachine.Namespace, unmanagedMachine.Spec.ClusterName) + if err != nil { + return true, fmt.Errorf("failed to connect to remote cluster: %w", err) + } + + node := &corev1.Node{} + if err := clusterClient.Get(ctx, types.NamespacedName{Name: unmanagedMachine.Name, Namespace: ""}, node); err != nil { + return true, fmt.Errorf("failed to get node :%w", err) + } + + for _, nodeCondition := range node.Status.Conditions { + if nodeCondition.Type == corev1.NodeReady { + unmanagedMachine.Status.Ready = true + machineCondition := metav1.Condition{ + Type: hmc.NodeCondition, + Status: "True", + Reason: hmc.SucceededReason, + } + + if nodeCondition.Status != corev1.ConditionTrue { + requeue = true + machineCondition.Reason = hmc.FailedReason + machineCondition.Status = "False" + unmanagedMachine.Status.Ready = false + } + apimeta.SetStatusCondition(unmanagedMachine.GetConditions(), machineCondition) + } + } + + if err := r.Status().Update(ctx, unmanagedMachine); err != nil { + return true, fmt.Errorf("failed to update unmanaged machine status: %w", err) + } + + return requeue, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *UnmanagedMachineReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&hmc.UnmanagedMachine{}). + Complete(r) +} diff --git a/internal/controller/unmanagedmachine_controller_test.go b/internal/controller/unmanagedmachine_controller_test.go new file mode 100644 index 000000000..f8d0b2b92 --- /dev/null +++ b/internal/controller/unmanagedmachine_controller_test.go @@ -0,0 +1,155 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "context" + + "github.com/k0sproject/k0smotron/api/infrastructure/v1beta1" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + capi "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/secret" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + hmc "github.com/Mirantis/hmc/api/v1alpha1" +) + +var _ = Describe("UnmanagedMachine Controller", func() { + Context("When reconciling a resource", func() { + const ( + unmanagedClusterName = "test-managed-cluster" + unmanagedClusterNamespace = "default" + unmanagedMachineName = "test-machine" + ) + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: unmanagedMachineName, + Namespace: unmanagedClusterNamespace, + } + unmanagedmachine := &hmc.UnmanagedMachine{} + + BeforeEach(func() { + By("creating the custom resource for the Kind UnmanagedCluster") + Expect(v1beta1.AddToScheme(k8sClient.Scheme())).To(Succeed()) + Expect(capi.AddToScheme(k8sClient.Scheme())).To(Succeed()) + secretName := secret.Name(unmanagedClusterName, secret.Kubeconfig) + + secret := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: unmanagedClusterNamespace, + Labels: map[string]string{capi.ClusterNameLabel: unmanagedClusterName}, + }, + Data: map[string][]byte{secret.KubeconfigDataName: generateTestKubeConfig()}, + } + + err := k8sClient.Get(ctx, types.NamespacedName{Name: secretName, Namespace: unmanagedClusterNamespace}, secret) + if err != nil && errors.IsNotFound(err) { + Expect(k8sClient.Create(ctx, secret)).To(Succeed()) + } + + cluster := &capi.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "Cluster", + APIVersion: capi.GroupVersion.Identifier(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedClusterName, + Namespace: unmanagedClusterNamespace, + }, + } + err = k8sClient.Get(ctx, typeNamespacedName, cluster) + if err != nil && errors.IsNotFound(err) { + Expect(k8sClient.Create(ctx, cluster)).To(Succeed()) + } + + By("creating the custom resource for the Kind UnmanagedMachine") + Expect(k8sClient.Create(ctx, &corev1.Node{ + TypeMeta: metav1.TypeMeta{ + Kind: "Node", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedMachineName, + }, + })).To(Succeed()) + + err = k8sClient.Get(ctx, typeNamespacedName, unmanagedmachine) + if err != nil && errors.IsNotFound(err) { + resource := &hmc.UnmanagedMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedMachineName, + Namespace: "default", + }, + Spec: hmc.UnmanagedMachineSpec{ + ProviderID: unmanagedMachineName, + ClusterName: unmanagedClusterName, + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + resource := &hmc.UnmanagedMachine{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance UnmanagedMachine") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + + secretName := secret.Name(unmanagedClusterName, secret.Kubeconfig) + Expect(k8sClient.Delete(ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: unmanagedClusterNamespace, + }})).To(Succeed()) + + Expect(k8sClient.Delete(ctx, + &capi.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedClusterName, + Namespace: unmanagedClusterNamespace, + }, + })).To(Succeed()) + + Expect(k8sClient.Delete(ctx, &corev1.Node{ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedMachineName, 
+ Namespace: unmanagedClusterNamespace, + }})).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &UnmanagedMachineReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/internal/webhook/unmanagedcluster_webhook.go b/internal/webhook/unmanagedcluster_webhook.go new file mode 100644 index 000000000..f3e67f734 --- /dev/null +++ b/internal/webhook/unmanagedcluster_webhook.go @@ -0,0 +1,105 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package webhook + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/secret" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + hmcv1alpha1 "github.com/Mirantis/hmc/api/v1alpha1" +) + +type UnmanagedClusterValidator struct { + client.Client +} + +func (v *UnmanagedClusterValidator) SetupWebhookWithManager(mgr ctrl.Manager) error { + v.Client = mgr.GetClient() + return ctrl.NewWebhookManagedBy(mgr). + For(&hmcv1alpha1.UnmanagedCluster{}). + WithValidator(v). + WithDefaulter(v). + Complete() +} + +var ( + _ webhook.CustomValidator = &UnmanagedClusterValidator{} + _ webhook.CustomDefaulter = &UnmanagedClusterValidator{} +) + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. 
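+// The request is rejected unless a kubeconfig Secret named
+// secret.Name(<cluster-name>, secret.Kubeconfig) exists in the UnmanagedCluster's
+// namespace, contains the secret.KubeconfigDataName data key, and carries the
+// cluster.x-k8s.io/cluster-name label set to the cluster name.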
+func (v *UnmanagedClusterValidator) ValidateCreate(ctx context.Context, newObj runtime.Object) (admission.Warnings, error) {
+	return v.validate(ctx, newObj)
+}
+
+func (v *UnmanagedClusterValidator) validate(ctx context.Context, newObj runtime.Object) (admission.Warnings, error) {
+	unmanagedCluster, ok := newObj.(*hmcv1alpha1.UnmanagedCluster)
+	if !ok {
+		return nil, apierrors.NewBadRequest(fmt.Sprintf("expected UnmanagedCluster but got a %T", newObj))
+	}
+
+	if !unmanagedCluster.DeletionTimestamp.IsZero() {
+		return nil, nil
+	}
+
+	kubecfgSecret := &corev1.Secret{}
+	if err := v.Client.Get(ctx, types.NamespacedName{
+		Namespace: unmanagedCluster.Namespace,
+		Name:      secret.Name(unmanagedCluster.Name, secret.Kubeconfig),
+	}, kubecfgSecret); err != nil && !apierrors.IsNotFound(err) {
+		return nil, apierrors.NewInternalError(err)
+	} else if apierrors.IsNotFound(err) {
+		return nil, apierrors.NewBadRequest(fmt.Sprintf("required secret with name: %s not found in namespace: %s",
+			secret.Name(unmanagedCluster.Name, secret.Kubeconfig), unmanagedCluster.Namespace))
+	}
+
+	if _, ok := kubecfgSecret.Data[secret.KubeconfigDataName]; !ok {
+		return nil, apierrors.NewBadRequest(fmt.Sprintf("required secret with name: %s does not have a data item "+
+			"with key %s", kubecfgSecret.Name, secret.KubeconfigDataName))
+	}
+
+	if clusterNameLabel, ok := kubecfgSecret.Labels[v1beta1.ClusterNameLabel]; !ok || clusterNameLabel != unmanagedCluster.Name {
+		return nil, apierrors.NewBadRequest(fmt.Sprintf("required secret with name: %s does not have a %s label set to: %s",
+			secret.Name(unmanagedCluster.Name, secret.Kubeconfig), v1beta1.ClusterNameLabel, unmanagedCluster.Name))
+	}
+
+	return nil, nil
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
+func (v *UnmanagedClusterValidator) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) (admission.Warnings, error) {
+	return v.validate(ctx, newObj)
+}
+
+// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
+func (*UnmanagedClusterValidator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
+	return nil, nil
+}
+
+// Default implements webhook.Defaulter so a webhook will be registered for the type.
+func (*UnmanagedClusterValidator) Default(_ context.Context, _ runtime.Object) error {
+	return nil
+}
diff --git a/internal/webhook/unmanagedcluster_webhook_test.go b/internal/webhook/unmanagedcluster_webhook_test.go
new file mode 100644
index 000000000..56b132bd0
--- /dev/null
+++ b/internal/webhook/unmanagedcluster_webhook_test.go
@@ -0,0 +1,103 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package webhook
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	.
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/secret" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/Mirantis/hmc/api/v1alpha1" + uc "github.com/Mirantis/hmc/test/objects/unmanagedcluster" + "github.com/Mirantis/hmc/test/scheme" +) + +func TestUnmanagedClusterValidateCreate(t *testing.T) { + const ( + testNamespace = "test-namespace" + testClusterName = "test" + ) + g := NewWithT(t) + + ctx := context.Background() + + kubecfg := "apiVersion: v1\nclusters:\n- cluster:\n certificate-authority-data: \n\tserver: https://nowhere.xyz\n" + + " name: test\ncontexts:\n- context:\n cluster: test\n user: test-admin\n name: test-admin@test\n" + + "current-context: test-admin@test\nkind: Config\npreferences: {}\nusers:\n- name: test-admin\n user:\n " + + "client-certificate-data: \n\tclient-key-data: " + + secretName := secret.Name(testClusterName, secret.Kubeconfig) + kubeSecret := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"}, + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, Namespace: testNamespace, + Labels: map[string]string{v1beta1.ClusterNameLabel: testClusterName}, + }, + Data: map[string][]byte{secret.KubeconfigDataName: []byte(kubecfg)}, + } + + tests := []struct { + name string + tm *v1alpha1.UnmanagedCluster + existingObjects []runtime.Object + err string + warnings admission.Warnings + }{ + { + name: "should fail if the required secret does not exist", + tm: uc.NewUnmanagedCluster(uc.WithNameAndNamespace(testClusterName, testNamespace)), + existingObjects: nil, + err: fmt.Sprintf("required secret with name: %s not found in namespace: %s", secretName, testNamespace), + }, + { + name: "should succeed", + tm: uc.NewUnmanagedCluster(uc.WithNameAndNamespace(testClusterName, testNamespace)), + existingObjects: []runtime.Object{kubeSecret}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := fake.NewClientBuilder(). + WithScheme(scheme.Scheme). + WithRuntimeObjects(tt.existingObjects...). 
+ Build() + validator := &UnmanagedClusterValidator{Client: c} + warn, err := validator.ValidateCreate(ctx, tt.tm) + if tt.err != "" { + g.Expect(err).To(HaveOccurred()) + if err.Error() != tt.err { + t.Fatalf("expected error '%s', got error: %s", tt.err, err.Error()) + } + } else { + g.Expect(err).To(Succeed()) + } + if len(tt.warnings) > 0 { + g.Expect(warn).To(Equal(tt.warnings)) + } else { + g.Expect(warn).To(BeEmpty()) + } + }) + } +} diff --git a/templates/provider/hmc/templates/_helpers.tpl b/templates/provider/hmc/templates/_helpers.tpl index 2d9e15365..58a849572 100644 --- a/templates/provider/hmc/templates/_helpers.tpl +++ b/templates/provider/hmc/templates/_helpers.tpl @@ -96,6 +96,7 @@ hmc-webhook {{- define "rbac.editorVerbs" -}} - create - delete +- deletecollection - get - list - patch diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml new file mode 100644 index 000000000..4c8459ad8 --- /dev/null +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml @@ -0,0 +1,168 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + labels: + cluster.x-k8s.io/v1beta1: v1alpha1 + name: unmanagedclusters.hmc.mirantis.com +spec: + group: hmc.mirantis.com + names: + kind: UnmanagedCluster + listKind: UnmanagedClusterList + plural: unmanagedclusters + singular: unmanagedcluster + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: UnmanagedCluster is the Schema for the unmanagedclusters API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: UnmanagedClusterSpec defines the desired state of UnmanagedCluster + properties: + name: + type: string + services: + description: |- + Services is a list of services created via ServiceTemplates + that could be installed on the target cluster. + items: + description: ServiceSpec represents a Service to be managed + properties: + disable: + description: Disable can be set to disable handling of this + service. + type: boolean + name: + description: Name is the chart release. + minLength: 1 + type: string + namespace: + description: |- + Namespace is the namespace the release will be installed in. + It will default to Name if not provided. + type: string + template: + description: Template is a reference to a Template object located + in the same namespace. + minLength: 1 + type: string + values: + description: Values is the helm values to be passed to the template. + x-kubernetes-preserve-unknown-fields: true + required: + - name + - template + type: object + type: array + servicesPriority: + description: |- + ServicesPriority sets the priority for the services defined in this spec. 
+ Higher value means higher priority and lower means lower. + In case of conflict with another object managing the service, + the one with higher priority will get to deploy its services. + format: int32 + type: integer + stopOnConflict: + default: false + description: |- + StopOnConflict specifies what to do in case of a conflict. + E.g. If another object is already managing a service. + By default the remaining services will be deployed even if conflict is detected. + If set to true, the deployment will stop after encountering the first conflict. + type: boolean + type: object + status: + description: UnmanagedClusterStatus defines the observed state of UnmanagedCluster + properties: + conditions: + description: Conditions contains details for the current state of + the ManagedCluster. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + ready: + description: Flag indicating whether the unmanaged cluster is in the + ready state or not + type: boolean + required: + - ready + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedmachines.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedmachines.yaml new file mode 100644 index 000000000..0482e8710 --- /dev/null +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedmachines.yaml @@ -0,0 +1,127 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + labels: + cluster.x-k8s.io/v1beta1: v1alpha1 + name: unmanagedmachines.hmc.mirantis.com +spec: + group: hmc.mirantis.com + names: + kind: UnmanagedMachine + listKind: UnmanagedMachineList + plural: unmanagedmachines + singular: unmanagedmachine + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Machine ready status + jsonPath: .status.ready + name: Ready + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: UnmanagedMachine is the Schema for the unmanagedmachines API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: UnmanagedMachineSpec defines the desired state of UnmanagedMachine + properties: + clusterName: + type: string + providerID: + description: |- + INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file + type: string + type: object + status: + description: UnmanagedMachineStatus defines the observed state of UnmanagedMachine + properties: + conditions: + description: Conditions contains details for the current state of + the ManagedCluster + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + ready: + description: Flag indicating whether the machine is in the ready state + or not + type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/templates/provider/hmc/templates/rbac/controller/rolebindings.yaml b/templates/provider/hmc/templates/rbac/controller/rolebindings.yaml index d510c533e..a35d934fa 100644 --- a/templates/provider/hmc/templates/rbac/controller/rolebindings.yaml +++ b/templates/provider/hmc/templates/rbac/controller/rolebindings.yaml @@ -28,3 +28,20 @@ subjects: - kind: ServiceAccount name: '{{ include "hmc.fullname" . }}-controller-manager' namespace: '{{ .Release.Namespace }}' + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "hmc.fullname" . }}-capi-manager-rolebinding + namespace: {{ .Release.Namespace }} + labels: + {{- include "hmc.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: '{{ include "hmc.fullname" . }}-manager-role' +subjects: + - kind: ServiceAccount + name: 'capi-manager' + namespace: '{{ .Release.Namespace }}' diff --git a/templates/provider/hmc/templates/rbac/controller/roles.yaml b/templates/provider/hmc/templates/rbac/controller/roles.yaml index 69206ee6b..b805d1bf6 100644 --- a/templates/provider/hmc/templates/rbac/controller/roles.yaml +++ b/templates/provider/hmc/templates/rbac/controller/roles.yaml @@ -209,6 +209,26 @@ rules: - "" resources: - secrets + verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} +- apiGroups: + - hmc.mirantis.com + resources: + - unmanagedclusters + - unmanagedclusters/status + - unmanagedmachines + - unmanagedmachines/status + verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} +- apiGroups: + - cluster.x-k8s.io + resources: + - clusters + - machines + verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} + - delete +- apiGroups: + - config.projectsveltos.io + resources: + - clustersummaries verbs: {{ include "rbac.viewerVerbs" . | nindent 4 }} --- apiVersion: rbac.authorization.k8s.io/v1 @@ -226,3 +246,20 @@ rules: verbs: - get - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-capi-role + labels: + {{- include "hmc.labels" . 
| nindent 4 }} + cluster.x-k8s.io/aggregate-to-manager: "true" +rules: + - apiGroups: + - hmc.mirantis.com + resources: + - unmanagedclusters + - unmanagedclusters/status + - unmanagedmachines + - unmanagedmachines/status + verbs: {{ include "rbac.viewerVerbs" . | nindent 4 }} \ No newline at end of file diff --git a/templates/provider/hmc/templates/rbac/user-facing/unmanagedcluster-editor.yaml b/templates/provider/hmc/templates/rbac/user-facing/unmanagedcluster-editor.yaml new file mode 100644 index 000000000..014e76e9f --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/unmanagedcluster-editor.yaml @@ -0,0 +1,13 @@ +# permissions for end users to edit unmanagedclusters. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + hmc.mirantis.com/aggregate-to-global-admin: "true" + name: {{ include "hmc.fullname" . }}-unmanagedcluster-editor-role +rules: +- apiGroups: + - hmc.mirantis.com + resources: + - unmanagedclusters + verbs: {{ include "rbac.editorVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/unmanagedcluster-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/unmanagedcluster-viewer.yaml new file mode 100644 index 000000000..2196efc69 --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/unmanagedcluster-viewer.yaml @@ -0,0 +1,13 @@ +# permissions for end users to view unmanagedclusters. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + hmc.mirantis.com/aggregate-to-global-admin: "true" + name: unmanagedcluster-viewer-role +rules: +- apiGroups: + - hmc.mirantis.com + resources: + - unmanagedclusters + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} \ No newline at end of file diff --git a/templates/provider/hmc/templates/webhooks.yaml b/templates/provider/hmc/templates/webhooks.yaml index b0e41200e..04674dfff 100644 --- a/templates/provider/hmc/templates/webhooks.yaml +++ b/templates/provider/hmc/templates/webhooks.yaml @@ -233,4 +233,26 @@ webhooks: resources: - servicetemplatechains sideEffects: None + - admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: {{ include "hmc.webhook.serviceName" . }} + namespace: {{ include "hmc.webhook.serviceNamespace" . }} + path: /validate-hmc-mirantis-com-v1alpha1-unmanagedcluster + failurePolicy: Fail + matchPolicy: Equivalent + name: validation.unmanagedcluster.hmc.mirantis.com + rules: + - apiGroups: + - hmc.mirantis.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - unmanagedclusters + sideEffects: None {{- end }} diff --git a/test/objects/unmanagedcluster/unmanagedcluster.go b/test/objects/unmanagedcluster/unmanagedcluster.go new file mode 100644 index 000000000..c796e1771 --- /dev/null +++ b/test/objects/unmanagedcluster/unmanagedcluster.go @@ -0,0 +1,41 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package unmanagedcluster + +import "github.com/Mirantis/hmc/api/v1alpha1" + +type Opt func(unmanagedCluster *v1alpha1.UnmanagedCluster) + +const ( + DefaultName = "hmc-uc" +) + +func NewUnmanagedCluster(opts ...Opt) *v1alpha1.UnmanagedCluster { + uc := &v1alpha1.UnmanagedCluster{ + Spec: v1alpha1.UnmanagedClusterSpec{Name: DefaultName}, + } + + for _, opt := range opts { + opt(uc) + } + return uc +} + +func WithNameAndNamespace(name, namespace string) Opt { + return func(uc *v1alpha1.UnmanagedCluster) { + uc.Name = name + uc.Namespace = namespace + } +} From c88f05d7fde824b77a0f43edb88f622bc55fc7cf Mon Sep 17 00:00:00 2001 From: Kyle Wuolle Date: Tue, 12 Nov 2024 16:13:14 -0800 Subject: [PATCH 2/5] Fixed review comments. Still need to fix up RBAC issues --- Makefile | 17 +- api/v1alpha1/unmanagedcluster_types.go | 10 +- api/v1alpha1/unmanagedmachine_types.go | 11 +- go.mod | 1 - go.sum | 2 - internal/controller/suite_test.go | 3 + .../controller/unmanagedcluster_controller.go | 154 +++++------------- .../unmanagedcluster_controller_test.go | 6 - .../controller/unmanagedmachine_controller.go | 81 +++++++-- .../unmanagedmachine_controller_test.go | 3 - .../hmc.mirantis.com_unmanagedclusters.yaml | 3 +- .../hmc.mirantis.com_unmanagedmachines.yaml | 6 +- .../unmanagedcluster/unmanagedcluster.go | 19 ++- 13 files changed, 136 insertions(+), 180 deletions(-) diff --git a/Makefile b/Makefile index a51d417e5..3f6b43a9e 100644 --- a/Makefile +++ b/Makefile @@ -403,11 +403,9 @@ FLUX_SOURCE_CHART_CRD ?= $(EXTERNAL_CRD_DIR)/$(FLUX_SOURCE_CHART_NAME)-$(FLUX_SO FLUX_HELM_VERSION ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/fluxcd/helm-controller/api") | .Version') FLUX_HELM_CRD ?= $(EXTERNAL_CRD_DIR)/helm-$(FLUX_HELM_VERSION).yaml CAPI_VERSION ?= v1.8.4 -CAPI_CRD ?= $(EXTERNAL_CRD_DIR)/capi-$(CAPI_VERSION).yaml -K0SMOTRON_VERSION ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/k0sproject/k0smotron") | .Version') -K0SMOTRON_CRD ?= $(EXTERNAL_CRD_DIR)/k0smotron-$(K0SMOTRON_VERSION).yaml +CAPI_REPO_NAME ?= capi +CAPI_CRD ?= $(EXTERNAL_CRD_DIR)/$(CAPI_REPO_NAME)-$(CAPI_VERSION).yaml FLUX_HELM_NAME ?= helm -FLUX_HELM_CRD ?= $(EXTERNAL_CRD_DIR)/$(FLUX_HELM_NAME)-$(FLUX_HELM_VERSION).yaml SVELTOS_VERSION ?= v$(shell $(YQ) -r '.appVersion' $(PROVIDER_TEMPLATES_DIR)/projectsveltos/Chart.yaml) SVELTOS_NAME ?= sveltos @@ -483,16 +481,13 @@ $(SVELTOS_CRD): | yq $(EXTERNAL_CRD_DIR) rm -f $(EXTERNAL_CRD_DIR)/$(SVELTOS_NAME)* curl -s --fail https://raw.githubusercontent.com/projectsveltos/sveltos/$(SVELTOS_VERSION)/manifest/crds/sveltos_crds.yaml > $(SVELTOS_CRD) -$(K0SMOTRON_CRD): $(EXTERNAL_CRD_DIR) - rm -f $(K0SMOTRON_CRD) - curl -s https://raw.githubusercontent.com/k0sproject/k0smotron/$(K0SMOTRON_VERSION)/config/crd/bases/infrastructure.cluster.x-k8s.io_remoteclusters.yaml > $(K0SMOTRON_CRD) - $(CAPI_CRD): $(EXTERNAL_CRD_DIR) - rm -f $(CAPI_CRD) - curl -s https://raw.githubusercontent.com/kubernetes-sigs/cluster-api/$(CAPI_VERSION)/config/crd/bases/cluster.x-k8s.io_clusters.yaml > $(CAPI_CRD) + rm -f $(EXTERNAL_CRD_DIR)/$(CAPI_REPO_NAME)* + curl -s --fail https://raw.githubusercontent.com/kubernetes-sigs/cluster-api/$(CAPI_VERSION)/config/crd/bases/cluster.x-k8s.io_clusters.yaml > $(CAPI_CRD) + curl -s --fail https://raw.githubusercontent.com/kubernetes-sigs/cluster-api/$(CAPI_VERSION)/config/crd/bases/cluster.x-k8s.io_machines.yaml >> $(CAPI_CRD) .PHONY: external-crd -external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) 
$(FLUX_SOURCE_REPO_CRD) $(SVELTOS_CRD) $(K0SMOTRON_CRD) $(CAPI_CRD) +external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD) $(SVELTOS_CRD) $(CAPI_CRD) .PHONY: kind kind: $(KIND) ## Download kind locally if necessary. diff --git a/api/v1alpha1/unmanagedcluster_types.go b/api/v1alpha1/unmanagedcluster_types.go index 9fbe76b79..6a5a4a3d6 100644 --- a/api/v1alpha1/unmanagedcluster_types.go +++ b/api/v1alpha1/unmanagedcluster_types.go @@ -18,12 +18,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - const ( UnmanagedClusterKind = "UnmanagedCluster" - UnmanagedClusterFinalizer = "hmc.mirantis.com/unmanage-dcluster" + UnmanagedClusterFinalizer = "hmc.mirantis.com/unmanaged-cluster" AllNodesCondition = "AllNodesCondition" NodeCondition = "NodeCondition" HelmChart = "HelmChart" @@ -31,10 +28,6 @@ const ( // UnmanagedClusterSpec defines the desired state of UnmanagedCluster type UnmanagedClusterSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - - Name string `json:"name,omitempty"` // Services is a list of services created via ServiceTemplates // that could be installed on the target cluster. Services []ServiceSpec `json:"services,omitempty"` @@ -59,6 +52,7 @@ type UnmanagedClusterSpec struct { // UnmanagedClusterStatus defines the observed state of UnmanagedCluster type UnmanagedClusterStatus struct { // Flag indicating whether the unmanaged cluster is in the ready state or not + // +kubebuilder:default:=false Ready bool `json:"ready"` // Conditions contains details for the current state of the ManagedCluster. diff --git a/api/v1alpha1/unmanagedmachine_types.go b/api/v1alpha1/unmanagedmachine_types.go index 45e2025d6..a9e9eb41e 100644 --- a/api/v1alpha1/unmanagedmachine_types.go +++ b/api/v1alpha1/unmanagedmachine_types.go @@ -18,20 +18,17 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
- // UnmanagedMachineSpec defines the desired state of UnmanagedMachine type UnmanagedMachineSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file - ProviderID string `json:"providerID,omitempty"` - ClusterName string `json:"clusterName,omitempty"` + ProviderID string `json:"providerID,omitempty"` + ClusterName string `json:"clusterName,omitempty"` + ControlPlane bool `json:"controlPlane,omitempty"` } // UnmanagedMachineStatus defines the observed state of UnmanagedMachine type UnmanagedMachineStatus struct { // Flag indicating whether the machine is in the ready state or not + // +kubebuilder:default:=false Ready bool `json:"ready,omitempty"` // Conditions contains details for the current state of the ManagedCluster Conditions []metav1.Condition `json:"conditions,omitempty"` diff --git a/go.mod b/go.mod index d073577ac..58a77e3a4 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,6 @@ require ( github.com/fluxcd/source-controller/api v1.4.1 github.com/google/uuid v1.6.0 github.com/hashicorp/go-retryablehttp v0.7.7 - github.com/k0sproject/k0smotron v1.1.2 github.com/onsi/ginkgo/v2 v2.21.0 github.com/onsi/gomega v1.35.1 github.com/opencontainers/go-digest v1.0.1-0.20231025023718-d50d2fec9c98 diff --git a/go.sum b/go.sum index a39032154..3f0317f23 100644 --- a/go.sum +++ b/go.sum @@ -291,8 +291,6 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/k0sproject/k0smotron v1.1.2 h1:5jyGugN37Yk64pd/YTcuJwfBAVUx820MGI7zEeNdlRI= -github.com/k0sproject/k0smotron v1.1.2/go.mod h1:TZVJaCTigFGpKpUkpfIsWPSkpCLAYf73420bI9Gt6n8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 37f7f0959..f665334d6 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -35,6 +35,7 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "sigs.k8s.io/cluster-api/api/v1beta1" utilyaml "sigs.k8s.io/cluster-api/util/yaml" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -118,6 +119,8 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) err = sveltosv1beta1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = v1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) // +kubebuilder:scaffold:scheme diff --git a/internal/controller/unmanagedcluster_controller.go b/internal/controller/unmanagedcluster_controller.go index e11e7172c..f6830745f 100644 --- a/internal/controller/unmanagedcluster_controller.go +++ b/internal/controller/unmanagedcluster_controller.go @@ -16,12 +16,12 @@ package controller import ( "context" + "errors" "fmt" "net/url" "strconv" "strings" - v1beta12 "github.com/k0sproject/k0smotron/api/infrastructure/v1beta1" sveltosv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1" "github.com/projectsveltos/libsveltos/lib/clusterproxy" corev1 
"k8s.io/api/core/v1" @@ -30,7 +30,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/clientcmd" "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/kubeconfig" @@ -49,32 +48,10 @@ type UnmanagedClusterReconciler struct { Scheme *runtime.Scheme } -// +kubebuilder:rbac:groups=hmc.mirantis.com.hmc.mirantis.com,resources=unmanagedclusters,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=hmc.mirantis.com.hmc.mirantis.com,resources=unmanagedclusters/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=hmc.mirantis.com.hmc.mirantis.com,resources=unmanagedclusters/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the UnmanagedCluster object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile func (r *UnmanagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { l := ctrl.LoggerFrom(ctx) l.Info("Reconciling UnmanagedCluster") - - if err := v1beta12.AddToScheme(r.Client.Scheme()); err != nil { - return ctrl.Result{}, err - } - - if err := v1beta1.AddToScheme(r.Client.Scheme()); err != nil { - return ctrl.Result{}, err - } - - unmanagedCluster := new(hmc.UnmanagedCluster) + unmanagedCluster := &hmc.UnmanagedCluster{} if err := r.Get(ctx, req.NamespacedName, unmanagedCluster); err != nil { if apierrors.IsNotFound(err) { l.Info("UnmanagedCluster not found, ignoring since object must be deleted") @@ -94,6 +71,10 @@ func (r *UnmanagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req // SetupWithManager sets up the controller with the Manager. func (r *UnmanagedClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { + if err := v1beta1.AddToScheme(r.Client.Scheme()); err != nil { + return err + } + return ctrl.NewControllerManagedBy(mgr). For(&hmc.UnmanagedCluster{}). 
Complete(r) @@ -150,25 +131,23 @@ func (r *UnmanagedClusterReconciler) createCluster(ctx context.Context, unmanage }, }, } - clusterObject.Status.SetTypedPhase(v1beta1.ClusterPhaseUnknown) + err = r.Client.Create(ctx, clusterObject) if err != nil && !apierrors.IsAlreadyExists(err) { - return fmt.Errorf("failed to create unmanagedCluster object %s/%s: %s", unmanagedCluster.Namespace, unmanagedCluster.Name, err) + return fmt.Errorf("failed to create unmanagedCluster object %s/%s: %w", unmanagedCluster.Namespace, unmanagedCluster.Name, err) } return nil } func (r *UnmanagedClusterReconciler) createMachines(ctx context.Context, unmanagedCluster *hmc.UnmanagedCluster) error { - l := ctrl.LoggerFrom(ctx) + // l := ctrl.LoggerFrom(ctx) nodelist, err := r.getNodeList(ctx, unmanagedCluster) if err != nil { return err } - kubeConfigSecretName := secret.Name(unmanagedCluster.Name, secret.Kubeconfig) - // find any existing unmanaged machines for the cluster to see if any need to be cleaned up because // the underlying node was removed existingMachines := &hmc.UnmanagedMachineList{} @@ -179,13 +158,17 @@ func (r *UnmanagedClusterReconciler) createMachines(ctx context.Context, unmanag return fmt.Errorf("failed to list existing unmanaged machines: %w", err) } - existingMachinesByName := map[string]*hmc.UnmanagedMachine{} + existingMachinesByName := make(map[string]*hmc.UnmanagedMachine) for _, existingMachine := range existingMachines.Items { existingMachinesByName[existingMachine.GetName()] = &existingMachine } for _, node := range nodelist.Items { delete(existingMachinesByName, node.Name) + isControlPlane := false + if _, ok := node.Labels[v1beta1.NodeRoleLabelPrefix+"/control-plane"]; ok { + isControlPlane = true + } unmanagedMachine := hmc.UnmanagedMachine{ TypeMeta: metav1.TypeMeta{ @@ -200,11 +183,9 @@ func (r *UnmanagedClusterReconciler) createMachines(ctx context.Context, unmanag }, }, Spec: hmc.UnmanagedMachineSpec{ - ProviderID: node.Spec.ProviderID, - ClusterName: unmanagedCluster.Name, - }, - Status: hmc.UnmanagedMachineStatus{ - Ready: true, + ProviderID: node.Spec.ProviderID, + ClusterName: unmanagedCluster.Name, + ControlPlane: isControlPlane, }, } @@ -212,65 +193,6 @@ func (r *UnmanagedClusterReconciler) createMachines(ctx context.Context, unmanag if err != nil && !apierrors.IsAlreadyExists(err) { return fmt.Errorf("failed to create machine: %w", err) } - - ref := types.NamespacedName{Name: unmanagedMachine.Name, Namespace: unmanagedMachine.Namespace} - if err := r.Get(ctx, ref, &unmanagedMachine); err != nil { - return fmt.Errorf("failed to get unmanaged machine: %w", err) - } - unmanagedMachine.Status = hmc.UnmanagedMachineStatus{ - Ready: true, - } - if err := r.Status().Update(ctx, &unmanagedMachine); err != nil { - return fmt.Errorf("failed to update unmanaged machine status: %w", err) - } - - l.Info("Create machine", "node", node.Name) - machine := v1beta1.Machine{ - TypeMeta: metav1.TypeMeta{ - Kind: "Machine", - APIVersion: v1beta1.GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: node.Name, - Namespace: unmanagedCluster.Namespace, - Labels: map[string]string{v1beta1.GroupVersion.Identifier(): hmc.GroupVersion.Version, v1beta1.ClusterNameLabel: unmanagedCluster.Name}, - }, - Spec: v1beta1.MachineSpec{ - ClusterName: unmanagedCluster.Name, - Bootstrap: v1beta1.Bootstrap{ - DataSecretName: &kubeConfigSecretName, - }, - InfrastructureRef: corev1.ObjectReference{ - Kind: "UnmanagedMachine", - Namespace: unmanagedCluster.Namespace, - Name: node.Name, - 
APIVersion: hmc.GroupVersion.Identifier(), - }, - ProviderID: &node.Spec.ProviderID, - }, - Status: v1beta1.MachineStatus{ - NodeRef: &corev1.ObjectReference{ - Kind: "Node", - Name: node.Name, - APIVersion: "v1", - }, - NodeInfo: &corev1.NodeSystemInfo{}, - CertificatesExpiryDate: nil, - BootstrapReady: true, - InfrastructureReady: true, - }, - } - - if _, ok := node.Labels[v1beta1.NodeRoleLabelPrefix+"/control-plane"]; ok { - if machine.Labels == nil { - machine.Labels = make(map[string]string) - } - machine.Labels[v1beta1.MachineControlPlaneLabel] = "true" - } - err = r.Create(ctx, &machine) - if err != nil && !apierrors.IsAlreadyExists(err) { - return fmt.Errorf("failed to create machine: %w", err) - } } // cleanup any orphaned unmanaged machines and capi machines @@ -283,6 +205,14 @@ func (r *UnmanagedClusterReconciler) createMachines(ctx context.Context, unmanag ObjectMeta: metav1.ObjectMeta{ Name: existingUnmanagedMachine.Name, Namespace: unmanagedCluster.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: hmc.GroupVersion.Identifier(), + Kind: "UnmanagedMachine", + Name: existingUnmanagedMachine.Name, + UID: existingUnmanagedMachine.UID, + }, + }, }, }); err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("failed to delete orphaned machine: %w", err) @@ -313,29 +243,19 @@ func (r *UnmanagedClusterReconciler) reconcileUnmanagedCluster(ctx context.Conte } if err := r.createCluster(ctx, unmanagedCluster); err != nil { - return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err + return ctrl.Result{Requeue: true}, err } if err := r.createServices(ctx, unmanagedCluster); err != nil { - return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err + return ctrl.Result{Requeue: true}, err } if err := r.createMachines(ctx, unmanagedCluster); err != nil { - return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err + return ctrl.Result{Requeue: true}, err } requeue, err := r.updateStatus(ctx, unmanagedCluster) - if err != nil { - if requeue { - return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err - } - return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err - } - - if requeue { - return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil - } - return ctrl.Result{}, nil + return ctrl.Result{Requeue: requeue}, err } func (r *UnmanagedClusterReconciler) createServices(ctx context.Context, mc *hmc.UnmanagedCluster) error { @@ -380,7 +300,7 @@ func (r *UnmanagedClusterReconciler) reconcileDeletion(ctx context.Context, unma &hmc.UnmanagedMachine{}, deleteAllOpts..., ); err != nil && !apierrors.IsNotFound(err) { - return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete unmanaged machines: %w", err) + return ctrl.Result{Requeue: true}, fmt.Errorf("failed to delete unmanaged machines: %w", err) } if err := r.DeleteAllOf( @@ -388,7 +308,7 @@ func (r *UnmanagedClusterReconciler) reconcileDeletion(ctx context.Context, unma &v1beta1.Machine{}, deleteAllOpts..., ); err != nil && !apierrors.IsNotFound(err) { - return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete unmanaged machines: %w", err) + return ctrl.Result{Requeue: true}, fmt.Errorf("failed to delete unmanaged machines: %w", err) } if err := r.Delete(ctx, &corev1.Secret{ @@ -400,7 +320,7 @@ func (r *UnmanagedClusterReconciler) reconcileDeletion(ctx context.Context, unma }, }, }); err != nil && !apierrors.IsNotFound(err) { - return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete cluster secret: %w", 
err) + return ctrl.Result{Requeue: true}, fmt.Errorf("failed to delete cluster secret: %w", err) } if err := r.Delete(ctx, &v1beta1.Cluster{ @@ -412,7 +332,7 @@ func (r *UnmanagedClusterReconciler) reconcileDeletion(ctx context.Context, unma }, }, }); err != nil && !apierrors.IsNotFound(err) { - return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete cluster: %w", err) + return ctrl.Result{Requeue: true}, fmt.Errorf("failed to delete cluster: %w", err) } if controllerutil.RemoveFinalizer(unmanagedCluster, hmc.UnmanagedClusterFinalizer) { @@ -431,6 +351,10 @@ func (r *UnmanagedClusterReconciler) updateStatus(ctx context.Context, cluster * return true, err } + defer func() { + err = errors.Join(err, r.Status().Update(ctx, cluster)) + }() + allNodeCondition := metav1.Condition{ Type: hmc.AllNodesCondition, Status: "True", @@ -497,9 +421,5 @@ func (r *UnmanagedClusterReconciler) updateStatus(ctx context.Context, cluster * } } - if err := r.Status().Update(ctx, cluster); err != nil { - return true, fmt.Errorf("failed to update unmanaged cluster status: %w", err) - } - return requeue, nil } diff --git a/internal/controller/unmanagedcluster_controller_test.go b/internal/controller/unmanagedcluster_controller_test.go index 1c404b59b..d70e3e137 100644 --- a/internal/controller/unmanagedcluster_controller_test.go +++ b/internal/controller/unmanagedcluster_controller_test.go @@ -17,7 +17,6 @@ package controller import ( "context" - "github.com/k0sproject/k0smotron/api/infrastructure/v1beta1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -49,11 +48,9 @@ var _ = Describe("UnmanagedCluster Controller", func() { unmanagedcluster := &hmc.UnmanagedCluster{} BeforeEach(func() { - Expect(v1beta1.AddToScheme(k8sClient.Scheme())).To(Succeed()) By("creating the custom resource for the Kind UnmanagedCluster") secretName := secret.Name(unmanagedClusterName, secret.Kubeconfig) - secret := &corev1.Secret{ TypeMeta: metav1.TypeMeta{ Kind: "Secret", @@ -80,7 +77,6 @@ var _ = Describe("UnmanagedCluster Controller", func() { Namespace: unmanagedClusterNamespace, }, Spec: hmc.UnmanagedClusterSpec{ - Name: unmanagedClusterName, Services: nil, ServicesPriority: 1, StopOnConflict: true, @@ -124,8 +120,6 @@ var _ = Describe("UnmanagedCluster Controller", func() { NamespacedName: typeNamespacedName, }) Expect(err).NotTo(HaveOccurred()) - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. 
}) }) }) diff --git a/internal/controller/unmanagedmachine_controller.go b/internal/controller/unmanagedmachine_controller.go index 988b046fa..c29d8146a 100644 --- a/internal/controller/unmanagedmachine_controller.go +++ b/internal/controller/unmanagedmachine_controller.go @@ -17,6 +17,7 @@ package controller import ( "context" "fmt" + "strconv" "github.com/projectsveltos/libsveltos/lib/clusterproxy" corev1 "k8s.io/api/core/v1" @@ -25,6 +26,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/secret" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -38,14 +41,9 @@ type UnmanagedMachineReconciler struct { Scheme *runtime.Scheme } -// +kubebuilder:rbac:groups=hmc.mirantis.com.hmc.mirantis.com,resources=unmanagedmachines,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=hmc.mirantis.com.hmc.mirantis.com,resources=unmanagedmachines/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=hmc.mirantis.com.hmc.mirantis.com,resources=unmanagedmachines/finalizers,verbs=update - func (r *UnmanagedMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { l := log.FromContext(ctx) - - unmanagedMachine := new(hmc.UnmanagedMachine) + unmanagedMachine := &hmc.UnmanagedMachine{} if err := r.Get(ctx, req.NamespacedName, unmanagedMachine); err != nil { if apierrors.IsNotFound(err) { l.Info("UnmanagedMachine not found, ignoring since object must be deleted") @@ -55,18 +53,17 @@ func (r *UnmanagedMachineReconciler) Reconcile(ctx context.Context, req ctrl.Req return ctrl.Result{}, err } - requeue, err := r.reconcileStatus(ctx, unmanagedMachine) + requeue, err := r.reconcileMachine(ctx, unmanagedMachine) if err != nil { - if requeue { - return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err - } - return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err + return ctrl.Result{Requeue: requeue}, err } - if requeue { - return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil + requeue, err = r.reconcileStatus(ctx, unmanagedMachine) + if err != nil { + return ctrl.Result{Requeue: requeue}, err } - return ctrl.Result{}, nil + + return ctrl.Result{Requeue: requeue}, nil } func (r *UnmanagedMachineReconciler) reconcileStatus(ctx context.Context, unmanagedMachine *hmc.UnmanagedMachine) (bool, error) { @@ -111,7 +108,63 @@ func (r *UnmanagedMachineReconciler) reconcileStatus(ctx context.Context, unmana // SetupWithManager sets up the controller with the Manager. func (r *UnmanagedMachineReconciler) SetupWithManager(mgr ctrl.Manager) error { + if err := v1beta1.AddToScheme(r.Client.Scheme()); err != nil { + return err + } + return ctrl.NewControllerManagedBy(mgr). For(&hmc.UnmanagedMachine{}). 
Complete(r) } + +func (r *UnmanagedMachineReconciler) reconcileMachine(ctx context.Context, unmanagedMachine *hmc.UnmanagedMachine) (bool, error) { + l := log.FromContext(ctx) + + secretName := secret.Name(unmanagedMachine.Spec.ClusterName, secret.Kubeconfig) + l.Info("Create machine", "node", unmanagedMachine.Name) + machine := v1beta1.Machine{ + TypeMeta: metav1.TypeMeta{ + Kind: "Machine", + APIVersion: v1beta1.GroupVersion.Identifier(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedMachine.Name, + Namespace: unmanagedMachine.Namespace, + Labels: map[string]string{ + v1beta1.GroupVersion.Identifier(): hmc.GroupVersion.Version, + v1beta1.ClusterNameLabel: unmanagedMachine.Spec.ClusterName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: hmc.GroupVersion.Identifier(), + Kind: "UnmanagedMachine", + Name: unmanagedMachine.Name, + UID: unmanagedMachine.UID, + }, + }, + }, + Spec: v1beta1.MachineSpec{ + ClusterName: unmanagedMachine.Spec.ClusterName, + Bootstrap: v1beta1.Bootstrap{ + DataSecretName: &secretName, + }, + InfrastructureRef: corev1.ObjectReference{ + Kind: "UnmanagedMachine", + Namespace: unmanagedMachine.Namespace, + Name: unmanagedMachine.Name, + APIVersion: hmc.GroupVersion.Identifier(), + }, + ProviderID: &unmanagedMachine.Spec.ProviderID, + }, + } + + if machine.Labels == nil { + machine.Labels = make(map[string]string) + } + machine.Labels[v1beta1.MachineControlPlaneLabel] = strconv.FormatBool(unmanagedMachine.Spec.ControlPlane) + if err := r.Create(ctx, &machine); err != nil && !apierrors.IsAlreadyExists(err) { + return true, fmt.Errorf("failed to create machine: %w", err) + } + + return false, nil +} diff --git a/internal/controller/unmanagedmachine_controller_test.go b/internal/controller/unmanagedmachine_controller_test.go index f8d0b2b92..7853c3458 100644 --- a/internal/controller/unmanagedmachine_controller_test.go +++ b/internal/controller/unmanagedmachine_controller_test.go @@ -17,7 +17,6 @@ package controller import ( "context" - "github.com/k0sproject/k0smotron/api/infrastructure/v1beta1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -48,8 +47,6 @@ var _ = Describe("UnmanagedMachine Controller", func() { BeforeEach(func() { By("creating the custom resource for the Kind UnmanagedCluster") - Expect(v1beta1.AddToScheme(k8sClient.Scheme())).To(Succeed()) - Expect(capi.AddToScheme(k8sClient.Scheme())).To(Succeed()) secretName := secret.Name(unmanagedClusterName, secret.Kubeconfig) secret := &corev1.Secret{ diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml index 4c8459ad8..883c4a2f0 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml @@ -41,8 +41,6 @@ spec: spec: description: UnmanagedClusterSpec defines the desired state of UnmanagedCluster properties: - name: - type: string services: description: |- Services is a list of services created via ServiceTemplates @@ -155,6 +153,7 @@ spec: type: object type: array ready: + default: false description: Flag indicating whether the unmanaged cluster is in the ready state or not type: boolean diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedmachines.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedmachines.yaml index 0482e8710..53473d117 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedmachines.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedmachines.yaml @@ -48,10 +48,9 @@ spec: properties: clusterName: type: string + controlPlane: + type: boolean providerID: - description: |- - INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - Important: Run "make" to regenerate code after modifying this file type: string type: object status: @@ -116,6 +115,7 @@ spec: type: object type: array ready: + default: false description: Flag indicating whether the machine is in the ready state or not type: boolean diff --git a/test/objects/unmanagedcluster/unmanagedcluster.go b/test/objects/unmanagedcluster/unmanagedcluster.go index c796e1771..e90052559 100644 --- a/test/objects/unmanagedcluster/unmanagedcluster.go +++ b/test/objects/unmanagedcluster/unmanagedcluster.go @@ -14,17 +14,24 @@ package unmanagedcluster -import "github.com/Mirantis/hmc/api/v1alpha1" +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -type Opt func(unmanagedCluster *v1alpha1.UnmanagedCluster) + hmc "github.com/Mirantis/hmc/api/v1alpha1" +) + +type Opt func(unmanagedCluster *hmc.UnmanagedCluster) const ( DefaultName = "hmc-uc" ) -func NewUnmanagedCluster(opts ...Opt) *v1alpha1.UnmanagedCluster { - uc := &v1alpha1.UnmanagedCluster{ - Spec: v1alpha1.UnmanagedClusterSpec{Name: DefaultName}, +func NewUnmanagedCluster(opts ...Opt) *hmc.UnmanagedCluster { + uc := &hmc.UnmanagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: DefaultName, + }, + Spec: hmc.UnmanagedClusterSpec{}, } for _, opt := range opts { @@ -34,7 +41,7 @@ func NewUnmanagedCluster(opts ...Opt) *v1alpha1.UnmanagedCluster { } func WithNameAndNamespace(name, namespace string) Opt { - return func(uc *v1alpha1.UnmanagedCluster) { + return func(uc *hmc.UnmanagedCluster) { uc.Name = name uc.Namespace = namespace } From c689cd11ce87cbeeafa98675ab2c7a1436d0654c Mon Sep 17 00:00:00 2001 From: Kyle Wuolle Date: Wed, 13 Nov 2024 09:48:41 -0800 Subject: [PATCH 3/5] Refactor rbac --- .../hmc/templates/rbac/controller/roles.yaml | 52 +++++++++++++++++-- 1 file changed, 
48 insertions(+), 4 deletions(-) diff --git a/templates/provider/hmc/templates/rbac/controller/roles.yaml b/templates/provider/hmc/templates/rbac/controller/roles.yaml index b805d1bf6..79e2a3978 100644 --- a/templates/provider/hmc/templates/rbac/controller/roles.yaml +++ b/templates/provider/hmc/templates/rbac/controller/roles.yaml @@ -206,18 +206,50 @@ rules: - patch - update - apiGroups: - - "" + - "" resources: - - secrets - verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} + - secrets + verbs: + - get + - list - apiGroups: - hmc.mirantis.com resources: - unmanagedclusters + verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} +- apiGroups: + - hmc.mirantis.com + resources: + - unmanagedclusters/finalizers + verbs: + - update +- apiGroups: + - hmc.mirantis.com + resources: - unmanagedclusters/status + verbs: + - get + - patch + - update +- apiGroups: + - hmc.mirantis.com + resources: - unmanagedmachines - - unmanagedmachines/status verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} +- apiGroups: + - hmc.mirantis.com + resources: + - unmanagedmachines/finalizers + verbs: + - update +- apiGroups: + - hmc.mirantis.com + resources: + - unmanagedmachines/status + verbs: + - get + - patch + - update - apiGroups: - cluster.x-k8s.io resources: @@ -259,7 +291,19 @@ rules: - hmc.mirantis.com resources: - unmanagedclusters + verbs: {{ include "rbac.viewerVerbs" . | nindent 4 }} + - apiGroups: + - hmc.mirantis.com + resources: - unmanagedclusters/status + verbs: {{ include "rbac.viewerVerbs" . | nindent 4 }} + - apiGroups: + - hmc.mirantis.com + resources: - unmanagedmachines + verbs: {{ include "rbac.viewerVerbs" . | nindent 4 }} + - apiGroups: + - hmc.mirantis.com + resources: - unmanagedmachines/status verbs: {{ include "rbac.viewerVerbs" . | nindent 4 }} \ No newline at end of file From 199a194fdaec6e2fa2bea81619e7cd13dd2222d1 Mon Sep 17 00:00:00 2001 From: Kyle Wuolle Date: Wed, 13 Nov 2024 10:04:37 -0800 Subject: [PATCH 4/5] Refactored the service spec into a common struct --- api/v1alpha1/common.go | 22 +++++++++ api/v1alpha1/managedcluster_types.go | 22 +-------- api/v1alpha1/multiclusterservice_types.go | 22 +-------- api/v1alpha1/unmanagedcluster_types.go | 20 +------- api/v1alpha1/zz_generated.deepcopy.go | 46 ++++++++++--------- .../hmc.mirantis.com_managedclusters.yaml | 3 -- ...hmc.mirantis.com_multiclusterservices.yaml | 3 -- 7 files changed, 51 insertions(+), 87 deletions(-) diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go index 16f3ef3e4..6d45205b2 100644 --- a/api/v1alpha1/common.go +++ b/api/v1alpha1/common.go @@ -52,3 +52,25 @@ const ( // Provider Sveltos ProviderSveltosName = "projectsveltos" ) + +type ServicesType struct { + // Services is a list of services created via ServiceTemplates + // that could be installed on the target cluster. + Services []ServiceSpec `json:"services,omitempty"` + + // ServicesPriority sets the priority for the services defined in this spec. + // Higher value means higher priority and lower means lower. + // In case of conflict with another object managing the service, + // the one with higher priority will get to deploy its services. + ServicesPriority int32 `json:"servicesPriority,omitempty"` + // DryRun specifies whether the template should be applied after validation or only validated. + // DryRun bool `json:"dryRun,omitempty"` + + // +kubebuilder:default:=false + + // StopOnConflict specifies what to do in case of a conflict. + // E.g. If another object is already managing a service. 
+ // By default the remaining services will be deployed even if conflict is detected. + // If set to true, the deployment will stop after encountering the first conflict. + StopOnConflict bool `json:"stopOnConflict,omitempty"` +} diff --git a/api/v1alpha1/managedcluster_types.go b/api/v1alpha1/managedcluster_types.go index 03cd0118a..78ea9165e 100644 --- a/api/v1alpha1/managedcluster_types.go +++ b/api/v1alpha1/managedcluster_types.go @@ -65,29 +65,11 @@ type ManagedClusterSpec struct { Template string `json:"template"` // Name reference to the related Credentials object. Credential string `json:"credential,omitempty"` - // Services is a list of services created via ServiceTemplates - // that could be installed on the target cluster. - Services []ServiceSpec `json:"services,omitempty"` - - // +kubebuilder:default:=100 - // +kubebuilder:validation:Minimum=1 - // +kubebuilder:validation:Maximum=2147483646 - - // ServicesPriority sets the priority for the services defined in this spec. - // Higher value means higher priority and lower means lower. - // In case of conflict with another object managing the service, - // the one with higher priority will get to deploy its services. - ServicesPriority int32 `json:"servicesPriority,omitempty"` + // DryRun specifies whether the template should be applied after validation or only validated. DryRun bool `json:"dryRun,omitempty"` - // +kubebuilder:default:=false - - // StopOnConflict specifies what to do in case of a conflict. - // E.g. If another object is already managing a service. - // By default the remaining services will be deployed even if conflict is detected. - // If set to true, the deployment will stop after encountering the first conflict. - StopOnConflict bool `json:"stopOnConflict,omitempty"` + ServicesType `json:",inline"` } // ManagedClusterStatus defines the observed state of ManagedCluster diff --git a/api/v1alpha1/multiclusterservice_types.go b/api/v1alpha1/multiclusterservice_types.go index fe575294f..4d56b273d 100644 --- a/api/v1alpha1/multiclusterservice_types.go +++ b/api/v1alpha1/multiclusterservice_types.go @@ -63,27 +63,7 @@ type ServiceSpec struct { type MultiClusterServiceSpec struct { // ClusterSelector identifies target clusters to manage services on. ClusterSelector metav1.LabelSelector `json:"clusterSelector,omitempty"` - // Services is a list of services created via ServiceTemplates - // that could be installed on the target cluster. - Services []ServiceSpec `json:"services,omitempty"` - - // +kubebuilder:default:=100 - // +kubebuilder:validation:Minimum=1 - // +kubebuilder:validation:Maximum=2147483646 - - // ServicesPriority sets the priority for the services defined in this spec. - // Higher value means higher priority and lower means lower. - // In case of conflict with another object managing the service, - // the one with higher priority will get to deploy its services. - ServicesPriority int32 `json:"servicesPriority,omitempty"` - - // +kubebuilder:default:=false - - // StopOnConflict specifies what to do in case of a conflict. - // E.g. If another object is already managing a service. - // By default the remaining services will be deployed even if conflict is detected. - // If set to true, the deployment will stop after encountering the first conflict. - StopOnConflict bool `json:"stopOnConflict,omitempty"` + ServicesType `json:",inline"` } // ServiceStatus contains details for the state of services. 
diff --git a/api/v1alpha1/unmanagedcluster_types.go b/api/v1alpha1/unmanagedcluster_types.go index 6a5a4a3d6..41166ef27 100644 --- a/api/v1alpha1/unmanagedcluster_types.go +++ b/api/v1alpha1/unmanagedcluster_types.go @@ -28,25 +28,7 @@ const ( // UnmanagedClusterSpec defines the desired state of UnmanagedCluster type UnmanagedClusterSpec struct { - // Services is a list of services created via ServiceTemplates - // that could be installed on the target cluster. - Services []ServiceSpec `json:"services,omitempty"` - - // ServicesPriority sets the priority for the services defined in this spec. - // Higher value means higher priority and lower means lower. - // In case of conflict with another object managing the service, - // the one with higher priority will get to deploy its services. - ServicesPriority int32 `json:"servicesPriority,omitempty"` - // DryRun specifies whether the template should be applied after validation or only validated. - // DryRun bool `json:"dryRun,omitempty"` - - // +kubebuilder:default:=false - - // StopOnConflict specifies what to do in case of a conflict. - // E.g. If another object is already managing a service. - // By default the remaining services will be deployed even if conflict is detected. - // If set to true, the deployment will stop after encountering the first conflict. - StopOnConflict bool `json:"stopOnConflict,omitempty"` + ServicesType `json:",inline"` } // UnmanagedClusterStatus defines the observed state of UnmanagedCluster diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 674102e04..8ce4761bf 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -509,13 +509,7 @@ func (in *ManagedClusterSpec) DeepCopyInto(out *ManagedClusterSpec) { *out = new(apiextensionsv1.JSON) (*in).DeepCopyInto(*out) } - if in.Services != nil { - in, out := &in.Services, &out.Services - *out = make([]ServiceSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } + in.ServicesType.DeepCopyInto(&out.ServicesType) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSpec. @@ -756,13 +750,7 @@ func (in *MultiClusterServiceList) DeepCopyObject() runtime.Object { func (in *MultiClusterServiceSpec) DeepCopyInto(out *MultiClusterServiceSpec) { *out = *in in.ClusterSelector.DeepCopyInto(&out.ClusterSelector) - if in.Services != nil { - in, out := &in.Services, &out.Services - *out = make([]ServiceSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } + in.ServicesType.DeepCopyInto(&out.ServicesType) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterServiceSpec. @@ -1274,6 +1262,28 @@ func (in *ServiceTemplateStatus) DeepCopy() *ServiceTemplateStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicesType) DeepCopyInto(out *ServicesType) { + *out = *in + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]ServiceSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicesType. 
+func (in *ServicesType) DeepCopy() *ServicesType { + if in == nil { + return nil + } + out := new(ServicesType) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SupportedTemplate) DeepCopyInto(out *SupportedTemplate) { *out = *in @@ -1547,13 +1557,7 @@ func (in *UnmanagedClusterList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UnmanagedClusterSpec) DeepCopyInto(out *UnmanagedClusterSpec) { *out = *in - if in.Services != nil { - in, out := &in.Services, &out.Services - *out = make([]ServiceSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } + in.ServicesType.DeepCopyInto(&out.ServicesType) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedClusterSpec. diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml index c1ef43e63..b33882246 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml @@ -103,15 +103,12 @@ spec: type: object type: array servicesPriority: - default: 100 description: |- ServicesPriority sets the priority for the services defined in this spec. Higher value means higher priority and lower means lower. In case of conflict with another object managing the service, the one with higher priority will get to deploy its services. format: int32 - maximum: 2147483646 - minimum: 1 type: integer stopOnConflict: default: false diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml index e15a1e115..3e76cdf22 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml @@ -121,15 +121,12 @@ spec: type: object type: array servicesPriority: - default: 100 description: |- ServicesPriority sets the priority for the services defined in this spec. Higher value means higher priority and lower means lower. In case of conflict with another object managing the service, the one with higher priority will get to deploy its services. 
format: int32 - maximum: 2147483646 - minimum: 1 type: integer stopOnConflict: default: false From 56bd57803b52ce6f3face95b7a906a324ef0eb8c Mon Sep 17 00:00:00 2001 From: Kyle Wuolle Date: Wed, 13 Nov 2024 10:20:15 -0800 Subject: [PATCH 5/5] Combine the cluster / machine permissions per review --- api/v1alpha1/common.go | 3 +++ .../controller/managedcluster_controller_test.go | 10 ++++++---- .../multiclusterservice_controller_test.go | 10 ++++++---- internal/controller/unmanagedcluster_controller.go | 13 ------------- .../controller/unmanagedcluster_controller_test.go | 6 +----- .../crds/hmc.mirantis.com_managedclusters.yaml | 3 +++ .../crds/hmc.mirantis.com_multiclusterservices.yaml | 3 +++ .../crds/hmc.mirantis.com_unmanagedclusters.yaml | 3 +++ .../hmc/templates/rbac/controller/roles.yaml | 13 ++++--------- 9 files changed, 29 insertions(+), 35 deletions(-) diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go index 6d45205b2..3b5da3072 100644 --- a/api/v1alpha1/common.go +++ b/api/v1alpha1/common.go @@ -58,6 +58,9 @@ type ServicesType struct { // that could be installed on the target cluster. Services []ServiceSpec `json:"services,omitempty"` + // +kubebuilder:default:=100 + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=2147483646 // ServicesPriority sets the priority for the services defined in this spec. // Higher value means higher priority and lower means lower. // In case of conflict with another object managing the service, diff --git a/internal/controller/managedcluster_controller_test.go b/internal/controller/managedcluster_controller_test.go index ac37d1901..2ddf12716 100644 --- a/internal/controller/managedcluster_controller_test.go +++ b/internal/controller/managedcluster_controller_test.go @@ -180,10 +180,12 @@ var _ = Describe("ManagedCluster Controller", func() { Spec: hmc.ManagedClusterSpec{ Template: templateName, Credential: credentialName, - Services: []hmc.ServiceSpec{ - { - Template: svcTemplateName, - Name: "test-svc-name", + ServicesType: hmc.ServicesType{ + Services: []hmc.ServiceSpec{ + { + Template: svcTemplateName, + Name: "test-svc-name", + }, }, }, }, diff --git a/internal/controller/multiclusterservice_controller_test.go b/internal/controller/multiclusterservice_controller_test.go index 817eba6f5..19b3e3c10 100644 --- a/internal/controller/multiclusterservice_controller_test.go +++ b/internal/controller/multiclusterservice_controller_test.go @@ -176,10 +176,12 @@ var _ = Describe("MultiClusterService Controller", func() { }, }, Spec: hmc.MultiClusterServiceSpec{ - Services: []hmc.ServiceSpec{ - { - Template: serviceTemplateName, - Name: helmChartReleaseName, + ServicesType: hmc.ServicesType{ + Services: []hmc.ServiceSpec{ + { + Template: serviceTemplateName, + Name: helmChartReleaseName, + }, }, }, }, diff --git a/internal/controller/unmanagedcluster_controller.go b/internal/controller/unmanagedcluster_controller.go index f6830745f..3ca24b836 100644 --- a/internal/controller/unmanagedcluster_controller.go +++ b/internal/controller/unmanagedcluster_controller.go @@ -33,7 +33,6 @@ import ( "k8s.io/client-go/tools/clientcmd" "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/kubeconfig" - "sigs.k8s.io/cluster-api/util/secret" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -311,18 +310,6 @@ func (r *UnmanagedClusterReconciler) reconcileDeletion(ctx context.Context, unma return ctrl.Result{Requeue: true}, 
fmt.Errorf("failed to delete unmanaged machines: %w", err) } - if err := r.Delete(ctx, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: unmanagedCluster.Namespace, - Name: secret.Name(unmanagedCluster.Name, secret.Kubeconfig), - Labels: map[string]string{ - v1beta1.ClusterNameLabel: unmanagedCluster.Name, - }, - }, - }); err != nil && !apierrors.IsNotFound(err) { - return ctrl.Result{Requeue: true}, fmt.Errorf("failed to delete cluster secret: %w", err) - } - if err := r.Delete(ctx, &v1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: unmanagedCluster.Namespace, diff --git a/internal/controller/unmanagedcluster_controller_test.go b/internal/controller/unmanagedcluster_controller_test.go index d70e3e137..3a6fc7fdf 100644 --- a/internal/controller/unmanagedcluster_controller_test.go +++ b/internal/controller/unmanagedcluster_controller_test.go @@ -76,11 +76,7 @@ var _ = Describe("UnmanagedCluster Controller", func() { Name: unmanagedClusterName, Namespace: unmanagedClusterNamespace, }, - Spec: hmc.UnmanagedClusterSpec{ - Services: nil, - ServicesPriority: 1, - StopOnConflict: true, - }, + Spec: hmc.UnmanagedClusterSpec{}, } Expect(k8sClient.Create(ctx, resource)).To(Succeed()) } diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml index b33882246..c1ef43e63 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml @@ -103,12 +103,15 @@ spec: type: object type: array servicesPriority: + default: 100 description: |- ServicesPriority sets the priority for the services defined in this spec. Higher value means higher priority and lower means lower. In case of conflict with another object managing the service, the one with higher priority will get to deploy its services. format: int32 + maximum: 2147483646 + minimum: 1 type: integer stopOnConflict: default: false diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml index 3e76cdf22..e15a1e115 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml @@ -121,12 +121,15 @@ spec: type: object type: array servicesPriority: + default: 100 description: |- ServicesPriority sets the priority for the services defined in this spec. Higher value means higher priority and lower means lower. In case of conflict with another object managing the service, the one with higher priority will get to deploy its services. format: int32 + maximum: 2147483646 + minimum: 1 type: integer stopOnConflict: default: false diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml index 883c4a2f0..cb8bc9c51 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml @@ -75,12 +75,15 @@ spec: type: object type: array servicesPriority: + default: 100 description: |- ServicesPriority sets the priority for the services defined in this spec. Higher value means higher priority and lower means lower. 
In case of conflict with another object managing the service, the one with higher priority will get to deploy its services. format: int32 + maximum: 2147483646 + minimum: 1 type: integer stopOnConflict: default: false diff --git a/templates/provider/hmc/templates/rbac/controller/roles.yaml b/templates/provider/hmc/templates/rbac/controller/roles.yaml index 79e2a3978..f977d242a 100644 --- a/templates/provider/hmc/templates/rbac/controller/roles.yaml +++ b/templates/provider/hmc/templates/rbac/controller/roles.yaml @@ -19,7 +19,8 @@ rules: - cluster.x-k8s.io resources: - clusters - verbs: {{ include "rbac.viewerVerbs" . | nindent 4 }} + verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} + - delete - apiGroups: - helm.toolkit.fluxcd.io resources: @@ -145,7 +146,8 @@ rules: - cluster.x-k8s.io resources: - machines - verbs: {{ include "rbac.viewerVerbs" . | nindent 4 }} + verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} + - delete - apiGroups: - "" resources: @@ -250,13 +252,6 @@ rules: - get - patch - update -- apiGroups: - - cluster.x-k8s.io - resources: - - clusters - - machines - verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} - - delete - apiGroups: - config.projectsveltos.io resources: