diff --git a/Makefile b/Makefile
index 6a9b44e8d..5b9a91333 100644
--- a/Makefile
+++ b/Makefile
@@ -400,8 +400,11 @@ FLUX_SOURCE_REPO_NAME ?= source-helmrepositories
 FLUX_SOURCE_REPO_CRD ?= $(EXTERNAL_CRD_DIR)/$(FLUX_SOURCE_REPO_NAME)-$(FLUX_SOURCE_VERSION).yaml
 FLUX_SOURCE_CHART_NAME ?= source-helmchart
 FLUX_SOURCE_CHART_CRD ?= $(EXTERNAL_CRD_DIR)/$(FLUX_SOURCE_CHART_NAME)-$(FLUX_SOURCE_VERSION).yaml
-
 FLUX_HELM_VERSION ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/fluxcd/helm-controller/api") | .Version')
+CAPI_VERSION ?= v1.8.4
+CAPI_CRD ?= $(EXTERNAL_CRD_DIR)/capi-$(CAPI_VERSION).yaml
+K0SMOTRON_VERSION ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/k0sproject/k0smotron") | .Version')
+K0SMOTRON_CRD ?= $(EXTERNAL_CRD_DIR)/k0smotron-$(K0SMOTRON_VERSION).yaml
 FLUX_HELM_NAME ?= helm
 FLUX_HELM_CRD ?= $(EXTERNAL_CRD_DIR)/$(FLUX_HELM_NAME)-$(FLUX_HELM_VERSION).yaml
@@ -478,8 +481,16 @@ $(SVELTOS_CRD): | yq $(EXTERNAL_CRD_DIR)
 	rm -f $(EXTERNAL_CRD_DIR)/$(SVELTOS_NAME)*
 	curl -s --fail https://raw.githubusercontent.com/projectsveltos/sveltos/$(SVELTOS_VERSION)/manifest/crds/sveltos_crds.yaml > $(SVELTOS_CRD)
 
+$(K0SMOTRON_CRD): | $(EXTERNAL_CRD_DIR)
+	rm -f $(K0SMOTRON_CRD)
+	curl -s --fail https://raw.githubusercontent.com/k0sproject/k0smotron/$(K0SMOTRON_VERSION)/config/crd/bases/infrastructure.cluster.x-k8s.io_remoteclusters.yaml > $(K0SMOTRON_CRD)
+
+$(CAPI_CRD): | $(EXTERNAL_CRD_DIR)
+	rm -f $(CAPI_CRD)
+	curl -s --fail https://raw.githubusercontent.com/kubernetes-sigs/cluster-api/$(CAPI_VERSION)/config/crd/bases/cluster.x-k8s.io_clusters.yaml > $(CAPI_CRD)
+
 .PHONY: external-crd
-external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD) $(SVELTOS_CRD)
+external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD) $(SVELTOS_CRD) $(K0SMOTRON_CRD) $(CAPI_CRD)
 
 .PHONY: kind
 kind: $(KIND) ## Download kind locally if
necessary.
diff --git a/PROJECT b/PROJECT
index 1320ee237..7efe4c378 100644
--- a/PROJECT
+++ b/PROJECT
@@ -101,4 +101,22 @@ resources:
   kind: MultiClusterService
   path: github.com/Mirantis/hmc/api/v1alpha1
   version: v1alpha1
+- api:
+    crdVersion: v1
+    namespaced: true
+  controller: true
+  domain: hmc.mirantis.com
+  group: hmc.mirantis.com
+  kind: UnmanagedCluster
+  path: github.com/Mirantis/hmc/api/v1alpha1
+  version: v1alpha1
+- api:
+    crdVersion: v1
+    namespaced: true
+  controller: true
+  domain: hmc.mirantis.com
+  group: hmc.mirantis.com
+  kind: UnmanagedMachine
+  path: github.com/Mirantis/hmc/api/v1alpha1
+  version: v1alpha1
 version: "3"
diff --git a/api/v1alpha1/unmanagedcluster_types.go b/api/v1alpha1/unmanagedcluster_types.go
new file mode 100644
index 000000000..9fbe76b79
--- /dev/null
+++ b/api/v1alpha1/unmanagedcluster_types.go
@@ -0,0 +1,95 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+const (
+	UnmanagedClusterKind      = "UnmanagedCluster"
+	UnmanagedClusterFinalizer = "hmc.mirantis.com/unmanaged-cluster"
+	AllNodesCondition         = "AllNodesCondition"
+	NodeCondition             = "NodeCondition"
+	HelmChart                 = "HelmChart"
+)
+
+// UnmanagedClusterSpec defines the desired state of UnmanagedCluster
+type UnmanagedClusterSpec struct {
+	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
+	// Important: Run "make" to regenerate code after modifying this file
+
+	Name string `json:"name,omitempty"`
+	// Services is a list of services created via ServiceTemplates
+	// that could be installed on the target cluster.
+	Services []ServiceSpec `json:"services,omitempty"`
+
+	// ServicesPriority sets the priority for the services defined in this spec.
+	// Higher value means higher priority and lower means lower.
+	// In case of conflict with another object managing the service,
+	// the one with higher priority will get to deploy its services.
+	ServicesPriority int32 `json:"servicesPriority,omitempty"`
+	// DryRun specifies whether the template should be applied after validation or only validated.
+	// DryRun bool `json:"dryRun,omitempty"`
+
+	// +kubebuilder:default:=false
+
+	// StopOnConflict specifies what to do in case of a conflict.
+	// E.g. If another object is already managing a service.
+	// By default the remaining services will be deployed even if conflict is detected.
+	// If set to true, the deployment will stop after encountering the first conflict.
+	StopOnConflict bool `json:"stopOnConflict,omitempty"`
+}
+
+// UnmanagedClusterStatus defines the observed state of UnmanagedCluster
+type UnmanagedClusterStatus struct {
+	// Flag indicating whether the unmanaged cluster is in the ready state or not
+	Ready bool `json:"ready"`
+
+	// Conditions contains details for the current state of the UnmanagedCluster.
+ Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:metadata:labels=cluster.x-k8s.io/v1beta1=v1alpha1 +// UnmanagedCluster is the Schema for the unmanagedclusters API +type UnmanagedCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec UnmanagedClusterSpec `json:"spec,omitempty"` + Status UnmanagedClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// UnmanagedClusterList contains a list of UnmanagedCluster +type UnmanagedClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []UnmanagedCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&UnmanagedCluster{}, &UnmanagedClusterList{}) +} + +func (in *UnmanagedCluster) GetConditions() *[]metav1.Condition { + return &in.Status.Conditions +} diff --git a/api/v1alpha1/unmanagedmachine_types.go b/api/v1alpha1/unmanagedmachine_types.go new file mode 100644 index 000000000..45e2025d6 --- /dev/null +++ b/api/v1alpha1/unmanagedmachine_types.go @@ -0,0 +1,69 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. 
Any new fields you add must have json tags for the fields to be serialized. + +// UnmanagedMachineSpec defines the desired state of UnmanagedMachine +type UnmanagedMachineSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + ProviderID string `json:"providerID,omitempty"` + ClusterName string `json:"clusterName,omitempty"` +} + +// UnmanagedMachineStatus defines the observed state of UnmanagedMachine +type UnmanagedMachineStatus struct { + // Flag indicating whether the machine is in the ready state or not + Ready bool `json:"ready,omitempty"` + // Conditions contains details for the current state of the ManagedCluster + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Machine ready status" +// +kubebuilder:metadata:labels=cluster.x-k8s.io/v1beta1=v1alpha1 + +// UnmanagedMachine is the Schema for the unmanagedmachines API +type UnmanagedMachine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec UnmanagedMachineSpec `json:"spec,omitempty"` + Status UnmanagedMachineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// UnmanagedMachineList contains a list of UnmanagedMachine +type UnmanagedMachineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []UnmanagedMachine `json:"items"` +} + +func init() { + SchemeBuilder.Register(&UnmanagedMachine{}, &UnmanagedMachineList{}) +} + +func (in *UnmanagedMachine) GetConditions() *[]metav1.Condition { + return &in.Status.Conditions +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 59bcaa4e2..674102e04 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ 
b/api/v1alpha1/zz_generated.deepcopy.go @@ -1484,3 +1484,202 @@ func (in *TemplateValidationStatus) DeepCopy() *TemplateValidationStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedCluster) DeepCopyInto(out *UnmanagedCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedCluster. +func (in *UnmanagedCluster) DeepCopy() *UnmanagedCluster { + if in == nil { + return nil + } + out := new(UnmanagedCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UnmanagedCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedClusterList) DeepCopyInto(out *UnmanagedClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]UnmanagedCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedClusterList. +func (in *UnmanagedClusterList) DeepCopy() *UnmanagedClusterList { + if in == nil { + return nil + } + out := new(UnmanagedClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *UnmanagedClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedClusterSpec) DeepCopyInto(out *UnmanagedClusterSpec) { + *out = *in + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]ServiceSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedClusterSpec. +func (in *UnmanagedClusterSpec) DeepCopy() *UnmanagedClusterSpec { + if in == nil { + return nil + } + out := new(UnmanagedClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedClusterStatus) DeepCopyInto(out *UnmanagedClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedClusterStatus. +func (in *UnmanagedClusterStatus) DeepCopy() *UnmanagedClusterStatus { + if in == nil { + return nil + } + out := new(UnmanagedClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedMachine) DeepCopyInto(out *UnmanagedMachine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedMachine. 
+func (in *UnmanagedMachine) DeepCopy() *UnmanagedMachine { + if in == nil { + return nil + } + out := new(UnmanagedMachine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UnmanagedMachine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedMachineList) DeepCopyInto(out *UnmanagedMachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]UnmanagedMachine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedMachineList. +func (in *UnmanagedMachineList) DeepCopy() *UnmanagedMachineList { + if in == nil { + return nil + } + out := new(UnmanagedMachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UnmanagedMachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnmanagedMachineSpec) DeepCopyInto(out *UnmanagedMachineSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedMachineSpec. +func (in *UnmanagedMachineSpec) DeepCopy() *UnmanagedMachineSpec { + if in == nil { + return nil + } + out := new(UnmanagedMachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *UnmanagedMachineStatus) DeepCopyInto(out *UnmanagedMachineStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnmanagedMachineStatus. +func (in *UnmanagedMachineStatus) DeepCopy() *UnmanagedMachineStatus { + if in == nil { + return nil + } + out := new(UnmanagedMachineStatus) + in.DeepCopyInto(out) + return out +} diff --git a/cmd/main.go b/cmd/main.go index 5ee887fb9..4e0553b42 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -302,6 +302,21 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "MultiClusterService") os.Exit(1) } + if err = (&controller.UnmanagedClusterReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "UnmanagedCluster") + os.Exit(1) + } + + if err = (&controller.UnmanagedMachineReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "UnmanagedMachine") + os.Exit(1) + } // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { @@ -364,5 +379,10 @@ func setupWebhooks(mgr ctrl.Manager, currentNamespace string) error { setupLog.Error(err, "unable to create webhook", "webhook", "ProviderTemplate") return err } + setupLog.Info("setup UnmanagedClusterValidator webhook") + if err := (&hmcwebhook.UnmanagedClusterValidator{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "UnmanagedCluster") + return err + } return nil } diff --git a/go.mod b/go.mod index e913ba3bb..1a4df884e 100644 --- a/go.mod +++ b/go.mod @@ -12,6 
+12,7 @@ require ( github.com/fluxcd/source-controller/api v1.4.1 github.com/google/uuid v1.6.0 github.com/hashicorp/go-retryablehttp v0.7.7 + github.com/k0sproject/k0smotron v1.1.2 github.com/onsi/ginkgo/v2 v2.21.0 github.com/onsi/gomega v1.35.1 github.com/opencontainers/go-digest v1.0.1-0.20231025023718-d50d2fec9c98 diff --git a/go.sum b/go.sum index 800abdb08..dd2ae8638 100644 --- a/go.sum +++ b/go.sum @@ -291,6 +291,8 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/k0sproject/k0smotron v1.1.2 h1:5jyGugN37Yk64pd/YTcuJwfBAVUx820MGI7zEeNdlRI= +github.com/k0sproject/k0smotron v1.1.2/go.mod h1:TZVJaCTigFGpKpUkpfIsWPSkpCLAYf73420bI9Gt6n8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index e805f260c..37f7f0959 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -176,6 +176,9 @@ var _ = BeforeSuite(func() { err = (&hmcwebhook.ProviderTemplateValidator{}).SetupWebhookWithManager(mgr) Expect(err).NotTo(HaveOccurred()) + err = (&hmcwebhook.UnmanagedClusterValidator{}).SetupWebhookWithManager(mgr) + Expect(err).NotTo(HaveOccurred()) + go func() { defer GinkgoRecover() err = mgr.Start(ctx) diff --git a/internal/controller/unmanagedcluster_controller.go b/internal/controller/unmanagedcluster_controller.go new file mode 100644 index 000000000..e11e7172c --- /dev/null +++ b/internal/controller/unmanagedcluster_controller.go @@ 
-0,0 +1,505 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+
+	v1beta12 "github.com/k0sproject/k0smotron/api/infrastructure/v1beta1"
+	sveltosv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1"
+	"github.com/projectsveltos/libsveltos/lib/clusterproxy"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/tools/clientcmd"
+	"sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/util/kubeconfig"
+	"sigs.k8s.io/cluster-api/util/secret"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+
+	hmc "github.com/Mirantis/hmc/api/v1alpha1"
+	"github.com/Mirantis/hmc/internal/sveltos"
+)
+
+// UnmanagedClusterReconciler reconciles a UnmanagedCluster object
+type UnmanagedClusterReconciler struct {
+	client.Client
+	Scheme *runtime.Scheme
+}
+
+// +kubebuilder:rbac:groups=hmc.mirantis.com,resources=unmanagedclusters,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=hmc.mirantis.com,resources=unmanagedclusters/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=hmc.mirantis.com,resources=unmanagedclusters/finalizers,verbs=update
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
+// TODO(user): Modify the Reconcile function to compare the state specified by
+// the UnmanagedCluster object against the actual cluster state, and then
+// perform operations to make the cluster state reflect the state specified by
+// the user.
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile
+func (r *UnmanagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	l := ctrl.LoggerFrom(ctx)
+	l.Info("Reconciling UnmanagedCluster")
+
+	unmanagedCluster := new(hmc.UnmanagedCluster)
+	if err := r.Get(ctx, req.NamespacedName, unmanagedCluster); err != nil {
+		if apierrors.IsNotFound(err) {
+			l.Info("UnmanagedCluster not found, ignoring since object must be deleted")
+			return ctrl.Result{}, nil
+		}
+		l.Error(err, "Failed to get UnmanagedCluster")
+		return ctrl.Result{}, err
+	}
+
+	if controllerutil.AddFinalizer(unmanagedCluster, hmc.UnmanagedClusterFinalizer) {
+		if err := r.Client.Update(ctx, unmanagedCluster); err != nil {
+			return ctrl.Result{}, fmt.Errorf("failed to update UnmanagedCluster %s with finalizer %s: %w", unmanagedCluster.Name, hmc.UnmanagedClusterFinalizer, err)
+		}
+	}
+	return r.reconcileUnmanagedCluster(ctx, unmanagedCluster)
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *UnmanagedClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	// Register the external k0smotron and Cluster API types once at setup
+	// time; mutating the scheme on every Reconcile call is redundant and
+	// unsafe once the manager is serving concurrent reconciles.
+	if err := v1beta12.AddToScheme(mgr.GetScheme()); err != nil {
+		return err
+	}
+	if err := v1beta1.AddToScheme(mgr.GetScheme()); err != nil {
+		return err
+	}
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&hmc.UnmanagedCluster{}).
+ Complete(r) +} + +func (r *UnmanagedClusterReconciler) getControlPlaneEndpoint(ctx context.Context, unmanagedCluster *hmc.UnmanagedCluster) (v1beta1.APIEndpoint, error) { + bytes, err := kubeconfig.FromSecret(ctx, r.Client, client.ObjectKey{ + Namespace: unmanagedCluster.Namespace, + Name: unmanagedCluster.Name, + }) + if err != nil { + return v1beta1.APIEndpoint{}, fmt.Errorf("failed to get cluster kubeconfig secret: %w", err) + } + + config, err := clientcmd.RESTConfigFromKubeConfig(bytes) + if err != nil { + return v1beta1.APIEndpoint{}, fmt.Errorf("failed to get rest config from kube config secret: %w", err) + } + + hostURL, err := url.Parse(config.Host) + if err != nil { + return v1beta1.APIEndpoint{}, fmt.Errorf("kube config secret contains invalid host: %w", err) + } + + portNumber, err := strconv.Atoi(hostURL.Port()) + if err != nil { + return v1beta1.APIEndpoint{}, fmt.Errorf("kube config secret contains invalid port: %w", err) + } + return v1beta1.APIEndpoint{Host: hostURL.Hostname(), Port: int32(portNumber)}, nil +} + +func (r *UnmanagedClusterReconciler) createCluster(ctx context.Context, unmanagedCluster *hmc.UnmanagedCluster) error { + controlPlaneEndPoint, err := r.getControlPlaneEndpoint(ctx, unmanagedCluster) + if err != nil { + return fmt.Errorf("failed to get control plane endpoint: %w", err) + } + + clusterObject := &v1beta1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedCluster.Name, + Namespace: unmanagedCluster.Namespace, + Labels: map[string]string{ + "helm.toolkit.fluxcd.io/name": unmanagedCluster.Name, + "helm.toolkit.fluxcd.io/namespace": unmanagedCluster.Namespace, + }, + }, + Spec: v1beta1.ClusterSpec{ + ControlPlaneEndpoint: controlPlaneEndPoint, + InfrastructureRef: &corev1.ObjectReference{ + Kind: "UnmanagedCluster", + Namespace: unmanagedCluster.Namespace, + Name: unmanagedCluster.Name, + APIVersion: unmanagedCluster.APIVersion, + }, + }, + } + clusterObject.Status.SetTypedPhase(v1beta1.ClusterPhaseUnknown) + err = 
r.Client.Create(ctx, clusterObject) + if err != nil && !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to create unmanagedCluster object %s/%s: %s", unmanagedCluster.Namespace, unmanagedCluster.Name, err) + } + + return nil +} + +func (r *UnmanagedClusterReconciler) createMachines(ctx context.Context, unmanagedCluster *hmc.UnmanagedCluster) error { + l := ctrl.LoggerFrom(ctx) + + nodelist, err := r.getNodeList(ctx, unmanagedCluster) + if err != nil { + return err + } + + kubeConfigSecretName := secret.Name(unmanagedCluster.Name, secret.Kubeconfig) + + // find any existing unmanaged machines for the cluster to see if any need to be cleaned up because + // the underlying node was removed + existingMachines := &hmc.UnmanagedMachineList{} + if err := r.List(ctx, existingMachines, &client.ListOptions{ + Namespace: unmanagedCluster.Namespace, + LabelSelector: labels.SelectorFromSet(map[string]string{v1beta1.ClusterNameLabel: unmanagedCluster.Name}), + }); err != nil { + return fmt.Errorf("failed to list existing unmanaged machines: %w", err) + } + + existingMachinesByName := map[string]*hmc.UnmanagedMachine{} + for _, existingMachine := range existingMachines.Items { + existingMachinesByName[existingMachine.GetName()] = &existingMachine + } + + for _, node := range nodelist.Items { + delete(existingMachinesByName, node.Name) + + unmanagedMachine := hmc.UnmanagedMachine{ + TypeMeta: metav1.TypeMeta{ + Kind: "UnmanagedMachine", + APIVersion: hmc.GroupVersion.Identifier(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: node.Name, + Namespace: unmanagedCluster.Namespace, + Labels: map[string]string{ + v1beta1.ClusterNameLabel: unmanagedCluster.Name, + }, + }, + Spec: hmc.UnmanagedMachineSpec{ + ProviderID: node.Spec.ProviderID, + ClusterName: unmanagedCluster.Name, + }, + Status: hmc.UnmanagedMachineStatus{ + Ready: true, + }, + } + + err := r.Create(ctx, &unmanagedMachine) + if err != nil && !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to 
create machine: %w", err) + } + + ref := types.NamespacedName{Name: unmanagedMachine.Name, Namespace: unmanagedMachine.Namespace} + if err := r.Get(ctx, ref, &unmanagedMachine); err != nil { + return fmt.Errorf("failed to get unmanaged machine: %w", err) + } + unmanagedMachine.Status = hmc.UnmanagedMachineStatus{ + Ready: true, + } + if err := r.Status().Update(ctx, &unmanagedMachine); err != nil { + return fmt.Errorf("failed to update unmanaged machine status: %w", err) + } + + l.Info("Create machine", "node", node.Name) + machine := v1beta1.Machine{ + TypeMeta: metav1.TypeMeta{ + Kind: "Machine", + APIVersion: v1beta1.GroupVersion.Identifier(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: node.Name, + Namespace: unmanagedCluster.Namespace, + Labels: map[string]string{v1beta1.GroupVersion.Identifier(): hmc.GroupVersion.Version, v1beta1.ClusterNameLabel: unmanagedCluster.Name}, + }, + Spec: v1beta1.MachineSpec{ + ClusterName: unmanagedCluster.Name, + Bootstrap: v1beta1.Bootstrap{ + DataSecretName: &kubeConfigSecretName, + }, + InfrastructureRef: corev1.ObjectReference{ + Kind: "UnmanagedMachine", + Namespace: unmanagedCluster.Namespace, + Name: node.Name, + APIVersion: hmc.GroupVersion.Identifier(), + }, + ProviderID: &node.Spec.ProviderID, + }, + Status: v1beta1.MachineStatus{ + NodeRef: &corev1.ObjectReference{ + Kind: "Node", + Name: node.Name, + APIVersion: "v1", + }, + NodeInfo: &corev1.NodeSystemInfo{}, + CertificatesExpiryDate: nil, + BootstrapReady: true, + InfrastructureReady: true, + }, + } + + if _, ok := node.Labels[v1beta1.NodeRoleLabelPrefix+"/control-plane"]; ok { + if machine.Labels == nil { + machine.Labels = make(map[string]string) + } + machine.Labels[v1beta1.MachineControlPlaneLabel] = "true" + } + err = r.Create(ctx, &machine) + if err != nil && !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to create machine: %w", err) + } + } + + // cleanup any orphaned unmanaged machines and capi machines + for _, existingUnmanagedMachine := 
range existingMachinesByName { + if err := r.Delete(ctx, existingUnmanagedMachine); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete orphaned unmanaged machine: %w", err) + } + + if err := r.Delete(ctx, &v1beta1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: existingUnmanagedMachine.Name, + Namespace: unmanagedCluster.Namespace, + }, + }); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete orphaned machine: %w", err) + } + } + return nil +} + +func (r *UnmanagedClusterReconciler) getNodeList(ctx context.Context, unmanagedCluster *hmc.UnmanagedCluster) (*corev1.NodeList, error) { + l := ctrl.LoggerFrom(ctx) + clusterClient, err := clusterproxy.GetCAPIKubernetesClient(ctx, l, r.Client, r.Client.Scheme(), unmanagedCluster.Namespace, unmanagedCluster.Name) + if err != nil { + return nil, fmt.Errorf("failed to connect to remote cluster: %w", err) + } + + nodelist := &corev1.NodeList{} + if err := clusterClient.List(ctx, nodelist); err != nil { + return nil, fmt.Errorf("failed to list cluster nodes: %w", err) + } + return nodelist, nil +} + +func (r *UnmanagedClusterReconciler) reconcileUnmanagedCluster(ctx context.Context, unmanagedCluster *hmc.UnmanagedCluster) (ctrl.Result, error) { + l := ctrl.LoggerFrom(ctx) + if !unmanagedCluster.DeletionTimestamp.IsZero() { + l.Info("Deleting UnmanagedCluster") + return r.reconcileDeletion(ctx, unmanagedCluster) + } + + if err := r.createCluster(ctx, unmanagedCluster); err != nil { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err + } + + if err := r.createServices(ctx, unmanagedCluster); err != nil { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err + } + + if err := r.createMachines(ctx, unmanagedCluster); err != nil { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err + } + + requeue, err := r.updateStatus(ctx, unmanagedCluster) + if err != nil { + if requeue { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err 
+ } + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err + } + + if requeue { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil + } + return ctrl.Result{}, nil +} + +func (r *UnmanagedClusterReconciler) createServices(ctx context.Context, mc *hmc.UnmanagedCluster) error { + opts, err := helmChartOpts(ctx, r.Client, mc.Namespace, mc.Spec.Services) + if err != nil { + return err + } + + if _, err := sveltos.ReconcileProfile(ctx, r.Client, mc.Namespace, mc.Name, + sveltos.ReconcileProfileOpts{ + OwnerReference: &metav1.OwnerReference{ + APIVersion: hmc.GroupVersion.String(), + Kind: hmc.UnmanagedClusterKind, + Name: mc.Name, + UID: mc.UID, + }, + LabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + hmc.FluxHelmChartNamespaceKey: mc.Namespace, + hmc.FluxHelmChartNameKey: mc.Name, + }, + }, + HelmChartOpts: opts, + Priority: mc.Spec.ServicesPriority, + StopOnConflict: mc.Spec.StopOnConflict, + }); err != nil { + return fmt.Errorf("failed to reconcile Profile: %w", err) + } + + return nil +} + +func (r *UnmanagedClusterReconciler) reconcileDeletion(ctx context.Context, unmanagedCluster *hmc.UnmanagedCluster) (ctrl.Result, error) { + clusterLabel := map[string]string{v1beta1.ClusterNameLabel: unmanagedCluster.Name} + deleteAllOpts := []client.DeleteAllOfOption{ + client.InNamespace(unmanagedCluster.Namespace), + client.MatchingLabels(clusterLabel), + } + + if err := r.DeleteAllOf( + ctx, + &hmc.UnmanagedMachine{}, + deleteAllOpts..., + ); err != nil && !apierrors.IsNotFound(err) { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete unmanaged machines: %w", err) + } + + if err := r.DeleteAllOf( + ctx, + &v1beta1.Machine{}, + deleteAllOpts..., + ); err != nil && !apierrors.IsNotFound(err) { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete unmanaged machines: %w", err) + } + + if err := r.Delete(ctx, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + 
Namespace: unmanagedCluster.Namespace, + Name: secret.Name(unmanagedCluster.Name, secret.Kubeconfig), + Labels: map[string]string{ + v1beta1.ClusterNameLabel: unmanagedCluster.Name, + }, + }, + }); err != nil && !apierrors.IsNotFound(err) { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete cluster secret: %w", err) + } + + if err := r.Delete(ctx, &v1beta1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: unmanagedCluster.Namespace, + Name: unmanagedCluster.Name, + Labels: map[string]string{ + v1beta1.ClusterNameLabel: unmanagedCluster.Name, + }, + }, + }); err != nil && !apierrors.IsNotFound(err) { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete cluster: %w", err) + } + + if controllerutil.RemoveFinalizer(unmanagedCluster, hmc.UnmanagedClusterFinalizer) { + if err := r.Client.Update(ctx, unmanagedCluster); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to remove finalizer %s from UnmanagedCluster %s: %w", + hmc.UnmanagedClusterFinalizer, unmanagedCluster.Name, err) + } + } + return ctrl.Result{}, nil +} + +func (r *UnmanagedClusterReconciler) updateStatus(ctx context.Context, cluster *hmc.UnmanagedCluster) (bool, error) { + requeue := false + nodelist, err := r.getNodeList(ctx, cluster) + if err != nil { + return true, err + } + + allNodeCondition := metav1.Condition{ + Type: hmc.AllNodesCondition, + Status: "True", + Message: "All nodes are ready", + Reason: hmc.SucceededReason, + } + + cluster.Status.Ready = true + var nonReadyNodes []string + for _, node := range nodelist.Items { + for _, nodeCondition := range node.Status.Conditions { + if nodeCondition.Type == corev1.NodeReady { + if nodeCondition.Status != corev1.ConditionTrue { + allNodeCondition.Status = metav1.ConditionFalse + allNodeCondition.Reason = hmc.FailedReason + nonReadyNodes = append(nonReadyNodes, node.Name) + requeue = true + cluster.Status.Ready = false + } + } + } + } + + if len(nonReadyNodes) > 0 { 
+ allNodeCondition.Message = fmt.Sprintf("Nodes %s are not ready", strings.Join(nonReadyNodes, ",")) + } + apimeta.SetStatusCondition(cluster.GetConditions(), allNodeCondition) + + if len(cluster.Spec.Services) > 0 { + sveltosClusterSummaries := &sveltosv1beta1.ClusterSummaryList{} + if err := r.List(ctx, sveltosClusterSummaries, &client.ListOptions{ + Namespace: cluster.Namespace, + LabelSelector: labels.SelectorFromSet(map[string]string{sveltosv1beta1.ClusterNameLabel: cluster.Name}), + }); err != nil { + return true, fmt.Errorf("failed to list sveltos cluster summary: %w", err) + } + + if len(sveltosClusterSummaries.Items) > 0 { + var failedCharts []string + + helmCondition := metav1.Condition{ + Type: hmc.HelmChart, + Reason: hmc.SucceededReason, + Status: metav1.ConditionTrue, + } + + for _, clusterSummary := range sveltosClusterSummaries.Items { + for _, helmReleaseSummary := range clusterSummary.Status.HelmReleaseSummaries { + if helmReleaseSummary.Status != sveltosv1beta1.HelmChartStatusManaging { + helmCondition.Reason = hmc.FailedReason + helmCondition.Status = metav1.ConditionFalse + requeue = true + failedCharts = append(failedCharts, helmReleaseSummary.ReleaseName) + } + } + } + + if len(failedCharts) > 0 { + helmCondition.Message = "Charts failed to deploy " + strings.Join(failedCharts, ",") + } + apimeta.SetStatusCondition(cluster.GetConditions(), helmCondition) + } else { + requeue = true + } + } + + if err := r.Status().Update(ctx, cluster); err != nil { + return true, fmt.Errorf("failed to update unmanaged cluster status: %w", err) + } + + return requeue, nil +} diff --git a/internal/controller/unmanagedcluster_controller_test.go b/internal/controller/unmanagedcluster_controller_test.go new file mode 100644 index 000000000..1c404b59b --- /dev/null +++ b/internal/controller/unmanagedcluster_controller_test.go @@ -0,0 +1,162 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "context" + + "github.com/k0sproject/k0smotron/api/infrastructure/v1beta1" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" + capi "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/secret" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + hmc "github.com/Mirantis/hmc/api/v1alpha1" +) + +var _ = Describe("UnmanagedCluster Controller", func() { + Context("When reconciling a resource", func() { + const ( + unmanagedClusterName = "test-managed-cluster" + unmanagedClusterNamespace = "default" + ) + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: unmanagedClusterName, + Namespace: unmanagedClusterNamespace, + } + unmanagedcluster := &hmc.UnmanagedCluster{} + + BeforeEach(func() { + Expect(v1beta1.AddToScheme(k8sClient.Scheme())).To(Succeed()) + By("creating the custom resource for the Kind UnmanagedCluster") + + secretName := secret.Name(unmanagedClusterName, secret.Kubeconfig) + + secret := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: unmanagedClusterNamespace, + Labels: map[string]string{capi.ClusterNameLabel: unmanagedClusterName}, + }, + Data: 
map[string][]byte{secret.KubeconfigDataName: generateTestKubeConfig()}, + } + + err := k8sClient.Get(ctx, types.NamespacedName{Name: secretName, Namespace: unmanagedClusterNamespace}, secret) + if err != nil && errors.IsNotFound(err) { + Expect(k8sClient.Create(ctx, secret)).To(Succeed()) + } + + err = k8sClient.Get(ctx, typeNamespacedName, unmanagedcluster) + if err != nil && errors.IsNotFound(err) { + resource := &hmc.UnmanagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedClusterName, + Namespace: unmanagedClusterNamespace, + }, + Spec: hmc.UnmanagedClusterSpec{ + Name: unmanagedClusterName, + Services: nil, + ServicesPriority: 1, + StopOnConflict: true, + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + resource := &hmc.UnmanagedCluster{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance UnmanagedCluster") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + + secretName := secret.Name(unmanagedClusterName, secret.Kubeconfig) + Expect(k8sClient.Delete(ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: unmanagedClusterNamespace, + }})).To(Succeed()) + + Expect(k8sClient.Delete(ctx, + &capi.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedClusterName, + Namespace: unmanagedClusterNamespace, + }, + })).To(Succeed()) + }) + + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &UnmanagedClusterReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. 
+ }) + }) +}) + +func generateTestKubeConfig() []byte { + GinkgoHelper() + clusters := make(map[string]*api.Cluster) + clusters["default-cluster"] = &api.Cluster{ + Server: cfg.Host, + CertificateAuthorityData: cfg.CAData, + } + contexts := make(map[string]*api.Context) + contexts["default-context"] = &api.Context{ + Cluster: "default-cluster", + AuthInfo: "default-user", + } + authinfos := make(map[string]*api.AuthInfo) + authinfos["default-user"] = &api.AuthInfo{ + ClientCertificateData: cfg.CertData, + ClientKeyData: cfg.KeyData, + } + clientConfig := api.Config{ + Kind: "Config", + APIVersion: "v1", + Clusters: clusters, + Contexts: contexts, + CurrentContext: "default-context", + AuthInfos: authinfos, + } + + kubecfg, err := clientcmd.Write(clientConfig) + Expect(err).NotTo(HaveOccurred()) + return kubecfg +} diff --git a/internal/controller/unmanagedmachine_controller.go b/internal/controller/unmanagedmachine_controller.go new file mode 100644 index 000000000..988b046fa --- /dev/null +++ b/internal/controller/unmanagedmachine_controller.go @@ -0,0 +1,117 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package controller + +import ( + "context" + "fmt" + + "github.com/projectsveltos/libsveltos/lib/clusterproxy" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + hmc "github.com/Mirantis/hmc/api/v1alpha1" +) + +// UnmanagedMachineReconciler reconciles a UnmanagedMachine object +type UnmanagedMachineReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=hmc.mirantis.com,resources=unmanagedmachines,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=hmc.mirantis.com,resources=unmanagedmachines/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=hmc.mirantis.com,resources=unmanagedmachines/finalizers,verbs=update + +func (r *UnmanagedMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + l := log.FromContext(ctx) + + unmanagedMachine := new(hmc.UnmanagedMachine) + if err := r.Get(ctx, req.NamespacedName, unmanagedMachine); err != nil { + if apierrors.IsNotFound(err) { + l.Info("UnmanagedMachine not found, ignoring since object must be deleted") + return ctrl.Result{}, nil + } + l.Error(err, "Failed to get UnmanagedMachine") + return ctrl.Result{}, err + } + + requeue, err := r.reconcileStatus(ctx, unmanagedMachine) + if err != nil { + if requeue { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err + } + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err + } + + if requeue { + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil + } + return ctrl.Result{}, nil +} + +func (r *UnmanagedMachineReconciler) reconcileStatus(ctx context.Context, unmanagedMachine 
*hmc.UnmanagedMachine) (bool, error) { + requeue := false + + l := ctrl.LoggerFrom(ctx) + clusterClient, err := clusterproxy.GetCAPIKubernetesClient(ctx, l, r.Client, r.Client.Scheme(), unmanagedMachine.Namespace, unmanagedMachine.Spec.ClusterName) + if err != nil { + return true, fmt.Errorf("failed to connect to remote cluster: %w", err) + } + + node := &corev1.Node{} + if err := clusterClient.Get(ctx, types.NamespacedName{Name: unmanagedMachine.Name, Namespace: ""}, node); err != nil { + return true, fmt.Errorf("failed to get node: %w", err) + } + + for _, nodeCondition := range node.Status.Conditions { + if nodeCondition.Type == corev1.NodeReady { + unmanagedMachine.Status.Ready = true + machineCondition := metav1.Condition{ + Type: hmc.NodeCondition, + Status: "True", + Reason: hmc.SucceededReason, + } + + if nodeCondition.Status != corev1.ConditionTrue { + requeue = true + machineCondition.Reason = hmc.FailedReason + machineCondition.Status = "False" + unmanagedMachine.Status.Ready = false + } + apimeta.SetStatusCondition(unmanagedMachine.GetConditions(), machineCondition) + } + } + + if err := r.Status().Update(ctx, unmanagedMachine); err != nil { + return true, fmt.Errorf("failed to update unmanaged machine status: %w", err) + } + + return requeue, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *UnmanagedMachineReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&hmc.UnmanagedMachine{}). + Complete(r) +} diff --git a/internal/controller/unmanagedmachine_controller_test.go b/internal/controller/unmanagedmachine_controller_test.go new file mode 100644 index 000000000..f8d0b2b92 --- /dev/null +++ b/internal/controller/unmanagedmachine_controller_test.go @@ -0,0 +1,155 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "context" + + "github.com/k0sproject/k0smotron/api/infrastructure/v1beta1" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + capi "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/secret" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + hmc "github.com/Mirantis/hmc/api/v1alpha1" +) + +var _ = Describe("UnmanagedMachine Controller", func() { + Context("When reconciling a resource", func() { + const ( + unmanagedClusterName = "test-managed-cluster" + unmanagedClusterNamespace = "default" + unmanagedMachineName = "test-machine" + ) + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: unmanagedMachineName, + Namespace: unmanagedClusterNamespace, + } + unmanagedmachine := &hmc.UnmanagedMachine{} + + BeforeEach(func() { + By("creating the custom resource for the Kind UnmanagedCluster") + Expect(v1beta1.AddToScheme(k8sClient.Scheme())).To(Succeed()) + Expect(capi.AddToScheme(k8sClient.Scheme())).To(Succeed()) + secretName := secret.Name(unmanagedClusterName, secret.Kubeconfig) + + secret := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: unmanagedClusterNamespace, + Labels: map[string]string{capi.ClusterNameLabel: unmanagedClusterName}, + }, + Data: map[string][]byte{secret.KubeconfigDataName: 
generateTestKubeConfig()}, + } + + err := k8sClient.Get(ctx, types.NamespacedName{Name: secretName, Namespace: unmanagedClusterNamespace}, secret) + if err != nil && errors.IsNotFound(err) { + Expect(k8sClient.Create(ctx, secret)).To(Succeed()) + } + + cluster := &capi.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "Cluster", + APIVersion: capi.GroupVersion.Identifier(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedClusterName, + Namespace: unmanagedClusterNamespace, + }, + } + err = k8sClient.Get(ctx, typeNamespacedName, cluster) + if err != nil && errors.IsNotFound(err) { + Expect(k8sClient.Create(ctx, cluster)).To(Succeed()) + } + + By("creating the custom resource for the Kind UnmanagedMachine") + Expect(k8sClient.Create(ctx, &corev1.Node{ + TypeMeta: metav1.TypeMeta{ + Kind: "Node", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedMachineName, + }, + })).To(Succeed()) + + err = k8sClient.Get(ctx, typeNamespacedName, unmanagedmachine) + if err != nil && errors.IsNotFound(err) { + resource := &hmc.UnmanagedMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedMachineName, + Namespace: "default", + }, + Spec: hmc.UnmanagedMachineSpec{ + ProviderID: unmanagedMachineName, + ClusterName: unmanagedClusterName, + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + resource := &hmc.UnmanagedMachine{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance UnmanagedMachine") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + + secretName := secret.Name(unmanagedClusterName, secret.Kubeconfig) + Expect(k8sClient.Delete(ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: unmanagedClusterNamespace, + }})).To(Succeed()) + + Expect(k8sClient.Delete(ctx, + &capi.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedClusterName, + Namespace: 
unmanagedClusterNamespace, + }, + })).To(Succeed()) + + Expect(k8sClient.Delete(ctx, &corev1.Node{ObjectMeta: metav1.ObjectMeta{ + Name: unmanagedMachineName, + Namespace: unmanagedClusterNamespace, + }})).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &UnmanagedMachineReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/internal/webhook/unmanagedcluster_webhook.go b/internal/webhook/unmanagedcluster_webhook.go new file mode 100644 index 000000000..f3e67f734 --- /dev/null +++ b/internal/webhook/unmanagedcluster_webhook.go @@ -0,0 +1,105 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package webhook + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/secret" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + hmcv1alpha1 "github.com/Mirantis/hmc/api/v1alpha1" +) + +type UnmanagedClusterValidator struct { + client.Client +} + +func (v *UnmanagedClusterValidator) SetupWebhookWithManager(mgr ctrl.Manager) error { + v.Client = mgr.GetClient() + return ctrl.NewWebhookManagedBy(mgr). + For(&hmcv1alpha1.UnmanagedCluster{}). + WithValidator(v). + WithDefaulter(v). + Complete() +} + +var ( + _ webhook.CustomValidator = &UnmanagedClusterValidator{} + _ webhook.CustomDefaulter = &UnmanagedClusterValidator{} +) + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. 
+func (v *UnmanagedClusterValidator) ValidateCreate(ctx context.Context, newObj runtime.Object) (admission.Warnings, error) { + return v.validate(ctx, newObj) +} + +func (v *UnmanagedClusterValidator) validate(ctx context.Context, newObj runtime.Object) (admission.Warnings, error) { + unmanagedCluster, ok := newObj.(*hmcv1alpha1.UnmanagedCluster) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected UnmanagedCluster but got a %T", newObj)) + } + + if !unmanagedCluster.DeletionTimestamp.IsZero() { + return nil, nil + } + + kubecfgSecret := &corev1.Secret{} + if err := v.Client.Get(ctx, types.NamespacedName{ + Namespace: unmanagedCluster.Namespace, + Name: secret.Name(unmanagedCluster.Name, secret.Kubeconfig), + }, kubecfgSecret); err != nil && !apierrors.IsNotFound(err) { + return nil, apierrors.NewInternalError(err) + } else if apierrors.IsNotFound(err) { + return nil, apierrors.NewBadRequest(fmt.Sprintf("required secret with name: %s not found in namespace: %s", + secret.Name(unmanagedCluster.Name, secret.Kubeconfig), unmanagedCluster.Namespace)) + } + + if _, ok := kubecfgSecret.Data[secret.KubeconfigDataName]; !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("required secret with name: %s does not have a data item "+ + "with key %s", kubecfgSecret.Name, secret.KubeconfigDataName)) + } + + if clusterNameLabel, ok := kubecfgSecret.Labels[v1beta1.ClusterNameLabel]; !ok || clusterNameLabel != unmanagedCluster.Name { + return nil, apierrors.NewBadRequest(fmt.Sprintf("required secret with name: %s does not have a %s label set to: %s", + secret.Name(unmanagedCluster.Name, secret.Kubeconfig), v1beta1.ClusterNameLabel, unmanagedCluster.Name)) + } + + return nil, nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. 
+func (v *UnmanagedClusterValidator) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) (admission.Warnings, error) { + return v.validate(ctx, newObj) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type. +func (*UnmanagedClusterValidator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { + return nil, nil +} + +// Default implements webhook.Defaulter so a webhook will be registered for the type. +func (*UnmanagedClusterValidator) Default(_ context.Context, _ runtime.Object) error { + return nil +} diff --git a/internal/webhook/unmanagedcluster_webhook_test.go b/internal/webhook/unmanagedcluster_webhook_test.go new file mode 100644 index 000000000..56b132bd0 --- /dev/null +++ b/internal/webhook/unmanagedcluster_webhook_test.go @@ -0,0 +1,103 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package webhook + +import ( + "context" + "fmt" + "testing" + + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/secret" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/Mirantis/hmc/api/v1alpha1" + uc "github.com/Mirantis/hmc/test/objects/unmanagedcluster" + "github.com/Mirantis/hmc/test/scheme" +) + +func TestUnmanagedClusterValidateCreate(t *testing.T) { + const ( + testNamespace = "test-namespace" + testClusterName = "test" + ) + g := NewWithT(t) + + ctx := context.Background() + + kubecfg := "apiVersion: v1\nclusters:\n- cluster:\n certificate-authority-data: \n\tserver: https://nowhere.xyz\n" + + " name: test\ncontexts:\n- context:\n cluster: test\n user: test-admin\n name: test-admin@test\n" + + "current-context: test-admin@test\nkind: Config\npreferences: {}\nusers:\n- name: test-admin\n user:\n " + + "client-certificate-data: \n\tclient-key-data: " + + secretName := secret.Name(testClusterName, secret.Kubeconfig) + kubeSecret := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"}, + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, Namespace: testNamespace, + Labels: map[string]string{v1beta1.ClusterNameLabel: testClusterName}, + }, + Data: map[string][]byte{secret.KubeconfigDataName: []byte(kubecfg)}, + } + + tests := []struct { + name string + tm *v1alpha1.UnmanagedCluster + existingObjects []runtime.Object + err string + warnings admission.Warnings + }{ + { + name: "should fail if the required secret does not exist", + tm: uc.NewUnmanagedCluster(uc.WithNameAndNamespace(testClusterName, testNamespace)), + existingObjects: nil, + err: fmt.Sprintf("required secret with name: %s not found in namespace: %s", secretName, testNamespace), + }, + { + name: "should succeed", + tm: uc.NewUnmanagedCluster(uc.WithNameAndNamespace(testClusterName, testNamespace)), + 
existingObjects: []runtime.Object{kubeSecret}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := fake.NewClientBuilder(). + WithScheme(scheme.Scheme). + WithRuntimeObjects(tt.existingObjects...). + Build() + validator := &UnmanagedClusterValidator{Client: c} + warn, err := validator.ValidateCreate(ctx, tt.tm) + if tt.err != "" { + g.Expect(err).To(HaveOccurred()) + if err.Error() != tt.err { + t.Fatalf("expected error '%s', got error: %s", tt.err, err.Error()) + } + } else { + g.Expect(err).To(Succeed()) + } + if len(tt.warnings) > 0 { + g.Expect(warn).To(Equal(tt.warnings)) + } else { + g.Expect(warn).To(BeEmpty()) + } + }) + } +} diff --git a/templates/provider/hmc/templates/_helpers.tpl b/templates/provider/hmc/templates/_helpers.tpl index 2d9e15365..58a849572 100644 --- a/templates/provider/hmc/templates/_helpers.tpl +++ b/templates/provider/hmc/templates/_helpers.tpl @@ -96,6 +96,7 @@ hmc-webhook {{- define "rbac.editorVerbs" -}} - create - delete +- deletecollection - get - list - patch diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml new file mode 100644 index 000000000..4c8459ad8 --- /dev/null +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml @@ -0,0 +1,168 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + labels: + cluster.x-k8s.io/v1beta1: v1alpha1 + name: unmanagedclusters.hmc.mirantis.com +spec: + group: hmc.mirantis.com + names: + kind: UnmanagedCluster + listKind: UnmanagedClusterList + plural: unmanagedclusters + singular: unmanagedcluster + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: UnmanagedCluster is the Schema for the unmanagedclusters API + properties: + apiVersion: + description: |- + APIVersion defines 
the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: UnmanagedClusterSpec defines the desired state of UnmanagedCluster + properties: + name: + type: string + services: + description: |- + Services is a list of services created via ServiceTemplates + that could be installed on the target cluster. + items: + description: ServiceSpec represents a Service to be managed + properties: + disable: + description: Disable can be set to disable handling of this + service. + type: boolean + name: + description: Name is the chart release. + minLength: 1 + type: string + namespace: + description: |- + Namespace is the namespace the release will be installed in. + It will default to Name if not provided. + type: string + template: + description: Template is a reference to a Template object located + in the same namespace. + minLength: 1 + type: string + values: + description: Values is the helm values to be passed to the template. + x-kubernetes-preserve-unknown-fields: true + required: + - name + - template + type: object + type: array + servicesPriority: + description: |- + ServicesPriority sets the priority for the services defined in this spec. + Higher value means higher priority and lower means lower. + In case of conflict with another object managing the service, + the one with higher priority will get to deploy its services. 
+ format: int32 + type: integer + stopOnConflict: + default: false + description: |- + StopOnConflict specifies what to do in case of a conflict. + E.g. If another object is already managing a service. + By default the remaining services will be deployed even if conflict is detected. + If set to true, the deployment will stop after encountering the first conflict. + type: boolean + type: object + status: + description: UnmanagedClusterStatus defines the observed state of UnmanagedCluster + properties: + conditions: + description: Conditions contains details for the current state of + the ManagedCluster. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + ready: + description: Flag indicating whether the unmanaged cluster is in the + ready state or not + type: boolean + required: + - ready + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedmachines.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedmachines.yaml new file mode 100644 index 000000000..0482e8710 --- /dev/null +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedmachines.yaml @@ -0,0 +1,127 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + labels: + cluster.x-k8s.io/v1beta1: v1alpha1 + name: unmanagedmachines.hmc.mirantis.com +spec: + group: hmc.mirantis.com + names: + kind: UnmanagedMachine + listKind: UnmanagedMachineList + plural: unmanagedmachines + singular: unmanagedmachine + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Machine ready status + jsonPath: .status.ready + name: Ready + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: UnmanagedMachine is the Schema for the unmanagedmachines API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: UnmanagedMachineSpec defines the desired state of UnmanagedMachine + properties: + clusterName: + type: string + providerID: + description: |- + INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file + type: string + type: object + status: + description: UnmanagedMachineStatus defines the observed state of UnmanagedMachine + properties: + conditions: + description: Conditions contains details for the current state of + the ManagedCluster + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + ready: + description: Flag indicating whether the machine is in the ready state + or not + type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/templates/provider/hmc/templates/rbac/controller/rolebindings.yaml b/templates/provider/hmc/templates/rbac/controller/rolebindings.yaml index d510c533e..a35d934fa 100644 --- a/templates/provider/hmc/templates/rbac/controller/rolebindings.yaml +++ b/templates/provider/hmc/templates/rbac/controller/rolebindings.yaml @@ -28,3 +28,20 @@ subjects: - kind: ServiceAccount name: '{{ include "hmc.fullname" . }}-controller-manager' namespace: '{{ .Release.Namespace }}' + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "hmc.fullname" . 
}}-capi-manager-rolebinding + namespace: {{ .Release.Namespace }} + labels: + {{- include "hmc.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: '{{ include "hmc.fullname" . }}-manager-role' +subjects: + - kind: ServiceAccount + name: 'capi-manager' + namespace: '{{ .Release.Namespace }}' diff --git a/templates/provider/hmc/templates/rbac/controller/roles.yaml b/templates/provider/hmc/templates/rbac/controller/roles.yaml index 69206ee6b..b805d1bf6 100644 --- a/templates/provider/hmc/templates/rbac/controller/roles.yaml +++ b/templates/provider/hmc/templates/rbac/controller/roles.yaml @@ -209,6 +209,26 @@ rules: - "" resources: - secrets + verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} +- apiGroups: + - hmc.mirantis.com + resources: + - unmanagedclusters + - unmanagedclusters/status + - unmanagedmachines + - unmanagedmachines/status + verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} +- apiGroups: + - cluster.x-k8s.io + resources: + - clusters + - machines + verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} + - delete +- apiGroups: + - config.projectsveltos.io + resources: + - clustersummaries verbs: {{ include "rbac.viewerVerbs" . | nindent 4 }} --- apiVersion: rbac.authorization.k8s.io/v1 @@ -226,3 +246,20 @@ rules: verbs: - get - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-capi-role + labels: + {{- include "hmc.labels" . | nindent 4 }} + cluster.x-k8s.io/aggregate-to-manager: "true" +rules: + - apiGroups: + - hmc.mirantis.com + resources: + - unmanagedclusters + - unmanagedclusters/status + - unmanagedmachines + - unmanagedmachines/status + verbs: {{ include "rbac.viewerVerbs" . 
| nindent 4 }} \ No newline at end of file diff --git a/templates/provider/hmc/templates/rbac/user-facing/unmanagedcluster-editor.yaml b/templates/provider/hmc/templates/rbac/user-facing/unmanagedcluster-editor.yaml new file mode 100644 index 000000000..014e76e9f --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/unmanagedcluster-editor.yaml @@ -0,0 +1,13 @@ +# permissions for end users to edit unmanagedclusters. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + hmc.mirantis.com/aggregate-to-global-admin: "true" + name: {{ include "hmc.fullname" . }}-unmanagedcluster-editor-role +rules: +- apiGroups: + - hmc.mirantis.com + resources: + - unmanagedclusters + verbs: {{ include "rbac.editorVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/unmanagedcluster-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/unmanagedcluster-viewer.yaml new file mode 100644 index 000000000..2196efc69 --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/unmanagedcluster-viewer.yaml @@ -0,0 +1,13 @@ +# permissions for end users to view unmanagedclusters. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + hmc.mirantis.com/aggregate-to-global-admin: "true" + name: {{ include "hmc.fullname" . }}-unmanagedcluster-viewer-role +rules: +- apiGroups: + - hmc.mirantis.com + resources: + - unmanagedclusters + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} \ No newline at end of file diff --git a/templates/provider/hmc/templates/webhooks.yaml b/templates/provider/hmc/templates/webhooks.yaml index b0e41200e..04674dfff 100644 --- a/templates/provider/hmc/templates/webhooks.yaml +++ b/templates/provider/hmc/templates/webhooks.yaml @@ -233,4 +233,26 @@ webhooks: resources: - servicetemplatechains sideEffects: None + - admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: {{ include "hmc.webhook.serviceName" .
}} + namespace: {{ include "hmc.webhook.serviceNamespace" . }} + path: /validate-hmc-mirantis-com-v1alpha1-unmanagedcluster + failurePolicy: Fail + matchPolicy: Equivalent + name: validation.unmanagedcluster.hmc.mirantis.com + rules: + - apiGroups: + - hmc.mirantis.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - unmanagedclusters + sideEffects: None {{- end }} diff --git a/test/objects/unmanagedcluster/unmanagedcluster.go b/test/objects/unmanagedcluster/unmanagedcluster.go new file mode 100644 index 000000000..c796e1771 --- /dev/null +++ b/test/objects/unmanagedcluster/unmanagedcluster.go @@ -0,0 +1,41 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package unmanagedcluster + +import "github.com/Mirantis/hmc/api/v1alpha1" + +type Opt func(unmanagedCluster *v1alpha1.UnmanagedCluster) + +const ( + DefaultName = "hmc-uc" +) + +func NewUnmanagedCluster(opts ...Opt) *v1alpha1.UnmanagedCluster { + uc := &v1alpha1.UnmanagedCluster{ + Spec: v1alpha1.UnmanagedClusterSpec{Name: DefaultName}, + } + + for _, opt := range opts { + opt(uc) + } + return uc +} + +func WithNameAndNamespace(name, namespace string) Opt { + return func(uc *v1alpha1.UnmanagedCluster) { + uc.Name = name + uc.Namespace = namespace + } +}