diff --git a/test/utils/deployment.go b/test/deployment/deployment.go
similarity index 61%
rename from test/utils/deployment.go
rename to test/deployment/deployment.go
index 1f088bde8..5faedb169 100644
--- a/test/utils/deployment.go
+++ b/test/deployment/deployment.go
@@ -12,17 +12,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package utils
+package deployment
 
 import (
+	_ "embed"
 	"encoding/json"
 	"fmt"
 	"os"
 	"os/exec"
 
+	"github.com/Mirantis/hmc/test/utils"
 	"github.com/google/uuid"
 	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
 	"gopkg.in/yaml.v3"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 )
 
 type ProviderType string
@@ -36,39 +40,32 @@ type Template string
 const (
 	TemplateAWSStandaloneCP Template = "aws-standalone-cp"
 	TemplateAWSHostedCP     Template = "aws-hosted-cp"
-
-	deploymentConfigFile = "./config/dev/deployment.yaml"
 )
 
-// ConfigureDeploymentConfig modifies the config/dev/deployment.yaml for
-// use in test and returns the generated cluster name.
-func ConfigureDeploymentConfig(provider ProviderType, templateName Template) (string, error) {
-	generatedName := uuid.NewString()[:8] + "-e2e-test"
+//go:embed resources/deployment.yaml.tpl
+var deploymentConfigBytes []byte
 
-	deploymentConfigBytes, err := os.ReadFile(deploymentConfigFile)
-	if err != nil {
-		return "", fmt.Errorf("failed to read deployment config: %w", err)
-	}
+// GetUnstructuredDeployment returns an unstructured deployment object based on
+// the provider and template.
+func GetUnstructuredDeployment(provider ProviderType, templateName Template) *unstructured.Unstructured {
+	GinkgoHelper()
+
+	generatedName := uuid.New().String()[:8] + "-e2e-test"
+	_, _ = fmt.Fprintf(GinkgoWriter, "Generated AWS cluster name: %q\n", generatedName)
 
 	var deploymentConfig map[string]interface{}
 
-	err = yaml.Unmarshal(deploymentConfigBytes, &deploymentConfig)
-	if err != nil {
-		return "", fmt.Errorf("failed to unmarshal deployment config: %w", err)
-	}
+	err := yaml.Unmarshal(deploymentConfigBytes, &deploymentConfig)
+	Expect(err).NotTo(HaveOccurred(), "failed to unmarshal deployment config")
 
 	switch provider {
 	case ProviderAWS:
 		// XXX: Maybe we should just use automatic AMI selection here.
-		amiID, err := getAWSAMI()
-		if err != nil {
-			return "", fmt.Errorf("failed to get AWS AMI: %w", err)
-		}
-
+		amiID := getAWSAMI()
 		awsRegion := os.Getenv("AWS_REGION")
 
-		// Modify the existing ./config/dev/deployment.yaml file to use the
-		// AMI we just found and our AWS_REGION.
+		// Modify the deployment config to use the generated name and the AMI.
+		// TODO: This should be modified to use go templating.
 		if metadata, ok := deploymentConfig["metadata"].(map[string]interface{}); ok {
 			metadata["name"] = generatedName
 		} else {
@@ -92,34 +89,28 @@ func ConfigureDeploymentConfig(provider ProviderType, templateName Template) (st
 		}
 	}
 
-	deploymentConfigBytes, err = yaml.Marshal(deploymentConfig)
-	if err != nil {
-		return "", fmt.Errorf("failed to marshal deployment config: %w", err)
-	}
-
-	_, _ = fmt.Fprintf(GinkgoWriter, "Generated AWS cluster name: %q\n", generatedName)
-
-	return generatedName, os.WriteFile(deploymentConfigFile, deploymentConfigBytes, 0644)
+		return &unstructured.Unstructured{Object: deploymentConfig}
 	default:
-		return "", fmt.Errorf("unsupported provider: %s", provider)
+		Fail(fmt.Sprintf("unsupported provider: %s", provider))
 	}
+
+	return nil
 }
 
 // getAWSAMI returns an AWS AMI ID to use for test.
-func getAWSAMI() (string, error) {
+func getAWSAMI() string {
+	GinkgoHelper()
+
 	// For now we'll just use the latest Kubernetes version for ubuntu 20.04,
 	// but we could potentially pin the Kube version and specify that here.
 	cmd := exec.Command("./bin/clusterawsadm", "ami", "list", "--os=ubuntu-20.04", "-o", "json")
-	output, err := Run(cmd)
-	if err != nil {
-		return "", fmt.Errorf("failed to list AMIs: %w", err)
-	}
+	output, err := utils.Run(cmd)
+	Expect(err).NotTo(HaveOccurred(), "failed to list AMIs")
 
 	var amiList map[string]interface{}
 
-	if err := json.Unmarshal(output, &amiList); err != nil {
-		return "", fmt.Errorf("failed to unmarshal AMI list: %w", err)
-	}
+	err = json.Unmarshal(output, &amiList)
+	Expect(err).NotTo(HaveOccurred(), "failed to unmarshal AMI list")
 
 	// ami list returns a sorted list of AMIs by kube version, just get the
 	// first one.
@@ -131,9 +122,11 @@ func getAWSAMI() (string, error) {
 			continue
 		}
 
-			return ami, nil
+			return ami
 		}
 	}
 
-	return "", fmt.Errorf("no AMIs found")
+	Fail("no AMIs found")
+
+	return ""
 }
diff --git a/test/deployment/resources/deployment.yaml.tpl b/test/deployment/resources/deployment.yaml.tpl
new file mode 100644
index 000000000..372003b58
--- /dev/null
+++ b/test/deployment/resources/deployment.yaml.tpl
@@ -0,0 +1,17 @@
+apiVersion: hmc.mirantis.com/v1alpha1
+kind: Deployment
+metadata:
+  name: ${DEPLOYMENT_NAME}
+spec:
+  config:
+    controlPlane:
+      amiID: ${AMI_ID}
+      instanceType: ${INSTANCE_TYPE}
+    controlPlaneNumber: ${CONTROL_PLANE_NUMBER}
+    publicIP: ${PUBLIC_IP}
+    region: ${AWS_REGION}
+    worker:
+      amiID: ${AMI_ID}
+      instanceType: ${INSTANCE_TYPE}
+    workersNumber: ${WORKERS_NUMBER}
+  template: ${TEMPLATE_NAME}
diff --git a/test/deployment/validate_deleted.go b/test/deployment/validate_deleted.go
new file mode 100644
index 000000000..6be7b5969
--- /dev/null
+++ b/test/deployment/validate_deleted.go
@@ -0,0 +1,122 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package deployment
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/Mirantis/hmc/test/kubeclient"
+	. "github.com/onsi/ginkgo/v2"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+var deletionValidators = map[string]resourceValidationFunc{
+	"clusters":           validateClusterDeleted,
+	"machinedeployments": validateMachineDeploymentsDeleted,
+	"control-planes":     validateK0sControlPlanesDeleted,
+}
+
+// VerifyProviderDeleted is a provider-agnostic verification that checks
+// to ensure generic resources managed by the provider have been deleted.
+// It is intended to be used in conjunction with an Eventually block.
+func VerifyProviderDeleted(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
+	// Sequentially validate each resource type, only returning the first error
+	// as to not move on to the next resource type until the first is resolved.
+	// We use []string here since order is important.
+ for _, name := range []string{"control-planes", "machinedeployments", "clusters"} { + validator, ok := resourceValidators[name] + if !ok { + continue + } + + if err := validator(ctx, kc, clusterName); err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "[%s] validation error: %v\n", name, err) + return err + } + + _, _ = fmt.Fprintf(GinkgoWriter, "[%s] validation succeeded\n", name) + delete(resourceValidators, name) + } + + return nil +} + +// validateClusterDeleted validates that the Cluster resource has been deleted. +func validateClusterDeleted(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { + // Validate that the Cluster resource has been deleted + cluster, err := kc.GetCluster(ctx, clusterName) + if err != nil { + return err + } + + var inPhase string + + if cluster != nil { + phase, _, _ := unstructured.NestedString(cluster.Object, "status", "phase") + if phase != "" { + inPhase = ", in phase: " + phase + } + + return fmt.Errorf("cluster %q still exists%s", clusterName, inPhase) + } + + return nil +} + +// validateMachineDeploymentsDeleted validates that all MachineDeployments have +// been deleted. +func validateMachineDeploymentsDeleted(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { + machineDeployments, err := kc.ListMachineDeployments(ctx, clusterName) + if err != nil { + return err + } + + var mdNames []string + if len(machineDeployments) > 0 { + for _, md := range machineDeployments { + mdNames = append(mdNames, md.GetName()) + + return fmt.Errorf("machine deployments still exist: %s", mdNames) + } + } + + return nil +} + +// validateK0sControlPlanesDeleted validates that all k0scontrolplanes have +// been deleted. +func validateK0sControlPlanesDeleted(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { + controlPlanes, err := kc.ListK0sControlPlanes(ctx, clusterName) + if err != nil { + return err + } + + var cpNames []string + if len(controlPlanes) > 0 { + for _, cp := range controlPlanes { + cpNames = append(cpNames, cp.GetName()) + + return fmt.Errorf("k0s control planes still exist: %s", cpNames) + } + } + + return nil +} diff --git a/test/e2e/validate_deployment.go b/test/deployment/validate_deployed.go similarity index 71% rename from test/e2e/validate_deployment.go rename to test/deployment/validate_deployed.go index 5d19f69b7..8aebc458c 100644 --- a/test/e2e/validate_deployment.go +++ b/test/deployment/validate_deployed.go @@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
 
-package e2e
+package deployment
 
 import (
 	"context"
 	"fmt"
+	"strings"
 
 	"github.com/Mirantis/hmc/test/kubeclient"
 	"github.com/Mirantis/hmc/test/utils"
@@ -26,7 +27,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/intstr"
 )
 
@@ -35,22 +35,24 @@
 type resourceValidationFunc func(context.Context, *kubeclient.KubeClient, string) error
 
 var resourceValidators = map[string]resourceValidationFunc{
-	"clusters":       validateClusters,
+	"clusters":       validateCluster,
 	"machines":       validateMachines,
 	"control-planes": validateK0sControlPlanes,
 	"csi-driver":     validateCSIDriver,
 	"ccm":            validateCCM,
 }
 
-// verifyProviderDeployed is a provider-agnostic verification that checks for
+// VerifyProviderDeployed is a provider-agnostic verification that checks for
 // the presence of specific resources in the cluster using
-// resourceValidationFuncs and clusterValidationFuncs. It is meant to be used
-// in conjunction with an Eventually block. In some cases it may be necessary
-// to end the Eventually block early if the resource will never reach a ready
-// state, in these instances Ginkgo's Fail function should be used.
-func verifyProviderDeployed(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
+// resourceValidationFuncs. It is meant to be used in conjunction with an
+// Eventually block.
+// In some cases it may be necessary to end the Eventually block early if the
+// resource will never reach a ready state, in these instances Ginkgo's Fail
+// should be used to end the spec early.
+func VerifyProviderDeployed(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
 	// Sequentially validate each resource type, only returning the first error
 	// as to not move on to the next resource type until the first is resolved.
+	// We use []string here since order is important.
 	for _, name := range []string{"clusters", "machines", "control-planes", "csi-driver", "ccm"} {
 		validator, ok := resourceValidators[name]
 		if !ok {
@@ -63,30 +65,16 @@
 		}
 
 		_, _ = fmt.Fprintf(GinkgoWriter, "[%s] validation succeeded\n", name)
-		// XXX: Once we validate for the first time should we move the
-		// validation out and consider it "done"? Or is there a possibility
-		// that the resources could enter a non-ready state later?
 		delete(resourceValidators, name)
 	}
 
 	return nil
 }
 
-func validateClusters(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
-	gvr := schema.GroupVersionResource{
-		Group:    "cluster.x-k8s.io",
-		Version:  "v1beta1",
-		Resource: "clusters",
-	}
-
-	client, err := kc.GetDynamicClient(gvr)
-	if err != nil {
-		Fail(fmt.Sprintf("failed to get %s client: %v", gvr.Resource, err))
-	}
-
-	cluster, err := client.Get(ctx, clusterName, metav1.GetOptions{})
+func validateCluster(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
+	cluster, err := kc.GetCluster(ctx, clusterName)
 	if err != nil {
-		return fmt.Errorf("failed to get %s %s: %v", gvr.Resource, clusterName, err)
+		return err
 	}
 
 	phase, _, err := unstructured.NestedString(cluster.Object, "status", "phase")
@@ -110,25 +98,12 @@
 }
 
 func validateMachines(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
-	gvr := schema.GroupVersionResource{
-		Group:    "cluster.x-k8s.io",
-		Version:  "v1beta1",
-		Resource: "machines",
-	}
-
-	client, err := kc.GetDynamicClient(gvr)
+	machines, err := kc.ListMachines(ctx, clusterName)
 	if err != nil {
-		Fail(fmt.Sprintf("failed to get %s client: %v", gvr.Resource, err))
+		return fmt.Errorf("failed to list machines: %w", err)
 	}
 
-	machines, err := client.List(ctx, metav1.ListOptions{
-		LabelSelector: "cluster.x-k8s.io/cluster-name=" + clusterName,
-	})
-	if err != nil {
-		return fmt.Errorf("failed to list %s: %v", gvr.Resource, err)
-	}
-
-	for _, machine := range machines.Items {
+	for _, machine := range machines {
 		if err := utils.ValidateObjectNamePrefix(&machine, clusterName); err != nil {
 			Fail(err.Error())
 		}
@@ -142,23 +117,12 @@
 }
 
 func validateK0sControlPlanes(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
-	k0sControlPlaneClient, err := kc.GetDynamicClient(schema.GroupVersionResource{
-		Group:    "controlplane.cluster.x-k8s.io",
-		Version:  "v1beta1",
-		Resource: "k0scontrolplanes",
-	})
-	if err != nil {
-		return fmt.Errorf("failed to get K0sControlPlane client: %w", err)
-	}
-
-	controlPlanes, err := k0sControlPlaneClient.List(ctx, metav1.ListOptions{
-		LabelSelector: "cluster.x-k8s.io/cluster-name=" + clusterName,
-	})
+	controlPlanes, err := kc.ListK0sControlPlanes(ctx, clusterName)
 	if err != nil {
 		return fmt.Errorf("failed to list K0sControlPlanes: %w", err)
 	}
 
-	for _, controlPlane := range controlPlanes.Items {
+	for _, controlPlane := range controlPlanes {
 		if err := utils.ValidateObjectNamePrefix(&controlPlane, clusterName); err != nil {
 			Fail(err.Error())
 		}
@@ -181,30 +145,21 @@
 			return fmt.Errorf("expected K0sControlPlane condition to be type map[string]interface{}, got: %T", status)
 		}
 
-		if !st["ready"].(bool) {
-			return fmt.Errorf("K0sControlPlane %s is not ready, status: %+v", controlPlane.GetName(), status)
+		// Use a checked type assertion so a missing or non-bool 'ready'
+		// value returns an error instead of panicking.
+		ready, ok := st["ready"].(bool)
+		if !ok {
+			return fmt.Errorf("%s %s has no 'ready' status", objKind, objName)
+		}
+
+		if !ready {
+			return fmt.Errorf("%s %s is not ready, status: %+v", objKind, objName, st)
 		}
 	}
 
 	return nil
 }
 
-// apiVersion: v1
-// kind: Pod
-// metadata:
-//   name: test-pvc-pod
-// spec:
-//   volumes:
-//     - name: test-pvc-vol
-//       persistentVolumeClaim:
-//         claimName: pvcName
-//   containers:
-//     - name: test-pvc-container
-//       image: nginx
-//       volumeMounts:
-//         - mountPath: "/storage"
-//           name: task-pv-storage
-
 // validateCSIDriver validates that the provider CSI driver is functioning
 // by creating a PVC and verifying it enters "Bound" status.
 func validateCSIDriver(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error {
@@ -235,22 +190,64 @@
 		// Since these resourceValidationFuncs are intended to be used in
 		// Eventually we should ensure a follow-up PVCreate is a no-op.
 		if !apierrors.IsAlreadyExists(err) {
+			// XXX: Maybe we should Fail here?
 			return fmt.Errorf("failed to create test PVC: %w", err)
 		}
 	}
 
-	// Verify the PVC enters "Bound" status.
+	// Create a pod that uses the PVC so that the PVC enters "Bound" status.
+	_, err = clusterKC.Client.CoreV1().Pods(clusterKC.Namespace).Create(ctx, &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: pvcName + "-pod",
+		},
+		Spec: corev1.PodSpec{
+			Volumes: []corev1.Volume{
+				{
+					Name: "test-pvc-vol",
+					VolumeSource: corev1.VolumeSource{
+						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+							ClaimName: pvcName,
+						},
+					},
+				},
+			},
+			Containers: []corev1.Container{
+				{
+					Name:  "test-pvc-container",
+					Image: "nginx",
+					VolumeMounts: []corev1.VolumeMount{
+						{
+							MountPath: "/storage",
+							Name:      "test-pvc-vol",
+						},
+					},
+				},
+			},
+		},
+	}, metav1.CreateOptions{})
+	if err != nil {
+		if !apierrors.IsAlreadyExists(err) {
+			return fmt.Errorf("failed to create test Pod: %w", err)
+		}
+	}
+
+	// Verify the PVC enters "Bound" status and inherits the CSI driver
+	// storageClass without us having to specify it.
 	pvc, err := clusterKC.Client.CoreV1().PersistentVolumeClaims(clusterKC.Namespace).
 		Get(ctx, pvcName, metav1.GetOptions{})
 	if err != nil {
 		return fmt.Errorf("failed to get test PVC: %w", err)
 	}
 
-	if pvc.Status.Phase == corev1.ClaimBound {
-		return nil
+	if pvc.Spec.StorageClassName == nil || !strings.Contains(*pvc.Spec.StorageClassName, "csi") {
+		Fail(fmt.Sprintf("%s PersistentVolumeClaim does not have a CSI driver storageClass", pvcName))
+	}
+
+	if pvc.Status.Phase != corev1.ClaimBound {
+		return fmt.Errorf("%s PersistentVolume not yet 'Bound', current phase: %q", pvcName, pvc.Status.Phase)
 	}
 
-	return fmt.Errorf("%s PersistentVolume not yet 'Bound', current phase: %q", pvcName, pvc.Status.Phase)
+	return nil
 }
 
 // validateCCM validates that the provider's cloud controller manager is
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index ffbde6a09..9645b1e72 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -17,6 +17,7 @@ package e2e
 import (
 	"context"
 	"fmt"
+	"os"
 	"os/exec"
 	"strings"
 	"time"
@@ -25,6 +26,7 @@ import (
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/Mirantis/hmc/test/deployment" "github.com/Mirantis/hmc/test/kubeclient" "github.com/Mirantis/hmc/test/utils" ) @@ -32,12 +34,12 @@ import ( const namespace = "hmc-system" var _ = Describe("controller", Ordered, func() { - // BeforeAll(func() { - // By("building and deploying the controller-manager") - // cmd := exec.Command("make", "test-apply") - // _, err := utils.Run(cmd) - // Expect(err).NotTo(HaveOccurred()) - // }) + BeforeAll(func() { + By("building and deploying the controller-manager") + cmd := exec.Command("make", "test-apply") + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + }) // AfterAll(func() { // By("removing the controller-manager") @@ -96,8 +98,10 @@ var _ = Describe("controller", Ordered, func() { Context("AWS Templates", func() { var ( - kc *kubeclient.KubeClient - err error + kc *kubeclient.KubeClient + deleteDeploymentFunc func() error + clusterName string + err error ) BeforeAll(func() { @@ -108,30 +112,44 @@ var _ = Describe("controller", Ordered, func() { }) AfterAll(func() { - // // Purge the AWS resources, the AfterAll for the controller will - // // clean up the management cluster. - // cmd := exec.Command("make", "dev-aws-nuke") - // _, err := utils.Run(cmd) - // ExpectWithOffset(2, err).NotTo(HaveOccurred()) + // Delete the deployment if it was created. + if deleteDeploymentFunc != nil { + err = deleteDeploymentFunc() + Expect(err).NotTo(HaveOccurred()) + } + + // Purge the AWS resources, the AfterAll for the controller will + // clean up the management cluster. + err = os.Setenv("CLUSTER_NAME", clusterName) + Expect(err).NotTo(HaveOccurred()) + cmd := exec.Command("make", "dev-aws-nuke") + _, err := utils.Run(cmd) + ExpectWithOffset(2, err).NotTo(HaveOccurred()) }) It("should work with an AWS provider", func() { - By("using the aws-standalone-cp template") - //clusterName, err := utils.ConfigureDeploymentConfig(utils.ProviderAWS, utils.TemplateAWSStandaloneCP) - //ExpectWithOffset(1, err).NotTo(HaveOccurred()) + By("creating a Deployment with aws-standalone-cp template") + d := deployment.GetUnstructuredDeployment(deployment.ProviderAWS, deployment.TemplateAWSStandaloneCP) + clusterName = d.GetName() - clusterName := "bba1743d-e2e-test" + deleteDeploymentFunc, err = kc.CreateDeployment(context.Background(), d) + Expect(err).NotTo(HaveOccurred()) - cmd := exec.Command("make", "dev-aws-apply") - _, err = utils.Run(cmd) - ExpectWithOffset(2, err).NotTo(HaveOccurred()) - _, _ = fmt.Fprintf(GinkgoWriter, "Waiting for resource validation to succeed\n") + By("waiting for infrastructure providers to deploy successfully") Eventually(func() error { - return verifyProviderDeployed(context.Background(), kc, clusterName) - }).WithTimeout(30 * time.Minute).WithPolling(5 * time.Second).Should(Succeed()) - By("using the aws-hosted-cp template") - // TODO: Use the standalone control plane resources to craft a hosted - // control plane and test it. 
+				return deployment.VerifyProviderDeployed(context.Background(), kc, clusterName)
+			}).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+			By("verifying the deployment deletes successfully")
+			err = deleteDeploymentFunc()
+			Expect(err).NotTo(HaveOccurred())
+			Eventually(func() error {
+				return deployment.VerifyProviderDeleted(context.Background(), kc, clusterName)
+			}).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+			By("creating a Deployment with aws-hosted-cp template")
+			// TODO: Use the standalone control plane resources to craft a
+			// hosted control plane and test it.
 		})
 	})
})
diff --git a/test/kubeclient/kubeclient.go b/test/kubeclient/kubeclient.go
index eb922777f..d2521291d 100644
--- a/test/kubeclient/kubeclient.go
+++ b/test/kubeclient/kubeclient.go
@@ -22,10 +22,12 @@ import (
 	"path/filepath"
 
 	"github.com/Mirantis/hmc/test/utils"
+	. "github.com/onsi/ginkgo/v2"
 	corev1 "k8s.io/api/core/v1"
 	apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
@@ -162,3 +164,108 @@
 
 	return client.Resource(gvr).Namespace(kc.Namespace), nil
 }
+
+// CreateDeployment creates a deployment.hmc.mirantis.com in the given
+// namespace and returns a DeleteFunc to clean up the deployment.
+// The DeleteFunc is a no-op if the deployment has already been deleted.
+func (kc *KubeClient) CreateDeployment(
+	ctx context.Context, deployment *unstructured.Unstructured) (func() error, error) {
+	kind := deployment.GetKind()
+
+	if kind != "Deployment" {
+		return nil, fmt.Errorf("expected kind Deployment, got: %s", kind)
+	}
+
+	client, err := kc.GetDynamicClient(schema.GroupVersionResource{
+		Group:    "hmc.mirantis.com",
+		Version:  "v1alpha1",
+		Resource: "deployments",
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to get dynamic client: %w", err)
+	}
+
+	_, err = client.Create(ctx, deployment, metav1.CreateOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to create Deployment: %w", err)
+	}
+
+	return func() error {
+		err := client.Delete(ctx, deployment.GetName(), metav1.DeleteOptions{})
+		if apierrors.IsNotFound(err) {
+			return nil
+		}
+		return err
+	}, nil
+}
+
+// GetCluster returns a Cluster resource by name.
+func (kc *KubeClient) GetCluster(ctx context.Context, clusterName string) (*unstructured.Unstructured, error) {
+	gvr := schema.GroupVersionResource{
+		Group:    "cluster.x-k8s.io",
+		Version:  "v1beta1",
+		Resource: "clusters",
+	}
+
+	client, err := kc.GetDynamicClient(gvr)
+	if err != nil {
+		Fail(fmt.Sprintf("failed to get %s client: %v", gvr.Resource, err))
+	}
+
+	cluster, err := client.Get(ctx, clusterName, metav1.GetOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to get %s %s: %w", gvr.Resource, clusterName, err)
+	}
+
+	return cluster, nil
+}
+
+// listResource returns a list of resources for the given GroupVersionResource
+// affiliated with the given clusterName.
+func (kc *KubeClient) listResource(
+	ctx context.Context, gvr schema.GroupVersionResource, clusterName string) ([]unstructured.Unstructured, error) {
+	client, err := kc.GetDynamicClient(gvr)
+	if err != nil {
+		Fail(fmt.Sprintf("failed to get %s client: %v", gvr.Resource, err))
+	}
+
+	resources, err := client.List(ctx, metav1.ListOptions{
+		LabelSelector: "cluster.x-k8s.io/cluster-name=" + clusterName,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to list %s: %w", gvr.Resource, err)
+	}
+
+	return resources.Items, nil
+}
+
+// ListMachines returns a list of Machine resources for the given cluster.
+func (kc *KubeClient) ListMachines(ctx context.Context, clusterName string) ([]unstructured.Unstructured, error) {
+	return kc.listResource(ctx, schema.GroupVersionResource{
+		Group:    "cluster.x-k8s.io",
+		Version:  "v1beta1",
+		Resource: "machines",
+	}, clusterName)
+}
+
+// ListMachineDeployments returns a list of MachineDeployment resources for the
+// given cluster.
+func (kc *KubeClient) ListMachineDeployments(
+	ctx context.Context, clusterName string) ([]unstructured.Unstructured, error) {
+	return kc.listResource(ctx, schema.GroupVersionResource{
+		Group:    "cluster.x-k8s.io",
+		Version:  "v1beta1",
+		Resource: "machinedeployments",
+	}, clusterName)
+}
+
+// ListK0sControlPlanes returns a list of K0sControlPlane resources for the
+// given cluster.
+func (kc *KubeClient) ListK0sControlPlanes(
+	ctx context.Context, clusterName string) ([]unstructured.Unstructured, error) {
+	return kc.listResource(ctx, schema.GroupVersionResource{
+		Group:    "controlplane.cluster.x-k8s.io",
+		Version:  "v1beta1",
+		Resource: "k0scontrolplanes",
+	}, clusterName)
+}
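
Follow-up note on the `TODO: This should be modified to use go templating` left in GetUnstructuredDeployment: a minimal sketch of what that could look like, assuming the ${VAR} placeholders in resources/deployment.yaml.tpl are first converted to {{ .Field }} actions. The deploymentValues struct, its field names, and the renderDeployment helper are hypothetical and not part of this patch.

package deployment

import (
	"bytes"
	"text/template"

	"gopkg.in/yaml.v3"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// deploymentValues mirrors the placeholders in deployment.yaml.tpl; the field
// names here are illustrative only.
type deploymentValues struct {
	Name               string
	AMIID              string
	InstanceType       string
	ControlPlaneNumber int
	PublicIP           bool
	Region             string
	WorkersNumber      int
	Template           string
}

// renderDeployment executes the embedded template with the given values and
// unmarshals the result into an unstructured object, replacing the manual
// map mutation done in GetUnstructuredDeployment today.
func renderDeployment(tpl []byte, values deploymentValues) (*unstructured.Unstructured, error) {
	t, err := template.New("deployment").Parse(string(tpl))
	if err != nil {
		return nil, err
	}

	var buf bytes.Buffer
	if err := t.Execute(&buf, values); err != nil {
		return nil, err
	}

	var obj map[string]interface{}
	if err := yaml.Unmarshal(buf.Bytes(), &obj); err != nil {
		return nil, err
	}

	return &unstructured.Unstructured{Object: obj}, nil
}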