From 7059384a74b6c73893061f05b60e2d7f3525d18e Mon Sep 17 00:00:00 2001 From: Markus Walker Date: Wed, 14 Jun 2023 11:28:23 -0700 Subject: [PATCH] Add support for deleting clusters and scaling nodepools Co-authored-by: Markus Walker Co-authored-by: Daniel Newman --- .../framework/extensions/clusters/clusters.go | 34 +++++ .../extensions/machinepools/machinepools.go | 122 +++++++++++++++--- .../framework/extensions/nodes/node_status.go | 25 +++- .../extensions/provisioning/verify.go | 67 ++++++++++ .../extensions/rke1/nodepools/nodepools.go | 98 ++++++++------ .../extensions/scalinginput/config.go | 15 +++ .../extensions/workloads/pods/verify.go | 57 ++++++++ tests/v2/validation/deleting/README.md | 15 +++ .../deleting/delete_cluster_k3s_test.go | 46 +++++++ .../deleting/delete_cluster_rke1_test.go | 46 +++++++ .../deleting/delete_cluster_rke2_test.go | 46 +++++++ .../{scaling => nodescaling}/README.md | 0 .../{scaling => nodescaling}/replace.go | 2 +- .../scale_replace_rke1_test.go | 2 +- .../scale_replace_test.go | 2 +- .../scaling_node_driver_k3s_test.go | 97 ++++++++++++++ .../scaling_node_driver_rke1_test.go | 93 +++++++++++++ .../scaling_node_driver_rke2_test.go | 97 ++++++++++++++ .../nodescaling/scaling_nodepools.go | 47 +++++++ .../airgap/k3s_custom_cluster_test.go | 2 +- .../airgap/rke2_custom_cluster_test.go | 2 +- .../hostname_truncation_test.go | 2 +- .../provisioning/registries/registry_test.go | 8 +- .../v2/validation/provisioning/rke1/README.md | 2 +- .../rke1/provisioning_node_driver_test.go | 4 +- 25 files changed, 863 insertions(+), 68 deletions(-) create mode 100644 tests/framework/extensions/scalinginput/config.go create mode 100644 tests/framework/extensions/workloads/pods/verify.go create mode 100644 tests/v2/validation/deleting/README.md create mode 100644 tests/v2/validation/deleting/delete_cluster_k3s_test.go create mode 100644 tests/v2/validation/deleting/delete_cluster_rke1_test.go create mode 100644 tests/v2/validation/deleting/delete_cluster_rke2_test.go rename tests/v2/validation/{scaling => nodescaling}/README.md (100%) rename tests/v2/validation/{scaling => nodescaling}/replace.go (99%) rename tests/v2/validation/{scaling => nodescaling}/scale_replace_rke1_test.go (98%) rename tests/v2/validation/{scaling => nodescaling}/scale_replace_test.go (98%) create mode 100644 tests/v2/validation/nodescaling/scaling_node_driver_k3s_test.go create mode 100644 tests/v2/validation/nodescaling/scaling_node_driver_rke1_test.go create mode 100644 tests/v2/validation/nodescaling/scaling_node_driver_rke2_test.go create mode 100644 tests/v2/validation/nodescaling/scaling_nodepools.go diff --git a/tests/framework/extensions/clusters/clusters.go b/tests/framework/extensions/clusters/clusters.go index 75b67a705b8..d0437e202ab 100644 --- a/tests/framework/extensions/clusters/clusters.go +++ b/tests/framework/extensions/clusters/clusters.go @@ -912,6 +912,40 @@ func CreateK3SRKE2Cluster(client *rancher.Client, rke2Cluster *apisV1.Cluster) ( return cluster, nil } +// DeleteRKE1Cluster is a "helper" function that takes a rancher client and the RKE1 cluster ID as parameters to delete +// the cluster.
+func DeleteRKE1Cluster(client *rancher.Client, clusterID string) error { + cluster, err := client.Management.Cluster.ByID(clusterID) + if err != nil { + return err + } + + logrus.Infof("Deleting cluster %s...", cluster.Name) + err = client.Management.Cluster.Delete(cluster) + if err != nil { + return err + } + + return nil +} + +// DeleteK3SRKE2Cluster is a "helper" function that takes a rancher client and the non-rke1 cluster ID as parameters to delete +// the cluster. +func DeleteK3SRKE2Cluster(client *rancher.Client, clusterID string) error { + cluster, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(clusterID) + if err != nil { + return err + } + + logrus.Infof("Deleting cluster %s...", cluster.Name) + err = client.Steve.SteveType(ProvisioningSteveResourceType).Delete(cluster) + if err != nil { + return err + } + + return nil +} + // UpdateK3SRKE2Cluster is a "helper" functions that takes a rancher client, old rke2/k3s cluster config, and the new rke2/k3s cluster config as parameters. func UpdateK3SRKE2Cluster(client *rancher.Client, cluster *v1.SteveAPIObject, updatedCluster *apisV1.Cluster) (*v1.SteveAPIObject, error) { updateCluster, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(cluster.ID) diff --git a/tests/framework/extensions/machinepools/machinepools.go b/tests/framework/extensions/machinepools/machinepools.go index fb734daddb3..a743d3ed8b0 100644 --- a/tests/framework/extensions/machinepools/machinepools.go +++ b/tests/framework/extensions/machinepools/machinepools.go @@ -4,18 +4,94 @@ import ( "fmt" "strconv" "strings" + "time" apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" + "github.com/rancher/rancher/tests/framework/clients/rancher" v1 "github.com/rancher/rancher/tests/framework/clients/rancher/v1" + nodestat "github.com/rancher/rancher/tests/framework/extensions/nodes" + "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" + kwait "k8s.io/apimachinery/pkg/util/wait" ) + +const ( + active = "active" +) + +// MatchNodeRolesToMachinePool matches the role of machinePools to the nodeRoles. +func MatchNodeRolesToMachinePool(nodeRoles NodeRoles, machinePools []apisV1.RKEMachinePool) (int, int32) { + count := int32(0) + for index, machinePoolConfig := range machinePools { + if nodeRoles.ControlPlane != machinePoolConfig.ControlPlaneRole { + continue + } + if nodeRoles.Etcd != machinePoolConfig.EtcdRole { + continue + } + if nodeRoles.Worker != machinePoolConfig.WorkerRole { + continue + } + + count += *machinePoolConfig.Quantity + + return index, count + } + + return -1, count +} + +// updateMachinePoolQuantity is a helper method that will update the desired machine pool with the latest quantity.
+func updateMachinePoolQuantity(client *rancher.Client, cluster *v1.SteveAPIObject, nodeRoles NodeRoles) (*v1.SteveAPIObject, error) { + updateCluster, err := client.Steve.SteveType("provisioning.cattle.io.cluster").ByID(cluster.ID) + if err != nil { + return nil, err + } + + updatedCluster := new(apisV1.Cluster) + err = v1.ConvertToK8sType(cluster, &updatedCluster) + if err != nil { + return nil, err + } + + updatedCluster.ObjectMeta.ResourceVersion = updateCluster.ObjectMeta.ResourceVersion + machineConfig, newQuantity := MatchNodeRolesToMachinePool(nodeRoles, updatedCluster.Spec.RKEConfig.MachinePools) + + newQuantity += nodeRoles.Quantity + updatedCluster.Spec.RKEConfig.MachinePools[machineConfig].Quantity = &newQuantity + + logrus.Infof("Scaling the machine pool to %v total nodes", newQuantity) + cluster, err = client.Steve.SteveType("provisioning.cattle.io.cluster").Update(cluster, updatedCluster) + if err != nil { + return nil, err + } + + err = kwait.Poll(500*time.Millisecond, 10*time.Minute, func() (done bool, err error) { + clusterResp, err := client.Steve.SteveType("provisioning.cattle.io.cluster").ByID(cluster.ID) + if err != nil { + return false, err + } + + if clusterResp.ObjectMeta.State.Name == active && nodestat.AllManagementNodeReady(client, cluster.ID) == nil { + return true, nil + } + + return false, nil + }) + if err != nil { + return nil, err + } + + return cluster, nil +} + // NewRKEMachinePool is a constructor that sets up a apisV1.RKEMachinePool object to be used to provision a cluster. func NewRKEMachinePool(controlPlaneRole, etcdRole, workerRole bool, poolName string, quantity int32, machineConfig *v1.SteveAPIObject, hostnameLengthLimit int) apisV1.RKEMachinePool { machineConfigRef := &corev1.ObjectReference{ Kind: machineConfig.Kind, Name: machineConfig.Name, } + machinePool := apisV1.RKEMachinePool{ ControlPlaneRole: controlPlaneRole, EtcdRole: etcdRole, @@ -24,9 +100,11 @@ func NewRKEMachinePool(controlPlaneRole, etcdRole, workerRole bool, poolName str Name: poolName, Quantity: &quantity, } + if hostnameLengthLimit > 0 { machinePool.HostnameLengthLimit = hostnameLengthLimit } + return machinePool } @@ -59,7 +137,6 @@ func (n NodeRoles) String() string { if n.Worker { result = append(result, "worker") } - return fmt.Sprintf("%d %s", n.Quantity, strings.Join(result, "+")) } @@ -67,29 +144,31 @@ func (n NodeRoles) String() string { // `machineConfig` is the *unstructured.Unstructured created by CreateMachineConfig // `nodeRoles` would be in this format // -// []NodeRoles{ -// { -// ControlPlane: true, -// Etcd: false, -// Worker: false, -// Quantity: 1, -// }, -// { -// ControlPlane: false, -// Etcd: true, -// Worker: false, -// Quantity: 1, -// }, -// } +// []NodeRoles{ +// { +// ControlPlane: true, +// Etcd: false, +// Worker: false, +// Quantity: 1, +// }, +// { +// ControlPlane: false, +// Etcd: true, +// Worker: false, +// Quantity: 1, +// }, +// } func CreateAllMachinePools(nodeRoles []NodeRoles, machineConfig *v1.SteveAPIObject, hostnameLengthLimits []HostnameTruncation) []apisV1.RKEMachinePool { machinePools := make([]apisV1.RKEMachinePool, 0, len(nodeRoles)) hostnameLengthLimit := 0 + for index, roles := range nodeRoles { poolName := "pool" + strconv.Itoa(index) if hostnameLengthLimits != nil && len(hostnameLengthLimits) >= index { hostnameLengthLimit = hostnameLengthLimits[index].PoolNameLengthLimit poolName = hostnameLengthLimits[index].Name } + if !roles.Windows { machinePool := NewRKEMachinePool(roles.ControlPlane, roles.Etcd, roles.Worker, poolName, 
roles.Quantity, machineConfig, hostnameLengthLimit) machinePools = append(machinePools, machinePool) @@ -98,6 +177,17 @@ func CreateAllMachinePools(nodeRoles []NodeRoles, machineConfig *v1.SteveAPIObje machinePools = append(machinePools, machinePool) } } - return machinePools } + +// ScaleMachinePoolNodes is a helper method that will scale the machine pool to the desired quantity. +func ScaleMachinePoolNodes(client *rancher.Client, cluster *v1.SteveAPIObject, nodeRoles NodeRoles) (*v1.SteveAPIObject, error) { + scaledClusterResp, err := updateMachinePoolQuantity(client, cluster, nodeRoles) + if err != nil { + return nil, err + } + + logrus.Infof("Machine pool has been scaled!") + + return scaledClusterResp, nil +} diff --git a/tests/framework/extensions/nodes/node_status.go b/tests/framework/extensions/nodes/node_status.go index a4ed8208222..478be1c0b82 100644 --- a/tests/framework/extensions/nodes/node_status.go +++ b/tests/framework/extensions/nodes/node_status.go @@ -32,7 +32,7 @@ func AllManagementNodeReady(client *rancher.Client, ClusterID string) error { }, }) if err != nil { - return false, err + return false, nil } for _, node := range nodes.Data { @@ -88,6 +88,29 @@ func AllMachineReady(client *rancher.Client, clusterID string) error { return err } +// AllNodeDeleted is a helper method that will loop and check if the node is deleted in the cluster. +func AllNodeDeleted(client *rancher.Client, ClusterID string) error { + err := wait.Poll(500*time.Millisecond, 5*time.Minute, func() (bool, error) { + nodes, err := client.Management.Node.ListAll(&types.ListOpts{ + Filters: map[string]interface{}{ + "clusterId": ClusterID, + }, + }) + if err != nil { + return false, err + } + + if len(nodes.Data) == 0 { + logrus.Infof("All nodes in the cluster are deleted!") + return true, nil + } + + return false, nil + }) + + return err +} + // IsNodeReplaced is a helper method that will loop and check if the node matching its type is replaced in a cluster. // It will return an error if the node is not replaced after set amount of time. func IsNodeReplaced(client *rancher.Client, oldMachineID string, clusterID string, numOfNodesBeforeDeletion int, isEtcd bool, isControlPlane bool, isWorker bool) (bool, error) { diff --git a/tests/framework/extensions/provisioning/verify.go b/tests/framework/extensions/provisioning/verify.go index 4c1e46356c3..4526a40167d 100644 --- a/tests/framework/extensions/provisioning/verify.go +++ b/tests/framework/extensions/provisioning/verify.go @@ -9,6 +9,7 @@ import ( "testing" "time" + apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" provv1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" "github.com/rancher/rancher/tests/framework/clients/rancher" @@ -27,6 +28,7 @@ import ( "github.com/rancher/rancher/tests/framework/pkg/wait" "github.com/rancher/rancher/tests/v2prov/defaults" wranglername "github.com/rancher/wrangler/pkg/name" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -169,6 +171,71 @@ func VerifyCluster(t *testing.T, client *rancher.Client, clustersConfig *cluster } } +// VerifyDeleteRKE1Cluster validates that a rke1 cluster and its resources are deleted. 
+func VerifyDeleteRKE1Cluster(t *testing.T, client *rancher.Client, clusterID string) { + cluster, err := client.Management.Cluster.ByID(clusterID) + require.NoError(t, err) + + adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) + require.NoError(t, err) + + watchInterface, err := adminClient.GetManagementWatchInterface(management.ClusterType, metav1.ListOptions{ + FieldSelector: "metadata.name=" + clusterID, + TimeoutSeconds: &defaults.WatchTimeoutSeconds, + }) + require.NoError(t, err) + + err = wait.WatchWait(watchInterface, func(event watch.Event) (ready bool, err error) { + if event.Type == watch.Error { + return false, fmt.Errorf("error: unable to delete cluster %s", cluster.Name) + } else if event.Type == watch.Deleted { + logrus.Infof("Cluster %s deleted!", cluster.Name) + return true, nil + } + return false, nil + }) + require.NoError(t, err) + + err = nodestat.AllNodeDeleted(client, clusterID) + require.NoError(t, err) +} + +// VerifyDeleteRKE2K3SCluster validates that a non-rke1 cluster and its resources are deleted. +func VerifyDeleteRKE2K3SCluster(t *testing.T, client *rancher.Client, clusterID string) { + cluster, err := client.Steve.SteveType("provisioning.cattle.io.cluster").ByID(clusterID) + require.NoError(t, err) + + adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) + require.NoError(t, err) + + provKubeClient, err := adminClient.GetKubeAPIProvisioningClient() + require.NoError(t, err) + + watchInterface, err := provKubeClient.Clusters(namespace).Watch(context.TODO(), metav1.ListOptions{ + FieldSelector: "metadata.name=" + cluster.Name, + TimeoutSeconds: &defaults.WatchTimeoutSeconds, + }) + require.NoError(t, err) + + err = wait.WatchWait(watchInterface, func(event watch.Event) (ready bool, err error) { + cluster := event.Object.(*apisV1.Cluster) + if event.Type == watch.Error { + return false, fmt.Errorf("error: unable to delete cluster %s", cluster.ObjectMeta.Name) + } else if event.Type == watch.Deleted { + logrus.Infof("Cluster %s deleted!", cluster.ObjectMeta.Name) + return true, nil + } else if cluster == nil { + logrus.Infof("Cluster %s deleted!", clusterID) + return true, nil + } + return false, nil + }) + require.NoError(t, err) + + err = nodestat.AllNodeDeleted(client, clusterID) + require.NoError(t, err) +} + // CertRotationCompleteCheckFunc returns a watch check function that checks if the certificate rotation is complete func CertRotationCompleteCheckFunc(generation int64) wait.WatchCheckFunc { return func(event watch.Event) (bool, error) { diff --git a/tests/framework/extensions/rke1/nodepools/nodepools.go b/tests/framework/extensions/rke1/nodepools/nodepools.go index 0e70a6c2140..404b3a3c042 100644 --- a/tests/framework/extensions/rke1/nodepools/nodepools.go +++ b/tests/framework/extensions/rke1/nodepools/nodepools.go @@ -2,11 +2,18 @@ package rke1 import ( "strconv" + "time" + "github.com/rancher/norman/types" "github.com/rancher/rancher/tests/framework/clients/rancher" management "github.com/rancher/rancher/tests/framework/clients/rancher/generated/management/v3" nodestat "github.com/rancher/rancher/tests/framework/extensions/nodes" "github.com/sirupsen/logrus" + kwait "k8s.io/apimachinery/pkg/util/wait" +) + +const ( + active = "active" ) type NodeRoles struct { @@ -57,61 +64,78 @@ func NodePoolSetup(client *rancher.Client, nodeRoles []NodeRoles, ClusterID, Nod return &nodePoolConfig, nil } -// ScaleWorkerNodePool is a helper method that will add a worker node pool 
to the existing RKE1 cluster. Once done, it will scale -// the worker node pool to add a worker node, scale it back down to remove the worker node, and then delete the worker node pool. -func ScaleWorkerNodePool(client *rancher.Client, nodeRoles []NodeRoles, ClusterID, NodeTemplateID string) error { - nodePoolConfig := management.NodePool{ - ClusterID: ClusterID, - ControlPlane: false, - DeleteNotReadyAfterSecs: 0, - Etcd: false, - HostnamePrefix: "auto-rke1-scale-" + ClusterID, - NodeTemplateID: NodeTemplateID, - Quantity: 1, - Worker: true, - } - - logrus.Infof("Creating new worker node pool...") - nodePool, err := client.Management.NodePool.Create(&nodePoolConfig) +// MatchRKE1NodeRoles is a helper method that will return the desired node in the cluster, based on the node role. +func MatchRKE1NodeRoles(client *rancher.Client, cluster *management.Cluster, nodeRoles NodeRoles) (*management.Node, error) { + nodes, err := client.Management.Node.ListAll(&types.ListOpts{ + Filters: map[string]interface{}{ + "clusterId": cluster.ID, + }, + }) if err != nil { - return err + return nil, err } - if nodestat.AllManagementNodeReady(client, ClusterID) != nil { - return err + for _, node := range nodes.Data { + if nodeRoles.ControlPlane != node.ControlPlane { + continue + } + if nodeRoles.Etcd != node.Etcd { + continue + } + if nodeRoles.Worker != node.Worker { + continue + } + + return &node, nil } - logrus.Infof("New node pool is ready!") - nodePoolConfig.Quantity = 2 + return nil, nil +} - logrus.Infof("Scaling node pool to 2 worker nodes...") - updatedPool, err := client.Management.NodePool.Update(nodePool, &nodePoolConfig) +// updateNodePoolQuantity is a helper method that will update the node pool with the desired quantity. +func updateNodePoolQuantity(client *rancher.Client, cluster *management.Cluster, node *management.Node, nodeRoles NodeRoles) (*management.NodePool, error) { + updatedNodePool, err := client.Management.NodePool.ByID(node.NodePoolID) if err != nil { - return err + return nil, err } - if nodestat.AllManagementNodeReady(client, ClusterID) != nil { - return err + updatedNodePool.Quantity += nodeRoles.Quantity + + logrus.Infof("Scaling the node pool to %v total nodes", updatedNodePool.Quantity) + _, err = client.Management.NodePool.Update(updatedNodePool, &updatedNodePool) + if err != nil { + return nil, err } - logrus.Infof("Node pool is scaled to 2 worker nodes!") - nodePoolConfig.Quantity = 1 + err = kwait.Poll(500*time.Millisecond, 10*time.Minute, func() (done bool, err error) { + clusterResp, err := client.Management.Cluster.ByID(cluster.ID) + if err != nil { + return false, err + } - logrus.Infof("Scaling node pool back to 1 worker node...") - _, err = client.Management.NodePool.Update(updatedPool, &nodePoolConfig) + if clusterResp.State == active && nodestat.AllManagementNodeReady(client, clusterResp.ID) == nil { + logrus.Infof("Node pool is scaled!") + return true, nil + } else { + return false, nil + } + }) if err != nil { - return err + return nil, err } - logrus.Infof("Node pool is scaled back to 1 worker node!") + return updatedNodePool, nil +} - logrus.Infof("Deleting node pool...") - err = client.Management.NodePool.Delete(nodePool) +// ScaleNodePoolNodes is a helper method that will scale an existing node pool in the RKE1 cluster to the desired quantity, based on +// the nodeRoles configuration.
+func ScaleNodePoolNodes(client *rancher.Client, cluster *management.Cluster, node *management.Node, nodeRoles NodeRoles) (*management.NodePool, error) { + updatedNodePool, err := updateNodePoolQuantity(client, cluster, node, nodeRoles) if err != nil { - return err + return nil, err } - logrus.Infof("Node pool deleted!") + logrus.Infof("Node pool has been scaled!") - return nil + return updatedNodePool, nil } diff --git a/tests/framework/extensions/scalinginput/config.go b/tests/framework/extensions/scalinginput/config.go new file mode 100644 index 00000000000..cf0b561b255 --- /dev/null +++ b/tests/framework/extensions/scalinginput/config.go @@ -0,0 +1,15 @@ +package scalinginput + +import ( + "github.com/rancher/rancher/tests/framework/extensions/machinepools" + nodepools "github.com/rancher/rancher/tests/framework/extensions/rke1/nodepools" +) + +const ( + ConfigurationFileKey = "scalingInput" +) + +type Config struct { + NodesAndRoles *machinepools.NodeRoles `json:"nodesAndRoles" yaml:"nodesAndRoles"` + NodesAndRolesRKE1 *nodepools.NodeRoles `json:"nodesAndRolesRKE1" yaml:"nodesAndRolesRKE1"` +} diff --git a/tests/framework/extensions/workloads/pods/verify.go b/tests/framework/extensions/workloads/pods/verify.go new file mode 100644 index 00000000000..ff71f43d5d3 --- /dev/null +++ b/tests/framework/extensions/workloads/pods/verify.go @@ -0,0 +1,57 @@ +package pods + +import ( + "testing" + "time" + + provv1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" + projectv3 "github.com/rancher/rancher/pkg/client/generated/project/v3" + "github.com/rancher/rancher/tests/framework/clients/rancher" + steveV1 "github.com/rancher/rancher/tests/framework/clients/rancher/v1" + v1 "github.com/rancher/rancher/tests/framework/clients/rancher/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/util/wait" +) + +const ( + DaemonsetSteveType = "apps.daemonset" +) + +// VerifyReadyDaemonsetPods tries to poll the Steve API to verify the expected number of daemonset pods are in the Ready +// state +func VerifyReadyDaemonsetPods(t *testing.T, client *rancher.Client, cluster *v1.SteveAPIObject) { + status := &provv1.ClusterStatus{} + err := steveV1.ConvertToK8sType(cluster.Status, status) + require.NoError(t, err) + + daemonsetequals := false + + err = wait.Poll(500*time.Millisecond, 5*time.Minute, func() (dameonsetequals bool, err error) { + daemonsets, err := client.Steve.SteveType(DaemonsetSteveType).ByID(status.ClusterName) + require.NoError(t, err) + + daemonsetsStatusType := &projectv3.DaemonSetStatus{} + err = v1.ConvertToK8sType(daemonsets.Status, daemonsetsStatusType) + require.NoError(t, err) + + if daemonsetsStatusType.DesiredNumberScheduled == daemonsetsStatusType.NumberAvailable { + return true, nil + } + return false, nil + }) + require.NoError(t, err) + + daemonsets, err := client.Steve.SteveType(DaemonsetSteveType).ByID(status.ClusterName) + require.NoError(t, err) + + daemonsetsStatusType := &projectv3.DaemonSetStatus{} + err = v1.ConvertToK8sType(daemonsets.Status, daemonsetsStatusType) + require.NoError(t, err) + + if daemonsetsStatusType.DesiredNumberScheduled == daemonsetsStatusType.NumberAvailable { + daemonsetequals = true + } + + assert.Truef(t, daemonsetequals, "Ready Daemonset Pods didn't match expected") +} diff --git a/tests/v2/validation/deleting/README.md b/tests/v2/validation/deleting/README.md new file mode 100644 index 00000000000..cc2ccedb661 --- /dev/null +++ b/tests/v2/validation/deleting/README.md @@ 
-0,0 +1,15 @@ +# Deleting + +## Getting Started +You can find specific tests by checking the test file you plan to run. An example is `-run ^TestRKE2ClusterDeleteTestSuite/TestDeletingRKE2Cluster$` +In your config file, set the following: +```json +"rancher": { + "host": "rancher_server_address", + "adminToken": "rancher_admin_token", + "userToken": "your_rancher_user_token", + "clusterName": "cluster_to_run_tests_on", + "insecure": true/optional, + "cleanup": false/optional, +} +``` diff --git a/tests/v2/validation/deleting/delete_cluster_k3s_test.go b/tests/v2/validation/deleting/delete_cluster_k3s_test.go new file mode 100644 index 00000000000..4a0ae734a3c --- /dev/null +++ b/tests/v2/validation/deleting/delete_cluster_k3s_test.go @@ -0,0 +1,46 @@ +package deleting + +import ( + "testing" + + "github.com/rancher/rancher/tests/framework/clients/rancher" + "github.com/rancher/rancher/tests/framework/extensions/clusters" + "github.com/rancher/rancher/tests/framework/extensions/provisioning" + "github.com/rancher/rancher/tests/framework/pkg/session" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type K3SClusterDeleteTestSuite struct { + suite.Suite + client *rancher.Client + session *session.Session +} + +func (c *K3SClusterDeleteTestSuite) TearDownSuite() { + c.session.Cleanup() +} + +func (c *K3SClusterDeleteTestSuite) SetupSuite() { + testSession := session.NewSession() + c.session = testSession + + client, err := rancher.NewClient("", testSession) + require.NoError(c.T(), err) + + c.client = client +} + +func (c *K3SClusterDeleteTestSuite) TestDeletingK3SCluster() { + clusterID, err := clusters.GetV1ProvisioningClusterByName(c.client, c.client.RancherConfig.ClusterName) + require.NoError(c.T(), err) + + clusters.DeleteK3SRKE2Cluster(c.client, clusterID) + provisioning.VerifyDeleteRKE2K3SCluster(c.T(), c.client, clusterID) +} + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestK3SClusterDeleteTestSuite(t *testing.T) { + suite.Run(t, new(K3SClusterDeleteTestSuite)) +} diff --git a/tests/v2/validation/deleting/delete_cluster_rke1_test.go b/tests/v2/validation/deleting/delete_cluster_rke1_test.go new file mode 100644 index 00000000000..3ebcc6974d3 --- /dev/null +++ b/tests/v2/validation/deleting/delete_cluster_rke1_test.go @@ -0,0 +1,46 @@ +package deleting + +import ( + "testing" + + "github.com/rancher/rancher/tests/framework/clients/rancher" + "github.com/rancher/rancher/tests/framework/extensions/clusters" + "github.com/rancher/rancher/tests/framework/extensions/provisioning" + "github.com/rancher/rancher/tests/framework/pkg/session" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type RKE1ClusterDeleteTestSuite struct { + suite.Suite + client *rancher.Client + session *session.Session +} + +func (c *RKE1ClusterDeleteTestSuite) TearDownSuite() { + c.session.Cleanup() +} + +func (c *RKE1ClusterDeleteTestSuite) SetupSuite() { + testSession := session.NewSession() + c.session = testSession + + client, err := rancher.NewClient("", testSession) + require.NoError(c.T(), err) + + c.client = client +} + +func (c *RKE1ClusterDeleteTestSuite) TestDeletingRKE1Cluster() { + clusterID, err := clusters.GetClusterIDByName(c.client, c.client.RancherConfig.ClusterName) + require.NoError(c.T(), err) + + clusters.DeleteRKE1Cluster(c.client, clusterID) + provisioning.VerifyDeleteRKE1Cluster(c.T(), c.client, clusterID) +} + +// In order for 'go test' to 
run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestRKE1ClusterDeleteTestSuite(t *testing.T) { + suite.Run(t, new(RKE1ClusterDeleteTestSuite)) +} diff --git a/tests/v2/validation/deleting/delete_cluster_rke2_test.go b/tests/v2/validation/deleting/delete_cluster_rke2_test.go new file mode 100644 index 00000000000..ede918e7211 --- /dev/null +++ b/tests/v2/validation/deleting/delete_cluster_rke2_test.go @@ -0,0 +1,46 @@ +package deleting + +import ( + "testing" + + "github.com/rancher/rancher/tests/framework/clients/rancher" + "github.com/rancher/rancher/tests/framework/extensions/clusters" + "github.com/rancher/rancher/tests/framework/extensions/provisioning" + "github.com/rancher/rancher/tests/framework/pkg/session" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type RKE2ClusterDeleteTestSuite struct { + suite.Suite + client *rancher.Client + session *session.Session +} + +func (c *RKE2ClusterDeleteTestSuite) TearDownSuite() { + c.session.Cleanup() +} + +func (c *RKE2ClusterDeleteTestSuite) SetupSuite() { + testSession := session.NewSession() + c.session = testSession + + client, err := rancher.NewClient("", testSession) + require.NoError(c.T(), err) + + c.client = client +} + +func (c *RKE2ClusterDeleteTestSuite) TestDeletingRKE2Cluster() { + clusterID, err := clusters.GetV1ProvisioningClusterByName(c.client, c.client.RancherConfig.ClusterName) + require.NoError(c.T(), err) + + clusters.DeleteK3SRKE2Cluster(c.client, clusterID) + provisioning.VerifyDeleteRKE2K3SCluster(c.T(), c.client, clusterID) +} + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestRKE2ClusterDeleteTestSuite(t *testing.T) { + suite.Run(t, new(RKE2ClusterDeleteTestSuite)) +} diff --git a/tests/v2/validation/scaling/README.md b/tests/v2/validation/nodescaling/README.md similarity index 100% rename from tests/v2/validation/scaling/README.md rename to tests/v2/validation/nodescaling/README.md diff --git a/tests/v2/validation/scaling/replace.go b/tests/v2/validation/nodescaling/replace.go similarity index 99% rename from tests/v2/validation/scaling/replace.go rename to tests/v2/validation/nodescaling/replace.go index 9a5e3069297..afae325b4ed 100644 --- a/tests/v2/validation/scaling/replace.go +++ b/tests/v2/validation/nodescaling/replace.go @@ -1,4 +1,4 @@ -package scaling +package nodescaling import ( "testing" diff --git a/tests/v2/validation/scaling/scale_replace_rke1_test.go b/tests/v2/validation/nodescaling/scale_replace_rke1_test.go similarity index 98% rename from tests/v2/validation/scaling/scale_replace_rke1_test.go rename to tests/v2/validation/nodescaling/scale_replace_rke1_test.go index ddd8cac29b0..28b8208deed 100644 --- a/tests/v2/validation/scaling/scale_replace_rke1_test.go +++ b/tests/v2/validation/nodescaling/scale_replace_rke1_test.go @@ -1,4 +1,4 @@ -package scaling +package nodescaling import ( "testing" diff --git a/tests/v2/validation/scaling/scale_replace_test.go b/tests/v2/validation/nodescaling/scale_replace_test.go similarity index 98% rename from tests/v2/validation/scaling/scale_replace_test.go rename to tests/v2/validation/nodescaling/scale_replace_test.go index bbfb976afae..24b3ce63691 100644 --- a/tests/v2/validation/scaling/scale_replace_test.go +++ b/tests/v2/validation/nodescaling/scale_replace_test.go @@ -1,4 +1,4 @@ -package scaling +package nodescaling import ( "testing" diff --git 
a/tests/v2/validation/nodescaling/scaling_node_driver_k3s_test.go b/tests/v2/validation/nodescaling/scaling_node_driver_k3s_test.go new file mode 100644 index 00000000000..ec845b1cb5c --- /dev/null +++ b/tests/v2/validation/nodescaling/scaling_node_driver_k3s_test.go @@ -0,0 +1,97 @@ +package nodescaling + +import ( + "testing" + + "github.com/rancher/rancher/tests/framework/clients/rancher" + "github.com/rancher/rancher/tests/framework/extensions/clusters" + "github.com/rancher/rancher/tests/framework/extensions/machinepools" + "github.com/rancher/rancher/tests/framework/extensions/scalinginput" + "github.com/rancher/rancher/tests/framework/pkg/config" + "github.com/rancher/rancher/tests/framework/pkg/session" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type K3SNodeScalingTestSuite struct { + suite.Suite + client *rancher.Client + session *session.Session + scalingConfig *scalinginput.Config +} + +func (s *K3SNodeScalingTestSuite) TearDownSuite() { + s.session.Cleanup() +} + +func (s *K3SNodeScalingTestSuite) SetupSuite() { + testSession := session.NewSession() + s.session = testSession + + s.scalingConfig = new(scalinginput.Config) + config.LoadConfig(scalinginput.ConfigurationFileKey, s.scalingConfig) + + client, err := rancher.NewClient("", testSession) + require.NoError(s.T(), err) + + s.client = client +} + +func (s *K3SNodeScalingTestSuite) TestScalingK3SNodePools() { + nodeRolesEtcd := machinepools.NodeRoles{ + Etcd: true, + Quantity: 1, + } + + nodeRolesControlPlane := machinepools.NodeRoles{ + ControlPlane: true, + Quantity: 1, + } + + nodeRolesWorker := machinepools.NodeRoles{ + Worker: true, + Quantity: 1, + } + + nodeRolesTwoWorkers := machinepools.NodeRoles{ + Worker: true, + Quantity: 2, + } + + tests := []struct { + name string + nodeRoles machinepools.NodeRoles + client *rancher.Client + }{ + {"Scaling etcd node machine pool by 1", nodeRolesEtcd, s.client}, + {"Scaling control plane machine pool by 1", nodeRolesControlPlane, s.client}, + {"Scaling worker node machine pool by 1", nodeRolesWorker, s.client}, + {"Scaling worker node machine pool by 2", nodeRolesTwoWorkers, s.client}, + } + + for _, tt := range tests { + clusterID, err := clusters.GetV1ProvisioningClusterByName(s.client, s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + + s.Run(tt.name, func() { + ScalingRKE2K3SNodePools(s.T(), s.client, clusterID, tt.nodeRoles) + }) + } +} + +func (s *K3SNodeScalingTestSuite) TestScalingK3SNodePoolsDynamicInput() { + if s.scalingConfig.NodesAndRoles == nil { + s.T().Skip() + } + + clusterID, err := clusters.GetV1ProvisioningClusterByName(s.client, s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + + ScalingRKE2K3SNodePools(s.T(), s.client, clusterID, *s.scalingConfig.NodesAndRoles) +} + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestK3SNodeScalingTestSuite(t *testing.T) { + suite.Run(t, new(K3SNodeScalingTestSuite)) +} diff --git a/tests/v2/validation/nodescaling/scaling_node_driver_rke1_test.go b/tests/v2/validation/nodescaling/scaling_node_driver_rke1_test.go new file mode 100644 index 00000000000..dfdfaa8b1cb --- /dev/null +++ b/tests/v2/validation/nodescaling/scaling_node_driver_rke1_test.go @@ -0,0 +1,93 @@ +package nodescaling + +import ( + "testing" + + "github.com/rancher/rancher/tests/framework/clients/rancher" + "github.com/rancher/rancher/tests/framework/extensions/clusters" + nodepools 
"github.com/rancher/rancher/tests/framework/extensions/rke1/nodepools" + "github.com/rancher/rancher/tests/framework/extensions/scalinginput" + "github.com/rancher/rancher/tests/framework/pkg/config" + "github.com/rancher/rancher/tests/framework/pkg/session" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type RKE1NodeScalingTestSuite struct { + suite.Suite + client *rancher.Client + session *session.Session + scalingConfig *scalinginput.Config +} + +func (s *RKE1NodeScalingTestSuite) TearDownSuite() { + s.session.Cleanup() +} + +func (s *RKE1NodeScalingTestSuite) SetupSuite() { + testSession := session.NewSession() + s.session = testSession + + s.scalingConfig = new(scalinginput.Config) + config.LoadConfig(scalinginput.ConfigurationFileKey, s.scalingConfig) + + client, err := rancher.NewClient("", testSession) + require.NoError(s.T(), err) + + s.client = client +} + +func (s *RKE1NodeScalingTestSuite) TestScalingRKE1NodePools() { + nodeRolesEtcd := nodepools.NodeRoles{ + Etcd: true, + Quantity: 1, + } + + nodeRolesControlPlane := nodepools.NodeRoles{ + ControlPlane: true, + Quantity: 1, + } + + nodeRolesWorker := nodepools.NodeRoles{ + Worker: true, + Quantity: 1, + } + + nodeRolesTwoWorkers := nodepools.NodeRoles{ + Worker: true, + Quantity: 2, + } + + tests := []struct { + name string + nodeRoles nodepools.NodeRoles + client *rancher.Client + }{ + {"Scaling control plane machine pool by 1", nodeRolesEtcd, s.client}, + {"Scaling etcd node machine pool by 1", nodeRolesControlPlane, s.client}, + {"Scaling worker node machine pool by 1", nodeRolesWorker, s.client}, + {"Scaling worker node machine pool by 2", nodeRolesTwoWorkers, s.client}, + } + + for _, tt := range tests { + clusterID, err := clusters.GetClusterIDByName(s.client, s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + + s.Run(tt.name, func() { + ScalingRKE1NodePools(s.T(), s.client, clusterID, tt.nodeRoles) + }) + } +} + +func (s *RKE1NodeScalingTestSuite) TestScalingRKE1NodePoolsDynamicInput() { + clusterID, err := clusters.GetClusterIDByName(s.client, s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + + ScalingRKE1NodePools(s.T(), s.client, clusterID, *s.scalingConfig.NodesAndRolesRKE1) +} + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestRKE1NodeScalingTestSuite(t *testing.T) { + suite.Run(t, new(RKE1NodeScalingTestSuite)) +} diff --git a/tests/v2/validation/nodescaling/scaling_node_driver_rke2_test.go b/tests/v2/validation/nodescaling/scaling_node_driver_rke2_test.go new file mode 100644 index 00000000000..89ad8b3e1ae --- /dev/null +++ b/tests/v2/validation/nodescaling/scaling_node_driver_rke2_test.go @@ -0,0 +1,97 @@ +package nodescaling + +import ( + "testing" + + "github.com/rancher/rancher/tests/framework/clients/rancher" + "github.com/rancher/rancher/tests/framework/extensions/clusters" + "github.com/rancher/rancher/tests/framework/extensions/machinepools" + "github.com/rancher/rancher/tests/framework/extensions/scalinginput" + "github.com/rancher/rancher/tests/framework/pkg/config" + "github.com/rancher/rancher/tests/framework/pkg/session" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type RKE2NodeScalingTestSuite struct { + suite.Suite + client *rancher.Client + session *session.Session + scalingConfig *scalinginput.Config +} + +func (s *RKE2NodeScalingTestSuite) TearDownSuite() { + s.session.Cleanup() +} + +func (s 
*RKE2NodeScalingTestSuite) SetupSuite() { + testSession := session.NewSession() + s.session = testSession + + s.scalingConfig = new(scalinginput.Config) + config.LoadConfig(scalinginput.ConfigurationFileKey, s.scalingConfig) + + client, err := rancher.NewClient("", testSession) + require.NoError(s.T(), err) + + s.client = client +} + +func (s *RKE2NodeScalingTestSuite) TestScalingRKE2NodePools() { + nodeRolesEtcd := machinepools.NodeRoles{ + Etcd: true, + Quantity: 1, + } + + nodeRolesControlPlane := machinepools.NodeRoles{ + ControlPlane: true, + Quantity: 1, + } + + nodeRolesWorker := machinepools.NodeRoles{ + Worker: true, + Quantity: 1, + } + + nodeRolesTwoWorkers := machinepools.NodeRoles{ + Worker: true, + Quantity: 2, + } + + tests := []struct { + name string + nodeRoles machinepools.NodeRoles + client *rancher.Client + }{ + {"Scaling etcd node machine pool by 1", nodeRolesEtcd, s.client}, + {"Scaling control plane machine pool by 1", nodeRolesControlPlane, s.client}, + {"Scaling worker node machine pool by 1", nodeRolesWorker, s.client}, + {"Scaling worker node machine pool by 2", nodeRolesTwoWorkers, s.client}, + } + + for _, tt := range tests { + clusterID, err := clusters.GetV1ProvisioningClusterByName(s.client, s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + + s.Run(tt.name, func() { + ScalingRKE2K3SNodePools(s.T(), s.client, clusterID, tt.nodeRoles) + }) + } +} + +func (s *RKE2NodeScalingTestSuite) TestScalingRKE2NodePoolsDynamicInput() { + if s.scalingConfig.NodesAndRoles == nil { + s.T().Skip() + } + + clusterID, err := clusters.GetV1ProvisioningClusterByName(s.client, s.client.RancherConfig.ClusterName) + require.NoError(s.T(), err) + + ScalingRKE2K3SNodePools(s.T(), s.client, clusterID, *s.scalingConfig.NodesAndRoles) +} + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestRKE2NodeScalingTestSuite(t *testing.T) { + suite.Run(t, new(RKE2NodeScalingTestSuite)) +} diff --git a/tests/v2/validation/nodescaling/scaling_nodepools.go b/tests/v2/validation/nodescaling/scaling_nodepools.go new file mode 100644 index 00000000000..06cdad9d026 --- /dev/null +++ b/tests/v2/validation/nodescaling/scaling_nodepools.go @@ -0,0 +1,47 @@ +package nodescaling + +import ( + "testing" + + "github.com/rancher/rancher/tests/framework/clients/rancher" + "github.com/rancher/rancher/tests/framework/extensions/machinepools" + rke1 "github.com/rancher/rancher/tests/framework/extensions/rke1/nodepools" + "github.com/rancher/rancher/tests/framework/extensions/workloads/pods" + "github.com/stretchr/testify/require" +) + +const ( + ProvisioningSteveResourceType = "provisioning.cattle.io.cluster" + defaultNamespace = "fleet-default" +) + +func ScalingRKE2K3SNodePools(t *testing.T, client *rancher.Client, clusterID string, nodeRoles machinepools.NodeRoles) { + cluster, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(clusterID) + require.NoError(t, err) + + clusterResp, err := machinepools.ScaleMachinePoolNodes(client, cluster, nodeRoles) + require.NoError(t, err) + + pods.VerifyReadyDaemonsetPods(t, client, cluster) + + nodeRoles.Quantity = -nodeRoles.Quantity + scaledClusterResp, err := machinepools.ScaleMachinePoolNodes(client, clusterResp, nodeRoles) + require.NoError(t, err) + + pods.VerifyReadyDaemonsetPods(t, client, scaledClusterResp) +} + +func ScalingRKE1NodePools(t *testing.T, client *rancher.Client, clusterID string, nodeRoles rke1.NodeRoles) { + cluster, err := 
client.Management.Cluster.ByID(clusterID) + require.NoError(t, err) + + node, err := rke1.MatchRKE1NodeRoles(client, cluster, nodeRoles) + require.NoError(t, err) + + _, err = rke1.ScaleNodePoolNodes(client, cluster, node, nodeRoles) + require.NoError(t, err) + + nodeRoles.Quantity = -nodeRoles.Quantity + _, err = rke1.ScaleNodePoolNodes(client, cluster, node, nodeRoles) + require.NoError(t, err) +} diff --git a/tests/v2/validation/provisioning/airgap/k3s_custom_cluster_test.go b/tests/v2/validation/provisioning/airgap/k3s_custom_cluster_test.go index 1f20a85e181..f13a138d3c5 100644 --- a/tests/v2/validation/provisioning/airgap/k3s_custom_cluster_test.go +++ b/tests/v2/validation/provisioning/airgap/k3s_custom_cluster_test.go @@ -111,7 +111,7 @@ func (a *AirGapK3SCustomClusterTestSuite) TestProvisioningUpgradeK3SCustomCluste clusterObject, err := provisioning.CreateProvisioningAirgapCustomCluster(a.client, testConfig, a.corralPackage) require.NoError(a.T(), err) - provisioning.VerifyCluster(a.T(), a.client, clusterObject) + provisioning.VerifyCluster(a.T(), a.client, testConfig, clusterObject) upgradedCluster, err := provisioning.UpgradeClusterK8sVersion(a.client, &clusterObject.Name, &k3sVersions[numOfK3SVersions-1]) require.NoError(a.T(), err) diff --git a/tests/v2/validation/provisioning/airgap/rke2_custom_cluster_test.go b/tests/v2/validation/provisioning/airgap/rke2_custom_cluster_test.go index f3996ab7f50..f7432bfa970 100644 --- a/tests/v2/validation/provisioning/airgap/rke2_custom_cluster_test.go +++ b/tests/v2/validation/provisioning/airgap/rke2_custom_cluster_test.go @@ -113,7 +113,7 @@ func (a *AirGapRKE2CustomClusterTestSuite) TestProvisioningUpgradeRKE2CustomClus clusterObject, err := provisioning.CreateProvisioningAirgapCustomCluster(a.client, testConfig, a.corralPackage) require.NoError(a.T(), err) - provisioning.VerifyCluster(a.T(), a.client, clusterObject) + provisioning.VerifyCluster(a.T(), a.client, testConfig, clusterObject) upgradedCluster, err := provisioning.UpgradeClusterK8sVersion(a.client, &clusterObject.Name, &rke2Versions[numOfRKE2Versions-1]) require.NoError(a.T(), err) diff --git a/tests/v2/validation/provisioning/hostnametruncation/hostname_truncation_test.go b/tests/v2/validation/provisioning/hostnametruncation/hostname_truncation_test.go index f31635b42e2..3920fdcc58c 100644 --- a/tests/v2/validation/provisioning/hostnametruncation/hostname_truncation_test.go +++ b/tests/v2/validation/provisioning/hostnametruncation/hostname_truncation_test.go @@ -101,7 +101,7 @@ func (r *HostnameTruncationTestSuite) TestProvisioningRKE2ClusterTruncation() { clusterObject, err := provisioning.CreateProvisioningCluster(r.client, *rke2Provider, testConfig, hostnamePools) require.NoError(r.T(), err) - provisioning.VerifyCluster(r.T(), r.client, clusterObject) + provisioning.VerifyCluster(r.T(), r.client, testConfig, clusterObject) provisioning.VerifyHostnameLength(r.T(), r.client, clusterObject) }) } diff --git a/tests/v2/validation/provisioning/registries/registry_test.go b/tests/v2/validation/provisioning/registries/registry_test.go index e37d516e6aa..5ffec65ce8a 100644 --- a/tests/v2/validation/provisioning/registries/registry_test.go +++ b/tests/v2/validation/provisioning/registries/registry_test.go @@ -284,7 +284,7 @@ func (rt *RegistryTestSuite) TestRegistriesK3S() { clusterObject, err := provisioning.CreateProvisioningCluster(subClient, *k3sProvider, testConfig, nil) require.NoError(rt.T(), err) - provisioning.VerifyCluster(rt.T(), subClient, clusterObject) + 
provisioning.VerifyCluster(rt.T(), subClient, testConfig, clusterObject) }) } @@ -299,7 +299,7 @@ func (rt *RegistryTestSuite) TestRegistriesK3S() { clusterObject, err := provisioning.CreateProvisioningCluster(subClient, *k3sProvider, testConfig, nil) require.NoError(rt.T(), err) - provisioning.VerifyCluster(rt.T(), subClient, clusterObject) + provisioning.VerifyCluster(rt.T(), subClient, testConfig, clusterObject) } podResults, podErrors := pods.StatusPods(rt.client, rt.clusterLocalID) @@ -334,7 +334,7 @@ func (rt *RegistryTestSuite) TestRegistriesRKE2() { clusterObject, err := provisioning.CreateProvisioningCluster(subClient, *rke2Provider, testConfig, nil) require.NoError(rt.T(), err) - provisioning.VerifyCluster(rt.T(), subClient, clusterObject) + provisioning.VerifyCluster(rt.T(), subClient, testConfig, clusterObject) }) } if rt.rancherUsesRegistry { @@ -348,7 +348,7 @@ func (rt *RegistryTestSuite) TestRegistriesRKE2() { clusterObject, err := provisioning.CreateProvisioningCluster(subClient, *rke2Provider, testConfig, nil) require.NoError(rt.T(), err) - provisioning.VerifyCluster(rt.T(), subClient, clusterObject) + provisioning.VerifyCluster(rt.T(), subClient, testConfig, clusterObject) } podResults, podErrors := pods.StatusPods(rt.client, rt.clusterLocalID) diff --git a/tests/v2/validation/provisioning/rke1/README.md b/tests/v2/validation/provisioning/rke1/README.md index b46a0862ded..922aea60227 100644 --- a/tests/v2/validation/provisioning/rke1/README.md +++ b/tests/v2/validation/provisioning/rke1/README.md @@ -294,7 +294,7 @@ Please read up on general k8s to get an idea of correct formatting for: ```json "advancedOptions": { - "flusterAgentCustomization": { // change this to fleetAgentCustomization for fleet agent + "clusterAgentCustomization": { // change this to fleetAgentCustomization for fleet agent "appendTolerations": [ { "key": "Testkey", diff --git a/tests/v2/validation/provisioning/rke1/provisioning_node_driver_test.go b/tests/v2/validation/provisioning/rke1/provisioning_node_driver_test.go index ae16e53b7f2..61a14788e41 100644 --- a/tests/v2/validation/provisioning/rke1/provisioning_node_driver_test.go +++ b/tests/v2/validation/provisioning/rke1/provisioning_node_driver_test.go @@ -43,8 +43,7 @@ func (r *RKE1NodeDriverProvisioningTestSuite) SetupSuite() { r.client = client - r.clustersConfig.RKE1KubernetesVersions, err = kubernetesversions.Default( - r.client, clusters.RKE1ClusterType.String(), r.clustersConfig.RKE1KubernetesVersions) + r.clustersConfig.RKE1KubernetesVersions, err = kubernetesversions.Default(r.client, clusters.RKE1ClusterType.String(), r.clustersConfig.RKE1KubernetesVersions) require.NoError(r.T(), err) enabled := true @@ -133,7 +132,6 @@ func (r *RKE1NodeDriverProvisioningTestSuite) TestProvisioningRKE1Cluster() { } func (r *RKE1NodeDriverProvisioningTestSuite) TestProvisioningRKE1ClusterDynamicInput() { - if len(r.clustersConfig.NodesAndRolesRKE1) == 0 { r.T().Skip() }
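Note: the new `TestScaling*NodePoolsDynamicInput` tests read their node roles from the `scalingInput` key registered by the new `scalinginput.Config` (`nodesAndRoles` for RKE2/K3S, `nodesAndRolesRKE1` for RKE1). A minimal sketch of such a config entry is shown below; the inner field names are assumptions that mirror the `machinepools.NodeRoles` and `nodepools.NodeRoles` structs, whose JSON tags are not part of this patch.
```json
"scalingInput": {
  "nodesAndRoles": { // field names assumed from the NodeRoles structs
    "worker": true,
    "quantity": 1
  },
  "nodesAndRolesRKE1": {
    "worker": true,
    "quantity": 1
  }
}
```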