Commit
Merge pull request rancher#41846 from markusewalker/markus/delete-scale-clusters

[v2.7] Add support for deleting clusters and scaling RKE2/K3S machinepools
Israel Gomez authored Sep 18, 2023
2 parents 897aafe + 7059384 commit 94656f7
Showing 25 changed files with 863 additions and 68 deletions.
34 changes: 34 additions & 0 deletions tests/framework/extensions/clusters/clusters.go
@@ -912,6 +912,40 @@ func CreateK3SRKE2Cluster(client *rancher.Client, rke2Cluster *apisV1.Cluster) (
return cluster, nil
}

// DeleteRKE1Cluster is a helper function that takes a rancher client and the RKE1 cluster ID as parameters to delete
// the cluster.
func DeleteRKE1Cluster(client *rancher.Client, clusterID string) error {
cluster, err := client.Management.Cluster.ByID(clusterID)
if err != nil {
return err
}

logrus.Infof("Deleting cluster %s...", cluster.Name)
err = client.Management.Cluster.Delete(cluster)
if err != nil {
return err
}

return nil
}

// DeleteK3SRKE2Cluster is a helper function that takes a rancher client and the non-RKE1 (RKE2/K3S) cluster ID as parameters to delete
// the cluster.
func DeleteK3SRKE2Cluster(client *rancher.Client, clusterID string) error {
cluster, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(clusterID)
if err != nil {
return err
}

logrus.Infof("Deleting cluster %s...", cluster.Name)
err = client.Steve.SteveType(ProvisioningSteveResourceType).Delete(cluster)
if err != nil {
return err
}

return nil
}

// UpdateK3SRKE2Cluster is a helper function that takes a rancher client, the old RKE2/K3S cluster config, and the new RKE2/K3S cluster config as parameters.
func UpdateK3SRKE2Cluster(client *rancher.Client, cluster *v1.SteveAPIObject, updatedCluster *apisV1.Cluster) (*v1.SteveAPIObject, error) {
updateCluster, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(cluster.ID)
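Not part of this commit: a minimal usage sketch of the two delete helpers added above. The cleanupClusters wrapper and the two ID variables are hypothetical, and the example assumes the rancher client and the clusters extension package are imported.

// Sketch only: tear down an RKE1 and an RKE2/K3S cluster with the new helpers.
func cleanupClusters(client *rancher.Client, rke1ClusterID, rke2ClusterID string) error {
	// RKE1 clusters are looked up and deleted through the management (v3) client.
	if err := clusters.DeleteRKE1Cluster(client, rke1ClusterID); err != nil {
		return err
	}

	// RKE2/K3S clusters go through the Steve provisioning.cattle.io.cluster
	// endpoint, so the ID is the Steve object ID ("namespace/name").
	return clusters.DeleteK3SRKE2Cluster(client, rke2ClusterID)
}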
122 changes: 106 additions & 16 deletions tests/framework/extensions/machinepools/machinepools.go
@@ -4,18 +4,94 @@ import (
"fmt"
"strconv"
"strings"
"time"

apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1"
"github.com/rancher/rancher/tests/framework/clients/rancher"
v1 "github.com/rancher/rancher/tests/framework/clients/rancher/v1"
nodestat "github.com/rancher/rancher/tests/framework/extensions/nodes"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
kwait "k8s.io/apimachinery/pkg/util/wait"
)

const (
active = "active"
)

// MatchNodeRolesToMachinePool matches the given nodeRoles against the machinePools and returns the index of the first matching pool along with its quantity; it returns -1 if no pool matches.
func MatchNodeRolesToMachinePool(nodeRoles NodeRoles, machinePools []apisV1.RKEMachinePool) (int, int32) {
count := int32(0)
for index, machinePoolConfig := range machinePools {
if nodeRoles.ControlPlane != machinePoolConfig.ControlPlaneRole {
continue
}
if nodeRoles.Etcd != machinePoolConfig.EtcdRole {
continue
}
if nodeRoles.Worker != machinePoolConfig.WorkerRole {
continue
}

count += *machinePoolConfig.Quantity

return index, count
}

return -1, count
}

// updateMachinePoolQuantity is a helper method that will update the desired machine pool with the latest quantity.
func updateMachinePoolQuantity(client *rancher.Client, cluster *v1.SteveAPIObject, nodeRoles NodeRoles) (*v1.SteveAPIObject, error) {
updateCluster, err := client.Steve.SteveType("provisioning.cattle.io.cluster").ByID(cluster.ID)
if err != nil {
return nil, err
}

updatedCluster := new(apisV1.Cluster)
err = v1.ConvertToK8sType(cluster, &updatedCluster)
if err != nil {
return nil, err
}

updatedCluster.ObjectMeta.ResourceVersion = updateCluster.ObjectMeta.ResourceVersion
machineConfig, newQuantity := MatchNodeRolesToMachinePool(nodeRoles, updatedCluster.Spec.RKEConfig.MachinePools)
if machineConfig < 0 {
return nil, fmt.Errorf("no machine pool matches the given node roles")
}

newQuantity += nodeRoles.Quantity
updatedCluster.Spec.RKEConfig.MachinePools[machineConfig].Quantity = &newQuantity

logrus.Infof("Scaling the machine pool to %v total nodes", newQuantity)
cluster, err = client.Steve.SteveType("provisioning.cattle.io.cluster").Update(cluster, updatedCluster)
if err != nil {
return nil, err
}

err = kwait.Poll(500*time.Millisecond, 10*time.Minute, func() (done bool, err error) {
clusterResp, err := client.Steve.SteveType("provisioning.cattle.io.cluster").ByID(cluster.ID)
if err != nil {
return false, err
}

if clusterResp.ObjectMeta.State.Name == active && nodestat.AllManagementNodeReady(client, cluster.ID) == nil {
return true, nil
}

return false, nil
})
if err != nil {
return nil, err
}

return cluster, nil
}

// NewRKEMachinePool is a constructor that sets up a apisV1.RKEMachinePool object to be used to provision a cluster.
func NewRKEMachinePool(controlPlaneRole, etcdRole, workerRole bool, poolName string, quantity int32, machineConfig *v1.SteveAPIObject, hostnameLengthLimit int) apisV1.RKEMachinePool {
machineConfigRef := &corev1.ObjectReference{
Kind: machineConfig.Kind,
Name: machineConfig.Name,
}

machinePool := apisV1.RKEMachinePool{
ControlPlaneRole: controlPlaneRole,
EtcdRole: etcdRole,
@@ -24,9 +100,11 @@ func NewRKEMachinePool(controlPlaneRole, etcdRole, workerRole bool, poolName str
Name: poolName,
Quantity: &quantity,
}

if hostnameLengthLimit > 0 {
machinePool.HostnameLengthLimit = hostnameLengthLimit
}

return machinePool
}

@@ -59,37 +137,38 @@ func (n NodeRoles) String() string {
if n.Worker {
result = append(result, "worker")
}

return fmt.Sprintf("%d %s", n.Quantity, strings.Join(result, "+"))
}

// CreateAllMachinePools is a helper method that will loop and set up multiple node pools with the defined node roles from the `nodeRoles` parameter
// `machineConfig` is the *unstructured.Unstructured created by CreateMachineConfig
// `nodeRoles` would be in this format
//
// []NodeRoles{
// {
// ControlPlane: true,
// Etcd: false,
// Worker: false,
// Quantity: 1,
// },
// {
// ControlPlane: false,
// Etcd: true,
// Worker: false,
// Quantity: 1,
// },
// }
func CreateAllMachinePools(nodeRoles []NodeRoles, machineConfig *v1.SteveAPIObject, hostnameLengthLimits []HostnameTruncation) []apisV1.RKEMachinePool {
machinePools := make([]apisV1.RKEMachinePool, 0, len(nodeRoles))
hostnameLengthLimit := 0

for index, roles := range nodeRoles {
poolName := "pool" + strconv.Itoa(index)
if hostnameLengthLimits != nil && len(hostnameLengthLimits) > index {
hostnameLengthLimit = hostnameLengthLimits[index].PoolNameLengthLimit
poolName = hostnameLengthLimits[index].Name
}

if !roles.Windows {
machinePool := NewRKEMachinePool(roles.ControlPlane, roles.Etcd, roles.Worker, poolName, roles.Quantity, machineConfig, hostnameLengthLimit)
machinePools = append(machinePools, machinePool)
@@ -98,6 +177,17 @@ func CreateAllMachinePools(nodeRoles []NodeRoles, machineConfig *v1.SteveAPIObje
machinePools = append(machinePools, machinePool)
}
}

return machinePools
}

// ScaleMachinePoolNodes is a helper method that scales the matched machine pool by the quantity set in nodeRoles and waits for the cluster to become active.
func ScaleMachinePoolNodes(client *rancher.Client, cluster *v1.SteveAPIObject, nodeRoles NodeRoles) (*v1.SteveAPIObject, error) {
scaledClusterResp, err := updateMachinePoolQuantity(client, cluster, nodeRoles)
if err != nil {
return nil, err
}

logrus.Infof("Machine pool has been scaled!")

return scaledClusterResp, nil
}
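Not part of this commit: a short sketch of calling ScaleMachinePoolNodes. NodeRoles.Quantity acts as a delta that updateMachinePoolQuantity adds to the matched pool's current quantity, and the helper polls until the cluster reports active again. The scaleWorkersUp wrapper is hypothetical and assumes the machinepools and rancher client packages are imported.

// Sketch only: scale the worker pool of a provisioned RKE2/K3S cluster up by two nodes.
func scaleWorkersUp(client *rancher.Client, steveCluster *v1.SteveAPIObject) (*v1.SteveAPIObject, error) {
	scaleUp := machinepools.NodeRoles{
		Worker:   true,
		Quantity: 2, // delta added to the matched pool's current quantity
	}

	// Blocks (up to ten minutes) until the cluster is active and its nodes are ready.
	return machinepools.ScaleMachinePoolNodes(client, steveCluster, scaleUp)
}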
25 changes: 24 additions & 1 deletion tests/framework/extensions/nodes/node_status.go
@@ -32,7 +32,7 @@ func AllManagementNodeReady(client *rancher.Client, ClusterID string) error {
},
})
if err != nil {
- return false, err
+ return false, nil
}

for _, node := range nodes.Data {
@@ -88,6 +88,29 @@ func AllMachineReady(client *rancher.Client, clusterID string) error {
return err
}

// AllNodeDeleted is a helper method that polls and checks that all nodes in the cluster are deleted.
func AllNodeDeleted(client *rancher.Client, ClusterID string) error {
err := wait.Poll(500*time.Millisecond, 5*time.Minute, func() (bool, error) {
nodes, err := client.Management.Node.ListAll(&types.ListOpts{
Filters: map[string]interface{}{
"clusterId": ClusterID,
},
})
if err != nil {
return false, err
}

if len(nodes.Data) == 0 {
logrus.Infof("All nodes in the cluster are deleted!")
return true, nil
}

return false, nil
})

return err
}

// IsNodeReplaced is a helper method that will loop and check if the node matching its type is replaced in a cluster.
// It will return an error if the node is not replaced after set amount of time.
func IsNodeReplaced(client *rancher.Client, oldMachineID string, clusterID string, numOfNodesBeforeDeletion int, isEtcd bool, isControlPlane bool, isWorker bool) (bool, error) {
67 changes: 67 additions & 0 deletions tests/framework/extensions/provisioning/verify.go
@@ -9,6 +9,7 @@ import (
"testing"
"time"

apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1"
provv1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1"
rkev1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1"
"github.com/rancher/rancher/tests/framework/clients/rancher"
@@ -27,6 +28,7 @@
"github.com/rancher/rancher/tests/framework/pkg/wait"
"github.com/rancher/rancher/tests/v2prov/defaults"
wranglername "github.com/rancher/wrangler/pkg/name"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
@@ -169,6 +171,71 @@ func VerifyCluster(t *testing.T, client *rancher.Client, clustersConfig *cluster
}
}

// VerifyDeleteRKE1Cluster validates that an RKE1 cluster and its resources are deleted.
func VerifyDeleteRKE1Cluster(t *testing.T, client *rancher.Client, clusterID string) {
cluster, err := client.Management.Cluster.ByID(clusterID)
require.NoError(t, err)

adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session)
require.NoError(t, err)

watchInterface, err := adminClient.GetManagementWatchInterface(management.ClusterType, metav1.ListOptions{
FieldSelector: "metadata.name=" + clusterID,
TimeoutSeconds: &defaults.WatchTimeoutSeconds,
})
require.NoError(t, err)

err = wait.WatchWait(watchInterface, func(event watch.Event) (ready bool, err error) {
if event.Type == watch.Error {
return false, fmt.Errorf("error: unable to delete cluster %s", cluster.Name)
} else if event.Type == watch.Deleted {
logrus.Infof("Cluster %s deleted!", cluster.Name)
return true, nil
}
return false, nil
})
require.NoError(t, err)

err = nodestat.AllNodeDeleted(client, clusterID)
require.NoError(t, err)
}

// VerifyDeleteRKE2K3SCluster validates that an RKE2/K3S cluster and its resources are deleted.
func VerifyDeleteRKE2K3SCluster(t *testing.T, client *rancher.Client, clusterID string) {
cluster, err := client.Steve.SteveType("provisioning.cattle.io.cluster").ByID(clusterID)
require.NoError(t, err)

adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session)
require.NoError(t, err)

provKubeClient, err := adminClient.GetKubeAPIProvisioningClient()
require.NoError(t, err)

watchInterface, err := provKubeClient.Clusters(namespace).Watch(context.TODO(), metav1.ListOptions{
FieldSelector: "metadata.name=" + cluster.Name,
TimeoutSeconds: &defaults.WatchTimeoutSeconds,
})
require.NoError(t, err)

err = wait.WatchWait(watchInterface, func(event watch.Event) (ready bool, err error) {
clusterObj, _ := event.Object.(*apisV1.Cluster)
if event.Type == watch.Error {
return false, fmt.Errorf("error: unable to delete cluster %s", cluster.Name)
} else if event.Type == watch.Deleted {
logrus.Infof("Cluster %s deleted!", cluster.Name)
return true, nil
} else if clusterObj == nil {
// The provisioning object is already gone from the watch stream.
logrus.Infof("Cluster %s deleted!", cluster.Name)
return true, nil
}
return false, nil
})
require.NoError(t, err)

err = nodestat.AllNodeDeleted(client, clusterID)
require.NoError(t, err)
}

// CertRotationCompleteCheckFunc returns a watch check function that checks if the certificate rotation is complete
func CertRotationCompleteCheckFunc(generation int64) wait.WatchCheckFunc {
return func(event watch.Event) (bool, error) {
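Not part of this commit: a sketch of how a test could combine the delete helper with the new verification function. The deleteAndVerifyRKE2K3S helper name is illustrative, and the clusters and provisioning extension packages are assumed to be imported.

// Sketch only: delete an RKE2/K3S cluster and block until it is fully gone.
func deleteAndVerifyRKE2K3S(t *testing.T, client *rancher.Client, clusterID string) {
	err := clusters.DeleteK3SRKE2Cluster(client, clusterID)
	require.NoError(t, err)

	// Watches the provisioning.cattle.io cluster object until it is deleted,
	// then waits for every management node in the cluster to be removed.
	provisioning.VerifyDeleteRKE2K3SCluster(t, client, clusterID)
}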