Merge pull request rancher#46885 from markusewalker/markusv29/task-1430
[v2.9] Expand Windows test coverage in nodescaling and provisioning
markusewalker authored Aug 29, 2024
2 parents 7f997b7 + 19ec5cc commit 714e136
Showing 31 changed files with 350 additions and 151 deletions.
2 changes: 1 addition & 1 deletion go.mod
@@ -62,7 +62,7 @@ replace (
require (
github.com/antihax/optional v1.0.0
github.com/rancher/rancher/pkg/apis v0.0.0-20240719121207-baeda6b89fe3
github.com/rancher/shepherd v0.0.0-20240821165501-4d17a8625c49
github.com/rancher/shepherd v0.0.0-20240829173041-d255c925da7d
go.qase.io/client v0.0.0-20231114201952-65195ec001fa
)

4 changes: 2 additions & 2 deletions go.sum
@@ -1794,8 +1794,8 @@ github.com/rancher/remotedialer v0.4.0 h1:T9yC5bFMsZFVQ6rK0dNrRg6rRb6Zr/4vsig8S0
github.com/rancher/remotedialer v0.4.0/go.mod h1:Ys004RpJuTLSm+k4aYUCoFiOOad37ubYev3TkOFg/5w=
github.com/rancher/rke v1.6.1 h1:ipktVDW1Xcs2SIR4vB9vCxH09kVrfD+1RmcUtWIPUV8=
github.com/rancher/rke v1.6.1/go.mod h1:5xRbf3L8PxqJRhABjYRfaBqbpVqAnqyH3maUNQEuwvk=
github.com/rancher/shepherd v0.0.0-20240821165501-4d17a8625c49 h1:dOteSLpRpJiGc9dW1UF8WglS4nNAgQAnfjgko1KSnfQ=
github.com/rancher/shepherd v0.0.0-20240821165501-4d17a8625c49/go.mod h1:nVphr8v6qtXd0pth8wMCF9U5eKEPBIaD5+HQCH19uRw=
github.com/rancher/shepherd v0.0.0-20240829173041-d255c925da7d h1:67/7UcaYRqLh5KiG8gX5TM7gXfmkq98yzPXeS77A6vo=
github.com/rancher/shepherd v0.0.0-20240829173041-d255c925da7d/go.mod h1:nVphr8v6qtXd0pth8wMCF9U5eKEPBIaD5+HQCH19uRw=
github.com/rancher/steve v0.0.0-20240806133920-61be17faa3d2 h1:mmm2uQ1NsNCrr6jxq9eAdGxvaf+6061gV4BMvuhcT6I=
github.com/rancher/steve v0.0.0-20240806133920-61be17faa3d2/go.mod h1:Za4nSt0V6kIHRfUo6jTXKkv6ABMMCHINA8EzhzygCfk=
github.com/rancher/system-upgrade-controller/pkg/apis v0.0.0-20210727200656-10b094e30007 h1:ru+mqGnxMmKeU0Q3XIDxkARvInDIqT1hH2amTcsjxI4=
17 changes: 16 additions & 1 deletion tests/v2/actions/machinepools/machinepools.go
@@ -22,6 +22,7 @@ import (

const (
active = "active"
osAnnotation = "cattle.io/os"
fleetNamespace = "fleet-default"
initNodeLabelKey = "rke.cattle.io/init-node"
local = "local"
@@ -31,6 +32,7 @@ const (
clusterNameLabelKey = "cluster.x-k8s.io/cluster-name"
pool = "pool"
True = "true"
windows = "windows"

nodeRoleListLength = 4
)
@@ -42,9 +44,11 @@ func MatchNodeRolesToMachinePool(nodeRoles NodeRoles, machinePools []apisV1.RKEM
if nodeRoles.ControlPlane != machinePoolConfig.ControlPlaneRole {
continue
}

if nodeRoles.Etcd != machinePoolConfig.EtcdRole {
continue
}

if nodeRoles.Worker != machinePoolConfig.WorkerRole {
continue
}
@@ -54,6 +58,17 @@
return index, count
}

// If the nodeRole is for a Windows node, this separate check is needed. This is because
// the machinePoolConfig does not account for Windows nodes. This results in a scaling
// issue when working with Windows nodes.
if nodeRoles.Windows {
for index, machinePoolConfig := range machinePools {
if machinePoolConfig.WorkerRole && machinePoolConfig.Labels[osAnnotation] == windows {
return index, count
}
}
}

return -1, count
}

@@ -76,7 +91,7 @@ func updateMachinePoolQuantity(client *rancher.Client, cluster *v1.SteveAPIObjec
newQuantity += nodeRoles.Quantity
updatedCluster.Spec.RKEConfig.MachinePools[machineConfig].Quantity = &newQuantity

logrus.Infof("Scaling the machine pool to %v total nodes", newQuantity)
logrus.Infof("Scaling machine pool %v to %v total nodes", updatedCluster.Spec.RKEConfig.MachinePools[machineConfig].Name, newQuantity)
cluster, err = client.Steve.SteveType("provisioning.cattle.io.cluster").Update(cluster, updatedCluster)
if err != nil {
return nil, err
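The new block above matches a Windows pool by its `cattle.io/os` label rather than by role flags, since role flags alone cannot tell a Windows worker pool apart from a Linux one. Below is a minimal, self-contained sketch of that fallback; the pool and role structs are simplified stand-ins for the real `apisV1.RKEMachinePool` and `machinepools.NodeRoles` types, and the non-Windows branch is reduced to a worker-only comparison for brevity.

```go
package main

import "fmt"

// Simplified stand-ins for apisV1.RKEMachinePool and machinepools.NodeRoles;
// only the fields the new fallback relies on are reproduced here.
type machinePool struct {
	Name       string
	WorkerRole bool
	Labels     map[string]string
}

type nodeRoles struct {
	Worker  bool
	Windows bool
}

const osAnnotation = "cattle.io/os"

// matchPool mirrors the Windows fallback: role flags cannot distinguish a
// Windows worker pool from a Linux one, so the cattle.io/os label is used
// instead when roles.Windows is set. The non-Windows path is simplified here.
func matchPool(roles nodeRoles, pools []machinePool) int {
	if roles.Windows {
		for i, p := range pools {
			if p.WorkerRole && p.Labels[osAnnotation] == "windows" {
				return i
			}
		}
		return -1
	}
	for i, p := range pools {
		if p.WorkerRole == roles.Worker {
			return i
		}
	}
	return -1
}

func main() {
	pools := []machinePool{
		{Name: "pool-linux-worker", WorkerRole: true, Labels: map[string]string{}},
		{Name: "pool-windows-worker", WorkerRole: true, Labels: map[string]string{osAnnotation: "windows"}},
	}

	// A Windows scale request resolves to the labeled pool, not the first worker pool.
	fmt.Println(pools[matchPool(nodeRoles{Windows: true}, pools)].Name) // pool-windows-worker
}
```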
2 changes: 1 addition & 1 deletion tests/v2/actions/psact/createdeployment.go
@@ -33,7 +33,7 @@ func CreateNginxDeployment(client *rancher.Client, clusterID string, psact strin
labels["workload.user.cattle.io/workloadselector"] = fmt.Sprintf("apps.deployment-%v-%v", namespace, workload)

containerTemplate := workloads.NewContainer(containerName, imageName, v1.PullAlways, []v1.VolumeMount{}, []v1.EnvFromSource{}, nil, nil, nil)
podTemplate := workloads.NewPodTemplate([]v1.Container{containerTemplate}, []v1.Volume{}, []v1.LocalObjectReference{}, labels)
podTemplate := workloads.NewPodTemplate([]v1.Container{containerTemplate}, []v1.Volume{}, []v1.LocalObjectReference{}, labels, nil)
deploymentTemplate := workloads.NewDeploymentTemplate(deploymentName, namespace, podTemplate, true, labels)

steveclient, err := client.Steve.ProxyDownstream(clusterID)
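This call site gains a trailing `nil` because `workloads.NewPodTemplate` in the bumped `rancher/shepherd` module (see go.mod above) now takes an extra final parameter; the same `nil` is threaded through the deployment and pod helpers below. The following is only a sketch of the updated call shape, assuming the shepherd version pinned above; the new parameter's purpose is not visible in this diff, so it is passed as `nil` exactly as the changed call sites do.

```go
package example

import (
	"github.com/rancher/shepherd/extensions/workloads"
	corev1 "k8s.io/api/core/v1"
)

// podTemplateExample shows the updated call shape used by the changed call
// sites: the newly added trailing argument is passed as nil, as in this diff.
func podTemplateExample(labels map[string]string) {
	container := workloads.NewContainer("nginx", "nginx", corev1.PullAlways,
		[]corev1.VolumeMount{}, []corev1.EnvFromSource{}, nil, nil, nil)

	podTemplate := workloads.NewPodTemplate(
		[]corev1.Container{container},
		[]corev1.Volume{},
		[]corev1.LocalObjectReference{},
		labels,
		nil, // new trailing parameter from the shepherd bump; its use isn't shown in this diff
	)

	_ = podTemplate
}
```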
@@ -7,10 +7,17 @@ import (
"github.com/rancher/shepherd/extensions/workloads"
appv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)

// CreateDeamonset is a helper to create a deamonset
func CreateDeamonset(client *rancher.Client, clusterID, namespaceName string, replicaCount int, secretName, configMapName string, useEnvVars, useVolumes bool) (*appv1.DaemonSet, error) {
var DeamonsetGroupVersionResource = schema.GroupVersionResource{
Group: "apps",
Version: "v1",
Resource: "daemonsets",
}

// CreateDaemonset is a helper to create a daemonset
func CreateDaemonset(client *rancher.Client, clusterID, namespaceName string, replicaCount int, secretName, configMapName string, useEnvVars, useVolumes bool) (*appv1.DaemonSet, error) {
deploymentTemplate, err := deployment.CreateDeployment(client, clusterID, namespaceName, replicaCount, secretName, configMapName, useEnvVars, useVolumes)
if err != nil {
return nil, err
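Besides renaming `CreateDeamonset` to `CreateDaemonset`, this hunk exports `DeamonsetGroupVersionResource` for `apps/v1` daemonsets. How the test suites consume it is not shown here; as a generic illustration only (not code from this PR), a GVR like this is what a client-go dynamic client uses to address the resource:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
)

// listDaemonSets illustrates what a GroupVersionResource is for: addressing
// apps/v1 daemonsets through a dynamic (schema-less) client.
func listDaemonSets(cfg *rest.Config, namespace string) error {
	gvr := schema.GroupVersionResource{
		Group:    "apps",
		Version:  "v1",
		Resource: "daemonsets",
	}

	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		return err
	}

	// List daemonsets in the given namespace as unstructured objects.
	_, err = client.Resource(gvr).Namespace(namespace).List(context.TODO(), metav1.ListOptions{})
	return err
}
```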
1 change: 1 addition & 0 deletions tests/v2/actions/workloads/deployment/deployment.go
@@ -47,6 +47,7 @@ func CreateDeployment(client *rancher.Client, clusterID, namespaceName string, r
[]corev1.Volume{},
[]corev1.LocalObjectReference{},
nil,
nil,
)
}

2 changes: 1 addition & 1 deletion tests/v2/actions/workloads/pods/pods.go
@@ -75,7 +75,7 @@ func NewPodTemplateWithConfig(secretName, configMapName string, useEnvVars, useV

container := workloads.NewContainer(containerName, imageName, pullPolicy, nil, envFrom, nil, nil, nil)
containers := []corev1.Container{container}
return workloads.NewPodTemplate(containers, volumes, nil, nil)
return workloads.NewPodTemplate(containers, volumes, nil, nil, nil)
}

// CheckPodLogsForErrors is a helper to check pod logs for errors
5 changes: 3 additions & 2 deletions tests/v2/validation/charts/monitoring.go
@@ -21,7 +21,7 @@ import (
"github.com/rancher/shepherd/extensions/clusterrolebindings"
"github.com/rancher/shepherd/extensions/configmaps"
"github.com/rancher/shepherd/extensions/ingresses"
extensionworkloads "github.com/rancher/shepherd/extensions/workloads"
wloads "github.com/rancher/shepherd/extensions/workloads"
"github.com/rancher/shepherd/pkg/namegenerator"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
@@ -444,7 +444,8 @@ func createAlertWebhookReceiverDeployment(client *rancher.Client, clusterID, nam
}

isCattleLabeled := true
deploymentTemplate := extensionworkloads.NewDeploymentTemplate(deploymentName, namespace, podSpecTemplate, isCattleLabeled, nil)

deploymentTemplate := wloads.NewDeploymentTemplate(deploymentName, namespace, podSpecTemplate, isCattleLabeled, nil)
deployment, err := steveclient.SteveType(workloads.DeploymentSteveType).Create(deploymentTemplate)
if err != nil {
return deployment, err
7 changes: 5 additions & 2 deletions tests/v2/validation/nodescaling/README.md
@@ -24,7 +24,8 @@ rancher:
Node replacement tests require that the given pools have unique, distinct roles and more than 1 node per pool. Typically, a cluster with the following 3 pools is used for testing:
```yaml
provisioningInput:
nodePools: # nodePools is specific for RKE1 clusters.
providers: [""] # Specify to vsphere if you have a Windows node in your cluster
nodePools: # nodePools is specific for RKE1 clusters.
- nodeRoles:
etcd: true
quantity: 3
@@ -34,7 +35,7 @@
- nodeRoles:
worker: true
quantity: 3
machinePools: # machienPools is specific for RKE2/K3s clusters.
machinePools: # machinePools is specific for RKE2/K3s clusters.
- machinePoolConfig:
etcd: true
quantity: 3
@@ -57,6 +58,8 @@ These tests utilize Go build tags. Due to this, see the below examples on how to
## Scaling Existing Node Pools
Similar to the `provisioning` tests, the node scaling tests have static test cases as well as dynamicInput tests you can specify. In order to run the dynamicInput tests, you will need to define the `scalingInput` block in your config file. This block defines the quantity you would like the pool to be scaled up/down to. See an example below that accounts for node drivers, custom clusters and hosted clusters:
```yaml
provisioningInput: # Optional block, only use if using vsphere
providers: [""] # Specify to vsphere if you have a Windows node in your cluster
scalingInput:
nodeProvider: "ec2"
nodePools:
2 changes: 1 addition & 1 deletion tests/v2/validation/nodescaling/replace.go
@@ -40,7 +40,7 @@ const (
clusterLabel = "cluster.x-k8s.io/cluster-name"
)

func MatchNodeToRole(t *testing.T, client *rancher.Client, clusterID string, isEtcd bool, isControlPlane bool, isWorker bool) (int, []management.Node) {
func MatchNodeToRole(t *testing.T, client *rancher.Client, clusterID string, isEtcd, isControlPlane, isWorker bool) (int, []management.Node) {
machines, err := client.Management.Node.List(&types.ListOpts{Filters: map[string]interface{}{
"clusterId": clusterID,
}})
23 changes: 4 additions & 19 deletions tests/v2/validation/nodescaling/scale_replace_test.go
@@ -8,22 +8,18 @@ import (

apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1"
"github.com/rancher/rancher/tests/v2/actions/machinepools"
"github.com/rancher/rancher/tests/v2/actions/provisioninginput"
"github.com/rancher/shepherd/clients/rancher"
v1 "github.com/rancher/shepherd/clients/rancher/v1"
"github.com/rancher/shepherd/extensions/clusters"
"github.com/rancher/shepherd/pkg/config"
"github.com/rancher/shepherd/pkg/session"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)

type NodeReplacingTestSuite struct {
suite.Suite
session *session.Session
client *rancher.Client
ns string
clustersConfig *provisioninginput.Config
session *session.Session
client *rancher.Client
}

func (s *NodeReplacingTestSuite) TearDownSuite() {
@@ -34,11 +30,6 @@ func (s *NodeReplacingTestSuite) SetupSuite() {
testSession := session.NewSession()
s.session = testSession

s.ns = provisioninginput.Namespace

s.clustersConfig = new(provisioninginput.Config)
config.LoadConfig(provisioninginput.ConfigurationFileKey, s.clustersConfig)

client, err := rancher.NewClient("", testSession)
require.NoError(s.T(), err)

@@ -47,21 +38,15 @@

func (s *NodeReplacingTestSuite) TestReplacingNodes() {
nodeRolesEtcd := machinepools.NodeRoles{
Etcd: true,
ControlPlane: false,
Worker: false,
Etcd: true,
}

nodeRolesControlPlane := machinepools.NodeRoles{
Etcd: false,
ControlPlane: true,
Worker: false,
}

nodeRolesWorker := machinepools.NodeRoles{
Etcd: false,
ControlPlane: false,
Worker: true,
Worker: true,
}

tests := []struct {
23 changes: 14 additions & 9 deletions tests/v2/validation/nodescaling/scaling_custom_cluster_test.go
@@ -64,21 +64,22 @@ func (s *CustomClusterNodeScalingTestSuite) TestScalingCustomClusterNodes() {
Quantity: 1,
}

nodeRolesTwoWorkers := machinepools.NodeRoles{
Worker: true,
Quantity: 2,
nodeRolesWindows := machinepools.NodeRoles{
Windows: true,
Quantity: 1,
}

tests := []struct {
name string
nodeRoles machinepools.NodeRoles
client *rancher.Client
isWindows bool
}{
{"control plane by 1", nodeRolesControlPlane, s.client},
{"etcd by 1", nodeRolesEtcd, s.client},
{"etcd and control plane by 1", nodeRolesEtcdControlPlane, s.client},
{"worker by 1", nodeRolesWorker, s.client},
{"worker by 2", nodeRolesTwoWorkers, s.client},
{"control plane by 1", nodeRolesControlPlane, s.client, false},
{"etcd by 1", nodeRolesEtcd, s.client, false},
{"etcd and control plane by 1", nodeRolesEtcdControlPlane, s.client, false},
{"worker by 1", nodeRolesWorker, s.client, false},
{"Windows by 1", nodeRolesWindows, s.client, true},
}

for _, tt := range tests {
@@ -94,8 +95,12 @@

if strings.Contains(updatedCluster.Spec.KubernetesVersion, "rke2") {
tt.name = "Scaling custom RKE2 " + tt.name
} else {
} else if strings.Contains(updatedCluster.Spec.KubernetesVersion, "k3s") {
tt.name = "Scaling custom K3S " + tt.name

if tt.isWindows {
s.T().Skip("Skipping Windows tests")
}
}

s.Run(tt.name, func() {
37 changes: 26 additions & 11 deletions tests/v2/validation/nodescaling/scaling_node_driver_test.go
@@ -1,13 +1,15 @@
//go:build (validation || infra.rke2k3s || cluster.nodedriver || extended) && !infra.any && !infra.aks && !infra.eks && !infra.gke && !infra.rke1 && !cluster.any && !cluster.custom && !sanity && !stress
//go:build (validation || infra.rke1 || cluster.nodedriver || extended) && !infra.any && !infra.aks && !infra.eks && !infra.gke && !infra.rke2k3s && !cluster.any && !cluster.custom && !sanity && !stress

package nodescaling

import (
"slices"
"strings"
"testing"

apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1"
"github.com/rancher/rancher/tests/v2/actions/machinepools"
"github.com/rancher/rancher/tests/v2/actions/provisioninginput"
"github.com/rancher/rancher/tests/v2/actions/scalinginput"
"github.com/rancher/shepherd/clients/rancher"
v1 "github.com/rancher/shepherd/clients/rancher/v1"
@@ -20,9 +22,10 @@

type NodeScalingTestSuite struct {
suite.Suite
client *rancher.Client
session *session.Session
scalingConfig *scalinginput.Config
client *rancher.Client
session *session.Session
scalingConfig *scalinginput.Config
provisioningConfig *provisioninginput.Config
}

func (s *NodeScalingTestSuite) TearDownSuite() {
@@ -36,6 +39,9 @@ func (s *NodeScalingTestSuite) SetupSuite() {
s.scalingConfig = new(scalinginput.Config)
config.LoadConfig(scalinginput.ConfigurationFileKey, s.scalingConfig)

s.provisioningConfig = new(provisioninginput.Config)
config.LoadConfig(provisioninginput.ConfigurationFileKey, s.provisioningConfig)

client, err := rancher.NewClient("", testSession)
require.NoError(s.T(), err)

@@ -58,20 +64,21 @@ func (s *NodeScalingTestSuite) TestScalingNodePools() {
Quantity: 1,
}

nodeRolesTwoWorkers := machinepools.NodeRoles{
Worker: true,
Quantity: 2,
nodeRolesWindows := machinepools.NodeRoles{
Windows: true,
Quantity: 1,
}

tests := []struct {
name string
nodeRoles machinepools.NodeRoles
client *rancher.Client
isWindows bool
}{
{"control plane by 1", nodeRolesControlPlane, s.client},
{"etcd by 1", nodeRolesEtcd, s.client},
{"worker by 1", nodeRolesWorker, s.client},
{"worker by 2", nodeRolesTwoWorkers, s.client},
{"control plane by 1", nodeRolesControlPlane, s.client, false},
{"etcd by 1", nodeRolesEtcd, s.client, false},
{"worker by 1", nodeRolesWorker, s.client, false},
{"Windows worker by 1", nodeRolesWindows, s.client, true},
}

for _, tt := range tests {
@@ -87,8 +94,16 @@

if strings.Contains(updatedCluster.Spec.KubernetesVersion, "rke2") {
tt.name = "Scaling RKE2 " + tt.name

if !slices.Contains(s.provisioningConfig.Providers, "vsphere") && tt.isWindows {
s.T().Skip("Windows test requires access to vSphere")
}
} else {
tt.name = "Scaling K3S " + tt.name

if tt.isWindows {
s.T().Skip("Skipping Windows tests - not supported on K3S")
}
}

s.Run(tt.name, func() {
11 changes: 10 additions & 1 deletion tests/v2/validation/nodescaling/scaling_nodepools.go
@@ -27,6 +27,10 @@ func scalingRKE2K3SNodePools(t *testing.T, client *rancher.Client, clusterID str
cluster, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(clusterID)
require.NoError(t, err)

if nodeRoles.Windows {
nodeRoles.Quantity++
}

clusterResp, err := machinepools.ScaleMachinePoolNodes(client, cluster, nodeRoles)
require.NoError(t, err)

@@ -35,7 +39,12 @@
updatedCluster, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(clusterID)
require.NoError(t, err)

nodeRoles.Quantity = -nodeRoles.Quantity
if nodeRoles.Windows {
nodeRoles.Quantity--
} else {
nodeRoles.Quantity = -nodeRoles.Quantity
}

scaledClusterResp, err := machinepools.ScaleMachinePoolNodes(client, updatedCluster, nodeRoles)
require.NoError(t, err)
