From 19ec5ccf8a8b9c64bf7f644e41c7eace3c28524c Mon Sep 17 00:00:00 2001 From: Markus Walker Date: Wed, 28 Aug 2024 11:30:51 -0700 Subject: [PATCH] Expand Windows test coverage in nodescaling and provisioning --- go.mod | 2 +- go.sum | 4 +- tests/v2/actions/machinepools/machinepools.go | 17 ++- tests/v2/actions/psact/createdeployment.go | 2 +- .../deamonset.go => daemonset/daemonset.go} | 11 +- .../workloads/deployment/deployment.go | 1 + tests/v2/actions/workloads/pods/pods.go | 2 +- tests/v2/validation/charts/monitoring.go | 5 +- tests/v2/validation/nodescaling/README.md | 7 +- tests/v2/validation/nodescaling/replace.go | 2 +- .../nodescaling/scale_replace_test.go | 23 +--- .../scaling_custom_cluster_test.go | 23 ++-- .../nodescaling/scaling_node_driver_test.go | 37 +++++-- .../nodescaling/scaling_nodepools.go | 11 +- .../provisioning/permutations/permutations.go | 15 +-- .../provisioning/rke2/custom_cluster_test.go | 4 +- .../rke2/provisioning_node_driver_test.go | 16 ++- tests/v2/validation/rbac/psa/psa.go | 5 +- tests/v2/validation/snapshot/README.md | 6 ++ tests/v2/validation/snapshot/snapshot.go | 11 +- .../snapshot/snapshot_additional_test.go | 4 +- .../snapshot_restore_k8s_upgrade_test.go | 4 +- .../snapshot/snapshot_restore_test.go | 4 +- .../snapshot_restore_upgrade_strategy_test.go | 4 +- .../snapshot/snapshot_restore_wins_test.go | 80 ++++++++++++++ tests/v2/validation/upgrade/README.md | 3 +- tests/v2/validation/upgrade/kubernetes.go | 8 +- .../v2/validation/upgrade/kubernetes_test.go | 6 +- .../upgrade/kubernetes_wins_test.go | 78 ++++++++++++++ tests/v2/validation/upgrade/workload.go | 102 +++++++----------- .../v2/validation/workloads/workload_test.go | 4 +- 31 files changed, 350 insertions(+), 151 deletions(-) rename tests/v2/actions/workloads/{deamonset/deamonset.go => daemonset/daemonset.go} (76%) create mode 100644 tests/v2/validation/snapshot/snapshot_restore_wins_test.go create mode 100644 tests/v2/validation/upgrade/kubernetes_wins_test.go diff --git a/go.mod b/go.mod index b4919bdad99..8b78bcc280d 100644 --- a/go.mod +++ b/go.mod @@ -62,7 +62,7 @@ replace ( require ( github.com/antihax/optional v1.0.0 github.com/rancher/rancher/pkg/apis v0.0.0-20240719121207-baeda6b89fe3 - github.com/rancher/shepherd v0.0.0-20240821165501-4d17a8625c49 + github.com/rancher/shepherd v0.0.0-20240829173041-d255c925da7d go.qase.io/client v0.0.0-20231114201952-65195ec001fa ) diff --git a/go.sum b/go.sum index 03631f1544b..9612fca2315 100644 --- a/go.sum +++ b/go.sum @@ -1794,8 +1794,8 @@ github.com/rancher/remotedialer v0.4.0 h1:T9yC5bFMsZFVQ6rK0dNrRg6rRb6Zr/4vsig8S0 github.com/rancher/remotedialer v0.4.0/go.mod h1:Ys004RpJuTLSm+k4aYUCoFiOOad37ubYev3TkOFg/5w= github.com/rancher/rke v1.6.1 h1:ipktVDW1Xcs2SIR4vB9vCxH09kVrfD+1RmcUtWIPUV8= github.com/rancher/rke v1.6.1/go.mod h1:5xRbf3L8PxqJRhABjYRfaBqbpVqAnqyH3maUNQEuwvk= -github.com/rancher/shepherd v0.0.0-20240821165501-4d17a8625c49 h1:dOteSLpRpJiGc9dW1UF8WglS4nNAgQAnfjgko1KSnfQ= -github.com/rancher/shepherd v0.0.0-20240821165501-4d17a8625c49/go.mod h1:nVphr8v6qtXd0pth8wMCF9U5eKEPBIaD5+HQCH19uRw= +github.com/rancher/shepherd v0.0.0-20240829173041-d255c925da7d h1:67/7UcaYRqLh5KiG8gX5TM7gXfmkq98yzPXeS77A6vo= +github.com/rancher/shepherd v0.0.0-20240829173041-d255c925da7d/go.mod h1:nVphr8v6qtXd0pth8wMCF9U5eKEPBIaD5+HQCH19uRw= github.com/rancher/steve v0.0.0-20240806133920-61be17faa3d2 h1:mmm2uQ1NsNCrr6jxq9eAdGxvaf+6061gV4BMvuhcT6I= github.com/rancher/steve v0.0.0-20240806133920-61be17faa3d2/go.mod 
h1:Za4nSt0V6kIHRfUo6jTXKkv6ABMMCHINA8EzhzygCfk= github.com/rancher/system-upgrade-controller/pkg/apis v0.0.0-20210727200656-10b094e30007 h1:ru+mqGnxMmKeU0Q3XIDxkARvInDIqT1hH2amTcsjxI4= diff --git a/tests/v2/actions/machinepools/machinepools.go b/tests/v2/actions/machinepools/machinepools.go index bd6f9625afa..e5e19aa9e18 100644 --- a/tests/v2/actions/machinepools/machinepools.go +++ b/tests/v2/actions/machinepools/machinepools.go @@ -22,6 +22,7 @@ import ( const ( active = "active" + osAnnotation = "cattle.io/os" fleetNamespace = "fleet-default" initNodeLabelKey = "rke.cattle.io/init-node" local = "local" @@ -31,6 +32,7 @@ const ( clusterNameLabelKey = "cluster.x-k8s.io/cluster-name" pool = "pool" True = "true" + windows = "windows" nodeRoleListLength = 4 ) @@ -42,9 +44,11 @@ func MatchNodeRolesToMachinePool(nodeRoles NodeRoles, machinePools []apisV1.RKEM if nodeRoles.ControlPlane != machinePoolConfig.ControlPlaneRole { continue } + if nodeRoles.Etcd != machinePoolConfig.EtcdRole { continue } + if nodeRoles.Worker != machinePoolConfig.WorkerRole { continue } @@ -54,6 +58,17 @@ func MatchNodeRolesToMachinePool(nodeRoles NodeRoles, machinePools []apisV1.RKEM return index, count } + // Windows nodes need a separate check because the machinePoolConfig has no + // dedicated Windows role; matching on the worker role plus the cattle.io/os + // label avoids scaling the wrong machine pool. + if nodeRoles.Windows { + for index, machinePoolConfig := range machinePools { + if machinePoolConfig.WorkerRole && machinePoolConfig.Labels[osAnnotation] == windows { + return index, count + } + } + } + return -1, count } @@ -76,7 +91,7 @@ func updateMachinePoolQuantity(client *rancher.Client, cluster *v1.SteveAPIObjec newQuantity += nodeRoles.Quantity updatedCluster.Spec.RKEConfig.MachinePools[machineConfig].Quantity = &newQuantity - logrus.Infof("Scaling the machine pool to %v total nodes", newQuantity) + logrus.Infof("Scaling machine pool %v to %v total nodes", updatedCluster.Spec.RKEConfig.MachinePools[machineConfig].Name, newQuantity) cluster, err = client.Steve.SteveType("provisioning.cattle.io.cluster").Update(cluster, updatedCluster) if err != nil { return nil, err diff --git a/tests/v2/actions/psact/createdeployment.go b/tests/v2/actions/psact/createdeployment.go index 9e7299e329e..45bdf4b88ee 100644 --- a/tests/v2/actions/psact/createdeployment.go +++ b/tests/v2/actions/psact/createdeployment.go @@ -33,7 +33,7 @@ func CreateNginxDeployment(client *rancher.Client, clusterID string, psact strin labels["workload.user.cattle.io/workloadselector"] = fmt.Sprintf("apps.deployment-%v-%v", namespace, workload) containerTemplate := workloads.NewContainer(containerName, imageName, v1.PullAlways, []v1.VolumeMount{}, []v1.EnvFromSource{}, nil, nil, nil) - podTemplate := workloads.NewPodTemplate([]v1.Container{containerTemplate}, []v1.Volume{}, []v1.LocalObjectReference{}, labels) + podTemplate := workloads.NewPodTemplate([]v1.Container{containerTemplate}, []v1.Volume{}, []v1.LocalObjectReference{}, labels, nil) deploymentTemplate := workloads.NewDeploymentTemplate(deploymentName, namespace, podTemplate, true, labels) steveclient, err := client.Steve.ProxyDownstream(clusterID) diff --git a/tests/v2/actions/workloads/deamonset/deamonset.go b/tests/v2/actions/workloads/daemonset/daemonset.go similarity index 76% rename from tests/v2/actions/workloads/deamonset/deamonset.go rename to tests/v2/actions/workloads/daemonset/daemonset.go index 9357f5bb804..df7461157bc 100644 ---
a/tests/v2/actions/workloads/deamonset/deamonset.go +++ b/tests/v2/actions/workloads/daemonset/daemonset.go @@ -7,10 +7,17 @@ import ( "github.com/rancher/shepherd/extensions/workloads" appv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" ) -// CreateDeamonset is a helper to create a deamonset -func CreateDeamonset(client *rancher.Client, clusterID, namespaceName string, replicaCount int, secretName, configMapName string, useEnvVars, useVolumes bool) (*appv1.DaemonSet, error) { +var DaemonsetGroupVersionResource = schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "daemonsets", +} + +// CreateDaemonset is a helper to create a daemonset +func CreateDaemonset(client *rancher.Client, clusterID, namespaceName string, replicaCount int, secretName, configMapName string, useEnvVars, useVolumes bool) (*appv1.DaemonSet, error) { deploymentTemplate, err := deployment.CreateDeployment(client, clusterID, namespaceName, replicaCount, secretName, configMapName, useEnvVars, useVolumes) if err != nil { return nil, err diff --git a/tests/v2/actions/workloads/deployment/deployment.go b/tests/v2/actions/workloads/deployment/deployment.go index d77469ee933..ab8f21c4383 100644 --- a/tests/v2/actions/workloads/deployment/deployment.go +++ b/tests/v2/actions/workloads/deployment/deployment.go @@ -47,6 +47,7 @@ func CreateDeployment(client *rancher.Client, clusterID, namespaceName string, r []corev1.Volume{}, []corev1.LocalObjectReference{}, nil, + nil, ) } diff --git a/tests/v2/actions/workloads/pods/pods.go b/tests/v2/actions/workloads/pods/pods.go index 9d1eb7fadab..059bc985ea1 100644 --- a/tests/v2/actions/workloads/pods/pods.go +++ b/tests/v2/actions/workloads/pods/pods.go @@ -75,7 +75,7 @@ func NewPodTemplateWithConfig(secretName, configMapName string, useEnvVars, useV container := workloads.NewContainer(containerName, imageName, pullPolicy, nil, envFrom, nil, nil, nil) containers := []corev1.Container{container} - return workloads.NewPodTemplate(containers, volumes, nil, nil) + return workloads.NewPodTemplate(containers, volumes, nil, nil, nil) } // CheckPodLogsForErrors is a helper to check pod logs for errors diff --git a/tests/v2/validation/charts/monitoring.go b/tests/v2/validation/charts/monitoring.go index 8756a290c6a..493718913a2 100644 --- a/tests/v2/validation/charts/monitoring.go +++ b/tests/v2/validation/charts/monitoring.go @@ -21,7 +21,7 @@ import ( "github.com/rancher/shepherd/extensions/clusterrolebindings" "github.com/rancher/shepherd/extensions/configmaps" "github.com/rancher/shepherd/extensions/ingresses" - extensionworkloads "github.com/rancher/shepherd/extensions/workloads" + wloads "github.com/rancher/shepherd/extensions/workloads" "github.com/rancher/shepherd/pkg/namegenerator" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -444,7 +444,8 @@ func createAlertWebhookReceiverDeployment(client *rancher.Client, clusterID, nam } isCattleLabeled := true - deploymentTemplate := extensionworkloads.NewDeploymentTemplate(deploymentName, namespace, podSpecTemplate, isCattleLabeled, nil) + + deploymentTemplate := wloads.NewDeploymentTemplate(deploymentName, namespace, podSpecTemplate, isCattleLabeled, nil) deployment, err := steveclient.SteveType(workloads.DeploymentSteveType).Create(deploymentTemplate) if err != nil { return deployment, err diff --git a/tests/v2/validation/nodescaling/README.md b/tests/v2/validation/nodescaling/README.md index 625ff9285b4..afe8aa5f199 100644 ---
a/tests/v2/validation/nodescaling/README.md +++ b/tests/v2/validation/nodescaling/README.md @@ -24,7 +24,8 @@ rancher: Node replacement tests require that the given pools have unique, distinct roles and more than 1 node per pool. Typically, a cluster with the following 3 pools is used for testing: ```yaml provisioningInput: - nodePools: # nodePools is specific for RKE1 clusters. + providers: [""] # Set to ["vsphere"] if the cluster has Windows nodes + nodePools: # nodePools is specific for RKE1 clusters. - nodeRoles: etcd: true quantity: 3 @@ -34,7 +35,7 @@ provisioningInput: - nodeRoles: worker: true quantity: 3 - machinePools: # machienPools is specific for RKE2/K3s clusters. + machinePools: # machinePools is specific for RKE2/K3s clusters. - machinePoolConfig: etcd: true quantity: 3 @@ -57,6 +58,8 @@ These tests utilize Go build tags. Due to this, see the below examples on how to ## Scaling Existing Node Pools Similar to the `provisioning` tests, the node scaling tests have static test cases as well as dynamicInput tests you can specify. In order to run the dynamicInput tests, you will need to define the `scalingInput` block in your config file. This block defines the quantity you would like the pool to be scaled up/down to. See an example below that accounts for node drivers, custom clusters and hosted clusters: ```yaml +provisioningInput: # Optional block; only needed when using vSphere + providers: [""] # Set to ["vsphere"] if the cluster has Windows nodes scalingInput: nodeProvider: "ec2" nodePools: diff --git a/tests/v2/validation/nodescaling/replace.go b/tests/v2/validation/nodescaling/replace.go index 7f8aa857a1b..093c1d3702b 100644 --- a/tests/v2/validation/nodescaling/replace.go +++ b/tests/v2/validation/nodescaling/replace.go @@ -40,7 +40,7 @@ const ( clusterLabel = "cluster.x-k8s.io/cluster-name" ) -func MatchNodeToRole(t *testing.T, client *rancher.Client, clusterID string, isEtcd bool, isControlPlane bool, isWorker bool) (int, []management.Node) { +func MatchNodeToRole(t *testing.T, client *rancher.Client, clusterID string, isEtcd, isControlPlane, isWorker bool) (int, []management.Node) { machines, err := client.Management.Node.List(&types.ListOpts{Filters: map[string]interface{}{ "clusterId": clusterID, }}) diff --git a/tests/v2/validation/nodescaling/scale_replace_test.go b/tests/v2/validation/nodescaling/scale_replace_test.go index 2ce1febc02a..5caa57b7ef7 100644 --- a/tests/v2/validation/nodescaling/scale_replace_test.go +++ b/tests/v2/validation/nodescaling/scale_replace_test.go @@ -8,11 +8,9 @@ import ( apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" "github.com/rancher/rancher/tests/v2/actions/machinepools" - "github.com/rancher/rancher/tests/v2/actions/provisioninginput" "github.com/rancher/shepherd/clients/rancher" v1 "github.com/rancher/shepherd/clients/rancher/v1" "github.com/rancher/shepherd/extensions/clusters" - "github.com/rancher/shepherd/pkg/config" "github.com/rancher/shepherd/pkg/session" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) type NodeReplacingTestSuite struct { suite.Suite - session *session.Session - client *rancher.Client - ns string - clustersConfig *provisioninginput.Config + session *session.Session + client *rancher.Client } func (s *NodeReplacingTestSuite) TearDownSuite() { s.session.Cleanup() } func (s *NodeReplacingTestSuite) SetupSuite() { testSession := session.NewSession() s.session = testSession - s.ns = provisioninginput.Namespace -
s.clustersConfig = new(provisioninginput.Config) - config.LoadConfig(provisioninginput.ConfigurationFileKey, s.clustersConfig) - client, err := rancher.NewClient("", testSession) require.NoError(s.T(), err) @@ -47,21 +38,15 @@ func (s *NodeReplacingTestSuite) SetupSuite() { func (s *NodeReplacingTestSuite) TestReplacingNodes() { nodeRolesEtcd := machinepools.NodeRoles{ - Etcd: true, - ControlPlane: false, - Worker: false, + Etcd: true, } nodeRolesControlPlane := machinepools.NodeRoles{ - Etcd: false, ControlPlane: true, - Worker: false, } nodeRolesWorker := machinepools.NodeRoles{ - Etcd: false, - ControlPlane: false, - Worker: true, + Worker: true, } tests := []struct { diff --git a/tests/v2/validation/nodescaling/scaling_custom_cluster_test.go b/tests/v2/validation/nodescaling/scaling_custom_cluster_test.go index f0bf02bca95..202a75180fd 100644 --- a/tests/v2/validation/nodescaling/scaling_custom_cluster_test.go +++ b/tests/v2/validation/nodescaling/scaling_custom_cluster_test.go @@ -64,21 +64,22 @@ func (s *CustomClusterNodeScalingTestSuite) TestScalingCustomClusterNodes() { Quantity: 1, } - nodeRolesTwoWorkers := machinepools.NodeRoles{ - Worker: true, - Quantity: 2, + nodeRolesWindows := machinepools.NodeRoles{ + Windows: true, + Quantity: 1, } tests := []struct { name string nodeRoles machinepools.NodeRoles client *rancher.Client + isWindows bool }{ - {"control plane by 1", nodeRolesControlPlane, s.client}, - {"etcd by 1", nodeRolesEtcd, s.client}, - {"etcd and control plane by 1", nodeRolesEtcdControlPlane, s.client}, - {"worker by 1", nodeRolesWorker, s.client}, - {"worker by 2", nodeRolesTwoWorkers, s.client}, + {"control plane by 1", nodeRolesControlPlane, s.client, false}, + {"etcd by 1", nodeRolesEtcd, s.client, false}, + {"etcd and control plane by 1", nodeRolesEtcdControlPlane, s.client, false}, + {"worker by 1", nodeRolesWorker, s.client, false}, + {"Windows by 1", nodeRolesWindows, s.client, true}, } for _, tt := range tests { @@ -94,8 +95,12 @@ func (s *CustomClusterNodeScalingTestSuite) TestScalingCustomClusterNodes() { if strings.Contains(updatedCluster.Spec.KubernetesVersion, "rke2") { tt.name = "Scaling custom RKE2 " + tt.name - } else { + } else if strings.Contains(updatedCluster.Spec.KubernetesVersion, "k3s") { tt.name = "Scaling custom K3S " + tt.name + + if tt.isWindows { + s.T().Skip("Skipping Windows tests") + } } s.Run(tt.name, func() { diff --git a/tests/v2/validation/nodescaling/scaling_node_driver_test.go b/tests/v2/validation/nodescaling/scaling_node_driver_test.go index faf5f1844a6..3ea80596767 100644 --- a/tests/v2/validation/nodescaling/scaling_node_driver_test.go +++ b/tests/v2/validation/nodescaling/scaling_node_driver_test.go @@ -1,13 +1,15 @@ -//go:build (validation || infra.rke2k3s || cluster.nodedriver || extended) && !infra.any && !infra.aks && !infra.eks && !infra.gke && !infra.rke1 && !cluster.any && !cluster.custom && !sanity && !stress +//go:build (validation || infra.rke1 || cluster.nodedriver || extended) && !infra.any && !infra.aks && !infra.eks && !infra.gke && !infra.rke2k3s && !cluster.any && !cluster.custom && !sanity && !stress package nodescaling import ( + "slices" "strings" "testing" apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" "github.com/rancher/rancher/tests/v2/actions/machinepools" + "github.com/rancher/rancher/tests/v2/actions/provisioninginput" "github.com/rancher/rancher/tests/v2/actions/scalinginput" "github.com/rancher/shepherd/clients/rancher" v1 "github.com/rancher/shepherd/clients/rancher/v1" @@ 
-20,9 +22,10 @@ import ( type NodeScalingTestSuite struct { suite.Suite - client *rancher.Client - session *session.Session - scalingConfig *scalinginput.Config + client *rancher.Client + session *session.Session + scalingConfig *scalinginput.Config + provisioningConfig *provisioninginput.Config } func (s *NodeScalingTestSuite) TearDownSuite() { @@ -36,6 +39,9 @@ func (s *NodeScalingTestSuite) SetupSuite() { s.scalingConfig = new(scalinginput.Config) config.LoadConfig(scalinginput.ConfigurationFileKey, s.scalingConfig) + s.provisioningConfig = new(provisioninginput.Config) + config.LoadConfig(provisioninginput.ConfigurationFileKey, s.provisioningConfig) + client, err := rancher.NewClient("", testSession) require.NoError(s.T(), err) @@ -58,20 +64,21 @@ func (s *NodeScalingTestSuite) TestScalingNodePools() { Quantity: 1, } - nodeRolesTwoWorkers := machinepools.NodeRoles{ - Worker: true, - Quantity: 2, + nodeRolesWindows := machinepools.NodeRoles{ + Windows: true, + Quantity: 1, } tests := []struct { name string nodeRoles machinepools.NodeRoles client *rancher.Client + isWindows bool }{ - {"control plane by 1", nodeRolesControlPlane, s.client}, - {"etcd by 1", nodeRolesEtcd, s.client}, - {"worker by 1", nodeRolesWorker, s.client}, - {"worker by 2", nodeRolesTwoWorkers, s.client}, + {"control plane by 1", nodeRolesControlPlane, s.client, false}, + {"etcd by 1", nodeRolesEtcd, s.client, false}, + {"worker by 1", nodeRolesWorker, s.client, false}, + {"Windows worker by 1", nodeRolesWindows, s.client, true}, } for _, tt := range tests { @@ -87,8 +94,16 @@ func (s *NodeScalingTestSuite) TestScalingNodePools() { if strings.Contains(updatedCluster.Spec.KubernetesVersion, "rke2") { tt.name = "Scaling RKE2 " + tt.name + + if !slices.Contains(s.provisioningConfig.Providers, "vsphere") && tt.isWindows { + s.T().Skip("Windows test requires access to vSphere") + } } else { tt.name = "Scaling K3S " + tt.name + + if tt.isWindows { + s.T().Skip("Skipping Windows tests - not supported on K3S") + } } s.Run(tt.name, func() { diff --git a/tests/v2/validation/nodescaling/scaling_nodepools.go b/tests/v2/validation/nodescaling/scaling_nodepools.go index 5eeeedc72dc..b33f8ec6d8a 100644 --- a/tests/v2/validation/nodescaling/scaling_nodepools.go +++ b/tests/v2/validation/nodescaling/scaling_nodepools.go @@ -27,6 +27,10 @@ func scalingRKE2K3SNodePools(t *testing.T, client *rancher.Client, clusterID str cluster, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(clusterID) require.NoError(t, err) + if nodeRoles.Windows { + nodeRoles.Quantity++ + } + clusterResp, err := machinepools.ScaleMachinePoolNodes(client, cluster, nodeRoles) require.NoError(t, err) @@ -35,7 +39,12 @@ func scalingRKE2K3SNodePools(t *testing.T, client *rancher.Client, clusterID str updatedCluster, err := client.Steve.SteveType(ProvisioningSteveResourceType).ByID(clusterID) require.NoError(t, err) - nodeRoles.Quantity = -nodeRoles.Quantity + if nodeRoles.Windows { + nodeRoles.Quantity-- + } else { + nodeRoles.Quantity = -nodeRoles.Quantity + } + scaledClusterResp, err := machinepools.ScaleMachinePoolNodes(client, updatedCluster, nodeRoles) require.NoError(t, err) diff --git a/tests/v2/validation/provisioning/permutations/permutations.go b/tests/v2/validation/provisioning/permutations/permutations.go index 052bc24a115..ccda1852139 100644 --- a/tests/v2/validation/provisioning/permutations/permutations.go +++ b/tests/v2/validation/provisioning/permutations/permutations.go @@ -29,7 +29,7 @@ import ( steveV1 
"github.com/rancher/shepherd/clients/rancher/v1" extensionscharts "github.com/rancher/shepherd/extensions/charts" extensionscluster "github.com/rancher/shepherd/extensions/clusters" - extensionsworkloads "github.com/rancher/shepherd/extensions/workloads" + wloads "github.com/rancher/shepherd/extensions/workloads" "github.com/rancher/shepherd/extensions/workloads/pods" "github.com/rancher/shepherd/pkg/config" "github.com/rancher/shepherd/pkg/namegenerator" @@ -449,9 +449,9 @@ func createNginxDeploymentWithPVC(steveclient *steveV1.Client, containerNamePref }, } - containerTemplate := extensionsworkloads.NewContainer(nginxName, nginxName, corev1.PullAlways, []corev1.VolumeMount{volMount}, []corev1.EnvFromSource{}, nil, nil, nil) - podTemplate := extensionsworkloads.NewPodTemplate([]corev1.Container{containerTemplate}, []corev1.Volume{podVol}, []corev1.LocalObjectReference{}, nil) - deployment := extensionsworkloads.NewDeploymentTemplate(containerName, defaultNamespace, podTemplate, true, nil) + containerTemplate := wloads.NewContainer(nginxName, nginxName, corev1.PullAlways, []corev1.VolumeMount{volMount}, []corev1.EnvFromSource{}, nil, nil, nil) + podTemplate := wloads.NewPodTemplate([]corev1.Container{containerTemplate}, []corev1.Volume{podVol}, []corev1.LocalObjectReference{}, nil, nil) + deployment := wloads.NewDeploymentTemplate(containerName, defaultNamespace, podTemplate, true, nil) deploymentResp, err := steveclient.SteveType(workloads.DeploymentSteveType).Create(deployment) if err != nil { @@ -464,9 +464,10 @@ func createNginxDeploymentWithPVC(steveclient *steveV1.Client, containerNamePref // createNginxDeployment is a helper function that creates a nginx deployment in a cluster's default namespace func createNginxDeployment(steveclient *steveV1.Client, containerNamePrefix string) (*steveV1.SteveAPIObject, error) { containerName := namegenerator.AppendRandomString(containerNamePrefix) - containerTemplate := extensionsworkloads.NewContainer(nginxName, nginxName, corev1.PullAlways, []corev1.VolumeMount{}, []corev1.EnvFromSource{}, nil, nil, nil) - podTemplate := extensionsworkloads.NewPodTemplate([]corev1.Container{containerTemplate}, []corev1.Volume{}, []corev1.LocalObjectReference{}, nil) - deployment := extensionsworkloads.NewDeploymentTemplate(containerName, defaultNamespace, podTemplate, true, nil) + + containerTemplate := wloads.NewContainer(nginxName, nginxName, corev1.PullAlways, []corev1.VolumeMount{}, []corev1.EnvFromSource{}, nil, nil, nil) + podTemplate := wloads.NewPodTemplate([]corev1.Container{containerTemplate}, []corev1.Volume{}, []corev1.LocalObjectReference{}, nil, nil) + deployment := wloads.NewDeploymentTemplate(containerName, defaultNamespace, podTemplate, true, nil) deploymentResp, err := steveclient.SteveType(workloads.DeploymentSteveType).Create(deployment) if err != nil { diff --git a/tests/v2/validation/provisioning/rke2/custom_cluster_test.go b/tests/v2/validation/provisioning/rke2/custom_cluster_test.go index 7cb8be47e3e..7f3a2fdb42d 100644 --- a/tests/v2/validation/provisioning/rke2/custom_cluster_test.go +++ b/tests/v2/validation/provisioning/rke2/custom_cluster_test.go @@ -119,8 +119,8 @@ func (c *CustomClusterProvisioningTestSuite) TestProvisioningRKE2CustomCluster() {"1 Node all roles " + provisioninginput.StandardClientName.String(), c.standardUserClient, nodeRolesAll, false, c.client.Flags.GetValue(environmentflag.Short) || c.client.Flags.GetValue(environmentflag.Long)}, {"2 nodes - etcd|cp roles per 1 node " + 
provisioninginput.StandardClientName.String(), c.standardUserClient, nodeRolesShared, false, c.client.Flags.GetValue(environmentflag.Short) || c.client.Flags.GetValue(environmentflag.Long)}, {"3 nodes - 1 role per node " + provisioninginput.StandardClientName.String(), c.standardUserClient, nodeRolesDedicated, false, c.client.Flags.GetValue(environmentflag.Long)}, - {"4 nodes - 1 role per node + 1 windows worker " + provisioninginput.StandardClientName.String(), c.standardUserClient, nodeRolesDedicatedWindows, true, c.client.Flags.GetValue(environmentflag.Long)}, - {"5 nodes - 1 role per node + 2 windows workers " + provisioninginput.StandardClientName.String(), c.standardUserClient, nodeRolesDedicatedTwoWindows, true, c.client.Flags.GetValue(environmentflag.Long)}, + {"4 nodes - 1 role per node + 1 Windows worker " + provisioninginput.StandardClientName.String(), c.standardUserClient, nodeRolesDedicatedWindows, true, c.client.Flags.GetValue(environmentflag.Long)}, + {"5 nodes - 1 role per node + 2 Windows workers " + provisioninginput.StandardClientName.String(), c.standardUserClient, nodeRolesDedicatedTwoWindows, true, c.client.Flags.GetValue(environmentflag.Long)}, } for _, tt := range tests { if !tt.runFlag { diff --git a/tests/v2/validation/provisioning/rke2/provisioning_node_driver_test.go b/tests/v2/validation/provisioning/rke2/provisioning_node_driver_test.go index 38c9d10430f..789361ce108 100644 --- a/tests/v2/validation/provisioning/rke2/provisioning_node_driver_test.go +++ b/tests/v2/validation/provisioning/rke2/provisioning_node_driver_test.go @@ -3,6 +3,7 @@ package rke2 import ( + "slices" "testing" "github.com/rancher/rancher/tests/v2/actions/provisioninginput" @@ -72,16 +73,19 @@ func (r *RKE2NodeDriverProvisioningTestSuite) TestProvisioningRKE2Cluster() { nodeRolesAll := []provisioninginput.MachinePools{provisioninginput.AllRolesMachinePool} nodeRolesShared := []provisioninginput.MachinePools{provisioninginput.EtcdControlPlaneMachinePool, provisioninginput.WorkerMachinePool} nodeRolesDedicated := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool} + nodeRolesWindows := []provisioninginput.MachinePools{provisioninginput.EtcdMachinePool, provisioninginput.ControlPlaneMachinePool, provisioninginput.WorkerMachinePool, provisioninginput.WindowsMachinePool} tests := []struct { name string machinePools []provisioninginput.MachinePools client *rancher.Client + isWindows bool runFlag bool }{ - {"1 Node all roles " + provisioninginput.StandardClientName.String(), nodeRolesAll, r.standardUserClient, r.client.Flags.GetValue(environmentflag.Short) || r.client.Flags.GetValue(environmentflag.Long)}, - {"2 nodes - etcd|cp roles per 1 node " + provisioninginput.StandardClientName.String(), nodeRolesShared, r.standardUserClient, r.client.Flags.GetValue(environmentflag.Short) || r.client.Flags.GetValue(environmentflag.Long)}, - {"3 nodes - 1 role per node " + provisioninginput.StandardClientName.String(), nodeRolesDedicated, r.standardUserClient, r.client.Flags.GetValue(environmentflag.Long)}, + {"1 Node all roles " + provisioninginput.StandardClientName.String(), nodeRolesAll, r.standardUserClient, false, r.client.Flags.GetValue(environmentflag.Short) || r.client.Flags.GetValue(environmentflag.Long)}, + {"2 nodes - etcd|cp roles per 1 node " + provisioninginput.StandardClientName.String(), nodeRolesShared, r.standardUserClient, false, r.client.Flags.GetValue(environmentflag.Short) || 
r.client.Flags.GetValue(environmentflag.Long)}, + {"3 nodes - 1 role per node " + provisioninginput.StandardClientName.String(), nodeRolesDedicated, r.standardUserClient, false, r.client.Flags.GetValue(environmentflag.Long)}, + {"4 nodes - 1 role per node + 1 Windows worker " + provisioninginput.StandardClientName.String(), nodeRolesWindows, r.standardUserClient, true, r.client.Flags.GetValue(environmentflag.Long)}, } for _, tt := range tests { @@ -89,8 +93,14 @@ func (r *RKE2NodeDriverProvisioningTestSuite) TestProvisioningRKE2Cluster() { r.T().Logf("SKIPPED") continue } + provisioningConfig := *r.provisioningConfig provisioningConfig.MachinePools = tt.machinePools + + if !slices.Contains(provisioningConfig.Providers, "vsphere") && tt.isWindows { + r.T().Skip("Windows test requires access to vSphere") + } + permutations.RunTestPermutations(&r.Suite, tt.name, tt.client, &provisioningConfig, permutations.RKE2ProvisionCluster, nil, nil) } } diff --git a/tests/v2/validation/rbac/psa/psa.go b/tests/v2/validation/rbac/psa/psa.go index a84afea232c..0df694a8af7 100644 --- a/tests/v2/validation/rbac/psa/psa.go +++ b/tests/v2/validation/rbac/psa/psa.go @@ -12,6 +12,7 @@ import ( v1 "github.com/rancher/shepherd/clients/rancher/v1" "github.com/rancher/shepherd/extensions/clusters" extensionsworkloads "github.com/rancher/shepherd/extensions/workloads" + wloads "github.com/rancher/shepherd/extensions/workloads" namegen "github.com/rancher/shepherd/pkg/namegenerator" appv1 "k8s.io/api/apps/v1" coreV1 "k8s.io/api/core/v1" @@ -44,8 +45,8 @@ func createDeploymentAndWait(steveclient *v1.Client, containerName string, image deploymentName := namegen.AppendRandomString("rbac-") containerTemplate := extensionsworkloads.NewContainer(containerName, image, coreV1.PullAlways, []coreV1.VolumeMount{}, []coreV1.EnvFromSource{}, nil, nil, nil) - podTemplate := extensionsworkloads.NewPodTemplate([]coreV1.Container{containerTemplate}, []coreV1.Volume{}, []coreV1.LocalObjectReference{}, nil) - deployment := extensionsworkloads.NewDeploymentTemplate(deploymentName, namespaceName, podTemplate, isCattleLabeled, nil) + podTemplate := wloads.NewPodTemplate([]coreV1.Container{containerTemplate}, []coreV1.Volume{}, []coreV1.LocalObjectReference{}, nil, nil) + deployment := wloads.NewDeploymentTemplate(deploymentName, namespaceName, podTemplate, isCattleLabeled, nil) deploymentResp, err := steveclient.SteveType(workloads.DeploymentSteveType).Create(deployment) if err != nil { diff --git a/tests/v2/validation/snapshot/README.md b/tests/v2/validation/snapshot/README.md index b2d6d4f963d..c60cb71051a 100644 --- a/tests/v2/validation/snapshot/README.md +++ b/tests/v2/validation/snapshot/README.md @@ -42,5 +42,11 @@ These tests utilize Go build tags. Due to this, see the below example on how to `gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/snapshot --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestSnapshotRestoreUpgradeStrategyTestSuite/TestSnapshotRestoreUpgradeStrategy"` \ `gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/snapshot --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestSnapshotRestoreUpgradeStrategyTestSuite/TestSnapshotRestoreUpgradeStrategyDynamicInput"` +### Snapshot restore - Windows clusters +Note: This test requires a cluster that already contains Windows nodes. Run it against a vSphere Windows node driver cluster or a custom cluster with a Windows node present.
+ +`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/snapshot --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestSnapshotRestoreWindowsTestSuite/TestSnapshotRestoreWindows"` \ +`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/snapshot --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestSnapshotRestoreWindowsTestSuite/TestSnapshotRestoreWindowsDynamicInput"` + ### Sanpshot additional tests `gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/snapshot --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestSnapshotAdditionalTestsTestSuite$"` \ No newline at end of file diff --git a/tests/v2/validation/snapshot/snapshot.go b/tests/v2/validation/snapshot/snapshot.go index 9565d1fa8ce..14460311591 100644 --- a/tests/v2/validation/snapshot/snapshot.go +++ b/tests/v2/validation/snapshot/snapshot.go @@ -56,9 +56,11 @@ const ( RKE2 = "rke2" serviceAppendName = "service-" serviceType = "service" + windowsContainerImage = "mcr.microsoft.com/windows/servercore/iis" + windowsContainerName = "iis" ) -func snapshotRestore(t *testing.T, client *rancher.Client, clusterName string, etcdRestore *etcdsnapshot.Config) { +func snapshotRestore(t *testing.T, client *rancher.Client, clusterName string, etcdRestore *etcdsnapshot.Config, containerImage string) { initialIngressName := namegen.AppendRandomString(initialIngress) initialWorkloadName := namegen.AppendRandomString(initialWorkload) @@ -78,8 +80,8 @@ func snapshotRestore(t *testing.T, client *rancher.Client, clusterName string, e isRKE1 = true } containerTemplate := workloads.NewContainer(containerName, containerImage, corev1.PullAlways, []corev1.VolumeMount{}, []corev1.EnvFromSource{}, nil, nil, nil) - podTemplate := workloads.NewPodTemplate([]corev1.Container{containerTemplate}, []corev1.Volume{}, []corev1.LocalObjectReference{}, nil) + podTemplate := workloads.NewPodTemplate([]corev1.Container{containerTemplate}, []corev1.Volume{}, []corev1.LocalObjectReference{}, nil, map[string]string{}) deployment := workloads.NewDeploymentTemplate(initialWorkloadName, defaultNamespace, podTemplate, isCattleLabeled, nil) service := corev1.Service{ diff --git a/tests/v2/validation/snapshot/snapshot_additional_test.go b/tests/v2/validation/snapshot/snapshot_additional_test.go index 075b72d30ed..cc6cf8cf315 100644 --- a/tests/v2/validation/snapshot/snapshot_additional_test.go +++ b/tests/v2/validation/snapshot/snapshot_additional_test.go @@ -114,7 +114,7 @@ func (s *SnapshotAdditionalTestsTestSuite) TestSnapshotReplaceWorkerNode() { } s.Run(tt.name, func() { - snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot) + snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot, containerImage) }) } } @@ -173,7 +173,7 @@ func (s *SnapshotAdditionalTestsTestSuite) TestSnapshotRecurringRestores() { } s.Run(tt.name, func() { - snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot) + snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot, containerImage) }) } } diff --git a/tests/v2/validation/snapshot/snapshot_restore_k8s_upgrade_test.go
b/tests/v2/validation/snapshot/snapshot_restore_k8s_upgrade_test.go index 83231fbbd5c..d91a6112c2d 100644 --- a/tests/v2/validation/snapshot/snapshot_restore_k8s_upgrade_test.go +++ b/tests/v2/validation/snapshot/snapshot_restore_k8s_upgrade_test.go @@ -97,7 +97,7 @@ func (s *SnapshotRestoreK8sUpgradeTestSuite) TestSnapshotRestoreK8sUpgrade() { } s.Run(tt.name, func() { - snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot) + snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot, containerImage) }) } } @@ -107,7 +107,7 @@ func (s *SnapshotRestoreK8sUpgradeTestSuite) TestSnapshotRestoreK8sUpgradeDynami s.T().Skip() } - snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, s.clustersConfig) + snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, s.clustersConfig, containerImage) } // In order for 'go test' to run this suite, we need to create diff --git a/tests/v2/validation/snapshot/snapshot_restore_test.go b/tests/v2/validation/snapshot/snapshot_restore_test.go index 758905a63e5..b1caa9652b3 100644 --- a/tests/v2/validation/snapshot/snapshot_restore_test.go +++ b/tests/v2/validation/snapshot/snapshot_restore_test.go @@ -97,7 +97,7 @@ func (s *SnapshotRestoreTestSuite) TestSnapshotRestoreETCDOnly() { } s.Run(tt.name, func() { - snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot) + snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot, containerImage) }) } } @@ -107,7 +107,7 @@ func (s *SnapshotRestoreTestSuite) TestSnapshotRestoreETCDOnlyDynamicInput() { s.T().Skip() } - snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, s.clustersConfig) + snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, s.clustersConfig, containerImage) } // In order for 'go test' to run this suite, we need to create diff --git a/tests/v2/validation/snapshot/snapshot_restore_upgrade_strategy_test.go b/tests/v2/validation/snapshot/snapshot_restore_upgrade_strategy_test.go index f53a209328f..0acb66d1724 100644 --- a/tests/v2/validation/snapshot/snapshot_restore_upgrade_strategy_test.go +++ b/tests/v2/validation/snapshot/snapshot_restore_upgrade_strategy_test.go @@ -101,7 +101,7 @@ func (s *SnapshotRestoreUpgradeStrategyTestSuite) TestSnapshotRestoreUpgradeStra } s.Run(tt.name, func() { - snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot) + snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot, containerImage) }) } } @@ -111,7 +111,7 @@ func (s *SnapshotRestoreUpgradeStrategyTestSuite) TestSnapshotRestoreUpgradeStra s.T().Skip() } - snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, s.clustersConfig) + snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, s.clustersConfig, containerImage) } // In order for 'go test' to run this suite, we need to create diff --git a/tests/v2/validation/snapshot/snapshot_restore_wins_test.go b/tests/v2/validation/snapshot/snapshot_restore_wins_test.go new file mode 100644 index 00000000000..fd40b28e723 --- /dev/null +++ b/tests/v2/validation/snapshot/snapshot_restore_wins_test.go @@ -0,0 +1,80 @@ +//go:build validation + +package snapshot + +import ( + "testing" + + "github.com/rancher/rancher/tests/v2/actions/etcdsnapshot" + "github.com/rancher/shepherd/clients/rancher" + + "github.com/rancher/shepherd/pkg/config" + "github.com/rancher/shepherd/pkg/session" + + "github.com/stretchr/testify/require" + 
"github.com/stretchr/testify/suite" +) + +type SnapshotRestoreWindowsTestSuite struct { + suite.Suite + session *session.Session + client *rancher.Client + clustersConfig *etcdsnapshot.Config +} + +func (s *SnapshotRestoreWindowsTestSuite) TearDownSuite() { + s.session.Cleanup() +} + +func (s *SnapshotRestoreWindowsTestSuite) SetupSuite() { + testSession := session.NewSession() + s.session = testSession + + s.clustersConfig = new(etcdsnapshot.Config) + config.LoadConfig(etcdsnapshot.ConfigurationFileKey, s.clustersConfig) + + client, err := rancher.NewClient("", testSession) + require.NoError(s.T(), err) + + s.client = client +} + +func (s *SnapshotRestoreWindowsTestSuite) TestSnapshotRestoreWindows() { + snapshotRestoreAll := &etcdsnapshot.Config{ + UpgradeKubernetesVersion: "", + SnapshotRestore: "all", + ControlPlaneConcurrencyValue: "15%", + ControlPlaneUnavailableValue: "3", + WorkerConcurrencyValue: "20%", + WorkerUnavailableValue: "15%", + RecurringRestores: 1, + } + + tests := []struct { + name string + etcdSnapshot *etcdsnapshot.Config + client *rancher.Client + }{ + {"Restore Windows cluster config, Kubernetes version and etcd", snapshotRestoreAll, s.client}, + } + + for _, tt := range tests { + s.Run(tt.name, func() { + snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot, windowsContainerImage) + }) + } +} + +func (s *SnapshotRestoreWindowsTestSuite) TestSnapshotRestoreWindowsDynamicInput() { + if s.clustersConfig == nil { + s.T().Skip() + } + + snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, s.clustersConfig, windowsContainerImage) +} + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestSnapshotRestoreWindowsTestSuite(t *testing.T) { + suite.Run(t, new(SnapshotRestoreWindowsTestSuite)) +} diff --git a/tests/v2/validation/upgrade/README.md b/tests/v2/validation/upgrade/README.md index a4d1a3f1b52..152b04ca847 100644 --- a/tests/v2/validation/upgrade/README.md +++ b/tests/v2/validation/upgrade/README.md @@ -25,7 +25,8 @@ Note: To see the `provisioningInput` in further detail, please review over the [ See below how to run the test: ### Kubernetes Upgrade -`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/upgrade --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestKubernetesUpgradeTestSuite/TestUpgradeKubernetes"` +`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/upgrade --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestKubernetesUpgradeTestSuite/TestUpgradeKubernetes"` \ +`gotestsum --format standard-verbose --packages=github.com/rancher/rancher/tests/v2/validation/upgrade --junitfile results.xml -- -timeout=60m -tags=validation -v -run "TestWindowsKubernetesUpgradeTestSuite/TestUpgradeWindowsKubernetes"` ## Cloud Provider Migration Migrates a cluster's cloud provider from in-tree to out-of-tree diff --git a/tests/v2/validation/upgrade/kubernetes.go b/tests/v2/validation/upgrade/kubernetes.go index 0b5c66aac45..1a745d9c7f9 100644 --- a/tests/v2/validation/upgrade/kubernetes.go +++ b/tests/v2/validation/upgrade/kubernetes.go @@ -36,7 +36,7 @@ const ( ) // upgradeLocalCluster is a function to upgrade a local cluster. 
-func upgradeLocalCluster(u *suite.Suite, testName string, client *rancher.Client, clusterName string, testConfig *clusters.ClusterConfig, cluster upgradeinput.Cluster) { +func upgradeLocalCluster(u *suite.Suite, testName string, client *rancher.Client, clusterName string, testConfig *clusters.ClusterConfig, cluster upgradeinput.Cluster, containerImage string) { clusterObject, err := extensionscluster.GetClusterIDByName(client, clusterName) require.NoError(u.T(), err) @@ -55,7 +55,7 @@ func upgradeLocalCluster(u *suite.Suite, testName string, client *rancher.Client } u.Run(testName, func() { - createPreUpgradeWorkloads(u.T(), client, clusterName, cluster.FeaturesToTest) + createPreUpgradeWorkloads(u.T(), client, clusterName, cluster.FeaturesToTest, nil, containerImage) clusterMeta, err := extensionscluster.NewClusterMeta(client, clusterName) require.NoError(u.T(), err) @@ -84,7 +84,7 @@ func upgradeLocalCluster(u *suite.Suite, testName string, client *rancher.Client } // upgradeDownstreamCluster is a function to upgrade a downstream cluster. -func upgradeDownstreamCluster(u *suite.Suite, testName string, client *rancher.Client, clusterName string, testConfig *clusters.ClusterConfig, cluster upgradeinput.Cluster) { +func upgradeDownstreamCluster(u *suite.Suite, testName string, client *rancher.Client, clusterName string, testConfig *clusters.ClusterConfig, cluster upgradeinput.Cluster, nodeSelector map[string]string, containerImage string) { var isRKE1 = false clusterObject, _, _ := extensionscluster.GetProvisioningClusterByName(client, clusterName, namespace) @@ -120,7 +120,7 @@ func upgradeDownstreamCluster(u *suite.Suite, testName string, client *rancher.C } u.Run(testName, func() { - createPreUpgradeWorkloads(u.T(), client, clusterName, cluster.FeaturesToTest) + createPreUpgradeWorkloads(u.T(), client, clusterName, cluster.FeaturesToTest, nodeSelector, containerImage) if isRKE1 { upgradedCluster, err := upgradeRKE1Cluster(u.T(), client, cluster, testConfig) diff --git a/tests/v2/validation/upgrade/kubernetes_test.go b/tests/v2/validation/upgrade/kubernetes_test.go index 06e682a21d4..2adda09f3a0 100644 --- a/tests/v2/validation/upgrade/kubernetes_test.go +++ b/tests/v2/validation/upgrade/kubernetes_test.go @@ -1,3 +1,5 @@ +//go:build validation + package upgrade import ( @@ -50,9 +52,9 @@ func (u *UpgradeKubernetesTestSuite) TestUpgradeKubernetes() { testConfig := clusters.ConvertConfigToClusterConfig(&cluster.ProvisioningInput) if cluster.Name == local { - upgradeLocalCluster(&u.Suite, tt.name, tt.client, cluster.Name, testConfig, cluster) + upgradeLocalCluster(&u.Suite, tt.name, tt.client, cluster.Name, testConfig, cluster, containerImage) } else { - upgradeDownstreamCluster(&u.Suite, tt.name, tt.client, cluster.Name, testConfig, cluster) + upgradeDownstreamCluster(&u.Suite, tt.name, tt.client, cluster.Name, testConfig, cluster, nil, containerImage) } } } diff --git a/tests/v2/validation/upgrade/kubernetes_wins_test.go b/tests/v2/validation/upgrade/kubernetes_wins_test.go new file mode 100644 index 00000000000..ff244a4f344 --- /dev/null +++ b/tests/v2/validation/upgrade/kubernetes_wins_test.go @@ -0,0 +1,78 @@ +//go:build validation + +package upgrade + +import ( + "testing" + + "github.com/rancher/norman/types" + "github.com/rancher/rancher/tests/v2/actions/clusters" + "github.com/rancher/rancher/tests/v2/actions/upgradeinput" + "github.com/rancher/shepherd/clients/rancher" + extClusters "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/pkg/session" + 
"github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type UpgradeWindowsKubernetesTestSuite struct { + suite.Suite + session *session.Session + client *rancher.Client + clusters []upgradeinput.Cluster +} + +func (u *UpgradeWindowsKubernetesTestSuite) TearDownSuite() { + u.session.Cleanup() +} + +func (u *UpgradeWindowsKubernetesTestSuite) SetupSuite() { + testSession := session.NewSession() + u.session = testSession + + client, err := rancher.NewClient("", testSession) + require.NoError(u.T(), err) + + u.client = client + + clusters, err := upgradeinput.LoadUpgradeKubernetesConfig(client) + require.NoError(u.T(), err) + + u.clusters = clusters +} + +func (u *UpgradeWindowsKubernetesTestSuite) TestUpgradeWindowsKubernetes() { + tests := []struct { + name string + client *rancher.Client + nodeSelector map[string]string + }{ + {"Upgrading Windows ", u.client, map[string]string{"kubernetes.io/os": "windows"}}, + } + + for _, tt := range tests { + for _, cluster := range u.clusters { + updatedClusterID, err := extClusters.GetClusterIDByName(tt.client, cluster.Name) + require.NoError(u.T(), err) + + nodes, err := tt.client.Management.Node.ListAll(&types.ListOpts{ + Filters: map[string]interface{}{ + "clusterId": updatedClusterID, + }, + }) + + for _, node := range nodes.Data { + if tt.nodeSelector["kubernetes.io/os"] == "windows" { + node.Labels["kubernetes.io/os"] = "windows" + } + } + + testConfig := clusters.ConvertConfigToClusterConfig(&cluster.ProvisioningInput) + upgradeDownstreamCluster(&u.Suite, tt.name, tt.client, cluster.Name, testConfig, cluster, tt.nodeSelector, windowsContainerImage) + } + } +} + +func TestWindowsKubernetesUpgradeTestSuite(t *testing.T) { + suite.Run(t, new(UpgradeWindowsKubernetesTestSuite)) +} diff --git a/tests/v2/validation/upgrade/workload.go b/tests/v2/validation/upgrade/workload.go index 31aed1e035b..a219bc23f2c 100644 --- a/tests/v2/validation/upgrade/workload.go +++ b/tests/v2/validation/upgrade/workload.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/rancher/norman/types" "github.com/rancher/rancher/pkg/api/scheme" "github.com/rancher/rancher/tests/v2/actions/charts" kubeingress "github.com/rancher/rancher/tests/v2/actions/kubeapi/ingresses" @@ -25,6 +24,7 @@ import ( "github.com/rancher/shepherd/extensions/clusters" "github.com/rancher/shepherd/extensions/ingresses" extensionsworkloads "github.com/rancher/shepherd/extensions/workloads" + wloads "github.com/rancher/shepherd/extensions/workloads" "github.com/rancher/shepherd/pkg/namegenerator" "github.com/rancher/shepherd/pkg/wait" "github.com/sirupsen/logrus" @@ -68,10 +68,11 @@ const ( servicePortName = "port" servicePortNumber = 80 volumeMountPath = "/root/usr/" + windowsContainerImage = "mcr.microsoft.com/windows/servercore/iis" ) // createPreUpgradeWorkloads creates workloads in the downstream cluster before the upgrade. 
-func createPreUpgradeWorkloads(t *testing.T, client *rancher.Client, clusterName string, featuresToTest upgradeinput.Features) { +func createPreUpgradeWorkloads(t *testing.T, client *rancher.Client, clusterName string, featuresToTest upgradeinput.Features, nodeSelector map[string]string, containerImage string) { isCattleLabeled := true names := newNames() @@ -86,10 +87,11 @@ func createPreUpgradeWorkloads(t *testing.T, client *rancher.Client, clusterName require.NoError(t, err) assert.Equal(t, namespace.Name, names.random[namespaceName]) - testContainerPodTemplate := newPodTemplateWithTestContainer() + testContainerPodTemplate := newPodTemplateWithTestContainer(containerImage, nodeSelector) logrus.Infof("Creating deployment: %v", names.random[deploymentName]) - deploymentTemplate := extensionsworkloads.NewDeploymentTemplate(names.random[deploymentName], namespace.Name, testContainerPodTemplate, isCattleLabeled, nil) + + deploymentTemplate := wloads.NewDeploymentTemplate(names.random[deploymentName], namespace.Name, testContainerPodTemplate, isCattleLabeled, nil) createdDeployment, err := steveClient.SteveType(workloads.DeploymentSteveType).Create(deploymentTemplate) require.NoError(t, err) assert.Equal(t, createdDeployment.Name, names.random[deploymentName]) @@ -99,7 +101,8 @@ func createPreUpgradeWorkloads(t *testing.T, client *rancher.Client, clusterName require.NoError(t, err) logrus.Infof("Creating daemonset: %v", names.random[daemonsetName]) - daemonsetTemplate := extensionsworkloads.NewDaemonSetTemplate(names.random[daemonsetName], namespace.Name, testContainerPodTemplate, isCattleLabeled, nil) + + daemonsetTemplate := wloads.NewDaemonSetTemplate(names.random[daemonsetName], namespace.Name, testContainerPodTemplate, isCattleLabeled, nil) createdDaemonSet, err := steveClient.SteveType(workloads.DaemonsetSteveType).Create(daemonsetTemplate) require.NoError(t, err) assert.Equal(t, createdDaemonSet.Name, names.random[daemonsetName]) @@ -108,9 +111,6 @@ func createPreUpgradeWorkloads(t *testing.T, client *rancher.Client, clusterName err = extensionscharts.WatchAndWaitDaemonSets(client, project.ClusterID, namespace.Name, metav1.ListOptions{}) require.NoError(t, err) - logrus.Infof("Validating daemonset %v available replicas number are equal to the worker nodes...", names.random[daemonsetName]) - validateDaemonset(t, client, project.ClusterID, namespace.Name, names.random[daemonsetName]) - secretTemplate := secrets.NewSecretTemplate(names.random[secretName], namespace.Name, map[string][]byte{"test": []byte("test")}, corev1.SecretTypeOpaque) logrus.Infof("Creating secret: %v", names.random[secretName]) @@ -118,16 +118,18 @@ func createPreUpgradeWorkloads(t *testing.T, client *rancher.Client, clusterName require.NoError(t, err) assert.Equal(t, createdSecret.Name, names.random[secretName]) - podTemplateWithSecretVolume := newPodTemplateWithSecretVolume(names.random[secretName]) + podTemplateWithSecretVolume := newPodTemplateWithSecretVolume(names.random[secretName], containerImage, nodeSelector) logrus.Infof("Creating deployment %v with the test container and secret as volume...", names.random[deploymentNameForVolumeSecret]) - deploymentWithSecretTemplate := extensionsworkloads.NewDeploymentTemplate(names.random[deploymentNameForVolumeSecret], namespace.Name, podTemplateWithSecretVolume, isCattleLabeled, nil) + + deploymentWithSecretTemplate := wloads.NewDeploymentTemplate(names.random[deploymentNameForVolumeSecret], namespace.Name, podTemplateWithSecretVolume, isCattleLabeled, nil) 
createdDeploymentWithSecretVolume, err := steveClient.SteveType(workloads.DeploymentSteveType).Create(deploymentWithSecretTemplate) require.NoError(t, err) assert.Equal(t, createdDeploymentWithSecretVolume.Name, names.random[deploymentNameForVolumeSecret]) logrus.Infof("Creating daemonset %v with the test container and secret as volume...", names.random[daemonsetNameForVolumeSecret]) - daemonsetWithSecretTemplate := extensionsworkloads.NewDaemonSetTemplate(names.random[daemonsetNameForVolumeSecret], namespace.Name, podTemplateWithSecretVolume, isCattleLabeled, nil) + + daemonsetWithSecretTemplate := wloads.NewDaemonSetTemplate(names.random[daemonsetNameForVolumeSecret], namespace.Name, podTemplateWithSecretVolume, isCattleLabeled, nil) createdDaemonSetWithSecretVolume, err := steveClient.SteveType(workloads.DaemonsetSteveType).Create(daemonsetWithSecretTemplate) require.NoError(t, err) assert.Equal(t, createdDaemonSetWithSecretVolume.Name, names.random[daemonsetNameForVolumeSecret]) @@ -136,19 +138,18 @@ func createPreUpgradeWorkloads(t *testing.T, client *rancher.Client, clusterName err = extensionscharts.WatchAndWaitDaemonSets(client, project.ClusterID, namespace.Name, metav1.ListOptions{}) require.NoError(t, err) - logrus.Infof("Validating daemonset %v available replicas number are equal to the worker nodes...", names.random[daemonsetNameForVolumeSecret]) - validateDaemonset(t, client, project.ClusterID, namespace.Name, names.random[daemonsetNameForVolumeSecret]) - - podTemplateWithSecretEnvironmentVariable := newPodTemplateWithSecretEnvironmentVariable(names.random[secretName]) + podTemplateWithSecretEnvironmentVariable := newPodTemplateWithSecretEnvironmentVariable(names.random[secretName], containerImage, nodeSelector) logrus.Infof("Creating deployment %v with the test container and secret as environment variable...", names.random[deploymentNameForEnvironmentVariableSecret]) - deploymentEnvironmentWithSecretTemplate := extensionsworkloads.NewDeploymentTemplate(names.random[deploymentNameForEnvironmentVariableSecret], namespace.Name, podTemplateWithSecretEnvironmentVariable, isCattleLabeled, nil) + + deploymentEnvironmentWithSecretTemplate := wloads.NewDeploymentTemplate(names.random[deploymentNameForEnvironmentVariableSecret], namespace.Name, podTemplateWithSecretEnvironmentVariable, isCattleLabeled, nil) createdDeploymentEnvironmentVariableSecret, err := steveClient.SteveType(workloads.DeploymentSteveType).Create(deploymentEnvironmentWithSecretTemplate) require.NoError(t, err) assert.Equal(t, createdDeploymentEnvironmentVariableSecret.Name, names.random[deploymentNameForEnvironmentVariableSecret]) logrus.Infof("Creating daemonset %v with the test container and secret as environment variable...", names.random[daemonsetNameForEnvironmentVariableSecret]) - daemonSetEnvironmentWithSecretTemplate := extensionsworkloads.NewDaemonSetTemplate(names.random[daemonsetNameForEnvironmentVariableSecret], namespace.Name, podTemplateWithSecretEnvironmentVariable, isCattleLabeled, nil) + + daemonSetEnvironmentWithSecretTemplate := wloads.NewDaemonSetTemplate(names.random[daemonsetNameForEnvironmentVariableSecret], namespace.Name, podTemplateWithSecretEnvironmentVariable, isCattleLabeled, nil) createdDaemonSetEnvironmentVariableSecret, err := steveClient.SteveType(workloads.DaemonsetSteveType).Create(daemonSetEnvironmentWithSecretTemplate) require.NoError(t, err) assert.Equal(t, createdDaemonSetEnvironmentVariableSecret.Name, names.random[daemonsetNameForEnvironmentVariableSecret]) @@ -157,12 +158,10 
@@ func createPreUpgradeWorkloads(t *testing.T, client *rancher.Client, clusterName err = extensionscharts.WatchAndWaitDaemonSets(client, project.ClusterID, namespace.Name, metav1.ListOptions{}) require.NoError(t, err) - logrus.Infof("Validating daemonset %v available replicas number is equal to worker nodes...", names.random[daemonsetNameForEnvironmentVariableSecret]) - validateDaemonset(t, client, project.ClusterID, namespace.Name, names.random[daemonsetNameForEnvironmentVariableSecret]) - if *featuresToTest.Ingress { logrus.Infof("Creating deployment %v with the test container for ingress...", names.random[deploymentNameForIngress]) - deploymentForIngressTemplate := extensionsworkloads.NewDeploymentTemplate(names.random[deploymentNameForIngress], namespace.Name, testContainerPodTemplate, isCattleLabeled, nil) + + deploymentForIngressTemplate := wloads.NewDeploymentTemplate(names.random[deploymentNameForIngress], namespace.Name, testContainerPodTemplate, isCattleLabeled, nil) createdDeploymentForIngress, err := steveClient.SteveType(workloads.DeploymentSteveType).Create(deploymentForIngressTemplate) require.NoError(t, err) assert.Equal(t, createdDeploymentForIngress.Name, names.random[deploymentNameForIngress]) @@ -202,7 +201,8 @@ func createPreUpgradeWorkloads(t *testing.T, client *rancher.Client, clusterName assert.True(t, isIngressForDeploymentAccessible) logrus.Infof("Creating daemonset %v with the test container for ingress...", names.random[daemonsetNameForIngress]) - daemonSetForIngressTemplate := extensionsworkloads.NewDaemonSetTemplate(names.random[daemonsetNameForIngress], namespace.Name, testContainerPodTemplate, isCattleLabeled, nil) + + daemonSetForIngressTemplate := wloads.NewDaemonSetTemplate(names.random[daemonsetNameForIngress], namespace.Name, testContainerPodTemplate, isCattleLabeled, nil) createdDaemonSetForIngress, err := steveClient.SteveType(workloads.DaemonsetSteveType).Create(daemonSetForIngressTemplate) require.NoError(t, err) assert.Equal(t, createdDaemonSetForIngress.Name, names.random[daemonsetNameForIngress]) @@ -442,21 +442,22 @@ func newServiceTemplate(serviceName, namespaceName string, selector map[string]s } // newTestContainerMinimal is a private constructor that returns container for minimal workload creations -func newTestContainerMinimal() corev1.Container { +func newTestContainerMinimal(containerImage string) corev1.Container { pullPolicy := corev1.PullAlways - return extensionsworkloads.NewContainer(containerName, containerImage, pullPolicy, nil, nil, nil, nil, nil) + + return wloads.NewContainer(containerName, containerImage, pullPolicy, nil, nil, nil, nil, nil) } // newPodTemplateWithTestContainer is a private constructor that returns pod template spec for workload creations -func newPodTemplateWithTestContainer() corev1.PodTemplateSpec { - testContainer := newTestContainerMinimal() +func newPodTemplateWithTestContainer(containerImage string, nodeSelector map[string]string) corev1.PodTemplateSpec { + testContainer := newTestContainerMinimal(containerImage) containers := []corev1.Container{testContainer} - return extensionsworkloads.NewPodTemplate(containers, nil, nil, nil) + return extensionsworkloads.NewPodTemplate(containers, nil, nil, nil, nodeSelector) } // newPodTemplateWithSecretVolume is a private constructor that returns pod template spec with volume option for workload creations -func newPodTemplateWithSecretVolume(secretName string) corev1.PodTemplateSpec { - testContainer := newTestContainerMinimal() +func 
newPodTemplateWithSecretVolume(secretName, containerImage string, nodeSelector map[string]string) corev1.PodTemplateSpec { + testContainer := newTestContainerMinimal(containerImage) testContainer.VolumeMounts = []corev1.VolumeMount{{Name: secretAsVolumeName, MountPath: volumeMountPath}} containers := []corev1.Container{testContainer} volumes := []corev1.Volume{ @@ -470,11 +471,11 @@ func newPodTemplateWithSecretVolume(secretName string) corev1.PodTemplateSpec { }, } - return extensionsworkloads.NewPodTemplate(containers, volumes, nil, nil) + return extensionsworkloads.NewPodTemplate(containers, volumes, nil, nil, nodeSelector) } // newPodTemplateWithSecretEnvironmentVariable is a private constructor that returns pod template spec with envFrom option for workload creations -func newPodTemplateWithSecretEnvironmentVariable(secretName string) corev1.PodTemplateSpec { +func newPodTemplateWithSecretEnvironmentVariable(secretName, containerImage string, nodeSelector map[string]string) corev1.PodTemplateSpec { pullPolicy := corev1.PullAlways envFrom := []corev1.EnvFromSource{ { @@ -483,10 +484,10 @@ func newPodTemplateWithSecretEnvironmentVariable(secretName string) corev1.PodTe }, }, } container := extensionsworkloads.NewContainer(containerName, containerImage, pullPolicy, nil, envFrom, nil, nil, nil) containers := []corev1.Container{container} - return extensionsworkloads.NewPodTemplate(containers, nil, nil, nil) + return extensionsworkloads.NewPodTemplate(containers, nil, nil, nil, nodeSelector) } // waitUntilIngressIsAccessible waits until the ingress is accessible @@ -569,37 +570,6 @@ func checkPrefix(name string, prefix string) bool { return strings.HasPrefix(name, prefix) } -// validateDaemonset checks that the available number of daemonsets equals the number of workers in a downstream cluster or the number of nodes in the local cluster -func validateDaemonset(t *testing.T, client *rancher.Client, clusterID, namespaceName, daemonsetName string) { - t.Helper() - - listFilter := &types.ListOpts{ - Filters: map[string]interface{}{ - "clusterId": clusterID, - }, - } - - if clusterID != local { - listFilter.Filters["worker"] = true - } - - nodesCollection, err := client.Management.Node.List(listFilter) - require.NoError(t, err) - - steveClient, err := client.Steve.ProxyDownstream(clusterID) - require.NoError(t, err) - - daemonSetID := getSteveID(namespaceName, daemonsetName) - daemonsetResp, err := steveClient.SteveType(workloads.DaemonsetSteveType).ByID(daemonSetID) - require.NoError(t, err) - - daemonsetStatus := &appv1.DaemonSetStatus{} - err = v1.ConvertToK8sType(daemonsetResp.Status, daemonsetStatus) - require.NoError(t, err) - - assert.Equalf(t, int(daemonsetStatus.NumberAvailable), len(nodesCollection.Data), "Daemonset %v doesn't have the required ready", daemonsetName) -} - // newNames returns a new resourceNames struct // it creates a random names with random suffix for each resource by using core and coreWithSuffix names func newNames() *resourceNames { diff --git a/tests/v2/validation/workloads/workload_test.go b/tests/v2/validation/workloads/workload_test.go index 3bc60b7e08b..595776bccfd 100644 --- a/tests/v2/validation/workloads/workload_test.go +++ b/tests/v2/validation/workloads/workload_test.go @@ -4,7 +4,7 @@ import ( "testing" projectsapi "github.com/rancher/rancher/tests/v2/actions/projects" -
"github.com/rancher/rancher/tests/v2/actions/workloads/deamonset" + deamonset "github.com/rancher/rancher/tests/v2/actions/workloads/daemonset" deployment "github.com/rancher/rancher/tests/v2/actions/workloads/deployment" "github.com/rancher/rancher/tests/v2/actions/workloads/pods" "github.com/rancher/shepherd/clients/rancher" @@ -102,7 +102,7 @@ func (w *WorkloadTestSuite) TestWorkloadDaemonSet() { _, namespace, err := projectsapi.CreateProjectAndNamespace(w.client, w.cluster.ID) require.NoError(w.T(), err) - _, err = deamonset.CreateDeamonset(w.client, w.cluster.ID, namespace.Name, 1, "", "", false, false) + _, err = deamonset.CreateDaemonset(w.client, w.cluster.ID, namespace.Name, 1, "", "", false, false) require.NoError(w.T(), err) }