From 0a9e50fc311cdf1654f765398a207c7b036dce69 Mon Sep 17 00:00:00 2001
From: Markus Walker
Date: Wed, 11 Sep 2024 15:17:54 -0700
Subject: [PATCH] Enhance replace worker nodes to replace all cluster roles

---
 tests/v2/actions/etcdsnapshot/config.go       | 22 ++++++----
 tests/v2/validation/snapshot/README.md        |  4 ++
 tests/v2/validation/snapshot/snapshot.go      |  8 ++--
 .../snapshot/snapshot_additional_test.go      | 40 ++++++++++++-------
 4 files changed, 47 insertions(+), 27 deletions(-)

diff --git a/tests/v2/actions/etcdsnapshot/config.go b/tests/v2/actions/etcdsnapshot/config.go
index 5210d616841..d017c9a391e 100644
--- a/tests/v2/actions/etcdsnapshot/config.go
+++ b/tests/v2/actions/etcdsnapshot/config.go
@@ -4,13 +4,19 @@
 const (
 	ConfigurationFileKey = "snapshotInput"
 )
 
+type ReplaceRoles struct {
+	Etcd         bool `json:"etcd" yaml:"etcd"`
+	ControlPlane bool `json:"controlPlane" yaml:"controlPlane"`
+	Worker       bool `json:"worker" yaml:"worker"`
+}
+
 type Config struct {
-	UpgradeKubernetesVersion     string `json:"upgradeKubernetesVersion" yaml:"upgradeKubernetesVersion"`
-	SnapshotRestore              string `json:"snapshotRestore" yaml:"snapshotRestore"`
-	ControlPlaneConcurrencyValue string `json:"controlPlaneConcurrencyValue" yaml:"controlPlaneConcurrencyValue"`
-	ControlPlaneUnavailableValue string `json:"controlPlaneUnavailableValue" yaml:"controlPlaneUnavailableValue"`
-	WorkerConcurrencyValue       string `json:"workerConcurrencyValue" yaml:"workerConcurrencyValue"`
-	WorkerUnavailableValue       string `json:"workerUnavailableValue" yaml:"workerUnavailableValue"`
-	RecurringRestores            int    `json:"recurringRestores" yaml:"recurringRestores"`
-	ReplaceWorkerNode            bool   `json:"replaceWorkerNode" yaml:"replaceWorkerNode"`
+	UpgradeKubernetesVersion     string        `json:"upgradeKubernetesVersion" yaml:"upgradeKubernetesVersion"`
+	SnapshotRestore              string        `json:"snapshotRestore" yaml:"snapshotRestore"`
+	ControlPlaneConcurrencyValue string        `json:"controlPlaneConcurrencyValue" yaml:"controlPlaneConcurrencyValue"`
+	ControlPlaneUnavailableValue string        `json:"controlPlaneUnavailableValue" yaml:"controlPlaneUnavailableValue"`
+	WorkerConcurrencyValue       string        `json:"workerConcurrencyValue" yaml:"workerConcurrencyValue"`
+	WorkerUnavailableValue       string        `json:"workerUnavailableValue" yaml:"workerUnavailableValue"`
+	RecurringRestores            int           `json:"recurringRestores" yaml:"recurringRestores"`
+	ReplaceRoles                 *ReplaceRoles `json:"replaceRoles" yaml:"replaceRoles"`
 }
diff --git a/tests/v2/validation/snapshot/README.md b/tests/v2/validation/snapshot/README.md
index c60cb71051a..7d5572b7d59 100644
--- a/tests/v2/validation/snapshot/README.md
+++ b/tests/v2/validation/snapshot/README.md
@@ -24,6 +24,10 @@ snapshotInput:
   controlPlaneUnavailableValue: "1"
   workerUnavailableValue: "10%"
   recurringRestores: 1 # By default, this is set to 1 if this field is not included in the config.
+  replaceRoles: # If any role is set to true, S3 must be properly configured on the cluster. This test is specific to S3 etcd snapshots.
+    etcd: false
+    controlPlane: false
+    worker: false
 ```
 
 Additionally, S3 is a supported restore option. If you choose to use S3, then you must have it already enabled on the downstream cluster.
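The new `replaceRoles` block in the README maps one-to-one onto the `ReplaceRoles` struct. Below is a minimal, self-contained sketch of that mapping, not part of the patch: the structs are copied from the diff above (unrelated `Config` fields and the yaml tags trimmed for brevity), and the JSON literal is a hand-written equivalent of the README's YAML block.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the ReplaceRoles type added in tests/v2/actions/etcdsnapshot/config.go.
type ReplaceRoles struct {
	Etcd         bool `json:"etcd"`
	ControlPlane bool `json:"controlPlane"`
	Worker       bool `json:"worker"`
}

// Mirrors the updated Config type; fields not relevant here are omitted.
type Config struct {
	SnapshotRestore   string        `json:"snapshotRestore"`
	RecurringRestores int           `json:"recurringRestores"`
	ReplaceRoles      *ReplaceRoles `json:"replaceRoles"`
}

func main() {
	// JSON equivalent of the snapshotInput block documented in the README.
	raw := []byte(`{
		"snapshotRestore": "none",
		"recurringRestores": 1,
		"replaceRoles": {"etcd": false, "controlPlane": true, "worker": false}
	}`)

	var cfg Config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}

	// A nil ReplaceRoles means "do not replace any nodes"; the pointer lets
	// the test code gate on presence of the block, not just on false values.
	if cfg.ReplaceRoles != nil {
		fmt.Printf("replace etcd=%v controlPlane=%v worker=%v\n",
			cfg.ReplaceRoles.Etcd, cfg.ReplaceRoles.ControlPlane, cfg.ReplaceRoles.Worker)
	}
}
```

Using a `*ReplaceRoles` pointer rather than three plain bools is what allows the `etcdRestore.ReplaceRoles != nil` guard in snapshot.go below.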
diff --git a/tests/v2/validation/snapshot/snapshot.go b/tests/v2/validation/snapshot/snapshot.go
index 14460311591..d435616a94d 100644
--- a/tests/v2/validation/snapshot/snapshot.go
+++ b/tests/v2/validation/snapshot/snapshot.go
@@ -171,8 +171,8 @@ func snapshotRKE1(t *testing.T, client *rancher.Client, podTemplate corev1.PodTe
 	cluster, err := client.Management.Cluster.ByID(clusterID)
 	require.NoError(t, err)
 
-	if etcdRestore.ReplaceWorkerNode {
-		scaling.ReplaceRKE1Nodes(t, client, clusterName, false, false, true)
+	if etcdRestore.ReplaceRoles != nil && cluster.RancherKubernetesEngineConfig.Services.Etcd.BackupConfig.S3BackupConfig != nil {
+		scaling.ReplaceRKE1Nodes(t, client, clusterName, etcdRestore.ReplaceRoles.Etcd, etcdRestore.ReplaceRoles.ControlPlane, etcdRestore.ReplaceRoles.Worker)
 	}
 
 	podErrors := pods.StatusPods(client, clusterID)
@@ -276,8 +276,8 @@ func snapshotV2Prov(t *testing.T, client *rancher.Client, podTemplate corev1.Pod
 	cluster, _, err := clusters.GetProvisioningClusterByName(client, clusterName, namespace)
 	require.NoError(t, err)
 
-	if etcdRestore.ReplaceWorkerNode {
-		scaling.ReplaceNodes(t, client, clusterName, false, false, true)
+	if etcdRestore.ReplaceRoles != nil && cluster.Spec.RKEConfig.ETCD.S3 != nil {
+		scaling.ReplaceNodes(t, client, clusterName, etcdRestore.ReplaceRoles.Etcd, etcdRestore.ReplaceRoles.ControlPlane, etcdRestore.ReplaceRoles.Worker)
 	}
 
 	podErrors := pods.StatusPods(client, clusterID)
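Both call sites above encode the same rule: node replacement only runs when the config requests it and the cluster has S3 snapshots configured, and the three role flags are then passed positionally. A short sketch of that rule as a standalone helper, where `replaceArgs` is a hypothetical name introduced here for illustration (it is not in the patch) and `ReplaceRoles` mirrors the struct from config.go:

```go
package main

import "fmt"

// ReplaceRoles mirrors the struct added in tests/v2/actions/etcdsnapshot/config.go.
type ReplaceRoles struct {
	Etcd, ControlPlane, Worker bool
}

// replaceArgs (hypothetical) flattens an optional ReplaceRoles into the
// positional bools that scaling.ReplaceNodes / scaling.ReplaceRKE1Nodes
// expect: (etcd, controlPlane, worker). ok is false when replacement should
// be skipped entirely, matching the guard in the diff: replaceRoles must be
// set in the config AND the cluster must have S3 snapshots configured.
func replaceArgs(rr *ReplaceRoles, s3Configured bool) (etcd, controlPlane, worker, ok bool) {
	if rr == nil || !s3Configured {
		return false, false, false, false
	}
	return rr.Etcd, rr.ControlPlane, rr.Worker, true
}

func main() {
	rr := &ReplaceRoles{Worker: true}
	if etcd, cp, worker, ok := replaceArgs(rr, true); ok {
		// In the tests, this is the point where scaling.ReplaceNodes(t,
		// client, clusterName, etcd, cp, worker) would be invoked.
		fmt.Println(etcd, cp, worker) // false false true
	}
}
```

Note that the removed hard-coded arguments `(false, false, true)` are exactly the special case `&ReplaceRoles{Worker: true}`, so the old worker-only behavior remains expressible through the new config.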
diff --git a/tests/v2/validation/snapshot/snapshot_additional_test.go b/tests/v2/validation/snapshot/snapshot_additional_test.go
index cc6cf8cf315..54657be595c 100644
--- a/tests/v2/validation/snapshot/snapshot_additional_test.go
+++ b/tests/v2/validation/snapshot/snapshot_additional_test.go
@@ -43,26 +43,32 @@ func (s *SnapshotAdditionalTestsTestSuite) SetupSuite() {
 	s.client = client
 }
 
-func (s *SnapshotAdditionalTestsTestSuite) TestSnapshotReplaceWorkerNode() {
-	snapshotRestoreAll := &etcdsnapshot.Config{
+func (s *SnapshotAdditionalTestsTestSuite) TestSnapshotReplaceNodes() {
+	controlPlaneSnapshotRestore := &etcdsnapshot.Config{
 		UpgradeKubernetesVersion: "",
-		SnapshotRestore:          "all",
+		SnapshotRestore:          "none",
 		RecurringRestores:        1,
-		ReplaceWorkerNode:        true,
+		ReplaceRoles: &etcdsnapshot.ReplaceRoles{
+			ControlPlane: true,
+		},
 	}
 
-	snapshotRestoreK8sVersion := &etcdsnapshot.Config{
+	etcdSnapshotRestore := &etcdsnapshot.Config{
 		UpgradeKubernetesVersion: "",
-		SnapshotRestore:          "kubernetesVersion",
+		SnapshotRestore:          "none",
 		RecurringRestores:        1,
-		ReplaceWorkerNode:        true,
+		ReplaceRoles: &etcdsnapshot.ReplaceRoles{
+			Etcd: true,
+		},
 	}
 
-	snapshotRestoreNone := &etcdsnapshot.Config{
+	workerSnapshotRestore := &etcdsnapshot.Config{
 		UpgradeKubernetesVersion: "",
 		SnapshotRestore:          "none",
 		RecurringRestores:        1,
-		ReplaceWorkerNode:        true,
+		ReplaceRoles: &etcdsnapshot.ReplaceRoles{
+			Worker: true,
+		},
 	}
 
 	tests := []struct {
@@ -70,9 +76,9 @@ func (s *SnapshotAdditionalTestsTestSuite) TestSnapshotReplaceWorkerNode() {
 		etcdSnapshot *etcdsnapshot.Config
 		client       *rancher.Client
 	}{
-		{"Replace worker nodes and restore cluster config, Kubernetes version and etcd", snapshotRestoreAll, s.client},
-		{"Replace worker nodes and restore Kubernetes version and etcd", snapshotRestoreK8sVersion, s.client},
-		{"Replace worker nodes and restore etcd only", snapshotRestoreNone, s.client},
+		{"Replace control plane nodes - S3", controlPlaneSnapshotRestore, s.client},
+		{"Replace etcd nodes - S3", etcdSnapshotRestore, s.client},
+		{"Replace worker nodes - S3", workerSnapshotRestore, s.client},
 	}
 
 	for _, tt := range tests {
@@ -113,9 +119,13 @@ func (s *SnapshotAdditionalTestsTestSuite) TestSnapshotReplaceWorkerNode() {
 			}
 		}
 
-		s.Run(tt.name, func() {
-			snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot, containerImage)
-		})
+		if strings.Contains(tt.name, "S3") {
+			s.Run(tt.name, func() {
+				snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot, containerImage)
+			})
+		} else {
+			s.T().Skip("Skipping test; only S3-enabled clusters are supported for this test")
+		}
 	}
 }
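The run/skip decision above keys off the test's display name, so every table entry that should execute must carry "S3" in its name (the entries are tagged accordingly above); the gate also relies on the `strings` package being imported in the test file. A small runnable sketch of that convention outside the suite, where the first three names mirror the table entries and the last is a hypothetical non-S3 entry added here to show the skip path:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	names := []string{
		"Replace control plane nodes - S3",
		"Replace etcd nodes - S3",
		"Replace worker nodes - S3",
		"Replace worker nodes - local snapshots", // hypothetical: exercises the skip path
	}

	for _, name := range names {
		// Same gate as the diff: only entries tagged "S3" execute; in the
		// suite the skip path goes through s.T().Skip instead of a print.
		if strings.Contains(name, "S3") {
			fmt.Println("run: ", name)
		} else {
			fmt.Println("skip:", name)
		}
	}
}
```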