Merge pull request rancher#47095 from markusewalker/markusv27/task-1252
[v2.7] Enhance replace worker nodes to replace all cluster roles
markusewalker authored Sep 12, 2024
2 parents 94a2206 + 0a9e50f commit aae81bf
Showing 4 changed files with 47 additions and 27 deletions.
22 changes: 14 additions & 8 deletions tests/v2/actions/etcdsnapshot/config.go
@@ -4,13 +4,19 @@ const (
 	ConfigurationFileKey = "snapshotInput"
 )
 
+type ReplaceRoles struct {
+	Etcd         bool `json:"etcd" yaml:"etcd"`
+	ControlPlane bool `json:"controlPlane" yaml:"controlPlane"`
+	Worker       bool `json:"worker" yaml:"worker"`
+}
+
 type Config struct {
-	UpgradeKubernetesVersion     string `json:"upgradeKubernetesVersion" yaml:"upgradeKubernetesVersion"`
-	SnapshotRestore              string `json:"snapshotRestore" yaml:"snapshotRestore"`
-	ControlPlaneConcurrencyValue string `json:"controlPlaneConcurrencyValue" yaml:"controlPlaneConcurrencyValue"`
-	ControlPlaneUnavailableValue string `json:"controlPlaneUnavailableValue" yaml:"controlPlaneUnavailableValue"`
-	WorkerConcurrencyValue       string `json:"workerConcurrencyValue" yaml:"workerConcurrencyValue"`
-	WorkerUnavailableValue       string `json:"workerUnavailableValue" yaml:"workerUnavailableValue"`
-	RecurringRestores            int    `json:"recurringRestores" yaml:"recurringRestores"`
-	ReplaceWorkerNode            bool   `json:"replaceWorkerNode" yaml:"replaceWorkerNode"`
+	UpgradeKubernetesVersion     string        `json:"upgradeKubernetesVersion" yaml:"upgradeKubernetesVersion"`
+	SnapshotRestore              string        `json:"snapshotRestore" yaml:"snapshotRestore"`
+	ControlPlaneConcurrencyValue string        `json:"controlPlaneConcurrencyValue" yaml:"controlPlaneConcurrencyValue"`
+	ControlPlaneUnavailableValue string        `json:"controlPlaneUnavailableValue" yaml:"controlPlaneUnavailableValue"`
+	WorkerConcurrencyValue       string        `json:"workerConcurrencyValue" yaml:"workerConcurrencyValue"`
+	WorkerUnavailableValue       string        `json:"workerUnavailableValue" yaml:"workerUnavailableValue"`
+	RecurringRestores            int           `json:"recurringRestores" yaml:"recurringRestores"`
+	ReplaceRoles                 *ReplaceRoles `json:"replaceRoles" yaml:"replaceRoles"`
 }
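For context, a minimal sketch of how a test could populate the new field (the type and field names come from the hunk above; the package name, helper function, and import path are illustrative assumptions based on the file location):

```go
package snapshot

// Sketch only: the import path mirrors the file location shown in this diff
// (tests/v2/actions/etcdsnapshot); adjust it to the repository's module path.
import "github.com/rancher/rancher/tests/v2/actions/etcdsnapshot"

// replaceAllRolesConfig builds a snapshot test config that restores etcd only
// ("none") and asks the test to replace nodes of every role afterwards.
func replaceAllRolesConfig() *etcdsnapshot.Config {
	return &etcdsnapshot.Config{
		SnapshotRestore:   "none",
		RecurringRestores: 1,
		ReplaceRoles: &etcdsnapshot.ReplaceRoles{
			Etcd:         true,
			ControlPlane: true,
			Worker:       true,
		},
	}
}
```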
4 changes: 4 additions & 0 deletions tests/v2/validation/snapshot/README.md
@@ -24,6 +24,10 @@ snapshotInput:
   controlPlaneUnavailableValue: "1"
   workerUnavailableValue: "10%"
   recurringRestores: 1 # By default, this is set to 1 if this field is not included in the config.
+  replaceRoles: # If selected, S3 must be properly configured on the cluster. This test is specific to S3 etcd snapshots.
+    etcd: false
+    controlPlane: false
+    worker: false
 ```
 Additionally, S3 is a supported restore option. If you choose to use S3, then you must have it already enabled on the downstream cluster.
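For example, a snapshotInput block that exercises etcd node replacement only against an S3-backed cluster might look like this (a sketch assembled from the keys above; the S3 and cloud-credential settings live elsewhere in the provisioning config and are not shown):

```yaml
snapshotInput:
  snapshotRestore: "none"
  recurringRestores: 1
  replaceRoles:
    etcd: true
    controlPlane: false
    worker: false
```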
8 changes: 4 additions & 4 deletions tests/v2/validation/snapshot/snapshot.go
@@ -171,8 +171,8 @@ func snapshotRKE1(t *testing.T, client *rancher.Client, podTemplate corev1.PodTe
 	cluster, err := client.Management.Cluster.ByID(clusterID)
 	require.NoError(t, err)
 
-	if etcdRestore.ReplaceWorkerNode {
-		scaling.ReplaceRKE1Nodes(t, client, clusterName, false, false, true)
+	if etcdRestore.ReplaceRoles != nil && cluster.RancherKubernetesEngineConfig.Services.Etcd.BackupConfig.S3BackupConfig != nil {
+		scaling.ReplaceRKE1Nodes(t, client, clusterName, etcdRestore.ReplaceRoles.Etcd, etcdRestore.ReplaceRoles.ControlPlane, etcdRestore.ReplaceRoles.Worker)
 	}
 
 	podErrors := pods.StatusPods(client, clusterID)
@@ -276,8 +276,8 @@ func snapshotV2Prov(t *testing.T, client *rancher.Client, podTemplate corev1.Pod
 	cluster, _, err := clusters.GetProvisioningClusterByName(client, clusterName, namespace)
 	require.NoError(t, err)
 
-	if etcdRestore.ReplaceWorkerNode {
-		scaling.ReplaceNodes(t, client, clusterName, false, false, true)
+	if etcdRestore.ReplaceRoles != nil && cluster.Spec.RKEConfig.ETCD.S3 != nil {
+		scaling.ReplaceNodes(t, client, clusterName, etcdRestore.ReplaceRoles.Etcd, etcdRestore.ReplaceRoles.ControlPlane, etcdRestore.ReplaceRoles.Worker)
 	}
 
 	podErrors := pods.StatusPods(client, clusterID)
40 changes: 25 additions & 15 deletions tests/v2/validation/snapshot/snapshot_additional_test.go
@@ -43,36 +43,42 @@ func (s *SnapshotAdditionalTestsTestSuite) SetupSuite() {
 	s.client = client
 }
 
-func (s *SnapshotAdditionalTestsTestSuite) TestSnapshotReplaceWorkerNode() {
-	snapshotRestoreAll := &etcdsnapshot.Config{
+func (s *SnapshotAdditionalTestsTestSuite) TestSnapshotReplaceNodes() {
+	controlPlaneSnapshotRestore := &etcdsnapshot.Config{
 		UpgradeKubernetesVersion: "",
-		SnapshotRestore:          "all",
+		SnapshotRestore:          "none",
 		RecurringRestores:        1,
-		ReplaceWorkerNode:        true,
+		ReplaceRoles: &etcdsnapshot.ReplaceRoles{
+			ControlPlane: true,
+		},
 	}
 
-	snapshotRestoreK8sVersion := &etcdsnapshot.Config{
+	etcdSnapshotRestore := &etcdsnapshot.Config{
 		UpgradeKubernetesVersion: "",
-		SnapshotRestore:          "kubernetesVersion",
+		SnapshotRestore:          "none",
 		RecurringRestores:        1,
-		ReplaceWorkerNode:        true,
+		ReplaceRoles: &etcdsnapshot.ReplaceRoles{
+			Etcd: true,
+		},
 	}
 
-	snapshotRestoreNone := &etcdsnapshot.Config{
+	workerSnapshotRestore := &etcdsnapshot.Config{
 		UpgradeKubernetesVersion: "",
 		SnapshotRestore:          "none",
 		RecurringRestores:        1,
-		ReplaceWorkerNode:        true,
+		ReplaceRoles: &etcdsnapshot.ReplaceRoles{
+			Worker: true,
+		},
 	}
 
 	tests := []struct {
 		name         string
 		etcdSnapshot *etcdsnapshot.Config
 		client       *rancher.Client
 	}{
-		{"Replace worker nodes and restore cluster config, Kubernetes version and etcd", snapshotRestoreAll, s.client},
-		{"Replace worker nodes and restore Kubernetes version and etcd", snapshotRestoreK8sVersion, s.client},
-		{"Replace worker nodes and restore etcd only", snapshotRestoreNone, s.client},
+		{"Replace control plane nodes", controlPlaneSnapshotRestore, s.client},
+		{"Replace etcd nodes", etcdSnapshotRestore, s.client},
+		{"Replace worker nodes", workerSnapshotRestore, s.client},
 	}
 
 	for _, tt := range tests {
@@ -113,9 +119,13 @@ func (s *SnapshotAdditionalTestsTestSuite) TestSnapshotReplaceWorkerNode() {
 			}
 		}
 
-		s.Run(tt.name, func() {
-			snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot, containerImage)
-		})
+		if strings.Contains(tt.name, "S3") {
+			s.Run(tt.name, func() {
+				snapshotRestore(s.T(), s.client, s.client.RancherConfig.ClusterName, tt.etcdSnapshot, containerImage)
+			})
+		} else {
+			s.T().Skip("Skipping test; only S3 enabled clusters are enabled for this test")
+		}
 	}
 }
 