Merge pull request #10675 from fabriziopandini/release-1.7-use-kind-for-clusterctl-tests

[release-1.7] 🌱 Use kind as a secondary management cluster for clusterctl E2E tests
k8s-ci-robot authored May 24, 2024
2 parents 68bf7ab + 5b0b83b commit 3925333
Showing 4 changed files with 151 additions and 84 deletions.
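
The heart of the change is a single opt-in flag on the spec input. Based on the test updates in this diff, a spec enables the kind-backed management cluster roughly like this (a minimal sketch; harness variables such as e2eConfig and bootstrapClusterProxy are placeholders, not part of this commit):

// Minimal sketch: opting into a kind-based management cluster in a
// clusterctl upgrade spec. Only ClusterctlUpgradeSpec, its input type,
// and the UseKindForManagementCluster field come from this commit.
ClusterctlUpgradeSpec(ctx, func() ClusterctlUpgradeSpecInput {
	return ClusterctlUpgradeSpecInput{
		E2EConfig:             e2eConfig,
		ClusterctlConfigPath:  clusterctlConfigPath,
		BootstrapClusterProxy: bootstrapClusterProxy,
		ArtifactFolder:        artifactFolder,
		// New in this PR: create the management cluster with kind instead
		// of provisioning it through the infrastructure provider.
		UseKindForManagementCluster: true,
	}
})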
157 changes: 103 additions & 54 deletions test/e2e/clusterctl_upgrade.go
@@ -67,6 +67,12 @@ type ClusterctlUpgradeSpecInput struct {
ClusterctlConfigPath string
BootstrapClusterProxy framework.ClusterProxy
ArtifactFolder string

// UseKindForManagementCluster instructs the test to use kind for creating the management cluster (instead of using the actual infrastructure provider).
// NOTE: given that the bootstrap cluster could be shared by several tests, it is not practical to use it for testing clusterctl upgrades.
// So we create a new management cluster in which to install older versions of providers.
UseKindForManagementCluster bool

// InitWithBinary can be used to override the INIT_WITH_BINARY e2e config variable with the URL of the clusterctl binary of the old version of Cluster API. The spec will interpolate the
// strings `{OS}` and `{ARCH}` to `runtime.GOOS` and `runtime.GOARCH` respectively, e.g. https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/clusterctl-{OS}-{ARCH}
InitWithBinary string
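
The `{OS}`/`{ARCH}` interpolation documented above is a plain string substitution against `runtime.GOOS` and `runtime.GOARCH`. A self-contained sketch of that documented behavior (the helper name is hypothetical, not the one the spec uses internally):

package main

import (
	"fmt"
	"runtime"
	"strings"
)

// interpolateBinaryURL mirrors the documented substitution of {OS} and
// {ARCH} in InitWithBinary URLs; the real spec may implement it differently.
func interpolateBinaryURL(url string) string {
	return strings.NewReplacer("{OS}", runtime.GOOS, "{ARCH}", runtime.GOARCH).Replace(url)
}

func main() {
	fmt.Println(interpolateBinaryURL("https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/clusterctl-{OS}-{ARCH}"))
	// On linux/amd64 this prints:
	// https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.23/clusterctl-linux-amd64
}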
@@ -195,16 +201,22 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
managementClusterNamespace *corev1.Namespace
managementClusterCancelWatches context.CancelFunc
managementClusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
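// managementClusterProvider is only set when UseKindForManagementCluster
// is true; it owns the kind cluster and is disposed during cleanup.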
managementClusterProvider bootstrap.ClusterProvider
managementClusterProxy framework.ClusterProxy

initClusterctlBinaryURL string
initContract string
initKubernetesVersion string

workloadClusterName string

scheme *apiruntime.Scheme
)

BeforeEach(func() {
scheme = apiruntime.NewScheme()
framework.TryAddDefaultSchemes(scheme)
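// The scheme is used below to build a ClusterProxy for the kind-based management cluster.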

Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
input = inputGetter()
Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
@@ -251,73 +263,104 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion))
Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)

// Set up a Namespace to host objects for this spec and create a watcher for the namespace events.
managementClusterNamespace, managementClusterCancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
// If the test is not being run in a separate kind cluster, set up a Namespace in the current bootstrap cluster to host objects for this spec and create a watcher for the namespace events.
if !input.UseKindForManagementCluster {
managementClusterNamespace, managementClusterCancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated)
}
managementClusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
})

It("Should create a management cluster and then upgrade all the providers", func() {
By("Creating a workload cluster to be used as a new management cluster")
// NOTE: given that the bootstrap cluster could be shared by several tests, it is not practical to use it for testing clusterctl upgrades.
// So we create a workload cluster to be used as a new management cluster in which to install older versions of providers.
infrastructureProvider := clusterctl.DefaultInfrastructureProvider
if input.InfrastructureProvider != nil {
infrastructureProvider = *input.InfrastructureProvider
}
managementClusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
ClusterctlConfigPath: input.ClusterctlConfigPath,
KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: infrastructureProvider,
Flavor: input.MgmtFlavor,
Namespace: managementClusterNamespace.Name,
ClusterName: managementClusterName,
KubernetesVersion: initKubernetesVersion,
ControlPlaneMachineCount: ptr.To[int64](1),
WorkerMachineCount: ptr.To[int64](1),
},
PreWaitForCluster: func() {
if input.PreWaitForCluster != nil {
input.PreWaitForCluster(input.BootstrapClusterProxy, managementClusterNamespace.Name, managementClusterName)
}
},
CNIManifestPath: input.CNIManifestPath,
ControlPlaneWaiters: input.ControlPlaneWaiters,
WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
}, managementClusterResources)

By("Turning the workload cluster into a management cluster with older versions of providers")

// If the cluster is a DockerCluster, we should load controller images into the nodes.
// Nb. this could also be achieved by changing the DockerMachine spec, but for the time being we use
// this approach because it keeps a single source of truth for images: the e2e config.
// Nb. the images for official versions of the providers will be pulled from the internet, but the latest images must be
// built locally and loaded into kind.
cluster := managementClusterResources.Cluster
if cluster.Spec.InfrastructureRef.Kind == "DockerCluster" {
Expect(bootstrap.LoadImagesToKindCluster(ctx, bootstrap.LoadImagesToKindClusterInput{
Name: cluster.Name,
Images: input.E2EConfig.Images,
})).To(Succeed())
// NOTE: given that the bootstrap cluster could be shared by several tests, it is not practical to use it for testing clusterctl upgrades.
// So we create a workload cluster to be used as a new management cluster in which to install older versions of providers.
managementClusterName = fmt.Sprintf("%s-management-%s", specName, util.RandomString(6))
managementClusterLogFolder := filepath.Join(input.ArtifactFolder, "clusters", managementClusterName)
if input.UseKindForManagementCluster {
By("Creating a kind cluster to be used as a new management cluster")

managementClusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(ctx, bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{
Name: managementClusterName,
KubernetesVersion: initKubernetesVersion,
RequiresDockerSock: input.E2EConfig.HasDockerProvider(),
// Note: most of these images won't be used when starting the controllers, because this cluster spins up older versions of CAPI; they will eventually be used when upgrading to current.
Images: input.E2EConfig.Images,
IPFamily: input.E2EConfig.GetVariable(IPFamily),
LogFolder: filepath.Join(managementClusterLogFolder, "logs-kind"),
})
Expect(managementClusterProvider).ToNot(BeNil(), "Failed to create a kind cluster")

kubeconfigPath := managementClusterProvider.GetKubeconfigPath()
Expect(kubeconfigPath).To(BeAnExistingFile(), "Failed to get the kubeconfig file for the kind cluster")

managementClusterProxy = framework.NewClusterProxy(managementClusterName, kubeconfigPath, scheme)
Expect(managementClusterProxy).ToNot(BeNil(), "Failed to get a kind cluster proxy")

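// NOTE: with kind there is no Cluster API Cluster object backing the
// management cluster, so a minimal stub carrying just the cluster name
// is created for the parts of the spec that read managementClusterResources.Cluster.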
managementClusterResources.Cluster = &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: managementClusterName,
},
}
} else {
By("Creating a workload cluster to be used as a new management cluster")

clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
ClusterctlConfigPath: input.ClusterctlConfigPath,
KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: infrastructureProvider,
Flavor: input.MgmtFlavor,
Namespace: managementClusterNamespace.Name,
ClusterName: managementClusterName,
KubernetesVersion: initKubernetesVersion,
ControlPlaneMachineCount: ptr.To[int64](1),
WorkerMachineCount: ptr.To[int64](1),
},
PreWaitForCluster: func() {
if input.PreWaitForCluster != nil {
input.PreWaitForCluster(input.BootstrapClusterProxy, managementClusterNamespace.Name, managementClusterName)
}
},
CNIManifestPath: input.CNIManifestPath,
ControlPlaneWaiters: input.ControlPlaneWaiters,
WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
}, managementClusterResources)

// If the cluster is a DockerCluster, we should load controller images into the nodes.
// Nb. this could also be achieved by changing the DockerMachine spec, but for the time being we use
// this approach because it keeps a single source of truth for images: the e2e config.
// Nb. the images for official versions of the providers will be pulled from the internet, but the latest images must be
// built locally and loaded into kind.
cluster := managementClusterResources.Cluster
if cluster.Spec.InfrastructureRef.Kind == "DockerCluster" {
Expect(bootstrap.LoadImagesToKindCluster(ctx, bootstrap.LoadImagesToKindClusterInput{
Name: cluster.Name,
Images: input.E2EConfig.Images,
})).To(Succeed())
}

// Get a ClusterProxy so we can interact with the workload cluster
managementClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name, framework.WithMachineLogCollector(input.BootstrapClusterProxy.GetLogCollector()))
}

// Get a ClusterProxy so we can interact with the workload cluster
managementClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name, framework.WithMachineLogCollector(input.BootstrapClusterProxy.GetLogCollector()))
By("Turning the new cluster into a management cluster with older versions of providers")

// Download the clusterctl version that should be used to initially set up the management cluster (which is later upgraded).
Byf("Downloading clusterctl binary from %s", initClusterctlBinaryURL)
clusterctlBinaryPath, clusterctlConfigPath := setupClusterctl(ctx, initClusterctlBinaryURL, input.ClusterctlConfigPath)
defer os.Remove(clusterctlBinaryPath) // clean up

By("Initializing the workload cluster with older versions of providers")
By("Initializing the new management cluster with older versions of providers")

if input.PreInit != nil {
By("Running Pre-init steps against the management cluster")
By("Running Pre-init steps against the new management cluster")
input.PreInit(managementClusterProxy)
}

@@ -360,7 +403,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
IPAMProviders: ipamProviders,
RuntimeExtensionProviders: runtimeExtensionProviders,
AddonProviders: addonProviders,
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
LogFolder: managementClusterLogFolder,
}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)

By("THE MANAGEMENT CLUSTER WITH THE OLDER VERSION OF PROVIDERS IS UP&RUNNING!")
@@ -384,7 +427,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
// In this case ApplyClusterTemplateAndWait can't be used because this helper is linked to the last version of the API;
// so we are getting a template using the downloaded version of clusterctl, applying it, and wait for machines to be provisioned.

workloadClusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
workloadClusterName = fmt.Sprintf("%s-workload-%s", specName, util.RandomString(6))
workloadClusterNamespace := testNamespace.Name
kubernetesVersion := input.WorkloadKubernetesVersion
if kubernetesVersion == "" {
@@ -552,7 +595,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
IPAMProviders: upgrade.IPAMProviders,
RuntimeExtensionProviders: upgrade.RuntimeExtensionProviders,
AddonProviders: upgrade.AddonProviders,
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
LogFolder: managementClusterLogFolder,
}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)
} else {
Byf("[%d] Upgrading providers to the latest version available", i)
@@ -562,7 +605,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
ClusterctlVariables: input.UpgradeClusterctlVariables,
ClusterProxy: managementClusterProxy,
Contract: upgrade.Contract,
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
LogFolder: managementClusterLogFolder,
}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)
}

@@ -714,8 +757,14 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
By("Running PreCleanupManagementCluster steps against the management cluster")
input.PreCleanupManagementCluster(managementClusterProxy)
}

// Dump all the resources in the spec namespace, then clean up the cluster object and the spec namespace itself.
framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, managementClusterNamespace, managementClusterCancelWatches, managementClusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
if input.UseKindForManagementCluster {
managementClusterProxy.Dispose(ctx)
managementClusterProvider.Dispose(ctx)
} else {
framework.DumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, managementClusterNamespace, managementClusterCancelWatches, managementClusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
}
})
}

59 changes: 33 additions & 26 deletions test/e2e/clusterctl_upgrade_test.go
@@ -104,8 +104,9 @@ var _ = Describe("When testing clusterctl upgrades (v0.3=>v1.5=>current)", func(
UpgradeClusterctlVariables: map[string]string{
"CLUSTER_TOPOLOGY": "false",
},
MgmtFlavor: "topology",
WorkloadFlavor: "",
MgmtFlavor: "topology",
WorkloadFlavor: "",
UseKindForManagementCluster: true,
}
})
})
@@ -165,10 +166,11 @@ var _ = Describe("When testing clusterctl upgrades (v0.4=>v1.6=>current)", func(
},
// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/data/infrastructure-docker/v0.4/bases.
// Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version.
InitWithKubernetesVersion: "v1.23.17",
WorkloadKubernetesVersion: "v1.23.17",
MgmtFlavor: "topology",
WorkloadFlavor: "",
InitWithKubernetesVersion: "v1.23.17",
WorkloadKubernetesVersion: "v1.23.17",
MgmtFlavor: "topology",
WorkloadFlavor: "",
UseKindForManagementCluster: true,
}
})
})
@@ -199,10 +201,11 @@ var _ = Describe("When testing clusterctl upgrades (v1.0=>current)", func() {
InitWithRuntimeExtensionProviders: []string{},
// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/data/infrastructure-docker/v1.0/bases.
// Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version.
InitWithKubernetesVersion: "v1.23.17",
WorkloadKubernetesVersion: "v1.23.17",
MgmtFlavor: "topology",
WorkloadFlavor: "",
InitWithKubernetesVersion: "v1.23.17",
WorkloadKubernetesVersion: "v1.23.17",
MgmtFlavor: "topology",
WorkloadFlavor: "",
UseKindForManagementCluster: true,
}
})
})
@@ -229,10 +232,11 @@ var _ = Describe("When testing clusterctl upgrades (v1.5=>current)", func() {
InitWithInfrastructureProviders: []string{fmt.Sprintf(providerDockerPrefix, stableRelease)},
InitWithProvidersContract: "v1beta1",
// Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version.
InitWithKubernetesVersion: "v1.28.0",
WorkloadKubernetesVersion: "v1.28.0",
MgmtFlavor: "topology",
WorkloadFlavor: "",
InitWithKubernetesVersion: "v1.28.0",
WorkloadKubernetesVersion: "v1.28.0",
MgmtFlavor: "topology",
WorkloadFlavor: "",
UseKindForManagementCluster: true,
}
})
})
@@ -259,10 +263,11 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.5=>cur
InitWithInfrastructureProviders: []string{fmt.Sprintf(providerDockerPrefix, stableRelease)},
InitWithProvidersContract: "v1beta1",
// Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version.
InitWithKubernetesVersion: "v1.28.0",
WorkloadKubernetesVersion: "v1.28.0",
MgmtFlavor: "topology",
WorkloadFlavor: "topology",
InitWithKubernetesVersion: "v1.28.0",
WorkloadKubernetesVersion: "v1.28.0",
MgmtFlavor: "topology",
WorkloadFlavor: "topology",
UseKindForManagementCluster: true,
}
})
})
@@ -283,10 +288,11 @@ var _ = Describe("When testing clusterctl upgrades (v1.6=>current)", func() {
InitWithBinary: fmt.Sprintf(clusterctlDownloadURL, stableRelease),
InitWithProvidersContract: "v1beta1",
// Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version.
InitWithKubernetesVersion: "v1.29.2",
WorkloadKubernetesVersion: "v1.29.2",
MgmtFlavor: "topology",
WorkloadFlavor: "",
InitWithKubernetesVersion: "v1.29.2",
WorkloadKubernetesVersion: "v1.29.2",
MgmtFlavor: "topology",
WorkloadFlavor: "",
UseKindForManagementCluster: true,
}
})
})
@@ -307,10 +313,11 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.6=>cur
InitWithBinary: fmt.Sprintf(clusterctlDownloadURL, stableRelease),
InitWithProvidersContract: "v1beta1",
// Note: Both InitWithKubernetesVersion and WorkloadKubernetesVersion should be the highest mgmt cluster version supported by the source Cluster API version.
InitWithKubernetesVersion: "v1.29.2",
WorkloadKubernetesVersion: "v1.29.2",
MgmtFlavor: "topology",
WorkloadFlavor: "topology",
InitWithKubernetesVersion: "v1.29.2",
WorkloadKubernetesVersion: "v1.29.2",
MgmtFlavor: "topology",
WorkloadFlavor: "topology",
UseKindForManagementCluster: true,
}
})
})
