
Commit

Simplify context handling.
friedrichwilken committed Dec 8, 2023
1 parent e4c497b commit 0ab82ba
Showing 1 changed file with 23 additions and 36 deletions.
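The commit applies one pattern throughout the file: drop the locally declared ctx := context.TODO() variable (and the ctx parameter on the file's small helpers, getNATSCR and getDeployment) and call context.TODO() inline at each call site. Below is a minimal, self-contained sketch of that before/after shape; the function names are illustrative stand-ins, not code from this repository.

```go
package main

import (
	"context"
	"fmt"
)

// fetchWithCtx mirrors the pre-commit helper shape: the caller declares a
// context and threads it through, as getNATSCR and getDeployment used to.
func fetchWithCtx(ctx context.Context, name string) (string, error) {
	_ = ctx // a real helper would forward ctx, e.g. k8sClient.Get(ctx, ...)
	return "nats/" + name, nil
}

// fetch mirrors the post-commit shape: context.TODO() is created inline,
// so call sites no longer declare or pass a ctx variable.
func fetch(name string) (string, error) {
	_ = context.TODO() // stands in for k8sClient.Get(context.TODO(), ...)
	return "nats/" + name, nil
}

func main() {
	// Before: one ctx variable per test, passed to every call.
	ctx := context.TODO()
	before, _ := fetchWithCtx(ctx, "eventing-nats")

	// After: no ctx plumbing in the test body.
	after, _ := fetch("eventing-nats")

	fmt.Println(before, after)
}
```

Since context.TODO() is an empty, never-cancelled context, creating it inline at each call site behaves the same as the previous shared variable; the tests only lose the plumbing, not any cancellation behaviour.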
59 changes: 23 additions & 36 deletions e2e/setup/setup_test.go
@@ -1,6 +1,3 @@
-//go:build e2e
-// +build e2e

// Package setup_test is part of the end-to-end-tests. This package contains tests that evaluate the creation of a
// NATS-server CR and the creation of all correlated Kubernetes resources.
// To run the tests a Kubernetes cluster and a NATS-CR need to be available and configured. For this reason, the tests
@@ -59,11 +56,10 @@ func TestMain(m *testing.M) {
logger.Fatal(err.Error())
}

-ctx := context.TODO()
// Create the Namespace used for testing.
err = Retry(attempts, interval, func() error {
// It's fine if the Namespace already exists.
-return client.IgnoreAlreadyExists(k8sClient.Create(ctx, Namespace()))
+return client.IgnoreAlreadyExists(k8sClient.Create(context.TODO(), Namespace()))
})
if err != nil {
logger.Fatal(err.Error())
@@ -85,7 +81,7 @@ func TestMain(m *testing.M) {

// Create the NATS CR used for testing.
err = Retry(attempts, interval, func() error {
-errNATS := k8sClient.Create(ctx, NATSCR())
+errNATS := k8sClient.Create(context.TODO(), NATSCR())
if k8serrors.IsAlreadyExists(errNATS) {
logger.Warn(
"error while creating NATS CR, resource already exist; test will continue with existing NATS CR",
@@ -98,7 +94,7 @@ func TestMain(m *testing.M) {
logger.Fatal(err.Error())
}

-// wait for an interval for reconciliation to update status.
+// Wait for an interval for reconciliation to update status.
time.Sleep(interval)

// Wait for NATS CR to get ready.
@@ -115,10 +111,9 @@ func TestMain(m *testing.M) {
func Test_CR(t *testing.T) {
want := NATSCR()

-ctx := context.TODO()
// Get the NATS CR from the cluster.
actual, err := RetryGet(attempts, interval, func() (*natsv1alpha1.NATS, error) {
-return getNATSCR(ctx, want.Name, want.Namespace)
+return getNATSCR(want.Name, want.Namespace)
})
require.NoError(t, err)

@@ -131,10 +126,8 @@ func Test_CR(t *testing.T) {
// Test_PriorityClass will get the PriorityClass name from the StatefulSet and checks if a PriorityClass with that
// name exists in the cluster.
func Test_PriorityClass(t *testing.T) {
-ctx := context.TODO()

err := Retry(attempts, interval, func() error {
-sts, stsErr := clientSet.AppsV1().StatefulSets(NamespaceName).Get(ctx, STSName, metav1.GetOptions{})
+sts, stsErr := clientSet.AppsV1().StatefulSets(NamespaceName).Get(context.TODO(), STSName, metav1.GetOptions{})
if stsErr != nil {
return stsErr
}
@@ -149,7 +142,7 @@ func Test_PriorityClass(t *testing.T) {
return fmt.Errorf("PriorityClassName was expected to be %s but was %s", PriorityClassName, pcName)
}

-_, pcErr := clientSet.SchedulingV1().PriorityClasses().Get(ctx, pcName, metav1.GetOptions{})
+_, pcErr := clientSet.SchedulingV1().PriorityClasses().Get(context.TODO(), pcName, metav1.GetOptions{})
return pcErr
})

@@ -158,10 +151,8 @@ func Test_PriorityClass(t *testing.T) {

// Test_ConfigMap tests the ConfigMap that the NATS-Manager creates when we define a CR.
func Test_ConfigMap(t *testing.T) {
-ctx := context.TODO()

err := Retry(attempts, interval, func() error {
-cm, cmErr := clientSet.CoreV1().ConfigMaps(NamespaceName).Get(ctx, CMName, metav1.GetOptions{})
+cm, cmErr := clientSet.CoreV1().ConfigMaps(NamespaceName).Get(context.TODO(), CMName, metav1.GetOptions{})
if cmErr != nil {
return cmErr
}
@@ -195,11 +186,10 @@ func Test_ConfigMap(t *testing.T) {
func Test_PodsResources(t *testing.T) {
t.Parallel()

-ctx := context.TODO()
// RetryGet the NATS Pods and test them.
err := Retry(attempts, interval, func() error {
// RetryGet the NATS Pods via labels.
-pods, err := clientSet.CoreV1().Pods(NamespaceName).List(ctx, PodListOpts())
+pods, err := clientSet.CoreV1().Pods(NamespaceName).List(context.TODO(), PodListOpts())
if err != nil {
return err
}
@@ -251,18 +241,17 @@ func Test_PodsResources(t *testing.T) {
func Test_PodsReady(t *testing.T) {
t.Parallel()

-ctx := context.TODO()
// RetryGet the NATS CR. It will tell us how many Pods we should expect.
natsCR, err := RetryGet(attempts, interval, func() (*natsv1alpha1.NATS, error) {
-return getNATSCR(ctx, CRName, NamespaceName)
+return getNATSCR(CRName, NamespaceName)
})
require.NoError(t, err)

// RetryGet the NATS Pods and test them.
err = Retry(attempts, interval, func() error {
var pods *v1.PodList
// RetryGet the NATS Pods via labels.
-pods, err = clientSet.CoreV1().Pods(NamespaceName).List(ctx, PodListOpts())
+pods, err = clientSet.CoreV1().Pods(NamespaceName).List(context.TODO(), PodListOpts())
if err != nil {
return err
}
@@ -306,13 +295,12 @@ func Test_PVCs(t *testing.T) {
t.Parallel()

// Get the PersistentVolumeClaims --PVCs-- and test them.
-ctx := context.TODO()
var pvcs *v1.PersistentVolumeClaimList
err := Retry(attempts, interval, func() error {
// RetryGet PVCs via a label.
var err error
pvcs, err = RetryGet(attempts, interval, func() (*v1.PersistentVolumeClaimList, error) {
-return clientSet.CoreV1().PersistentVolumeClaims(NamespaceName).List(ctx, PVCListOpts())
+return clientSet.CoreV1().PersistentVolumeClaims(NamespaceName).List(context.TODO(), PVCListOpts())
})
if err != nil {
return err
@@ -340,9 +328,9 @@
// Test_Secret tests if the Secret was created.
func Test_Secret(t *testing.T) {
t.Parallel()
-ctx := context.TODO()

err := Retry(attempts, interval, func() error {
-_, secErr := clientSet.CoreV1().Secrets(NamespaceName).Get(ctx, SecretName, metav1.GetOptions{})
+_, secErr := clientSet.CoreV1().Secrets(NamespaceName).Get(context.TODO(), SecretName, metav1.GetOptions{})
if secErr != nil {
return secErr
}
@@ -351,17 +339,17 @@ func Test_Secret(t *testing.T) {
require.NoError(t, err)
}

-func getNATSCR(ctx context.Context, name, namespace string) (*natsv1alpha1.NATS, error) {
+func getNATSCR(name, namespace string) (*natsv1alpha1.NATS, error) {
var natsCR natsv1alpha1.NATS
-err := k8sClient.Get(ctx, k8stypes.NamespacedName{
+err := k8sClient.Get(context.TODO(), k8stypes.NamespacedName{
Name: name,
Namespace: namespace,
}, &natsCR)
return &natsCR, err
}

-func getDeployment(ctx context.Context, name, namespace string) (*appsv1.Deployment, error) {
-return clientSet.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
+func getDeployment(name, namespace string) (*appsv1.Deployment, error) {
+return clientSet.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}

func cmToMap(cm string) map[string]string {
@@ -402,10 +390,9 @@ func waitForNATSCRReady() error {
logger.Debug(fmt.Sprintf("waiting for NATS CR to get ready. "+
"CR name: %s, namespace: %s", want.Name, want.Namespace))

-ctx := context.TODO()
// Get the NATS CR from the cluster.
gotNATSCR, err := RetryGet(attempts, interval, func() (*natsv1alpha1.NATS, error) {
-return getNATSCR(ctx, want.Name, want.Namespace)
+return getNATSCR(want.Name, want.Namespace)
})
if err != nil {
return err
@@ -426,19 +413,19 @@

// Wait for NATS-manager deployment to get ready with correct image.
func waitForNATSManagerDeploymentReady(image string) error {
-// RetryGet the NATS Manager and test status.
+// Retry to get the NATS Manager and test status.
return Retry(attempts, interval, func() error {
logger.Debug(fmt.Sprintf("waiting for nats-manager deployment to get ready with image: %s", image))
-ctx := context.TODO()

// Get the NATS-manager deployment from the cluster.
gotDeployment, err := RetryGet(attempts, interval, func() (*appsv1.Deployment, error) {
-return getDeployment(ctx, ManagerDeploymentName, NamespaceName)
+return getDeployment(ManagerDeploymentName, NamespaceName)
})
if err != nil {
return err
}

-// if image is provided, then check if the deployment has correct image.
+// If image is provided, then check if the deployment has correct image.
if image != "" && gotDeployment.Spec.Template.Spec.Containers[0].Image != image {
err := fmt.Errorf("expected NATS-manager image to be: %s, but found: %s", image,
gotDeployment.Spec.Template.Spec.Containers[0].Image,
@@ -447,7 +434,7 @@ func waitForNATSManagerDeploymentReady(image string) error {
return err
}

-// check if the deployment is ready.
+// Check if the deployment is ready.
if *gotDeployment.Spec.Replicas != gotDeployment.Status.UpdatedReplicas ||
*gotDeployment.Spec.Replicas != gotDeployment.Status.ReadyReplicas ||
*gotDeployment.Spec.Replicas != gotDeployment.Status.AvailableReplicas {
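The tests above poll the cluster through two helpers, Retry(attempts, interval, func() error) and RetryGet(attempts, interval, func() (T, error)), whose implementation is not part of this diff. A plausible sketch, assuming a generic RetryGet and a time.Duration interval, is shown below; the repository's actual helpers may differ.

```go
package testutils

import (
	"fmt"
	"time"
)

// Retry runs fn up to attempts times, sleeping interval between tries,
// and returns the last error if every attempt fails.
func Retry(attempts int, interval time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("all %d attempts failed: %w", attempts, err)
}

// RetryGet is the value-returning variant: it retries fn until it succeeds
// and hands back the fetched object, or the last error otherwise.
func RetryGet[T any](attempts int, interval time.Duration, fn func() (T, error)) (T, error) {
	var obj T
	var err error
	for i := 0; i < attempts; i++ {
		if obj, err = fn(); err == nil {
			return obj, nil
		}
		time.Sleep(interval)
	}
	return obj, fmt.Errorf("all %d attempts failed: %w", attempts, err)
}
```

Under this shape, the RetryGet call in Test_CR instantiates T as *natsv1alpha1.NATS, and the PVC lookup in Test_PVCs instantiates it as *v1.PersistentVolumeClaimList.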
