Refactor e2e test #243

Closed
18 changes: 6 additions & 12 deletions e2e/cleanup/cleanup_test.go
@@ -58,9 +58,8 @@ func TestMain(m *testing.M) {
     }
 
     // Delete the NATS CR.
-    ctx := context.TODO()
     err = Retry(attempts, interval, func() error {
-        errDel := k8sClient.Delete(ctx, NATSCR())
+        errDel := k8sClient.Delete(context.TODO(), NATSCR())
         // If it is gone already, that's fine too.
         if k8serrors.IsNotFound(errDel) {
             return nil
@@ -80,10 +79,9 @@ func TestMain(m *testing.M) {
 func Test_NoPodsExists(t *testing.T) {
     t.Parallel()
 
-    ctx := context.TODO()
     err := Retry(attempts, interval, func() error {
         // Try to get the Pods.
-        pods, podErr := clientSet.CoreV1().Pods(NamespaceName).List(ctx, PodListOpts())
+        pods, podErr := clientSet.CoreV1().Pods(NamespaceName).List(context.TODO(), PodListOpts())
         if podErr != nil {
             return podErr
         }
@@ -101,10 +99,9 @@ func Test_NoPodsExists(t *testing.T) {
 func Test_NoPVCsExists(t *testing.T) {
     t.Parallel()
 
-    ctx := context.TODO()
     err := Retry(attempts, interval, func() error {
         // Try to get the PVCs.
-        pvcs, pvcErr := clientSet.CoreV1().PersistentVolumeClaims(NamespaceName).List(ctx, PVCListOpts())
+        pvcs, pvcErr := clientSet.CoreV1().PersistentVolumeClaims(NamespaceName).List(context.TODO(), PVCListOpts())
         if pvcErr != nil {
             return pvcErr
         }
@@ -122,10 +119,9 @@ func Test_NoPVCsExists(t *testing.T) {
 func Test_NoSTSExists(t *testing.T) {
     t.Parallel()
 
-    ctx := context.TODO()
     err := Retry(attempts, interval, func() error {
         // Try to get the STS.
-        _, stsErr := clientSet.AppsV1().StatefulSets(NamespaceName).Get(ctx, STSName, v1.GetOptions{})
+        _, stsErr := clientSet.AppsV1().StatefulSets(NamespaceName).Get(context.TODO(), STSName, v1.GetOptions{})
         // This is what we want here.
         if k8serrors.IsNotFound(stsErr) {
             return nil
@@ -143,9 +139,8 @@ func Test_NoSTSExists(t *testing.T) {
 func Test_NoNATSSecretExists(t *testing.T) {
     t.Parallel()
 
-    ctx := context.TODO()
     err := Retry(attempts, interval, func() error {
-        _, secErr := clientSet.CoreV1().Secrets(NamespaceName).Get(ctx, SecretName, v1.GetOptions{})
+        _, secErr := clientSet.CoreV1().Secrets(NamespaceName).Get(context.TODO(), SecretName, v1.GetOptions{})
         // This is what we want here.
         if k8serrors.IsNotFound(secErr) {
             return nil
@@ -163,9 +158,8 @@ func Test_NoNATSSecretExists(t *testing.T) {
 func Test_NoNATSCRExists(t *testing.T) {
     t.Parallel()
 
-    ctx := context.TODO()
     err := Retry(attempts, interval, func() error {
-        _, crErr := getNATSCR(ctx, CRName, NamespaceName)
+        _, crErr := getNATSCR(context.TODO(), CRName, NamespaceName)
         // This is what we want here.
         if k8serrors.IsNotFound(crErr) {
             return nil
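
All five cleanup tests above share one shape: poll the cluster with Retry and treat a NotFound error as success, since a missing resource is exactly what cleanup is supposed to produce. Below is a minimal, self-contained sketch of that pattern; the deleteCR stub and the errNotFound sentinel are invented for illustration and stand in for k8sClient.Delete and k8serrors.IsNotFound.

package main

import (
    "errors"
    "fmt"
    "time"
)

// errNotFound stands in for a typed Kubernetes NotFound error in this sketch.
var errNotFound = errors.New("not found")

func main() {
    calls := 0
    // deleteCR fakes k8sClient.Delete: it fails once, then reports NotFound.
    deleteCR := func() error {
        calls++
        if calls == 1 {
            return errors.New("conflict, try again")
        }
        return errNotFound
    }

    var err error
    for attempts := 3; attempts > 0; attempts-- {
        errDel := deleteCR()
        // If the resource is gone already, that's fine too.
        if errors.Is(errDel, errNotFound) {
            err = nil
            break
        }
        err = errDel
        time.Sleep(10 * time.Millisecond)
    }
    fmt.Println("cleanup result:", err) // prints: cleanup result: <nil>
}
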
32 changes: 16 additions & 16 deletions e2e/common/retry.go
@@ -8,31 +8,31 @@ func Retry(attempts int, interval time.Duration, fn func() error) error {
     ticker := time.NewTicker(interval)
     defer ticker.Stop()
     var err error
-    for { //nolint:gosimple//There is no range here.
-        select {
-        case <-ticker.C:
-            attempts--
-            err = fn()
-            if err == nil || attempts == 0 {
-                return err
-            }
-        }
-    }
+    for attempts > 0 {
+        <-ticker.C
+        attempts--
+        err = fn()
+        if err == nil {
+            return nil
+        }
+    }
+    return err
 }
 
 func RetryGet[T any](attempts int, interval time.Duration, fn func() (*T, error)) (*T, error) {
     ticker := time.NewTicker(interval)
     defer ticker.Stop()
     var err error
     var obj *T
-    for { //nolint:gosimple//There is no range here.
-        select {
-        case <-ticker.C:
-            attempts--
-            obj, err = fn()
-            if err == nil || attempts == 0 {
-                return obj, err
-            }
-        }
-    }
+    for attempts > 0 {
+        <-ticker.C
+        attempts--
+        obj, err = fn()
+        if err == nil {
+            return obj, nil
+        }
+    }
+    return obj, err
 }
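
The refactor above drops the select wrapper around the ticker (it only ever had one case, hence the old nolint:gosimple) in favor of a plain counted loop with the same contract: return nil as soon as fn succeeds, otherwise return the last error once the attempts are used up. In both versions the first call to fn happens only after the first tick, so a Retry always takes at least one interval. A short usage sketch follows; the helpers are copied from the new version above, and the flaky callbacks are invented for illustration.

package main

import (
    "errors"
    "fmt"
    "time"
)

// Retry and RetryGet are copied from e2e/common/retry.go (post-refactor)
// so that this sketch compiles on its own.
func Retry(attempts int, interval time.Duration, fn func() error) error {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    var err error
    for attempts > 0 {
        <-ticker.C
        attempts--
        err = fn()
        if err == nil {
            return nil
        }
    }
    return err
}

func RetryGet[T any](attempts int, interval time.Duration, fn func() (*T, error)) (*T, error) {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    var err error
    var obj *T
    for attempts > 0 {
        <-ticker.C
        attempts--
        obj, err = fn()
        if err == nil {
            return obj, nil
        }
    }
    return obj, err
}

func main() {
    // Succeeds on the third call; Retry returns nil as soon as fn does.
    n := 0
    err := Retry(5, 10*time.Millisecond, func() error {
        n++
        if n < 3 {
            return errors.New("not ready yet")
        }
        return nil
    })
    fmt.Println(n, err) // prints: 3 <nil>

    // Never succeeds; RetryGet hands back the last error after 2 attempts.
    _, err = RetryGet(2, 10*time.Millisecond, func() (*string, error) {
        return nil, errors.New("still failing")
    })
    fmt.Println(err) // prints: still failing
}
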
12 changes: 6 additions & 6 deletions e2e/scripts/natsbench.sh
@@ -8,12 +8,12 @@ sleep 1
 # This will kill all the port-forwarding and delete the stream. We need this to be in a function so we can even call
 # it, if our tests fail, since `set -e` would stop the script in case of a failing test.
 function cleanup() {
-    # Forcefully purge the stream.
-    nats stream purge benchstream -f
-    # Forcefully delete the stream.
-    nats stream rm benchstream -f
-    kill ${PID}
+    # Forcefully purge the stream.
+    nats stream purge benchstream -f
+    # Forcefully delete the stream.
+    nats stream rm benchstream -f
+
+    kill ${PID}
 }
 
 # This kills the port-forwards even if the test fails.
6 changes: 3 additions & 3 deletions e2e/scripts/natsserver.sh
@@ -12,9 +12,9 @@ PID3=$!
 # This will kill all the port-forwarding. We need this to be in a function so we can even call it, if our tests fail,
 # since `set -e` would stop the script in case of a failing test.
 function kill_port_forward() {
-  kill ${PID1}
-  kill ${PID2}
-  kill ${PID3}
+    kill ${PID1}
+    kill ${PID2}
+    kill ${PID3}
 }
 # This kills the port-forwards even if the test fails.
 trap kill_port_forward ERR
62 changes: 26 additions & 36 deletions e2e/setup/setup_test.go
@@ -59,11 +59,10 @@ func TestMain(m *testing.M) {
         logger.Fatal(err.Error())
     }
 
-    ctx := context.TODO()
     // Create the Namespace used for testing.
     err = Retry(attempts, interval, func() error {
         // It's fine if the Namespace already exists.
-        return client.IgnoreAlreadyExists(k8sClient.Create(ctx, Namespace()))
+        return client.IgnoreAlreadyExists(k8sClient.Create(context.TODO(), Namespace()))
     })
     if err != nil {
         logger.Fatal(err.Error())
@@ -85,7 +84,7 @@ func TestMain(m *testing.M) {

     // Create the NATS CR used for testing.
     err = Retry(attempts, interval, func() error {
-        errNATS := k8sClient.Create(ctx, NATSCR())
+        errNATS := k8sClient.Create(context.TODO(), NATSCR())
         if k8serrors.IsAlreadyExists(errNATS) {
             logger.Warn(
                 "error while creating NATS CR, resource already exists; test will continue with existing NATS CR",
@@ -98,7 +97,7 @@ func TestMain(m *testing.M) {
         logger.Fatal(err.Error())
     }
 
-    // wait for an interval for reconciliation to update status.
+    // Wait for an interval for reconciliation to update status.
     time.Sleep(interval)
 
     // Wait for NATS CR to get ready.
@@ -115,10 +114,9 @@ func TestMain(m *testing.M) {
 func Test_CR(t *testing.T) {
     want := NATSCR()
 
-    ctx := context.TODO()
     // Get the NATS CR from the cluster.
     actual, err := RetryGet(attempts, interval, func() (*natsv1alpha1.NATS, error) {
-        return getNATSCR(ctx, want.Name, want.Namespace)
+        return getNATSCR(context.TODO(), want.Name, want.Namespace)
     })
     require.NoError(t, err)

@@ -131,10 +129,8 @@ func Test_CR(t *testing.T) {
 // Test_PriorityClass gets the PriorityClass name from the StatefulSet and checks if a PriorityClass with that
 // name exists in the cluster.
 func Test_PriorityClass(t *testing.T) {
-    ctx := context.TODO()
-
     err := Retry(attempts, interval, func() error {
-        sts, stsErr := clientSet.AppsV1().StatefulSets(NamespaceName).Get(ctx, STSName, metav1.GetOptions{})
+        sts, stsErr := clientSet.AppsV1().StatefulSets(NamespaceName).Get(context.TODO(), STSName, metav1.GetOptions{})
         if stsErr != nil {
             return stsErr
         }
@@ -149,7 +145,7 @@ func Test_PriorityClass(t *testing.T) {
return fmt.Errorf("PriorityClassName was expected to be %s but was %s", PriorityClassName, pcName)
}

_, pcErr := clientSet.SchedulingV1().PriorityClasses().Get(ctx, pcName, metav1.GetOptions{})
_, pcErr := clientSet.SchedulingV1().PriorityClasses().Get(context.TODO(), pcName, metav1.GetOptions{})
return pcErr
})

@@ -158,10 +154,8 @@ func Test_PriorityClass(t *testing.T) {

 // Test_ConfigMap tests the ConfigMap that the NATS-Manager creates when we define a CR.
 func Test_ConfigMap(t *testing.T) {
-    ctx := context.TODO()
-
     err := Retry(attempts, interval, func() error {
-        cm, cmErr := clientSet.CoreV1().ConfigMaps(NamespaceName).Get(ctx, CMName, metav1.GetOptions{})
+        cm, cmErr := clientSet.CoreV1().ConfigMaps(NamespaceName).Get(context.TODO(), CMName, metav1.GetOptions{})
         if cmErr != nil {
             return cmErr
         }
@@ -195,21 +189,20 @@ func Test_ConfigMap(t *testing.T) {
 func Test_PodsResources(t *testing.T) {
     t.Parallel()
 
-    ctx := context.TODO()
     // RetryGet the NATS Pods and test them.
     err := Retry(attempts, interval, func() error {
         // RetryGet the NATS Pods via labels.
-        pods, err := clientSet.CoreV1().Pods(NamespaceName).List(ctx, PodListOpts())
+        pods, err := clientSet.CoreV1().Pods(NamespaceName).List(context.TODO(), PodListOpts())
         if err != nil {
             return err
         }
 
         // The number of Pods must be equal to NATS.spec.cluster.size. We check this in the retry, because it may
         // take some time for all Pods to be there.
-        if len(pods.Items) != NATSCR().Spec.Cluster.Size {
+        if len(pods.Items) != NATSCR().Spec.Size {
             return fmt.Errorf(
                 "error while fetching Pods; wanted %v Pods but got %v",
-                NATSCR().Spec.Cluster.Size,
+                NATSCR().Spec.Size,
                 pods.Items,
             )
         }
@@ -219,7 +212,7 @@ func Test_PodsResources(t *testing.T) {
         foundContainers := 0
         for _, pod := range pods.Items {
             for _, container := range pod.Spec.Containers {
-                if !(container.Name == ContainerName) {
+                if container.Name != ContainerName {
                     continue
                 }
                 foundContainers += 1
@@ -233,10 +226,10 @@ func Test_PodsResources(t *testing.T) {
                 }
             }
         }
-        if foundContainers != NATSCR().Spec.Cluster.Size {
+        if foundContainers != NATSCR().Spec.Size {
             return fmt.Errorf(
                 "error while fetching 'natsCR' Containers: expected %v but found %v",
-                NATSCR().Spec.Cluster.Size,
+                NATSCR().Spec.Size,
                 foundContainers,
             )
         }
@@ -251,27 +244,26 @@
 func Test_PodsReady(t *testing.T) {
     t.Parallel()
 
-    ctx := context.TODO()
     // RetryGet the NATS CR. It will tell us how many Pods we should expect.
     natsCR, err := RetryGet(attempts, interval, func() (*natsv1alpha1.NATS, error) {
-        return getNATSCR(ctx, CRName, NamespaceName)
+        return getNATSCR(context.TODO(), CRName, NamespaceName)
    })
     require.NoError(t, err)
 
     // RetryGet the NATS Pods and test them.
     err = Retry(attempts, interval, func() error {
         var pods *v1.PodList
         // RetryGet the NATS Pods via labels.
-        pods, err = clientSet.CoreV1().Pods(NamespaceName).List(ctx, PodListOpts())
+        pods, err = clientSet.CoreV1().Pods(NamespaceName).List(context.TODO(), PodListOpts())
         if err != nil {
             return err
         }
 
         // The number of Pods must be equal to NATS.spec.cluster.size. We check this in the retry, because it may
         // take some time for all Pods to be there.
-        if len(pods.Items) != natsCR.Spec.Cluster.Size {
+        if len(pods.Items) != natsCR.Spec.Size {
             return fmt.Errorf(
-                "Error while fetching pods; wanted %v Pods but got %v", natsCR.Spec.Cluster.Size, pods.Items,
+                "Error while fetching pods; wanted %v Pods but got %v", natsCR.Spec.Size, pods.Items,
             )
         }

@@ -306,21 +298,20 @@ func Test_PVCs(t *testing.T) {
     t.Parallel()
 
     // Get the PersistentVolumeClaims --PVCs-- and test them.
-    ctx := context.TODO()
     var pvcs *v1.PersistentVolumeClaimList
     err := Retry(attempts, interval, func() error {
         // RetryGet PVCs via a label.
         var err error
         pvcs, err = RetryGet(attempts, interval, func() (*v1.PersistentVolumeClaimList, error) {
-            return clientSet.CoreV1().PersistentVolumeClaims(NamespaceName).List(ctx, PVCListOpts())
+            return clientSet.CoreV1().PersistentVolumeClaims(NamespaceName).List(context.TODO(), PVCListOpts())
         })
         if err != nil {
             return err
         }
 
         // Check if the amount of PVCs is equal to the spec.cluster.size in the NATS CR. We do this in the retry,
         // because it may take some time for all PVCs to be there.
-        want, actual := NATSCR().Spec.Cluster.Size, len(pvcs.Items)
+        want, actual := NATSCR().Spec.Size, len(pvcs.Items)
         if want != actual {
             return fmt.Errorf("error while fetching PVCs; wanted %v PVCs but got %v", want, actual)
         }
@@ -340,9 +331,9 @@
 // Test_Secret tests if the Secret was created.
 func Test_Secret(t *testing.T) {
     t.Parallel()
-    ctx := context.TODO()
 
     err := Retry(attempts, interval, func() error {
-        _, secErr := clientSet.CoreV1().Secrets(NamespaceName).Get(ctx, SecretName, metav1.GetOptions{})
+        _, secErr := clientSet.CoreV1().Secrets(NamespaceName).Get(context.TODO(), SecretName, metav1.GetOptions{})
         if secErr != nil {
             return secErr
         }
@@ -402,10 +393,9 @@ func waitForNATSCRReady() error {
     logger.Debug(fmt.Sprintf("waiting for NATS CR to get ready. "+
         "CR name: %s, namespace: %s", want.Name, want.Namespace))
 
-    ctx := context.TODO()
     // Get the NATS CR from the cluster.
     gotNATSCR, err := RetryGet(attempts, interval, func() (*natsv1alpha1.NATS, error) {
-        return getNATSCR(ctx, want.Name, want.Namespace)
+        return getNATSCR(context.TODO(), want.Name, want.Namespace)
     })
     if err != nil {
         return err
@@ -429,16 +419,16 @@ func waitForNATSManagerDeploymentReady(image string) error {
     // RetryGet the NATS Manager and test status.
     return Retry(attempts, interval, func() error {
         logger.Debug(fmt.Sprintf("waiting for nats-manager deployment to get ready with image: %s", image))
-        ctx := context.TODO()
 
         // Get the NATS-manager deployment from the cluster.
         gotDeployment, err := RetryGet(attempts, interval, func() (*appsv1.Deployment, error) {
-            return getDeployment(ctx, ManagerDeploymentName, NamespaceName)
+            return getDeployment(context.TODO(), ManagerDeploymentName, NamespaceName)
         })
         if err != nil {
             return err
         }
 
-        // if image is provided, then check if the deployment has correct image.
+        // If the image is provided, then check if the deployment has the correct image.
         if image != "" && gotDeployment.Spec.Template.Spec.Containers[0].Image != image {
             err := fmt.Errorf("expected NATS-manager image to be: %s, but found: %s", image,
                 gotDeployment.Spec.Template.Spec.Containers[0].Image,
@@ -447,7 +437,7 @@ func waitForNATSManagerDeploymentReady(image string) error {
             return err
         }
 
-        // check if the deployment is ready.
+        // Check if the deployment is ready.
         if *gotDeployment.Spec.Replicas != gotDeployment.Status.UpdatedReplicas ||
             *gotDeployment.Spec.Replicas != gotDeployment.Status.ReadyReplicas ||
             *gotDeployment.Spec.Replicas != gotDeployment.Status.AvailableReplicas {
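
The last hunk's readiness predicate only reports the deployment ready when the desired replica count matches the updated, ready, and available counters at the same time. Here is a standalone sketch of just that check, assuming k8s.io/api is available in the module; the hand-built Deployment literal is for illustration only, whereas the real test fetches the live object via getDeployment(context.TODO(), ...).

package main

import (
    "fmt"

    appsv1 "k8s.io/api/apps/v1"
)

// deploymentReady mirrors the readiness predicate in
// waitForNATSManagerDeploymentReady: every status counter must have
// caught up with the desired replica count.
func deploymentReady(d *appsv1.Deployment) bool {
    want := *d.Spec.Replicas
    return want == d.Status.UpdatedReplicas &&
        want == d.Status.ReadyReplicas &&
        want == d.Status.AvailableReplicas
}

func main() {
    replicas := int32(1)
    d := &appsv1.Deployment{
        Spec: appsv1.DeploymentSpec{Replicas: &replicas},
        Status: appsv1.DeploymentStatus{
            UpdatedReplicas:   1,
            ReadyReplicas:     1,
            AvailableReplicas: 1,
        },
    }
    fmt.Println("ready:", deploymentReady(d)) // prints: ready: true
}
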