diff --git a/controllers/datamovementmanager_controller.go b/controllers/datamovementmanager_controller.go
index 5c60b60a..08b5fdbc 100644
--- a/controllers/datamovementmanager_controller.go
+++ b/controllers/datamovementmanager_controller.go
@@ -399,7 +399,7 @@ func (r *DataMovementManagerReconciler) removeLustreFileSystemsFinalizersIfNeces
 	// Now the DS does not have any lustre filesystems that are being deleted, verify that the
 	// daemonset's pods (i.e. dm worker pods) have restarted
 	d := ds.Status.DesiredNumberScheduled
-	if ds.ObjectMeta.Generation != ds.Status.ObservedGeneration || ds.Status.UpdatedNumberScheduled != d || ds.Status.NumberReady != d {
+	if ds.Status.ObservedGeneration != ds.ObjectMeta.Generation || ds.Status.UpdatedNumberScheduled != d || ds.Status.NumberReady != d {
 		// wait for pods to restart
 		log.Info("Requeue: wait for daemonset to restart pods after dropping lustrefilesystem volume",
 			"desired", d, "updated", ds.Status.UpdatedNumberScheduled, "ready", ds.Status.NumberReady)
diff --git a/controllers/datamovementmanager_controller_test.go b/controllers/datamovementmanager_controller_test.go
index a94e10b1..b634f2b7 100644
--- a/controllers/datamovementmanager_controller_test.go
+++ b/controllers/datamovementmanager_controller_test.go
@@ -210,11 +210,18 @@ var _ = Describe("Data Movement Manager Test" /*Ordered, (Ginkgo v2)*/, func() {
 			desired := daemonset.Status.DesiredNumberScheduled
 			updated := daemonset.Status.UpdatedNumberScheduled
 			ready := daemonset.Status.NumberReady
+			expectedGen := daemonset.ObjectMeta.Generation
+			gen := daemonset.Status.ObservedGeneration
+
+			// Fake the updates to the daemonset since the daemonset controller doesn't run
+			fakeDSUpdates(daemonset, g)
+
 			if v.Name == lustre.Name {
 				// If the volume still exists, then so should lustre + finalizer
 				g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(lustre), lustre)).Should(Succeed())
 				g.Expect(controllerutil.ContainsFinalizer(lustre, finalizer)).To(BeTrue())
-			} else if updated != desired && ready != desired {
+
+			} else if gen != expectedGen && updated != desired && ready != desired {
 				// If pods have not restarted, lustre + finalizer should still be there
 				g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(lustre), lustre)).Should(Succeed())
 				g.Expect(controllerutil.ContainsFinalizer(lustre, finalizer)).To(BeTrue())
@@ -223,7 +230,34 @@
 					return k8sClient.Get(ctx, client.ObjectKeyFromObject(lustre), lustre)
 				}
 			}
 
+			return nil
-		}).ShouldNot(Succeed())
+		}, "15s").ShouldNot(Succeed())
 	})
 })
+
+// Envtest does not run the built-in controllers (e.g. daemonset controller). This function fakes
+// that out. Walk the counters up by one each time so we can exercise the controller watching these
+// through a few iterations.
+func fakeDSUpdates(ds *appsv1.DaemonSet, g Gomega) {
+	const desired = 5 // number of nnf nodes
+
+	ds.Status.DesiredNumberScheduled = desired
+
+	ds.Status.ObservedGeneration++
+	if ds.Status.ObservedGeneration > ds.ObjectMeta.Generation {
+		ds.Status.ObservedGeneration = ds.ObjectMeta.Generation
+	}
+
+	ds.Status.UpdatedNumberScheduled++
+	if ds.Status.UpdatedNumberScheduled > desired {
+		ds.Status.UpdatedNumberScheduled = desired
+	}
+
+	ds.Status.NumberReady++
+	if ds.Status.NumberReady > desired {
+		ds.Status.NumberReady = desired
+	}
+
+	g.Expect(k8sClient.Status().Update(ctx, ds)).Should(Succeed())
+}