diff --git a/controllers/container_image/deployment_handler.go b/controllers/container_image/deployment_handler.go
index f1e375b94..4c69201a0 100644
--- a/controllers/container_image/deployment_handler.go
+++ b/controllers/container_image/deployment_handler.go
@@ -124,14 +124,10 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error {
 
 	if created {
 		logger.Info("Created CronJob", "namespace", desired.Namespace, "name", desired.Name)
-		err = mondoo.UpdateMondooAuditConfig(ctx, n.KubeClient, n.Mondoo, logger)
-		if err != nil {
-			logger.Error(err, "Failed to update MondooAuditConfig", "namespace", n.Mondoo.Namespace, "name", n.Mondoo.Name)
-			return err
-		}
 	} else if !k8s.AreCronJobsEqual(*existing, *desired) {
 		existing.Spec.JobTemplate = desired.Spec.JobTemplate
 		existing.Spec.Schedule = desired.Spec.Schedule
+		existing.Spec.ConcurrencyPolicy = desired.Spec.ConcurrencyPolicy
 		existing.SetOwnerReferences(desired.GetOwnerReferences())
 
 		// Remove any old jobs because they won't be updated when the cronjob changes
diff --git a/controllers/container_image/deployment_handler_test.go b/controllers/container_image/deployment_handler_test.go
index 8808469bb..ee597c19f 100644
--- a/controllers/container_image/deployment_handler_test.go
+++ b/controllers/container_image/deployment_handler_test.go
@@ -138,30 +138,6 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateWithCustomSchedule() {
 	s.Equal(created.Spec.Schedule, customSchedule)
 }
 
-func (s *DeploymentHandlerSuite) TestReconcile_CreateWithCustomScheduleFail() {
-	d := s.createDeploymentHandler()
-	mondooAuditConfig := &s.auditConfig
-	s.NoError(d.KubeClient.Create(s.ctx, mondooAuditConfig))
-
-	customSchedule := "this is not valid"
-	s.auditConfig.Spec.Containers.Schedule = customSchedule
-
-	result, err := d.Reconcile(s.ctx)
-	s.NoError(err)
-	s.True(result.IsZero())
-
-	image, err := s.containerImageResolver.CnspecImage("", "", false)
-	s.NoError(err)
-
-	expected := CronJob(image, "", test.KubeSystemNamespaceUid, "", &s.auditConfig, mondoov1alpha2.MondooOperatorConfig{})
-	created := &batchv1.CronJob{}
-	created.Name = expected.Name
-	created.Namespace = expected.Namespace
-	s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(created), created))
-
-	s.NotEqual(created.Spec.Schedule, customSchedule)
-}
-
 func (s *DeploymentHandlerSuite) TestReconcile_Create_PrivateRegistriesSecret() {
 	d := s.createDeploymentHandler()
 	mondooAuditConfig := &s.auditConfig
diff --git a/controllers/container_image/resources.go b/controllers/container_image/resources.go
index cc16fe5c7..71f49e8f3 100644
--- a/controllers/container_image/resources.go
+++ b/controllers/container_image/resources.go
@@ -6,10 +6,9 @@ package container_image
 import (
 	"fmt"
 	"strings"
-	"time"
 
 	// That's the mod k8s relies on https://github.com/kubernetes/kubernetes/blob/master/go.mod#L63
-	"github.com/robfig/cron/v3"
+
 	"go.mondoo.com/cnquery/v9/providers-sdk/v1/inventory"
 	"go.mondoo.com/mondoo-operator/api/v1alpha2"
 	"go.mondoo.com/mondoo-operator/pkg/constants"
@@ -48,22 +47,6 @@ func CronJob(image, integrationMrn, clusterUid, privateImageScanningSecretName s
 	envVars := feature_flags.AllFeatureFlagsAsEnv()
 	envVars = append(envVars, corev1.EnvVar{Name: "MONDOO_AUTO_UPDATE", Value: "false"})
 
-	// We want to start the cron job one minute after it was enabled.
-	cronStart := time.Now().Add(1 * time.Minute)
-	cronTab := fmt.Sprintf("%d %d * * *", cronStart.Minute(), cronStart.Hour())
-	if m.Spec.Containers.Schedule != "" {
-		_, err := cron.ParseStandard(m.Spec.Containers.Schedule)
-		if err != nil {
-			logger.Error(err, "invalid cron schedule specified in MondooAuditConfig Spec.Containers.Schedule; using default")
-		} else {
-			logger.Info("using cron custom schedule", "crontab", m.Spec.Containers.Schedule)
-			cronTab = m.Spec.Containers.Schedule
-		}
-	} else {
-		logger.Info("using default cron schedule", "crontab", cronTab)
-		m.Spec.Containers.Schedule = cronTab
-	}
-
 	cronjob := &batchv1.CronJob{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      CronJobName(m.Name),
@@ -71,7 +54,7 @@ func CronJob(image, integrationMrn, clusterUid, privateImageScanningSecretName s
 			Labels:    ls,
 		},
 		Spec: batchv1.CronJobSpec{
-			Schedule:          cronTab,
+			Schedule:          m.Spec.Containers.Schedule,
 			ConcurrencyPolicy: batchv1.ForbidConcurrent,
 			JobTemplate: batchv1.JobTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{Labels: ls},
diff --git a/controllers/k8s_scan/deployment_handler.go b/controllers/k8s_scan/deployment_handler.go
index 4f4933547..05e466b00 100644
--- a/controllers/k8s_scan/deployment_handler.go
+++ b/controllers/k8s_scan/deployment_handler.go
@@ -91,14 +91,10 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error {
 
 	if created {
 		logger.Info("Created CronJob", "namespace", desired.Namespace, "name", desired.Name)
-		err = mondoo.UpdateMondooAuditConfig(ctx, n.KubeClient, n.Mondoo, logger)
-		if err != nil {
-			logger.Error(err, "Failed to update MondooAuditConfig", "namespace", n.Mondoo.Namespace, "name", n.Mondoo.Name)
-			return err
-		}
 	} else if !k8s.AreCronJobsEqual(*existing, *desired) {
 		existing.Spec.JobTemplate = desired.Spec.JobTemplate
 		existing.Spec.Schedule = desired.Spec.Schedule
+		existing.Spec.ConcurrencyPolicy = desired.Spec.ConcurrencyPolicy
 		existing.SetOwnerReferences(desired.GetOwnerReferences())
 
 		// Remove any old jobs because they won't be updated when the cronjob changes
diff --git a/controllers/k8s_scan/deployment_handler_test.go b/controllers/k8s_scan/deployment_handler_test.go
index 79ec3cafe..09a42983f 100644
--- a/controllers/k8s_scan/deployment_handler_test.go
+++ b/controllers/k8s_scan/deployment_handler_test.go
@@ -351,26 +351,6 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateWithDefaultSchedule() {
 	created.Name = expected.Name
 	created.Namespace = expected.Namespace
 	s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(created), created))
-
-	// Wait a bit to longer, to later check, whether the CronJob schedule was changed.
-	time.Sleep(61 * time.Second)
-
-	s.scanApiStoreMock.EXPECT().Add(&scan_api_store.ScanApiStoreAddOpts{
-		Url:   scanApiUrl,
-		Token: "token",
-	}).Times(1)
-	result, err = d.Reconcile(s.ctx)
-	s.NoError(err)
-	s.True(result.IsZero())
-
-	s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(created), created))
-	foundMAC := &mondoov1alpha2.MondooAuditConfig{}
-	foundMAC.Name = mondooAuditConfig.Name
-	foundMAC.Namespace = mondooAuditConfig.Namespace
-	s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(foundMAC), foundMAC))
-
-	s.Equal(created.Spec.Schedule, expected.Spec.Schedule)
-	s.Equal(foundMAC.Spec.KubernetesResources.Schedule, expected.Spec.Schedule)
 }
 
 func (s *DeploymentHandlerSuite) TestReconcile_CreateWithCustomSchedule() {
@@ -404,37 +384,6 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateWithCustomSchedule() {
 	s.Equal(created.Spec.Schedule, customSchedule)
 }
 
-func (s *DeploymentHandlerSuite) TestReconcile_CreateWithCustomScheduleFail() {
-	d := s.createDeploymentHandler()
-	mondooAuditConfig := &s.auditConfig
-	s.NoError(d.KubeClient.Create(s.ctx, mondooAuditConfig))
-
-	scanApiUrl := scanapi.ScanApiServiceUrl(*d.Mondoo)
-	s.scanApiStoreMock.EXPECT().Add(&scan_api_store.ScanApiStoreAddOpts{
-		Url:   scanApiUrl,
-		Token: "token",
-	}).Times(1)
-
-	customSchedule := "this is not valid"
-	s.auditConfig.Spec.KubernetesResources.Schedule = customSchedule
-
-	result, err := d.Reconcile(s.ctx)
-	s.NoError(err)
-	s.True(result.IsZero())
-
-	image, err := s.containerImageResolver.CnspecImage("", "", false)
-	s.NoError(err)
-
-	expected := CronJob(image, "", test.KubeSystemNamespaceUid, &s.auditConfig)
-
-	created := &batchv1.CronJob{}
-	created.Name = expected.Name
-	created.Namespace = expected.Namespace
-	s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(created), created))
-
-	s.NotEqual(created.Spec.Schedule, customSchedule)
-}
-
 func (s *DeploymentHandlerSuite) createDeploymentHandler() DeploymentHandler {
 	return DeploymentHandler{
 		KubeClient:             s.fakeClientBuilder.Build(),
diff --git a/controllers/k8s_scan/resources.go b/controllers/k8s_scan/resources.go
index a8963d7ce..cda4b97eb 100644
--- a/controllers/k8s_scan/resources.go
+++ b/controllers/k8s_scan/resources.go
@@ -6,10 +6,9 @@ package k8s_scan
 import (
 	"fmt"
 	"strings"
-	"time"
 
 	// That's the mod k8s relies on https://github.com/kubernetes/kubernetes/blob/master/go.mod#L63
-	"github.com/robfig/cron/v3"
+
 	"go.mondoo.com/mondoo-operator/api/v1alpha2"
 	"go.mondoo.com/mondoo-operator/controllers/scanapi"
 	"go.mondoo.com/mondoo-operator/pkg/feature_flags"
@@ -24,20 +23,6 @@ const CronJobNameSuffix = "-k8s-scan"
 
 func CronJob(image, integrationMrn, clusterUid string, m *v1alpha2.MondooAuditConfig) *batchv1.CronJob {
 	ls := CronJobLabels(*m)
-
-	cronTab := fmt.Sprintf("%d * * * *", time.Now().Add(1*time.Minute).Minute())
-	if m.Spec.KubernetesResources.Schedule != "" {
-		_, err := cron.ParseStandard(m.Spec.KubernetesResources.Schedule)
-		if err != nil {
-			logger.Error(err, "invalid cron schedule specified in MondooAuditConfig Spec.KubernetesResources.Schedule; using default")
-		} else {
-			logger.Info("using cron custom schedule", "crontab", m.Spec.KubernetesResources.Schedule)
-			cronTab = m.Spec.KubernetesResources.Schedule
-		}
-	} else {
-		logger.Info("using default cron schedule", "crontab", cronTab)
-		m.Spec.KubernetesResources.Schedule = cronTab
-	}
 
 	scanApiUrl := scanapi.ScanApiServiceUrl(*m)
 	containerArgs := []string{
@@ -73,7 +58,7 @@ func CronJob(image, integrationMrn, clusterUid string, m *v1alpha2.MondooAuditCo
 			Labels:    ls,
 		},
 		Spec: batchv1.CronJobSpec{
-			Schedule:          cronTab,
+			Schedule:          m.Spec.KubernetesResources.Schedule,
 			ConcurrencyPolicy: batchv1.ForbidConcurrent,
 			JobTemplate: batchv1.JobTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{Labels: ls},
diff --git a/controllers/mondooauditconfig_controller.go b/controllers/mondooauditconfig_controller.go
index 9a150272a..cb90f44a8 100644
--- a/controllers/mondooauditconfig_controller.go
+++ b/controllers/mondooauditconfig_controller.go
@@ -5,6 +5,7 @@ package controllers
 
 import (
 	"context"
+	"fmt"
 	"time"
 
 	"github.com/go-logr/logr"
@@ -166,6 +167,30 @@ func (r *MondooAuditConfigReconciler) Reconcile(ctx context.Context, req ctrl.Re
 		}
 	}
 
+	// Set the default cron tab if none is set
+	shouldUpdate := false
+	if mondooAuditConfig.Spec.Nodes.Enable && mondooAuditConfig.Spec.Nodes.Schedule == "" {
+		mondooAuditConfig.Spec.Nodes.Schedule = fmt.Sprintf("%d * * * *", time.Now().Add(1*time.Minute).Minute())
+		shouldUpdate = true
+	}
+	if mondooAuditConfig.Spec.KubernetesResources.Enable && mondooAuditConfig.Spec.KubernetesResources.Schedule == "" {
+		mondooAuditConfig.Spec.KubernetesResources.Schedule = fmt.Sprintf("%d * * * *", time.Now().Add(1*time.Minute).Minute())
+		shouldUpdate = true
+	}
+	if mondooAuditConfig.Spec.Containers.Enable && mondooAuditConfig.Spec.Containers.Schedule == "" {
+		cronStart := time.Now().Add(1 * time.Minute)
+		mondooAuditConfig.Spec.Containers.Schedule = fmt.Sprintf("%d %d * * *", cronStart.Minute(), cronStart.Hour())
+		shouldUpdate = true
+	}
+	if shouldUpdate {
+		err := r.Update(ctx, mondooAuditConfig)
+		if err != nil {
+			log.Error(err, "failed to update MondooAuditConfig with default schedule")
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{Requeue: true}, nil
+	}
+
 	mondooAuditConfigCopy := mondooAuditConfig.DeepCopy()
 
 	// Conditions might be updated before this reconciler reaches the end
diff --git a/controllers/mondooauditconfig_controller_test.go b/controllers/mondooauditconfig_controller_test.go
index 979ca9434..3268442e6 100644
--- a/controllers/mondooauditconfig_controller_test.go
+++ b/controllers/mondooauditconfig_controller_test.go
@@ -8,6 +8,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"testing"
+	"time"
 
 	"github.com/golang/mock/gomock"
 	"github.com/stretchr/testify/assert"
@@ -404,6 +405,139 @@ func TestMondooAuditConfigStatus(t *testing.T) {
 	assert.Equalf(t, mondooAuditConfig.Status.ReconciledByOperatorVersion, version.Version, "expected versions to be equal")
 }
 
+func TestMondooAuditConfig_Nodes_Schedule(t *testing.T) {
+	utilruntime.Must(v1alpha2.AddToScheme(scheme.Scheme))
+
+	testMondooServiceAccount.PrivateKey = credentials.MondooServiceAccount(t)
+
+	mockCtrl := gomock.NewController(t)
+	defer mockCtrl.Finish()
+
+	mClient := mockmondoo.NewMockMondooClient(mockCtrl)
+	testMondooClientBuilder := func(mondooclient.MondooClientOptions) (mondooclient.MondooClient, error) {
+		return mClient, nil
+	}
+
+	mondooAuditConfig := testMondooAuditConfig()
+	mondooAuditConfig.Spec.Nodes.Enable = true
+
+	fakeClient := fake.NewClientBuilder().
+		WithStatusSubresource(mondooAuditConfig).
+		WithObjects(mondooAuditConfig).
+		Build()
+
+	ctx := context.Background()
+	scanApiStore := scan_api_store.NewScanApiStore(ctx)
+	go scanApiStore.Start()
+	reconciler := &MondooAuditConfigReconciler{
+		MondooClientBuilder: testMondooClientBuilder,
+		Client:              fakeClient,
+		ScanApiStore:        scanApiStore,
+	}
+
+	_, err := reconciler.Reconcile(ctx, reconcile.Request{
+		NamespacedName: types.NamespacedName{
+			Name:      testMondooAuditConfigName,
+			Namespace: testNamespace,
+		},
+	})
+	require.NoError(t, err)
+
+	err = fakeClient.Get(ctx, client.ObjectKeyFromObject(mondooAuditConfig), mondooAuditConfig)
+	require.NoError(t, err)
+
+	assert.Equal(t, fmt.Sprintf("%d * * * *", time.Now().Add(1*time.Minute).Minute()), mondooAuditConfig.Spec.Nodes.Schedule)
+}
+
+func TestMondooAuditConfig_KubernetesResources_Schedule(t *testing.T) {
+	utilruntime.Must(v1alpha2.AddToScheme(scheme.Scheme))
+
+	testMondooServiceAccount.PrivateKey = credentials.MondooServiceAccount(t)
+
+	mockCtrl := gomock.NewController(t)
+	defer mockCtrl.Finish()
+
+	mClient := mockmondoo.NewMockMondooClient(mockCtrl)
+	testMondooClientBuilder := func(mondooclient.MondooClientOptions) (mondooclient.MondooClient, error) {
+		return mClient, nil
+	}
+
+	mondooAuditConfig := testMondooAuditConfig()
+	mondooAuditConfig.Spec.KubernetesResources.Enable = true
+
+	fakeClient := fake.NewClientBuilder().
+		WithStatusSubresource(mondooAuditConfig).
+		WithObjects(mondooAuditConfig).
+		Build()
+
+	ctx := context.Background()
+	scanApiStore := scan_api_store.NewScanApiStore(ctx)
+	go scanApiStore.Start()
+	reconciler := &MondooAuditConfigReconciler{
+		MondooClientBuilder: testMondooClientBuilder,
+		Client:              fakeClient,
+		ScanApiStore:        scanApiStore,
+	}
+
+	_, err := reconciler.Reconcile(ctx, reconcile.Request{
+		NamespacedName: types.NamespacedName{
+			Name:      testMondooAuditConfigName,
+			Namespace: testNamespace,
+		},
+	})
+	require.NoError(t, err)
+
+	err = fakeClient.Get(ctx, client.ObjectKeyFromObject(mondooAuditConfig), mondooAuditConfig)
+	require.NoError(t, err)
+
+	assert.Equal(t, fmt.Sprintf("%d * * * *", time.Now().Add(1*time.Minute).Minute()), mondooAuditConfig.Spec.KubernetesResources.Schedule)
+}
+
+func TestMondooAuditConfig_Containers_Schedule(t *testing.T) {
+	utilruntime.Must(v1alpha2.AddToScheme(scheme.Scheme))
+
+	testMondooServiceAccount.PrivateKey = credentials.MondooServiceAccount(t)
+
+	mockCtrl := gomock.NewController(t)
+	defer mockCtrl.Finish()
+
+	mClient := mockmondoo.NewMockMondooClient(mockCtrl)
+	testMondooClientBuilder := func(mondooclient.MondooClientOptions) (mondooclient.MondooClient, error) {
+		return mClient, nil
+	}
+
+	mondooAuditConfig := testMondooAuditConfig()
+	mondooAuditConfig.Spec.Containers.Enable = true
+
+	fakeClient := fake.NewClientBuilder().
+		WithStatusSubresource(mondooAuditConfig).
+		WithObjects(mondooAuditConfig).
+		Build()
+
+	ctx := context.Background()
+	scanApiStore := scan_api_store.NewScanApiStore(ctx)
+	go scanApiStore.Start()
+	reconciler := &MondooAuditConfigReconciler{
+		MondooClientBuilder: testMondooClientBuilder,
+		Client:              fakeClient,
+		ScanApiStore:        scanApiStore,
+	}
+
+	_, err := reconciler.Reconcile(ctx, reconcile.Request{
+		NamespacedName: types.NamespacedName{
+			Name:      testMondooAuditConfigName,
+			Namespace: testNamespace,
+		},
+	})
+	require.NoError(t, err)
+
+	err = fakeClient.Get(ctx, client.ObjectKeyFromObject(mondooAuditConfig), mondooAuditConfig)
+	require.NoError(t, err)
+
+	cronStart := time.Now().Add(1 * time.Minute)
+	assert.Equal(t, fmt.Sprintf("%d %d * * *", cronStart.Minute(), cronStart.Hour()), mondooAuditConfig.Spec.Containers.Schedule)
+}
+
 func testMondooAuditConfig() *v1alpha2.MondooAuditConfig {
 	return &v1alpha2.MondooAuditConfig{
 		ObjectMeta: metav1.ObjectMeta{
diff --git a/controllers/nodes/deployment_handler.go b/controllers/nodes/deployment_handler.go
index 067d6e209..0a1616378 100644
--- a/controllers/nodes/deployment_handler.go
+++ b/controllers/nodes/deployment_handler.go
@@ -110,6 +110,7 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error {
 		if !k8s.AreCronJobsEqual(*existing, *desired) {
 			existing.Spec.JobTemplate = desired.Spec.JobTemplate
 			existing.Spec.Schedule = desired.Spec.Schedule
+			existing.Spec.ConcurrencyPolicy = desired.Spec.ConcurrencyPolicy
 			existing.SetOwnerReferences(desired.GetOwnerReferences())
 
 			// Remove any old jobs because they won't be updated when the cronjob changes
@@ -287,6 +288,7 @@ func (n *DeploymentHandler) syncGCCronjob(ctx context.Context, mondooOperatorIma
 
 	if !k8s.AreCronJobsEqual(*existing, *desired) {
 		existing.Spec.JobTemplate = desired.Spec.JobTemplate
+		existing.Spec.ConcurrencyPolicy = desired.Spec.ConcurrencyPolicy
 		existing.SetOwnerReferences(desired.GetOwnerReferences())
 
 		if err := n.KubeClient.Update(ctx, existing); err != nil {
diff --git a/controllers/nodes/deployment_handler_test.go b/controllers/nodes/deployment_handler_test.go
index 479febd26..c14a0501c 100644
--- a/controllers/nodes/deployment_handler_test.go
+++ b/controllers/nodes/deployment_handler_test.go
@@ -603,35 +603,6 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateWithCustomSchedule() {
 	s.Equal(created.Spec.Schedule, customSchedule)
 }
 
-func (s *DeploymentHandlerSuite) TestReconcile_CreateWithCustomScheduleFail() {
-	s.seedNodes()
-	d := s.createDeploymentHandler()
-	mondooAuditConfig := &s.auditConfig
-	s.NoError(d.KubeClient.Create(s.ctx, mondooAuditConfig))
-
-	customSchedule := "this is not valid"
-	s.auditConfig.Spec.Nodes.Schedule = customSchedule
-
-	result, err := d.Reconcile(s.ctx)
-	s.NoError(err)
-	s.True(result.IsZero())
-
-	nodes := &corev1.NodeList{}
-	s.NoError(d.KubeClient.List(s.ctx, nodes))
-
-	image, err := s.containerImageResolver.CnspecImage("", "", false)
-	s.NoError(err)
-
-	expected := CronJob(image, nodes.Items[0], &s.auditConfig, false, v1alpha2.MondooOperatorConfig{})
-
-	created := &batchv1.CronJob{}
-	created.Name = expected.Name
-	created.Namespace = expected.Namespace
-	s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(created), created))
-
-	s.NotEqual(created.Spec.Schedule, customSchedule)
-}
-
 func (s *DeploymentHandlerSuite) createDeploymentHandler() DeploymentHandler {
 	return DeploymentHandler{
 		KubeClient:             s.fakeClientBuilder.Build(),
diff --git a/controllers/nodes/resources.go b/controllers/nodes/resources.go
index 30d345e70..c6f17590a 100644
--- a/controllers/nodes/resources.go
+++ b/controllers/nodes/resources.go
@@ -17,7 +17,7 @@ import (
 	"sigs.k8s.io/yaml"
 
 	// That's the mod k8s relies on https://github.com/kubernetes/kubernetes/blob/master/go.mod#L63
-	"github.com/robfig/cron/v3"
+
 	"go.mondoo.com/cnquery/v9/providers-sdk/v1/inventory"
 	"go.mondoo.com/mondoo-operator/api/v1alpha2"
 	"go.mondoo.com/mondoo-operator/controllers/scanapi"
@@ -38,20 +38,6 @@ const (
 
 func CronJob(image string, node corev1.Node, m *v1alpha2.MondooAuditConfig, isOpenshift bool, cfg v1alpha2.MondooOperatorConfig) *batchv1.CronJob {
 	ls := CronJobLabels(*m)
-
-	cronTab := fmt.Sprintf("%d * * * *", time.Now().Add(1*time.Minute).Minute())
-	if m.Spec.Nodes.Schedule != "" {
-		_, err := cron.ParseStandard(m.Spec.Nodes.Schedule)
-		if err != nil {
-			logger.Error(err, "invalid cron schedule specified in MondooAuditConfig Spec.Nodes.Schedule; using default")
-		} else {
-			logger.Info("using cron custom schedule", "crontab", m.Spec.Nodes.Schedule)
-			cronTab = m.Spec.Nodes.Schedule
-		}
-	} else {
-		logger.Info("using default cron schedule", "crontab", cronTab)
-		m.Spec.Nodes.Schedule = cronTab
-	}
 
 	unsetHostPath := corev1.HostPathUnset
 	name := "cnspec"
@@ -76,7 +62,7 @@ func CronJob(image string, node corev1.Node, m *v1alpha2.MondooAuditConfig, isOp
 			Labels:    CronJobLabels(*m),
 		},
 		Spec: batchv1.CronJobSpec{
-			Schedule:          cronTab,
+			Schedule:          m.Spec.Nodes.Schedule,
 			ConcurrencyPolicy: batchv1.ForbidConcurrent,
 			JobTemplate: batchv1.JobTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
diff --git a/go.mod b/go.mod
index 309a9dde0..9a5ff96c2 100644
--- a/go.mod
+++ b/go.mod
@@ -342,7 +342,6 @@ require (
 	github.com/prometheus/client_model v0.5.0
 	github.com/prometheus/common v0.45.0 // indirect
 	github.com/prometheus/procfs v0.12.0 // indirect
-	github.com/robfig/cron/v3 v3.0.1
 	github.com/spf13/pflag v1.0.6-0.20201009195203-85dd5c8bc61c // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.26.0
diff --git a/go.sum b/go.sum
index 7f900d21e..ca904948f 100644
--- a/go.sum
+++ b/go.sum
@@ -918,8 +918,6 @@ github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
 github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
-github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
diff --git a/pkg/utils/k8s/equality.go b/pkg/utils/k8s/equality.go
index 6e894e3b5..b0fa8765f 100644
--- a/pkg/utils/k8s/equality.go
+++ b/pkg/utils/k8s/equality.go
@@ -67,23 +67,60 @@ func AreServicesEqual(a, b corev1.Service) bool {
 func AreCronJobsEqual(a, b batchv1.CronJob) bool {
 	aPodSpec := a.Spec.JobTemplate.Spec.Template.Spec
 	bPodSpec := b.Spec.JobTemplate.Spec.Template.Spec
-	return len(aPodSpec.Containers) == len(bPodSpec.Containers) &&
-		aPodSpec.ServiceAccountName == bPodSpec.ServiceAccountName &&
-		reflect.DeepEqual(aPodSpec.Tolerations, bPodSpec.Tolerations) &&
-		reflect.DeepEqual(aPodSpec.NodeName, bPodSpec.NodeName) &&
-		reflect.DeepEqual(aPodSpec.Containers[0].Image, bPodSpec.Containers[0].Image) &&
-		reflect.DeepEqual(aPodSpec.Containers[0].Command, bPodSpec.Containers[0].Command) &&
-		reflect.DeepEqual(aPodSpec.Containers[0].Args, bPodSpec.Containers[0].Args) &&
-		reflect.DeepEqual(aPodSpec.Containers[0].VolumeMounts, bPodSpec.Containers[0].VolumeMounts) &&
-		AreEnvVarsEqual(aPodSpec.Containers[0].Env, bPodSpec.Containers[0].Env) &&
-		AreResouceRequirementsEqual(aPodSpec.Containers[0].Resources, bPodSpec.Containers[0].Resources) &&
-		AreSecurityContextsEqual(aPodSpec.Containers[0].SecurityContext, bPodSpec.Containers[0].SecurityContext) &&
-		reflect.DeepEqual(aPodSpec.Volumes, bPodSpec.Volumes) &&
-		reflect.DeepEqual(a.Spec.SuccessfulJobsHistoryLimit, b.Spec.SuccessfulJobsHistoryLimit) &&
-		reflect.DeepEqual(a.Spec.ConcurrencyPolicy, b.Spec.ConcurrencyPolicy) &&
-		a.Spec.Schedule == b.Spec.Schedule &&
-		reflect.DeepEqual(a.Spec.FailedJobsHistoryLimit, b.Spec.FailedJobsHistoryLimit) &&
-		reflect.DeepEqual(a.GetOwnerReferences(), b.GetOwnerReferences())
+
+	if len(aPodSpec.Containers) != len(bPodSpec.Containers) {
+		return false
+	}
+	if aPodSpec.ServiceAccountName != bPodSpec.ServiceAccountName {
+		return false
+	}
+	if !reflect.DeepEqual(aPodSpec.Tolerations, bPodSpec.Tolerations) {
+		return false
+	}
+	if !reflect.DeepEqual(aPodSpec.NodeName, bPodSpec.NodeName) {
+		return false
+	}
+	if !reflect.DeepEqual(aPodSpec.Containers[0].Image, bPodSpec.Containers[0].Image) {
+		return false
+	}
+	if !reflect.DeepEqual(aPodSpec.Containers[0].Command, bPodSpec.Containers[0].Command) {
+		return false
+	}
+	if !reflect.DeepEqual(aPodSpec.Containers[0].Args, bPodSpec.Containers[0].Args) {
+		return false
+	}
+	if !reflect.DeepEqual(aPodSpec.Containers[0].VolumeMounts, bPodSpec.Containers[0].VolumeMounts) {
+		return false
+	}
+	if !AreEnvVarsEqual(aPodSpec.Containers[0].Env, bPodSpec.Containers[0].Env) {
+		return false
+	}
+	if !AreResouceRequirementsEqual(aPodSpec.Containers[0].Resources, bPodSpec.Containers[0].Resources) {
+		return false
+	}
+	if !AreSecurityContextsEqual(aPodSpec.Containers[0].SecurityContext, bPodSpec.Containers[0].SecurityContext) {
+		return false
+	}
+	if !reflect.DeepEqual(aPodSpec.Volumes, bPodSpec.Volumes) {
+		return false
+	}
+	if !reflect.DeepEqual(a.Spec.SuccessfulJobsHistoryLimit, b.Spec.SuccessfulJobsHistoryLimit) {
+		return false
+	}
+	if a.Spec.ConcurrencyPolicy != b.Spec.ConcurrencyPolicy {
+		return false
+	}
+	if a.Spec.Schedule != b.Spec.Schedule {
+		return false
+	}
+	if !reflect.DeepEqual(a.Spec.FailedJobsHistoryLimit, b.Spec.FailedJobsHistoryLimit) {
+		return false
+	}
+	if !reflect.DeepEqual(a.GetOwnerReferences(), b.GetOwnerReferences()) {
+		return false
+	}
+
+	return true
 }
 
 // AreResouceRequirementsEqual returns a value indicating whether 2 resource requirements are equal.
diff --git a/tests/framework/utils/audit_config.go b/tests/framework/utils/audit_config.go
index 4e6fb3573..9717fc6f7 100644
--- a/tests/framework/utils/audit_config.go
+++ b/tests/framework/utils/audit_config.go
@@ -69,9 +69,9 @@ func DefaultAuditConfig(ns string, workloads, containers, nodes, admission bool)
 		},
 		Spec: mondoov2.MondooAuditConfigSpec{
 			MondooCredsSecretRef: corev1.LocalObjectReference{Name: MondooClientSecret},
-			KubernetesResources:  mondoov2.KubernetesResources{Enable: workloads},
-			Containers:           mondoov2.Containers{Enable: containers},
-			Nodes:                mondoov2.Nodes{Enable: nodes},
+			KubernetesResources:  mondoov2.KubernetesResources{Enable: workloads, Schedule: "0 * * * *"},
+			Containers:           mondoov2.Containers{Enable: containers, Schedule: "0 0 * * *"},
+			Nodes:                mondoov2.Nodes{Enable: nodes, Schedule: "0 * * * *"},
 			Admission:            mondoov2.Admission{Enable: admission},
 			Scanner: mondoov2.Scanner{
 				Image: mondoov2.Image{Name: "test", Tag: "latest"},