From d5614fff2eaeddf7860164938e1bd6b6c9dd9ee8 Mon Sep 17 00:00:00 2001 From: Yi Rae Kim Date: Thu, 23 Nov 2023 12:29:44 -0500 Subject: [PATCH] PR review -1 Signed-off-by: Yi Rae Kim --- .github/workflows/ci_tests.yaml | 2 +- .github/workflows/olm_tests.yaml | 10 +- Makefile | 11 +- api/v1alpha1/gatekeeper_types.go | 2 + .../operator.gatekeeper.sh_gatekeepers.yaml | 4 + .../operator.gatekeeper.sh_gatekeepers.yaml | 4 + controllers/constraintstatus_controller.go | 260 +++++------------- controllers/cps_controller_helper.go | 228 ++++++++++----- controllers/discovery_storage.go | 140 ++++++++++ controllers/gatekeeper_controller.go | 44 +-- docs/upgrading-gatekeeper.md | 10 +- main.go | 28 +- test/e2e/case1_audit_from_cache_test.go | 52 +++- test/e2e/e2e_suite_test.go | 16 ++ 14 files changed, 493 insertions(+), 318 deletions(-) create mode 100644 controllers/discovery_storage.go diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index d267a0aae..c9eb73ec6 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -92,7 +92,7 @@ jobs: echo "::group::Operator Logs" cat operator.log echo "::endgroup::" - + gatekeeper-e2e-tests: name: Run gatekeeper e2e tests runs-on: ubuntu-latest diff --git a/.github/workflows/olm_tests.yaml b/.github/workflows/olm_tests.yaml index 086bf4b0e..9e4b68908 100644 --- a/.github/workflows/olm_tests.yaml +++ b/.github/workflows/olm_tests.yaml @@ -59,13 +59,13 @@ jobs: echo "Waiting for operator deployment"; \ sleep 2; \ done - kubectl -n gatekeeper-system wait deployment/gatekeeper-operator-controller --for condition=Available --timeout=90s + kubectl -n mygatekeeper wait deployment/gatekeeper-operator-controller --for condition=Available --timeout=90s - name: E2E Tests run: | - kubectl -n gatekeeper-system logs deployment/gatekeeper-operator-controller -c manager -f > operator.log & - make test-e2e NAMESPACE=gatekeeper-system - kubectl delete --wait namespace gatekeeper-system + kubectl -n mygatekeeper logs deployment/gatekeeper-operator-controller -c manager -f > operator.log & + make test-e2e NAMESPACE=mygatekeeper + kubectl delete --wait namespace mygatekeeper - name: Debug if: ${{ failure() }} @@ -75,5 +75,5 @@ jobs: echo "::endgroup::" echo "::group::Deployments" - kubectl -n gatekeeper-system get deployments -o yaml + kubectl -n mygatekeeper get deployments -o yaml echo "::endgroup::" diff --git a/Makefile b/Makefile index ae62e27f4..57669adce 100644 --- a/Makefile +++ b/Makefile @@ -182,22 +182,15 @@ download-binaries: kustomize go-bindata envtest controller-gen rm -rf bats-core-${BATS_VERSION} v${BATS_VERSION}.tar.gz; \ fi -DEV_IMG=localhost:5000/gatekeeper-operator:dev .PHONY: kind-bootstrap-cluster kind-bootstrap-cluster: test-cluster install dev-build kubectl label ns $(NAMESPACE) --overwrite pod-security.kubernetes.io/audit=privileged kubectl label ns $(NAMESPACE) --overwrite pod-security.kubernetes.io/enforce=privileged kubectl label ns $(NAMESPACE) --overwrite pod-security.kubernetes.io/warn=privileged - kind load docker-image $(DEV_IMG) - $(MAKE) deploy-ci NAMESPACE=$(NAMESPACE) IMG=$(DEV_IMG) + kind load docker-image $(IMG) + $(MAKE) deploy-ci NAMESPACE=$(NAMESPACE) IMG=$(IMG) kubectl -n $(NAMESPACE) wait deployment/gatekeeper-operator-controller --for condition=Available --timeout=90s -.PHONY: dev-build -dev-build: export DOCKER_DEFAULT_PLATFORM=linux/amd64 -dev-build: ## Build docker image with the manager for Mac user - $(DOCKER) build --build-arg GOOS=linux --build-arg GOARCH=amd64 --build-arg 
LDFLAGS=${LDFLAGS} -t ${DEV_IMG} . -##@ Build - .PHONY: build build: generate fmt vet ## Build manager binary. CGO_ENABLED=1 GOFLAGS=$(GOFLAGS) go build -ldflags $(LDFLAGS) -o bin/manager main.go diff --git a/api/v1alpha1/gatekeeper_types.go b/api/v1alpha1/gatekeeper_types.go index ee7458d39..a77c8aeea 100644 --- a/api/v1alpha1/gatekeeper_types.go +++ b/api/v1alpha1/gatekeeper_types.go @@ -92,6 +92,8 @@ type AuditConfig struct { // +optional ConstraintViolationLimit *uint64 `json:"constraintViolationLimit,omitempty"` // +optional + // Setting Automatic lets the Gatekeeper operator manage syncOnly in the config resource. + // It is not recommended to use Automatic when using referential constraints since those are not detected. AuditFromCache *AuditFromCacheMode `json:"auditFromCache,omitempty"` // +kubebuilder:validation:Minimum:=0 // +optional diff --git a/bundle/manifests/operator.gatekeeper.sh_gatekeepers.yaml b/bundle/manifests/operator.gatekeeper.sh_gatekeepers.yaml index 1b0cfe1d9..08e0edb42 100644 --- a/bundle/manifests/operator.gatekeeper.sh_gatekeepers.yaml +++ b/bundle/manifests/operator.gatekeeper.sh_gatekeepers.yaml @@ -864,6 +864,10 @@ spec: minimum: 0 type: integer auditFromCache: + description: Setting Automatic lets the Gatekeeper operator manage + syncOnly in the config resource. It is not recommended to use + Automatic when using referential constraints since those are + not detected. enum: - Enabled - Disabled diff --git a/config/crd/bases/operator.gatekeeper.sh_gatekeepers.yaml b/config/crd/bases/operator.gatekeeper.sh_gatekeepers.yaml index 49574946a..c7f323dd7 100644 --- a/config/crd/bases/operator.gatekeeper.sh_gatekeepers.yaml +++ b/config/crd/bases/operator.gatekeeper.sh_gatekeepers.yaml @@ -864,6 +864,10 @@ spec: minimum: 0 type: integer auditFromCache: + description: Setting Automatic lets the Gatekeeper operator manage + syncOnly in the config resource. It is not recommended to use + Automatic when using referential constraints since those are + not detected. 
enum: - Enabled - Disabled diff --git a/controllers/constraintstatus_controller.go b/controllers/constraintstatus_controller.go index 6a45dfc18..480600486 100644 --- a/controllers/constraintstatus_controller.go +++ b/controllers/constraintstatus_controller.go @@ -2,9 +2,8 @@ package controllers import ( "context" - "fmt" "reflect" - "strings" + "sort" "time" operatorv1alpha1 "github.com/gatekeeper/gatekeeper-operator/api/v1alpha1" @@ -16,44 +15,34 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" - "k8s.io/client-go/kubernetes" "k8s.io/utils/strings/slices" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -var ( - ControllerName = "constraintstatus_reconciler" - ErrNotFoundDiscovery = errors.New("There is no matched apiGroup, version or kind") -) - -type discoveryInfo struct { - apiResourceList []*metav1.APIResourceList - discoveryLastRefreshed time.Time -} +var ControllerName = "constraintstatus_reconciler" -type ConstraintStatusReconciler struct { +type ConstraintPodStatusReconciler struct { client.Client - Scheme *runtime.Scheme - Log logr.Logger - DynamicClient dynamic.Interface - ClientSet *kubernetes.Clientset - Namespace string + Scheme *runtime.Scheme + Log logr.Logger + DynamicClient *dynamic.DynamicClient + Namespace string + // This includes api-resources list and it finds a missing version of resources. + DiscoveryStorage *DiscoveryStorage + // key = constraintPodName ConstraintToSyncOnly map[string][]v1alpha1.SyncOnlyEntry - discoveryInfo } // SetupWithManager sets up the controller with the Manager. -func (r *ConstraintStatusReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *ConstraintPodStatusReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: int(1)}). Named(ControllerName). @@ -84,9 +73,10 @@ func (r *ConstraintStatusReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -// User set gatekeeper.spec.audit.auditFromCache to Automatic, this reconcile function -// collect requested constraints and add found kinds which is used in constraint to config.spec.sync.syncOnly -func (r *ConstraintStatusReconciler) Reconcile(ctx context.Context, +// When spec.audit.auditFromCache is set to Automatic, +// Reconcile analyzes the constraint associated with the ConstraintPodStatus reconcile request. +// The kinds used in the constraint's match configuration is used to configure the syncOnly option. 
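+// For example, a constraint whose match section lists apiGroups [""] and kinds ["Pod"] results in a
+// syncOnly entry with group "", version "v1", and kind "Pod", with the version resolved from API discovery.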
+func (r *ConstraintPodStatusReconciler) Reconcile(ctx context.Context, request reconcile.Request, ) (reconcile.Result, error) { log := r.Log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) @@ -101,6 +91,12 @@ func (r *ConstraintStatusReconciler) Reconcile(ctx context.Context, Name: "gatekeeper", }, gatekeeper) if err != nil { + if apierrors.IsNotFound(err) { + log.Error(err, "Gatekeeper resource is not found") + + return reconcile.Result{}, nil + } + return reconcile.Result{}, err } @@ -111,31 +107,26 @@ func (r *ConstraintStatusReconciler) Reconcile(ctx context.Context, Name: "config", }, config) - if apierrors.IsNotFound(err) { - config = &v1alpha1.Config{ - ObjectMeta: metav1.ObjectMeta{ - Name: "config", - Namespace: r.Namespace, - }, - } + if err != nil { + if apierrors.IsNotFound(err) { + config = &v1alpha1.Config{ + ObjectMeta: metav1.ObjectMeta{ + Name: "config", + Namespace: r.Namespace, + }, + } - err = controllerutil.SetOwnerReference(gatekeeper, config, r.Scheme) - if err != nil { - return reconcile.Result{}, err - } + createErr := r.Create(ctx, config) + if createErr != nil { + log.Error(err, "Fail to create the Gatekeeper Config object, will retry.") - err = r.Create(ctx, config) - if err != nil { - log.Error(err, "Fail to Create Config resource, requeue") + return reconcile.Result{}, createErr + } + log.Info("The Gatekeeper Config object was created") + } else { return reconcile.Result{}, err } - - log.Info("The Gatekeeper's Config resource is just created") - } - - if err != nil && !apierrors.IsNotFound(err) { - return reconcile.Result{}, err } constraintPodStatus := &v1beta1.ConstraintPodStatus{} @@ -156,18 +147,7 @@ func (r *ConstraintStatusReconciler) Reconcile(ctx context.Context, return reconcile.Result{}, err } - labels := constraintPodStatus.GetLabels() - - constraintKind := labels["internal.gatekeeper.sh/constraint-kind"] - constraintName := labels["internal.gatekeeper.sh/constraint-name"] - - constraintGVR := schema.GroupVersionResource{ - Group: "constraints.gatekeeper.sh", - Version: "v1beta1", - Resource: strings.ToLower(constraintKind), - } - - constraint, err := r.DynamicClient.Resource(constraintGVR).Get(ctx, constraintName, metav1.GetOptions{}) + constraint, constraintName, err := getConstraint(ctx, *constraintPodStatus, r.DynamicClient) if err != nil { if apierrors.IsNotFound(err) { r.Log.Info("The Constraint was not found", "constraintName:", constraintName) @@ -180,15 +160,20 @@ func (r *ConstraintStatusReconciler) Reconcile(ctx context.Context, constraintMatchKinds, _, err := unstructured.NestedSlice(constraint.Object, "spec", "match", "kinds") if err != nil { - r.Log.V(1).Info("There is no provided kinds in contsraint", "constraintName:", constraintName) + r.Log.V(1).Info("There are no provided kinds in the Constraint", "constraintName:", constraintName) + + err = r.handleDeleteEvent(ctx, request.Name, config) + if err != nil { + return reconcile.Result{}, err + } return reconcile.Result{}, nil //nolint:nilerr } - constraintSyncOnlyEntries, err := r.getSyncOnlys(constraintMatchKinds) + constraintSyncOnlyEntries, err := r.DiscoveryStorage.getSyncOnlys(constraintMatchKinds) if err != nil { if errors.Is(err, ErrNotFoundDiscovery) { - r.Log.V(1).Info("Cannot find matched discovery. Requeue after 3 mins") + r.Log.V(1).Info("Cannot find matched discovery. 
Requeue after 10 secs") requeueTime = time.Second * 10 } else { @@ -198,143 +183,29 @@ func (r *ConstraintStatusReconciler) Reconcile(ctx context.Context, } } - r.ConstraintToSyncOnly[request.Name] = *constraintSyncOnlyEntries + r.ConstraintToSyncOnly[request.Name] = constraintSyncOnlyEntries - updatedSyncOnly := r.getUniqSyncOnly() + uniqSyncOnly := r.getUniqSyncOnly() - if reflect.DeepEqual(updatedSyncOnly, config.Spec.Sync.SyncOnly) { - r.Log.V(1).Info("There is no changes detected. Cancel Update") + if reflect.DeepEqual(uniqSyncOnly, config.Spec.Sync.SyncOnly) { + r.Log.V(1).Info("There are no changes detected. Cancel Updating") return reconcile.Result{RequeueAfter: requeueTime}, nil } - config.Spec.Sync.SyncOnly = updatedSyncOnly + config.Spec.Sync.SyncOnly = uniqSyncOnly err = r.Update(ctx, config, &client.UpdateOptions{}) if err != nil { - return reconcile.Result{}, err - } - - return reconcile.Result{RequeueAfter: requeueTime}, nil -} + log.Error(err, "unable to update config syncOnly") -func (r *ConstraintStatusReconciler) getSyncOnlys(constraintMatchKinds []interface{}) ( - *[]v1alpha1.SyncOnlyEntry, error, -) { - syncOnlys := []v1alpha1.SyncOnlyEntry{} - - var finalErr error - - for _, match := range constraintMatchKinds { - newKind, ok := match.(map[string]interface{}) - if !ok { - continue - } - - apiGroups, ok := newKind["apiGroups"].([]interface{}) - if !ok { - continue - } - - kindsInKinds, ok := newKind["kinds"].([]interface{}) - if !ok { - continue - } - - for _, apiGroup := range apiGroups { - for _, kind := range kindsInKinds { - version, err := r.getAPIVersion(kind.(string), apiGroup.(string), false) - if err != nil { - r.Log.V(1).Info("getAPIVersion has error but continue") - - if finalErr == nil { - finalErr = err - } else { - // Accumulate error - finalErr = fmt.Errorf("%w; %w", finalErr, err) - } - - continue - } - - syncOnlys = append(syncOnlys, v1alpha1.SyncOnlyEntry{ - Group: apiGroup.(string), - Version: version, - Kind: kind.(string), - }) - } - } - } - - return &syncOnlys, finalErr -} - -// Find the version that the constraint does not provided. -// Constraint only provide kind and apiGroup. 
However the config resource need version -func (r *ConstraintStatusReconciler) getAPIVersion(kind string, apiGroup string, skipRefresh bool) (string, error) { - // Cool time(10 min) to refresh discoveries - if len(r.apiResourceList) == 0 || - r.discoveryLastRefreshed.Add(time.Minute*10).Before(time.Now()) { - err := r.refreshDiscoveryInfo() - if err != nil { - return "", err - } - - // The discovery is just refeshed so skip another refesh - skipRefresh = true - } - - for _, resc := range r.apiResourceList { - groupVerison, err := schema.ParseGroupVersion(resc.GroupVersion) - if err != nil { - r.Log.Error(err, "Cannot parse the group and version in getApiVersion ", "GroupVersion:", resc.GroupVersion) - - continue - } - - group := groupVerison.Group - version := groupVerison.Version - // Consider groupversion == v1 or groupversion == app1/v1 - for _, apiResource := range resc.APIResources { - if apiResource.Kind == kind && group == apiGroup { - return version, nil - } - } - } - - if !skipRefresh { - // Get new discoveryInfo, when any resource is not found - err := r.refreshDiscoveryInfo() - if err != nil { - return "", err - } - - // Retry one more time after refresh the discovery - return r.getAPIVersion(kind, apiGroup, true) - } - - return "", ErrNotFoundDiscovery -} - -// Retrieve all groups and versions to add in config sync -// Constraints present only kind and group so this function helps to find the version -func (r *ConstraintStatusReconciler) refreshDiscoveryInfo() error { - r.discoveryLastRefreshed = time.Now() - - discoveryClient := r.ClientSet.Discovery() - - apiList, err := discoveryClient.ServerPreferredResources() - if err != nil { - return err + return reconcile.Result{RequeueAfter: requeueTime}, nil } - // Save fetched discovery at apiResourceList - r.apiResourceList = apiList - - return nil + return reconcile.Result{RequeueAfter: requeueTime}, nil } -func (r *ConstraintStatusReconciler) getUniqSyncOnly() []v1alpha1.SyncOnlyEntry { +func (r *ConstraintPodStatusReconciler) getUniqSyncOnly() []v1alpha1.SyncOnlyEntry { syncOnlySet := map[v1alpha1.SyncOnlyEntry]bool{} // Add to table for unique filtering for _, syncEntries := range r.ConstraintToSyncOnly { @@ -348,22 +219,39 @@ func (r *ConstraintStatusReconciler) getUniqSyncOnly() []v1alpha1.SyncOnlyEntry syncOnlys = append(syncOnlys, key) } + // Sort syncOnly + sort.Slice(syncOnlys, func(i, j int) bool { + stringi := syncOnlys[i].Group + " " + syncOnlys[i].Kind + " " + syncOnlys[i].Version + stringj := syncOnlys[j].Group + " " + syncOnlys[j].Kind + " " + syncOnlys[j].Version + + return stringi < stringj + }) + return syncOnlys } -// This function is called when constraintPodStatus deleted event. -// Delete constraintPodstatus key and refresh r.constraintMap with current constraint -func (r *ConstraintStatusReconciler) handleDeleteEvent( +// handleDeleteEvent is called when a ConstraintPodStatus object is deleted. +// It deletes ConstraintPodStatus' key in the `ConstraintToSyncOnly` map and +// recalculates the appropriate SyncOnly entries. +func (r *ConstraintPodStatusReconciler) handleDeleteEvent( ctx context.Context, cpsName string, config *v1alpha1.Config, ) error { delete(r.ConstraintToSyncOnly, cpsName) updatedSyncOnly := r.getUniqSyncOnly() + if reflect.DeepEqual(updatedSyncOnly, config.Spec.Sync.SyncOnly) { + r.Log.V(1).Info("There are no changes detected. 
Will not update.") + + return nil + } + config.Spec.Sync.SyncOnly = updatedSyncOnly err := r.Update(ctx, config, &client.UpdateOptions{}) if err != nil { + r.Log.Error(err, "unable to update config syncOnly") + return err } diff --git a/controllers/cps_controller_helper.go b/controllers/cps_controller_helper.go index e34f69275..80472b36c 100644 --- a/controllers/cps_controller_helper.go +++ b/controllers/cps_controller_helper.go @@ -2,57 +2,71 @@ package controllers import ( "context" + "strings" + "time" + "github.com/gatekeeper/gatekeeper-operator/api/v1alpha1" operatorv1alpha1 "github.com/gatekeeper/gatekeeper-operator/api/v1alpha1" gkv1alpha1 "github.com/open-policy-agent/gatekeeper/v3/apis/config/v1alpha1" + "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1" gkv1beta1 "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1" "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" - "k8s.io/client-go/kubernetes" + "k8s.io/utils/strings/slices" ctrl "sigs.k8s.io/controller-runtime" cacheRuntime "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/healthz" - "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/metrics/server" ) -var setupLog = ctrl.Log.WithName("setup") +var ( + setupLog = ctrl.Log.WithName("setup") + errCrdNotReady = errors.New("CRD is not ready") +) -func (r *GatekeeperReconciler) addConstraintController(mainCtx context.Context, +func (r *GatekeeperReconciler) handleCPSController(mainCtx context.Context, gatekeeper *operatorv1alpha1.Gatekeeper, ) error { - ready, err := checkCPScontrollerAvailable(mainCtx, gatekeeper, r.DynamicClient) + isCRDReady, err := checkCPSCrdAvailable(mainCtx, r.DynamicClient) if err != nil { return err } - // auditFromCache is not Automatic, Delete existing ConstraintPodStatus Controller - if !ready { - if r.childCtx != nil { - r.DeleteCPSController() + if !isCRDReady { + return errCrdNotReady + } + + isAutomaticOn := checkCPScontrollerPrereqs(gatekeeper) + + // auditFromCache is not set to Automatic, so stop the existing ConstraintPodStatus controller + if !isAutomaticOn { + if r.isCPSCtrlRunning { + r.StopCPSController() } return nil } - if r.isCpsCtlInstalled { + if r.isCPSCtrlRunning { return nil } - r.childCtx, r.childCtxCancel = context.WithCancel(mainCtx) + var cpsCtrlCtx context.Context - cpsMgr, err := ctrl.NewManager(r.Cfg, ctrl.Options{ + cpsCtrlCtx, r.cpsCtrlCtxCancel = context.WithCancel(mainCtx) + + cpsMgr, err := ctrl.NewManager(r.KubeConfig, ctrl.Options{ Scheme: r.Scheme, Metrics: server.Options{ BindAddress: ":8083", }, - HealthProbeBindAddress: ":8084", - LeaderElection: r.EnableLeaderElection, - LeaderElectionID: "5ff985ccc.constraintstatuspod.gatekeeper.sh", + LeaderElection: r.EnableLeaderElection, + LeaderElectionID: "5ff985ccc.constraintstatuspod.gatekeeper.sh", Cache: cacheRuntime.Options{ ByObject: map[client.Object]cacheRuntime.ByObject{ &gkv1beta1.ConstraintPodStatus{}: { @@ -79,65 +93,132 @@ func (r *GatekeeperReconciler) addConstraintController(mainCtx context.Context, }, }) if err != nil { - setupLog.Error(err, "Failed to setup NewManager for ConstraintPodstatus contoller") + setupLog.Error(err, "Failed to setup NewManager for ConstraintPodStatus contoller") + + return 
err + } + + constraintToSyncOnly := r.getConstraintToSyncOnly(mainCtx) + + if err := (&ConstraintPodStatusReconciler{ + Scheme: r.Scheme, + Client: cpsMgr.GetClient(), + DynamicClient: r.DynamicClient, + Log: ctrl.Log.WithName("ConstraintPodStatus"), + Namespace: r.Namespace, + ConstraintToSyncOnly: constraintToSyncOnly, + DiscoveryStorage: r.DiscoveryStorage, + }).SetupWithManager(cpsMgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ConstraintPodStatus") return err } - // Use another go routine for gatekeeper controller + r.isCPSCtrlRunning = true + + // Use another go routine for the ConstraintPodStatus controller go func() { - err = r.addConstraintStatusManager(cpsMgr) + err := cpsMgr.Start(cpsCtrlCtx) if err != nil { - r.isCpsCtlInstalled = false + setupLog.Error(err, "A problem running ConstraintPodStatus manager. Triggering a reconcile to restart it.") + } - setupLog.Error(err, "unable to start ConstraintPodStatus manager") + defer r.cpsCtrlCtxCancel() + + r.cpsCtrlCtxCancel = nil + r.isCPSCtrlRunning = false + + // In case it is not an error and a child context is cancelled + // because the auditFromCache changed from Automatic, + // sending this channel avoids encountering a race condition. + // If the error happens when cpsMgr start, it will retry to start cpsMgr + r.ManualReconcileTrigger <- event.GenericEvent{ + Object: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": v1alpha1.GroupVersion.String(), + "kind": "Gatekeeper", + "metadata": map[string]interface{}{ + "name": defaultGatekeeperCrName, + }, + }, + }, } }() return nil } -func (r *GatekeeperReconciler) addConstraintStatusManager(mgr manager.Manager, -) error { - if err := (&ConstraintStatusReconciler{ - Scheme: r.Scheme, - Client: mgr.GetClient(), - ClientSet: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - DynamicClient: r.DynamicClient, - Log: ctrl.Log.WithName("controllers").WithName("ConstraintPodStatus"), - Namespace: r.Namespace, - ConstraintToSyncOnly: map[string][]gkv1alpha1.SyncOnlyEntry{}, - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "ConstraintPodStatus") +func (r *GatekeeperReconciler) getConstraintToSyncOnly(mainCtx context.Context) map[string][]gkv1alpha1.SyncOnlyEntry { + cpsList := &v1beta1.ConstraintPodStatusList{} - return err + err := r.Client.List(mainCtx, cpsList, &client.ListOptions{}) + if err != nil { + return nil } - if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { - setupLog.Error(err, "unable to set up health check") + // key = ConstraintPodStatus Name + constraintToSyncOnly := map[string][]gkv1alpha1.SyncOnlyEntry{} + // Add to table for unique filtering + for _, cps := range cpsList.Items { + // Pick only Audit ConstraintPodStatus + if !slices.Contains(cps.Status.Operations, "audit") { + continue + } - return err - } + constraint, constraintName, err := getConstraint(mainCtx, cps, r.DynamicClient) + if err != nil { + if apierrors.IsNotFound(err) { + r.Log.Info("The Constraint was not found", "constraintName:", constraintName) - if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { - setupLog.Error(err, "unable to set up ready check") + return nil + } - return err + return nil + } + + constraintMatchKinds, _, err := unstructured.NestedSlice(constraint.Object, "spec", "match", "kinds") + if err != nil { + r.Log.V(1).Info("There are no provided kinds in the Contsraint", "constraintName:", constraintName) + + return nil + } + + 
constraintSyncOnlyEntries, err := r.DiscoveryStorage.getSyncOnlys(constraintMatchKinds) + if err != nil { + // No need to retry. The ConstraintPodStatus_controller will sort out + continue + } + + constraintToSyncOnly[cps.Name] = constraintSyncOnlyEntries } - r.isCpsCtlInstalled = true + return constraintToSyncOnly +} - if err := mgr.Start(r.childCtx); err != nil { - setupLog.Error(err, "problem running ConstraintStatus manager") +// Helper function to get constraint from ConstraintPodStatus +func getConstraint(ctx context.Context, cps gkv1beta1.ConstraintPodStatus, + dynamicClient *dynamic.DynamicClient, +) (*unstructured.Unstructured, string, error) { + labels := cps.GetLabels() + constraintKind := labels["internal.gatekeeper.sh/constraint-kind"] + constraintName := labels["internal.gatekeeper.sh/constraint-name"] + + constraintGVR := schema.GroupVersionResource{ + Group: "constraints.gatekeeper.sh", + Version: "v1beta1", + Resource: strings.ToLower(constraintKind), + } - return err + constraint, err := dynamicClient.Resource(constraintGVR).Get(ctx, constraintName, metav1.GetOptions{}) + if err != nil { + return nil, constraintName, err } - return nil + return constraint, constraintName, nil } -// Check constraintPodStatus crd status is "True" and type is "NamesAccepted" -func checkCPSCrdAvailable(mainCtx context.Context, dynamicClient *dynamic.DynamicClient) bool { +// Check ConstraintPodStatus Crd status is "True" and type is "NamesAccepted" +func checkCPSCrdAvailable(mainCtx context.Context, dynamicClient *dynamic.DynamicClient) (bool, error) { crdGVR := schema.GroupVersionResource{ Group: "apiextensions.k8s.io", Version: "v1", @@ -147,16 +228,16 @@ func checkCPSCrdAvailable(mainCtx context.Context, dynamicClient *dynamic.Dynami crd, err := dynamicClient.Resource(crdGVR). Get(mainCtx, "constraintpodstatuses.status.gatekeeper.sh", metav1.GetOptions{}) if err != nil { - setupLog.V(1).Info("Cannot fetch ConstraintPodStatus crd") + setupLog.V(1).Info("Cannot fetch ConstraintPodStatus CRD") - return false + return false, err } conditions, ok, _ := unstructured.NestedSlice(crd.Object, "status", "conditions") if !ok { setupLog.V(1).Info("Cannot parse ConstraintPodStatus status conditions") - return false + return false, errors.New("Failed to parse status, conditions") } for _, condition := range conditions { @@ -166,52 +247,47 @@ func checkCPSCrdAvailable(mainCtx context.Context, dynamicClient *dynamic.Dynami if !ok { setupLog.V(1).Info("Cannot parse ConstraintPodStatus conditions status") - return false + return false, errors.New("Failed to parse status string") } conditionType, ok := parsedCondition["type"].(string) if !ok { setupLog.V(1).Info("Cannot parse ConstraintPodStatus conditions type") - return false + return false, errors.New("Failed to parse ConstraintPodStatus conditions type") } if conditionType == "NamesAccepted" && status == "True" { - setupLog.V(1).Info("ConstraintPodStatus crd is ready") + setupLog.V(1).Info("ConstraintPodStatus CRD is ready") - return true + return true, nil } } - setupLog.V(1).Info("ConstraintPodStatus crd is not ready yet") + setupLog.V(1).Info("ConstraintPodStatus CRD is not ready yet") - return false + return false, nil } // Check gatekeeper auditFromCache=Automatic -// Check constraintpodstatuses.status.gatekeeper.sh crd status has "NamesAccepted" condition that is true. 
-func checkCPScontrollerAvailable(mainCtx context.Context, - gatekeeper *operatorv1alpha1.Gatekeeper, dynamicClient *dynamic.DynamicClient, -) (bool, error) { - if gatekeeper.Spec.Audit == nil { - return false, nil - } +// Check constraintpodstatuses.status.gatekeeper.sh CRD status has "NamesAccepted" condition that is true. +func checkCPScontrollerPrereqs(gatekeeper *operatorv1alpha1.Gatekeeper) bool { + return gatekeeper.Spec.Audit != nil && gatekeeper.Spec.Audit.AuditFromCache != nil && + *gatekeeper.Spec.Audit.AuditFromCache == operatorv1alpha1.AuditFromCacheAutomatic +} - if gatekeeper.Spec.Audit.AuditFromCache == nil || - *gatekeeper.Spec.Audit.AuditFromCache != operatorv1alpha1.AuditFromCacheAutomatic { - return false, nil +func (r *GatekeeperReconciler) StopCPSController() { + if r.cpsCtrlCtxCancel == nil { + return } - if !checkCPSCrdAvailable(mainCtx, dynamicClient) { - return false, errors.New("ConstraintPodStatus Crd is not ready yet") - } + setupLog.Info("Gatekeeper auditFromCache unset from Automatic. Stopping the ConstraintPodStatus manager.") - return true, nil -} + r.cpsCtrlCtxCancel() -func (r *GatekeeperReconciler) DeleteCPSController() { - r.childCtxCancel() - r.isCpsCtlInstalled = false + for r.isCPSCtrlRunning { + setupLog.Info("Waiting for the ConstraintPodStatus manager to shutdown") - setupLog.Info("Gatekeeper deleted. Close ConstraintPodStatus manager") + time.Sleep(1 * time.Second) + } } diff --git a/controllers/discovery_storage.go b/controllers/discovery_storage.go new file mode 100644 index 000000000..872faf5c5 --- /dev/null +++ b/controllers/discovery_storage.go @@ -0,0 +1,140 @@ +package controllers + +import ( + "fmt" + "time" + + "github.com/go-logr/logr" + "github.com/open-policy-agent/gatekeeper/v3/apis/config/v1alpha1" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" +) + +var ErrNotFoundDiscovery = errors.New("there are no matched apiGroup, version or kind") + +type DiscoveryStorage struct { + apiResourceList []*metav1.APIResourceList + discoveryLastRefreshed time.Time + ClientSet *kubernetes.Clientset + Log logr.Logger +} + +func (r *DiscoveryStorage) getSyncOnlys(constraintMatchKinds []interface{}) ( + []v1alpha1.SyncOnlyEntry, error, +) { + syncOnlys := []v1alpha1.SyncOnlyEntry{} + + var finalErr error + + for _, match := range constraintMatchKinds { + newKind, ok := match.(map[string]interface{}) + if !ok { + continue + } + + apiGroups, ok := newKind["apiGroups"].([]interface{}) + if !ok { + continue + } + + kindsInKinds, ok := newKind["kinds"].([]interface{}) + if !ok { + continue + } + + for _, apiGroup := range apiGroups { + for _, kind := range kindsInKinds { + version, err := r.getAPIVersion(kind.(string), apiGroup.(string), false, r.ClientSet) + if err != nil { + r.Log.V(1).Info("getAPIVersion has error but continue") + + if finalErr == nil { + finalErr = err + } else { + // Accumulate error + finalErr = fmt.Errorf("%w; %w", finalErr, err) + } + + continue + } + + syncOnlys = append(syncOnlys, v1alpha1.SyncOnlyEntry{ + Group: apiGroup.(string), + Version: version, + Kind: kind.(string), + }) + } + } + } + + return syncOnlys, finalErr +} + +// getAPIVersion gets the server preferred API version for the constraint's match kind entry +// Constraint only provide kind and apiGroup. 
+// However, the Config resource needs the version.
+func (r *DiscoveryStorage) getAPIVersion(kind string,
+	apiGroup string, skipRefresh bool, clientSet *kubernetes.Clientset,
+) (string, error) {
+	// Refresh the cached discovery information at most once every 10 minutes
+	if len(r.apiResourceList) == 0 ||
+		r.discoveryLastRefreshed.Add(time.Minute*10).Before(time.Now()) {
+		err := r.refreshDiscoveryInfo()
+		if err != nil {
+			return "", err
+		}
+
+		// The discovery information was just refreshed, so skip another refresh
+		skipRefresh = true
+	}
+
+	for _, resc := range r.apiResourceList {
+		groupVersion, err := schema.ParseGroupVersion(resc.GroupVersion)
+		if err != nil {
+			r.Log.Error(err, "Cannot parse the group and version in getAPIVersion", "GroupVersion:", resc.GroupVersion)
+
+			continue
+		}
+
+		group := groupVersion.Group
+		version := groupVersion.Version
+		// The GroupVersion is either a bare version (e.g. v1) or group/version (e.g. apps/v1)
+		for _, apiResource := range resc.APIResources {
+			if apiResource.Kind == kind && group == apiGroup {
+				return version, nil
+			}
+		}
+	}
+
+	if !skipRefresh {
+		// Refresh the discovery information when the resource is not found
+		err := r.refreshDiscoveryInfo()
+		if err != nil {
+			return "", err
+		}
+
+		// Retry one more time after refreshing the discovery information
+		return r.getAPIVersion(kind, apiGroup, true, clientSet)
+	}
+
+	return "", ErrNotFoundDiscovery
+}
+
+// refreshDiscoveryInfo retrieves all API groups and versions to add to the config syncOnly.
+// Constraints provide only the kind and group, so this information is used to find the version.
+func (r *DiscoveryStorage) refreshDiscoveryInfo() error {
+	r.discoveryLastRefreshed = time.Now()
+
+	discoveryClient := r.ClientSet.Discovery()
+
+	apiList, err := discoveryClient.ServerPreferredResources()
+	if err != nil {
+		return err
+	}
+
+	// Cache the fetched discovery information in apiResourceList
+	r.apiResourceList = apiList
+
+	return nil
+}
diff --git a/controllers/gatekeeper_controller.go b/controllers/gatekeeper_controller.go
index ae65b5a71..e503e6bed 100644
--- a/controllers/gatekeeper_controller.go
+++ b/controllers/gatekeeper_controller.go
@@ -39,8 +39,9 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
-	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/source"
 
 	operatorv1alpha1 "github.com/gatekeeper/gatekeeper-operator/api/v1alpha1"
 	"github.com/gatekeeper/gatekeeper-operator/controllers/merge"
@@ -131,13 +132,14 @@ type GatekeeperReconciler struct {
 	Scheme       *runtime.Scheme
 	Namespace    string
 	PlatformInfo platform.PlatformInfo
-	// Flag for constraintPodStatus controller installation
-	isCpsCtlInstalled    bool
-	DynamicClient        *dynamic.DynamicClient
-	Cfg                  *rest.Config
-	EnableLeaderElection bool
-	childCtx             context.Context
-	childCtxCancel       context.CancelFunc
+	// DiscoveryStorage caches the cluster API discovery data used to resolve resource versions
+	DiscoveryStorage       *DiscoveryStorage
+	ManualReconcileTrigger chan event.GenericEvent
+	isCPSCtrlRunning       bool
+	DynamicClient          *dynamic.DynamicClient
+	KubeConfig             *rest.Config
+	EnableLeaderElection   bool
+	cpsCtrlCtxCancel       context.CancelFunc
 }
 
 type crudOperation uint32
@@ -192,7 +194,6 @@ const (
 func (r *GatekeeperReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	logger := r.Log.WithValues("gatekeeper", req.NamespacedName)
 	logger.Info("Reconciling Gatekeeper")
-	var requeueTime time.Duration
 
 	if req.Name != defaultGatekeeperCrName {
 		err := fmt.Errorf("Gatekeeper resource name must be '%s'", defaultGatekeeperCrName)
@@ -205,8 +206,8 @@ func (r *GatekeeperReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	err := r.Get(ctx, req.NamespacedName, gatekeeper)
 	if err != nil {
 		if apierrors.IsNotFound(err) {
-			if r.childCtx != nil && r.isCpsCtlInstalled {
-				r.DeleteCPSController()
+			if r.isCPSCtrlRunning {
+				r.StopCPSController()
 			}
 
 			return ctrl.Result{}, nil
@@ -220,10 +221,6 @@ func (r *GatekeeperReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 			"spec.image.image", gatekeeper.Spec.Image.Image)
 	}
 
-	if err := r.addConstraintController(ctx, gatekeeper); err != nil {
-		requeueTime = time.Second * 3
-	}
-
 	err, requeue := r.deployGatekeeperResources(gatekeeper)
 	if err != nil {
 		return ctrl.Result{}, errors.Wrap(err, "Unable to deploy Gatekeeper resources")
@@ -231,11 +228,20 @@ func (r *GatekeeperReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
 	}
 
-	return reconcile.Result{RequeueAfter: requeueTime}, nil
+	if err := r.handleCPSController(ctx, gatekeeper); err != nil {
+		if errors.Is(err, errCrdNotReady) {
+			// The ConstraintPodStatus CRD is not ready yet, so requeue and wait for it to become ready
+			return ctrl.Result{RequeueAfter: time.Second * 3}, nil
+		} else {
+			return ctrl.Result{}, err
+		}
+	}
+
+	return ctrl.Result{}, nil
 }
 
 // SetupWithManager sets up the controller with the Manager.
-func (r *GatekeeperReconciler) SetupWithManager(mgr ctrl.Manager) error {
+func (r *GatekeeperReconciler) SetupWithManager(mgr ctrl.Manager, fromCPSMgrSource *source.Channel) error {
 	return ctrl.NewControllerManagedBy(mgr).
 		WithOptions(controller.Options{MaxConcurrentReconciles: int(1)}).
 		For(&operatorv1alpha1.Gatekeeper{}).
@@ -246,10 +252,8 @@ func (r *GatekeeperReconciler) SetupWithManager(mgr ctrl.Manager) error {
 
 				return oldGeneration != newGeneration
 			},
-			DeleteFunc: func(e event.DeleteEvent) bool {
-				return true
-			},
 		}).
+		WatchesRawSource(fromCPSMgrSource, &handler.EnqueueRequestForObject{}).
 		Complete(r)
 }
 
diff --git a/docs/upgrading-gatekeeper.md b/docs/upgrading-gatekeeper.md
index eb73d399c..0dda501ce 100644
--- a/docs/upgrading-gatekeeper.md
+++ b/docs/upgrading-gatekeeper.md
@@ -110,13 +110,15 @@ imported Gatekeeper manifests.
 ## 10. Update unit and e2e tests
 
-The gatekeeper package version should be updated with the matched Gatekeeper version in go.mod.
+Make sure to add or modify any unit and e2e tests as a result of any operator
+controller changes.
 
-## 11. Updating gatekeeper
+## 11. Updating the gatekeeper Go module
 
 ### Update gatekeeper package according to gatekeeper version
 
-The gatekeeper package version should be updated with the matched Gatekeeper version in go.mod.
+The Gatekeeper Go module version in `go.mod` should be updated to match the Gatekeeper version being deployed by the operator.
 
 ```
-require github.com/open-policy-agent/gatekeeper/v3 v3.13.4
+sed -i "s|require github.com/open-policy-agent/gatekeeper/v3 .*|require github.com/open-policy-agent/gatekeeper/v3 ${GATEKEEPER_VERSION}|" go.mod
+go mod tidy
 ```
\ No newline at end of file
diff --git a/main.go b/main.go
index 79f048e92..ca5b53f61 100644
--- a/main.go
+++ b/main.go
@@ -24,9 +24,12 @@ import (
 	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
 	// to ensure that exec-entrypoint and run can make use of them.
+ "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth" + "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/source" "sigs.k8s.io/controller-runtime/pkg/webhook" operatorv1alpha1 "github.com/gatekeeper/gatekeeper-operator/api/v1alpha1" @@ -121,17 +124,24 @@ func main() { } dynamicClient := dynamic.NewForConfigOrDie(mgr.GetConfig()) + manualReconcileTrigger := make(chan event.GenericEvent, 1024) + fromCPSMgrSource := &source.Channel{Source: manualReconcileTrigger, DestBufferSize: 1024} if err = (&controllers.GatekeeperReconciler{ - Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("Gatekeeper"), - Scheme: mgr.GetScheme(), - Namespace: namespace, - PlatformInfo: platformInfo, - DynamicClient: dynamicClient, - Cfg: cfg, - EnableLeaderElection: enableLeaderElection, - }).SetupWithManager(mgr); err != nil { + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Gatekeeper"), + Scheme: mgr.GetScheme(), + Namespace: namespace, + PlatformInfo: platformInfo, + DynamicClient: dynamicClient, + KubeConfig: cfg, + EnableLeaderElection: enableLeaderElection, + ManualReconcileTrigger: manualReconcileTrigger, + DiscoveryStorage: &controllers.DiscoveryStorage{ + Log: ctrl.Log.WithName("discovery_storage"), + ClientSet: kubernetes.NewForConfigOrDie(mgr.GetConfig()), + }, + }).SetupWithManager(mgr, fromCPSMgrSource); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Gatekeeper") os.Exit(1) } diff --git a/test/e2e/case1_audit_from_cache_test.go b/test/e2e/case1_audit_from_cache_test.go index 3bb8a4f46..ffa040b46 100644 --- a/test/e2e/case1_audit_from_cache_test.go +++ b/test/e2e/case1_audit_from_cache_test.go @@ -55,7 +55,7 @@ var _ = Describe("Test auditFromCache", Ordered, func() { By("Create a gatekeeper resource") _, err := KubectlWithOutput("apply", "-f", case1GatekeeperYaml) Expect(err).ShouldNot(HaveOccurred()) - // 150 secs are needed when this spec after gatekeeper-controller test + // Need enough time until gatekeeper is up ctlDeployment := GetWithTimeout(clientHubDynamic, deploymentGVR, "gatekeeper-controller-manager", gatekeeperNamespace, true, 150) Expect(ctlDeployment).NotTo(BeNil()) @@ -107,6 +107,7 @@ var _ = Describe("Test auditFromCache", Ordered, func() { ingress := GetWithTimeout(clientHubDynamic, constraintGVR, "case1-ingress-deny", "", true, 60) Expect(ingress).NotTo(BeNil()) }) + Describe("Gatekeeper with auditFromCache=Automatic create syncOnly config", Ordered, func() { It("should create config resource with syncOnly includes pod, ingress, storageclass", func() { config := &v1alpha1.Config{} @@ -145,10 +146,11 @@ var _ = Describe("Test auditFromCache", Ordered, func() { foundSyncOnly := slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s v1alpha1.SyncOnlyEntry) bool { return s.Kind == key }) + Expect(foundSyncOnly).ShouldNot(Equal(-1)) Expect(config.Spec.Sync.SyncOnly[foundSyncOnly]).Should(BeEquivalentTo(val)) } }) - It("Should error message shows cached pod list", func() { + It("Should have an error message with the cached pod list", func() { _, err := KubectlWithOutput("apply", "-f", case1PodYaml, "-n", allowNamespace) Expect(err).ShouldNot(HaveOccurred()) output, err := KubectlWithOutput("apply", "-f", case1PodYaml, "-n", denyNamespace) @@ -157,6 +159,7 @@ var _ = Describe("Test auditFromCache", Ordered, func() { Should(ContainSubstring("cached data: {\"case1-pod\": {\"apiVersion\": \"v1\", \"kind\": 
\"Pod\"")) }) }) + Describe("Gatekeeper with auditFromCache=Automatic delete syncOnly config", Ordered, func() { It("Should have 3 syncOnly elements in config", func() { config := &v1alpha1.Config{} @@ -231,14 +234,15 @@ var _ = Describe("Test auditFromCache", Ordered, func() { }, timeout).Should(BeNil()) }) }) - Describe("Updating contraint should apply to config resource", Ordered, func() { + + Describe("Updating constraint should apply to config resource", Ordered, func() { It("Should update the config resource", func() { config := &v1alpha1.Config{} By("Add a new constraint") Kubectl("apply", "-f", case1ConstraintUpdateYaml) - By("Group name 'apps.StatefulSet' should exist in SyncOnly although the crds does not exist") + By("Group name 'apps.StatefulSet' should exist in SyncOnly") Eventually(func(g Gomega) int { err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) g.Expect(err).ShouldNot(HaveOccurred()) @@ -251,7 +255,7 @@ var _ = Describe("Test auditFromCache", Ordered, func() { By("Update the config") Kubectl("apply", "-f", case1ConstraintUpdateChangeYaml) - By("Group name 'batch.CronJob' should exist in SyncOnly although tge crds does not exist") + By("Group name 'batch.CronJob' should exist in SyncOnly") Eventually(func(g Gomega) int { err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) g.Expect(err).ShouldNot(HaveOccurred()) @@ -261,7 +265,7 @@ var _ = Describe("Test auditFromCache", Ordered, func() { }) }, timeout).ShouldNot(Equal(-1)) - By("Group name 'events.k8s.io.Event' should exist in SyncOnly although the crds does not exist") + By("Group name 'events.k8s.io.Event' should exist in SyncOnly") Eventually(func(g Gomega) int { err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) g.Expect(err).ShouldNot(HaveOccurred()) @@ -272,8 +276,9 @@ var _ = Describe("Test auditFromCache", Ordered, func() { }, timeout).ShouldNot(Equal(-1)) }) }) + Describe("Add wrong match kinds", Ordered, func() { - It("Should not add not-founded matches", func() { + It("Should not add not founded matches", func() { config := &v1alpha1.Config{} By("Apply constraint") @@ -291,9 +296,40 @@ var _ = Describe("Test auditFromCache", Ordered, func() { return slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s v1alpha1.SyncOnlyEntry) bool { return s.Group == "ohmyhappy.sad.io" && s.Kind == "alien" }) - }, 20).Should(Equal(-1)) + }, 10).Should(Equal(-1)) + + By("Group name 'apps.StatefulSet' should still exist in SyncOnly") + Eventually(func(g Gomega) int { + err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s v1alpha1.SyncOnlyEntry) bool { + return s.Group == "apps" && s.Kind == "StatefulSet" + }) + }, timeout).ShouldNot(Equal(-1)) + + By("Group name 'batch.CronJob' should still exist in SyncOnly") + Eventually(func(g Gomega) int { + err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s v1alpha1.SyncOnlyEntry) bool { + return s.Group == "batch" && s.Kind == "CronJob" + }) + }, timeout).ShouldNot(Equal(-1)) + + By("Group name 'events.k8s.io.Event' should still exist in SyncOnly") + Eventually(func(g Gomega) int { + err := K8sClient.Get(ctx, 
types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s v1alpha1.SyncOnlyEntry) bool { + return s.Group == "events.k8s.io" && s.Kind == "Event" + }) + }, timeout).ShouldNot(Equal(-1)) }) }) + AfterAll(func() { Kubectl("delete", "ns", allowNamespace, "--ignore-not-found", "--grace-period=1") Kubectl("delete", "ns", denyNamespace, "--ignore-not-found", "--grace-period=1") diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index af36d595b..b8fcc821e 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -24,11 +24,13 @@ import ( "path/filepath" "testing" + test "github.com/gatekeeper/gatekeeper-operator/test/e2e/util" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" @@ -102,6 +104,20 @@ var _ = BeforeSuite(func() { clientHubDynamic = NewKubeClientDynamic("", "", "") deploymentGVR = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} + + test.DefaultDeployment.NamespaceSelector = &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "admission.gatekeeper.sh/ignore", + Operator: metav1.LabelSelectorOpDoesNotExist, + }, + { + Key: "kubernetes.io/metadata.name", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{gatekeeperNamespace}, + }, + }, + } }) var _ = AfterSuite(func() {