diff --git a/.github/workflows/ci_tests.yaml b/.github/workflows/ci_tests.yaml index 55598d74..c9eb73ec 100644 --- a/.github/workflows/ci_tests.yaml +++ b/.github/workflows/ci_tests.yaml @@ -81,11 +81,10 @@ jobs: - name: E2E Tests run: | - make deploy-ci NAMESPACE=mygatekeeper IMG=localhost:5000/gatekeeper-operator:$GITHUB_SHA - kubectl -n mygatekeeper wait deployment/gatekeeper-operator-controller --for condition=Available --timeout=90s - kubectl -n mygatekeeper logs deployment/gatekeeper-operator-controller -c manager -f > operator.log & - make test-e2e NAMESPACE=mygatekeeper - kubectl delete --wait namespace mygatekeeper + make deploy-ci NAMESPACE=gatekeeper-system IMG=localhost:5000/gatekeeper-operator:$GITHUB_SHA + kubectl -n gatekeeper-system wait deployment/gatekeeper-operator-controller --for condition=Available --timeout=90s + kubectl -n gatekeeper-system logs deployment/gatekeeper-operator-controller -c manager -f > operator.log & + make test-e2e NAMESPACE=gatekeeper-system - name: Debug if: ${{ failure() }} diff --git a/.gitignore b/.gitignore index 874c0dae..ec136637 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,5 @@ testbin/* !vendor/**/zz_generated.* ci-tools/ + +.vscode/* diff --git a/Makefile b/Makefile index a6275e8c..57669adc 100644 --- a/Makefile +++ b/Makefile @@ -146,11 +146,11 @@ tidy: ## Run go mod tidy .PHONY: test test: manifests generate fmt vet envtest ## Run tests. - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" GOFLAGS=$(GOFLAGS) go test ./... -coverprofile cover.out + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" GOFLAGS=$(GOFLAGS) go test $$(go list ./... 
| grep -v /test/) -coverprofile cover.out .PHONY: test-e2e test-e2e: e2e-dependencies generate fmt vet ## Run e2e tests, using the configured Kubernetes cluster in ~/.kube/config - GOFLAGS=$(GOFLAGS) USE_EXISTING_CLUSTER=true $(GINKGO) -v --trace --fail-fast --label-filter="$(LABEL_FILTER)" ./test/e2e -- --namespace="$(NAMESPACE)" --timeout="5m" --delete-timeout="10m" + GOFLAGS=$(GOFLAGS) USE_EXISTING_CLUSTER=true $(GINKGO) -v --trace --fail-fast ./test/e2e -- --namespace="$(NAMESPACE)" --timeout="5m" --delete-timeout="10m" .PHONY: test-cluster test-cluster: ## Create a local kind cluster with a registry for testing @@ -182,7 +182,14 @@ download-binaries: kustomize go-bindata envtest controller-gen rm -rf bats-core-${BATS_VERSION} v${BATS_VERSION}.tar.gz; \ fi -##@ Build +.PHONY: kind-bootstrap-cluster +kind-bootstrap-cluster: test-cluster install dev-build + kubectl label ns $(NAMESPACE) --overwrite pod-security.kubernetes.io/audit=privileged + kubectl label ns $(NAMESPACE) --overwrite pod-security.kubernetes.io/enforce=privileged + kubectl label ns $(NAMESPACE) --overwrite pod-security.kubernetes.io/warn=privileged + kind load docker-image $(IMG) + $(MAKE) deploy-ci NAMESPACE=$(NAMESPACE) IMG=$(IMG) + kubectl -n $(NAMESPACE) wait deployment/gatekeeper-operator-controller --for condition=Available --timeout=90s .PHONY: build build: generate fmt vet ## Build manager binary. diff --git a/api/v1alpha1/gatekeeper_types.go b/api/v1alpha1/gatekeeper_types.go index 72675044..a77c8aee 100644 --- a/api/v1alpha1/gatekeeper_types.go +++ b/api/v1alpha1/gatekeeper_types.go @@ -92,6 +92,8 @@ type AuditConfig struct { // +optional ConstraintViolationLimit *uint64 `json:"constraintViolationLimit,omitempty"` // +optional + // Setting Automatic lets the Gatekeeper operator manage syncOnly in the config resource. + // It is not recommended to use Automatic when using referential constraints since those are not detected. 
AuditFromCache *AuditFromCacheMode `json:"auditFromCache,omitempty"` // +kubebuilder:validation:Minimum:=0 // +optional @@ -140,12 +142,13 @@ const ( LogLevelError LogLevelMode = "ERROR" ) -// +kubebuilder:validation:Enum:=Enabled;Disabled +// +kubebuilder:validation:Enum:=Enabled;Disabled;Automatic type AuditFromCacheMode string const ( - AuditFromCacheEnabled AuditFromCacheMode = "Enabled" - AuditFromCacheDisabled AuditFromCacheMode = "Disabled" + AuditFromCacheEnabled AuditFromCacheMode = "Enabled" + AuditFromCacheDisabled AuditFromCacheMode = "Disabled" + AuditFromCacheAutomatic AuditFromCacheMode = "Automatic" ) // +kubebuilder:validation:Enum:=Enabled;Disabled diff --git a/bundle/manifests/operator.gatekeeper.sh_gatekeepers.yaml b/bundle/manifests/operator.gatekeeper.sh_gatekeepers.yaml index 7ffedce7..08e0edb4 100644 --- a/bundle/manifests/operator.gatekeeper.sh_gatekeepers.yaml +++ b/bundle/manifests/operator.gatekeeper.sh_gatekeepers.yaml @@ -864,9 +864,14 @@ spec: minimum: 0 type: integer auditFromCache: + description: Setting Automatic lets the Gatekeeper operator manage + syncOnly in the config resource. It is not recommended to use + Automatic when using referential constraints since those are + not detected. enum: - Enabled - Disabled + - Automatic type: string auditInterval: type: string diff --git a/config/crd/bases/operator.gatekeeper.sh_gatekeepers.yaml b/config/crd/bases/operator.gatekeeper.sh_gatekeepers.yaml index a5b84d84..c7f323dd 100644 --- a/config/crd/bases/operator.gatekeeper.sh_gatekeepers.yaml +++ b/config/crd/bases/operator.gatekeeper.sh_gatekeepers.yaml @@ -864,9 +864,14 @@ spec: minimum: 0 type: integer auditFromCache: + description: Setting Automatic lets the Gatekeeper operator manage + syncOnly in the config resource. It is not recommended to use + Automatic when using referential constraints since those are + not detected. 
enum: - Enabled - Disabled + - Automatic type: string auditInterval: type: string diff --git a/controllers/constraintstatus_controller.go b/controllers/constraintstatus_controller.go new file mode 100644 index 00000000..0b314ed4 --- /dev/null +++ b/controllers/constraintstatus_controller.go @@ -0,0 +1,259 @@ +package controllers + +import ( + "context" + "reflect" + "sort" + "time" + + operatorv1alpha1 "github.com/gatekeeper/gatekeeper-operator/api/v1alpha1" + "github.com/go-logr/logr" + "github.com/open-policy-agent/gatekeeper/v3/apis/config/v1alpha1" + "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/dynamic" + "k8s.io/utils/strings/slices" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var ControllerName = "constraintstatus_reconciler" + +type ConstraintPodStatusReconciler struct { + client.Client + Scheme *runtime.Scheme + Log logr.Logger + DynamicClient *dynamic.DynamicClient + Namespace string + // This includes api-resources list and it finds a missing version of resources. + DiscoveryStorage *DiscoveryStorage + // key = constraintPodName + ConstraintToSyncOnly map[string][]v1alpha1.SyncOnlyEntry +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ConstraintPodStatusReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{MaxConcurrentReconciles: int(1)}). + Named(ControllerName). 
+ For(&v1beta1.ConstraintPodStatus{}, + builder.WithPredicates(predicate.Funcs{ + // Execute this reconcile func when it is audit-constraintStatuspod + // because a constraint creates 4 constraintPodstatus + CreateFunc: func(e event.CreateEvent) bool { + obj := e.Object.(*v1beta1.ConstraintPodStatus) + + return slices.Contains(obj.Status.Operations, "audit") + }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldObj := e.ObjectOld.(*v1beta1.ConstraintPodStatus) + newObj := e.ObjectNew.(*v1beta1.ConstraintPodStatus) + + return slices.Contains(newObj.Status.Operations, "audit") && + // Update when the constraint is refreshed + oldObj.Status.ObservedGeneration != newObj.Status.ObservedGeneration + }, + DeleteFunc: func(e event.DeleteEvent) bool { + obj := e.Object.(*v1beta1.ConstraintPodStatus) + + return slices.Contains(obj.Status.Operations, "audit") + }, + }, + )). + Complete(r) +} + +// When spec.audit.auditFromCache is set to Automatic, +// Reconcile analyzes the constraint associated with the ConstraintPodStatus reconcile request. +// The kinds used in the constraint's match configuration is used to configure the syncOnly option. 
+func (r *ConstraintPodStatusReconciler) Reconcile(ctx context.Context, + request reconcile.Request, +) (reconcile.Result, error) { + log := r.Log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) + log.Info("Reconciling ConstraintPodStatus and Config") + // This is used for RequeueAfter + var requeueTime time.Duration + + gatekeeper := &operatorv1alpha1.Gatekeeper{} + // Get gatekeeper resource + err := r.Get(ctx, types.NamespacedName{ + Namespace: "", + Name: "gatekeeper", + }, gatekeeper) + if err != nil { + if apierrors.IsNotFound(err) { + log.Error(err, "Gatekeeper resource is not found") + + return reconcile.Result{}, nil + } + + return reconcile.Result{}, err + } + + // Get config or create if not exist + config := &v1alpha1.Config{} + err = r.Get(ctx, types.NamespacedName{ + Namespace: r.Namespace, + Name: "config", + }, config) + + if err != nil { + if apierrors.IsNotFound(err) { + config = &v1alpha1.Config{ + ObjectMeta: metav1.ObjectMeta{ + Name: "config", + Namespace: r.Namespace, + }, + } + + createErr := r.Create(ctx, config) + if createErr != nil { + log.Error(err, "Fail to create the Gatekeeper Config object, will retry.") + + return reconcile.Result{}, createErr + } + + log.Info("The Gatekeeper Config object was created") + } else { + return reconcile.Result{}, err + } + } + + constraintPodStatus := &v1beta1.ConstraintPodStatus{} + + err = r.Get(ctx, request.NamespacedName, constraintPodStatus) + if err != nil { + if apierrors.IsNotFound(err) { + log.V(1).Info("Cannot find the ConstraintPodStatus") + + err = r.handleDeleteEvent(ctx, request.Name, config) + if err != nil { + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil + } + // Requeue + return reconcile.Result{}, err + } + + constraint, constraintName, err := getConstraint(ctx, *constraintPodStatus, r.DynamicClient) + if err != nil { + if apierrors.IsNotFound(err) { + r.Log.Info("The Constraint was not found", "constraintName:", 
constraintName) + + return reconcile.Result{}, nil + } + + return reconcile.Result{}, err + } + + constraintMatchKinds, _, err := unstructured.NestedSlice(constraint.Object, "spec", "match", "kinds") + if err != nil { + r.Log.V(1).Info("There are no provided kinds in the Constraint", "constraintName:", constraintName) + + err = r.handleDeleteEvent(ctx, request.Name, config) + if err != nil { + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil + } + + constraintSyncOnlyEntries, err := r.DiscoveryStorage.getSyncOnlys(constraintMatchKinds) + if err != nil { + if errors.Is(err, ErrNotFoundDiscovery) { + r.Log.V(1).Info("Cannot find matched discovery. Requeue after 10 secs") + + requeueTime = time.Second * 10 + } else { + log.Error(err, "Error to get matching kind and apigroup") + + return reconcile.Result{}, err + } + } + + r.ConstraintToSyncOnly[request.Name] = constraintSyncOnlyEntries + + uniqSyncOnly := r.getUniqSyncOnly() + + if reflect.DeepEqual(uniqSyncOnly, config.Spec.Sync.SyncOnly) { + r.Log.V(1).Info("There are no changes detected. Cancel Updating") + + return reconcile.Result{RequeueAfter: requeueTime}, nil + } + + config.Spec.Sync.SyncOnly = uniqSyncOnly + + err = r.Update(ctx, config, &client.UpdateOptions{}) + if err != nil { + log.Error(err, "unable to update config syncOnly") + + return reconcile.Result{}, err + } + + return reconcile.Result{RequeueAfter: requeueTime}, nil +} + +func (r *ConstraintPodStatusReconciler) getUniqSyncOnly() []v1alpha1.SyncOnlyEntry { + syncOnlySet := map[v1alpha1.SyncOnlyEntry]bool{} + // Add to table for unique filtering + for _, syncEntries := range r.ConstraintToSyncOnly { + for _, entry := range syncEntries { + syncOnlySet[entry] = true + } + } + + syncOnlys := make([]v1alpha1.SyncOnlyEntry, 0, len(syncOnlySet)) + for key := range syncOnlySet { + syncOnlys = append(syncOnlys, key) + } + + // Sort syncOnly so the returned value is consistent each time the method is called. 
+ sort.Slice(syncOnlys, func(i, j int) bool { + stringi := syncOnlys[i].Group + " " + syncOnlys[i].Kind + " " + syncOnlys[i].Version + stringj := syncOnlys[j].Group + " " + syncOnlys[j].Kind + " " + syncOnlys[j].Version + + return stringi < stringj + }) + + return syncOnlys +} + +// handleDeleteEvent is called when a ConstraintPodStatus object is deleted. +// It deletes ConstraintPodStatus' key in the `ConstraintToSyncOnly` map and +// recalculates the appropriate SyncOnly entries. +func (r *ConstraintPodStatusReconciler) handleDeleteEvent( + ctx context.Context, cpsName string, config *v1alpha1.Config, +) error { + delete(r.ConstraintToSyncOnly, cpsName) + + updatedSyncOnly := r.getUniqSyncOnly() + + if reflect.DeepEqual(updatedSyncOnly, config.Spec.Sync.SyncOnly) { + r.Log.V(1).Info("There are no changes detected. Will not update.") + + return nil + } + + config.Spec.Sync.SyncOnly = updatedSyncOnly + + err := r.Update(ctx, config, &client.UpdateOptions{}) + if err != nil { + r.Log.Error(err, "unable to update config syncOnly") + + return err + } + + return nil +} diff --git a/controllers/cps_controller_helper.go b/controllers/cps_controller_helper.go new file mode 100644 index 00000000..08183d98 --- /dev/null +++ b/controllers/cps_controller_helper.go @@ -0,0 +1,291 @@ +package controllers + +import ( + "context" + "strings" + "time" + + "github.com/gatekeeper/gatekeeper-operator/api/v1alpha1" + operatorv1alpha1 "github.com/gatekeeper/gatekeeper-operator/api/v1alpha1" + gkv1alpha1 "github.com/open-policy-agent/gatekeeper/v3/apis/config/v1alpha1" + "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1" + gkv1beta1 "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/utils/strings/slices" 
+ ctrl "sigs.k8s.io/controller-runtime" + cacheRuntime "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/metrics/server" +) + +var ( + setupLog = ctrl.Log.WithName("setup") + errCrdNotReady = errors.New("CRD is not ready") +) + +func (r *GatekeeperReconciler) handleCPSController(mainCtx context.Context, + gatekeeper *operatorv1alpha1.Gatekeeper, +) error { + isCRDReady, err := checkCPSCrdAvailable(mainCtx, r.DynamicClient) + if err != nil { + return err + } + + if !isCRDReady { + return errCrdNotReady + } + + isAutomaticOn := checkCPScontrollerPrereqs(gatekeeper) + + // auditFromCache is not set to Automatic, so stop the existing ConstraintPodStatus controller + if !isAutomaticOn { + if r.isCPSCtrlRunning { + r.StopCPSController() + } + + return nil + } + + if r.isCPSCtrlRunning { + return nil + } + + var cpsCtrlCtx context.Context + + cpsCtrlCtx, r.cpsCtrlCtxCancel = context.WithCancel(mainCtx) + + cpsMgr, err := ctrl.NewManager(r.KubeConfig, ctrl.Options{ + Scheme: r.Scheme, + Metrics: server.Options{ + BindAddress: "0", + }, + LeaderElection: r.EnableLeaderElection, + LeaderElectionID: "5ff985ccc.constraintstatuspod.gatekeeper.sh", + Cache: cacheRuntime.Options{ + ByObject: map[client.Object]cacheRuntime.ByObject{ + &gkv1beta1.ConstraintPodStatus{}: { + Transform: func(obj interface{}) (interface{}, error) { + constraintStatus := obj.(*gkv1beta1.ConstraintPodStatus) + // Only cache fields that are utilized by the controllers. 
+ guttedObj := &gkv1beta1.ConstraintPodStatus{ + TypeMeta: constraintStatus.TypeMeta, + ObjectMeta: metav1.ObjectMeta{ + Name: constraintStatus.Name, + Labels: constraintStatus.Labels, + Namespace: constraintStatus.Namespace, + }, + Status: gkv1beta1.ConstraintPodStatusStatus{ + ObservedGeneration: constraintStatus.Status.ObservedGeneration, + Operations: constraintStatus.Status.Operations, + }, + } + + return guttedObj, nil + }, + }, + }, + }, + }) + if err != nil { + setupLog.Error(err, "Failed to setup NewManager for ConstraintPodStatus controller") + + return err + } + + constraintToSyncOnly := r.getConstraintToSyncOnly(mainCtx) + + if err := (&ConstraintPodStatusReconciler{ + Scheme: r.Scheme, + Client: cpsMgr.GetClient(), + DynamicClient: r.DynamicClient, + Log: ctrl.Log.WithName("ConstraintPodStatus"), + Namespace: r.Namespace, + ConstraintToSyncOnly: constraintToSyncOnly, + DiscoveryStorage: r.DiscoveryStorage, + }).SetupWithManager(cpsMgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ConstraintPodStatus") + + return err + } + + r.isCPSCtrlRunning = true + + // Use another go routine for the ConstraintPodStatus controller + go func() { + err := cpsMgr.Start(cpsCtrlCtx) + if err != nil { + setupLog.Error(err, "A problem running ConstraintPodStatus manager. Triggering a reconcile to restart it.") + } + + defer r.cpsCtrlCtxCancel() + + r.cpsCtrlCtxCancel = nil + r.isCPSCtrlRunning = false + + // In case it is not an error and a child context is cancelled + // because the auditFromCache changed from Automatic, + // sending this channel avoids encountering a race condition. 
+ // If the error happens when cpsMgr start, it will retry to start cpsMgr + r.ManualReconcileTrigger <- event.GenericEvent{ + Object: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": v1alpha1.GroupVersion.String(), + "kind": "Gatekeeper", + "metadata": map[string]interface{}{ + "name": defaultGatekeeperCrName, + }, + }, + }, + } + }() + + return nil +} + +func (r *GatekeeperReconciler) getConstraintToSyncOnly(mainCtx context.Context) map[string][]gkv1alpha1.SyncOnlyEntry { + cpsList := &v1beta1.ConstraintPodStatusList{} + + // key = ConstraintPodStatus Name + constraintToSyncOnly := map[string][]gkv1alpha1.SyncOnlyEntry{} + + err := r.Client.List(mainCtx, cpsList, &client.ListOptions{}) + if err != nil { + return constraintToSyncOnly + } + + // Add to table for unique filtering + for _, cps := range cpsList.Items { + // Pick only Audit ConstraintPodStatus + if !slices.Contains(cps.Status.Operations, "audit") { + continue + } + + constraint, constraintName, err := getConstraint(mainCtx, cps, r.DynamicClient) + if err != nil { + if apierrors.IsNotFound(err) { + r.Log.Info("The Constraint was not found", "constraintName:", constraintName) + } + + continue + } + + constraintMatchKinds, _, err := unstructured.NestedSlice(constraint.Object, "spec", "match", "kinds") + if err != nil { + r.Log.V(1).Info("There are no provided kinds in the Constraint", "constraintName:", constraintName) + + continue + } + + constraintSyncOnlyEntries, err := r.DiscoveryStorage.getSyncOnlys(constraintMatchKinds) + if err != nil { + // No need to retry. 
The ConstraintPodStatus_controller will sort out + continue + } + + constraintToSyncOnly[cps.Name] = constraintSyncOnlyEntries + } + + return constraintToSyncOnly +} + +// Helper function to get constraint from ConstraintPodStatus +func getConstraint(ctx context.Context, cps gkv1beta1.ConstraintPodStatus, + dynamicClient *dynamic.DynamicClient, +) (*unstructured.Unstructured, string, error) { + labels := cps.GetLabels() + constraintKind := labels["internal.gatekeeper.sh/constraint-kind"] + constraintName := labels["internal.gatekeeper.sh/constraint-name"] + + constraintGVR := schema.GroupVersionResource{ + Group: "constraints.gatekeeper.sh", + Version: "v1beta1", + Resource: strings.ToLower(constraintKind), + } + + constraint, err := dynamicClient.Resource(constraintGVR).Get(ctx, constraintName, metav1.GetOptions{}) + if err != nil { + return nil, constraintName, err + } + + return constraint, constraintName, nil +} + +// Check ConstraintPodStatus Crd status is "True" and type is "NamesAccepted" +func checkCPSCrdAvailable(mainCtx context.Context, dynamicClient *dynamic.DynamicClient) (bool, error) { + crdGVR := schema.GroupVersionResource{ + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: "customresourcedefinitions", + } + + crd, err := dynamicClient.Resource(crdGVR). 
+ Get(mainCtx, "constraintpodstatuses.status.gatekeeper.sh", metav1.GetOptions{}) + if err != nil { + setupLog.V(1).Info("Cannot fetch ConstraintPodStatus CRD") + + return false, err + } + + conditions, ok, _ := unstructured.NestedSlice(crd.Object, "status", "conditions") + if !ok { + setupLog.V(1).Info("Cannot parse ConstraintPodStatus status conditions") + + return false, errors.New("Failed to parse status, conditions") + } + + for _, condition := range conditions { + parsedCondition := condition.(map[string]interface{}) + + status, ok := parsedCondition["status"].(string) + if !ok { + setupLog.V(1).Info("Cannot parse ConstraintPodStatus conditions status") + + return false, errors.New("Failed to parse status string") + } + + conditionType, ok := parsedCondition["type"].(string) + if !ok { + setupLog.V(1).Info("Cannot parse ConstraintPodStatus conditions type") + + return false, errors.New("Failed to parse ConstraintPodStatus conditions type") + } + + if conditionType == "NamesAccepted" && status == "True" { + setupLog.V(1).Info("ConstraintPodStatus CRD is ready") + + return true, nil + } + } + + setupLog.V(1).Info("ConstraintPodStatus CRD is not ready yet") + + return false, nil +} + +// Check gatekeeper auditFromCache=Automatic +func checkCPScontrollerPrereqs(gatekeeper *operatorv1alpha1.Gatekeeper) bool { + return gatekeeper.Spec.Audit != nil && gatekeeper.Spec.Audit.AuditFromCache != nil && + *gatekeeper.Spec.Audit.AuditFromCache == operatorv1alpha1.AuditFromCacheAutomatic +} + +func (r *GatekeeperReconciler) StopCPSController() { + if r.cpsCtrlCtxCancel == nil { + return + } + + setupLog.Info("Gatekeeper auditFromCache unset from Automatic. 
Stopping the ConstraintPodStatus manager.") + + r.cpsCtrlCtxCancel() + + for r.isCPSCtrlRunning { + setupLog.Info("Waiting for the ConstraintPodStatus manager to shutdown") + + time.Sleep(1 * time.Second) + } +} diff --git a/controllers/discovery_storage.go b/controllers/discovery_storage.go new file mode 100644 index 00000000..872faf5c --- /dev/null +++ b/controllers/discovery_storage.go @@ -0,0 +1,140 @@ +package controllers + +import ( + "fmt" + "time" + + "github.com/go-logr/logr" + "github.com/open-policy-agent/gatekeeper/v3/apis/config/v1alpha1" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" +) + +var ErrNotFoundDiscovery = errors.New("there are no matched apiGroup, version or kind") + +type DiscoveryStorage struct { + apiResourceList []*metav1.APIResourceList + discoveryLastRefreshed time.Time + ClientSet *kubernetes.Clientset + Log logr.Logger +} + +func (r *DiscoveryStorage) getSyncOnlys(constraintMatchKinds []interface{}) ( + []v1alpha1.SyncOnlyEntry, error, +) { + syncOnlys := []v1alpha1.SyncOnlyEntry{} + + var finalErr error + + for _, match := range constraintMatchKinds { + newKind, ok := match.(map[string]interface{}) + if !ok { + continue + } + + apiGroups, ok := newKind["apiGroups"].([]interface{}) + if !ok { + continue + } + + kindsInKinds, ok := newKind["kinds"].([]interface{}) + if !ok { + continue + } + + for _, apiGroup := range apiGroups { + for _, kind := range kindsInKinds { + version, err := r.getAPIVersion(kind.(string), apiGroup.(string), false, r.ClientSet) + if err != nil { + r.Log.V(1).Info("getAPIVersion has error but continue") + + if finalErr == nil { + finalErr = err + } else { + // Accumulate error + finalErr = fmt.Errorf("%w; %w", finalErr, err) + } + + continue + } + + syncOnlys = append(syncOnlys, v1alpha1.SyncOnlyEntry{ + Group: apiGroup.(string), + Version: version, + Kind: kind.(string), + }) + } + } + } + + return 
syncOnlys, finalErr +} + +// getAPIVersion gets the server preferred API version for the constraint's match kind entry. +// Constraints only provide kind and apiGroup; however, the config resource needs the version. +func (r *DiscoveryStorage) getAPIVersion(kind string, + apiGroup string, skipRefresh bool, clientSet *kubernetes.Clientset, +) (string, error) { + // Cool time(10 min) to refresh discoveries + if len(r.apiResourceList) == 0 || + r.discoveryLastRefreshed.Add(time.Minute*10).Before(time.Now()) { + err := r.refreshDiscoveryInfo() + if err != nil { + return "", err + } + + // The discovery was just refreshed so skip another refresh + skipRefresh = true + } + + for _, resc := range r.apiResourceList { + groupVerison, err := schema.ParseGroupVersion(resc.GroupVersion) + if err != nil { + r.Log.Error(err, "Cannot parse the group and version in getApiVersion ", "GroupVersion:", resc.GroupVersion) + + continue + } + + group := groupVerison.Group + version := groupVerison.Version + // Consider groupversion == v1 or groupversion == app1/v1 + for _, apiResource := range resc.APIResources { + if apiResource.Kind == kind && group == apiGroup { + return version, nil + } + } + } + + if !skipRefresh { + // Get new discoveryInfo, when any resource is not found + err := r.refreshDiscoveryInfo() + if err != nil { + return "", err + } + + // Retry one more time after refreshing the discovery + return r.getAPIVersion(kind, apiGroup, true, clientSet) + } + + return "", ErrNotFoundDiscovery +} + +// Retrieve all groups and versions to add in config sync +// Constraints present only kind and group so this function helps to find the version +func (r *DiscoveryStorage) refreshDiscoveryInfo() error { + r.discoveryLastRefreshed = time.Now() + + discoveryClient := r.ClientSet.Discovery() + + apiList, err := discoveryClient.ServerPreferredResources() + if err != nil { + return err + } + + // Save fetched discovery at apiResourceList + r.apiResourceList = apiList + + return nil +} diff --git 
a/controllers/gatekeeper_controller.go b/controllers/gatekeeper_controller.go index 7752f85e..ae9feb26 100644 --- a/controllers/gatekeeper_controller.go +++ b/controllers/gatekeeper_controller.go @@ -33,10 +33,15 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/source" operatorv1alpha1 "github.com/gatekeeper/gatekeeper-operator/api/v1alpha1" "github.com/gatekeeper/gatekeeper-operator/controllers/merge" @@ -123,17 +128,24 @@ var ( // GatekeeperReconciler reconciles a Gatekeeper object type GatekeeperReconciler struct { client.Client - Log logr.Logger - Scheme *runtime.Scheme - Namespace string - PlatformInfo platform.PlatformInfo + Log logr.Logger + Scheme *runtime.Scheme + Namespace string + PlatformInfo platform.PlatformInfo + DiscoveryStorage *DiscoveryStorage + ManualReconcileTrigger chan event.GenericEvent + isCPSCtrlRunning bool + DynamicClient *dynamic.DynamicClient + KubeConfig *rest.Config + EnableLeaderElection bool + cpsCtrlCtxCancel context.CancelFunc } type crudOperation uint32 const ( - apply crudOperation = iota - delete crudOperation = iota + applyCrud crudOperation = iota + deleteCrud crudOperation = iota ) // Gatekeeper Operator RBAC permissions to manage Gatekeeper custom resource @@ -193,8 +205,13 @@ func (r *GatekeeperReconciler) Reconcile(ctx context.Context, req ctrl.Request) err := r.Get(ctx, req.NamespacedName, gatekeeper) if err != nil { if apierrors.IsNotFound(err) { + if r.isCPSCtrlRunning { + r.StopCPSController() + } + return ctrl.Result{}, nil } + return ctrl.Result{}, err } @@ -210,12 +227,22 @@ func 
(r *GatekeeperReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } + if err := r.handleCPSController(ctx, gatekeeper); err != nil { + if errors.Is(err, errCrdNotReady) { + // ConstraintPodStatus CRD is not ready, wait for the CRD is ready + return ctrl.Result{RequeueAfter: time.Second * 3}, nil + } else { + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil } // SetupWithManager sets up the controller with the Manager. -func (r *GatekeeperReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *GatekeeperReconciler) SetupWithManager(mgr ctrl.Manager, fromCPSMgrSource *source.Channel) error { return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{MaxConcurrentReconciles: int(1)}). For(&operatorv1alpha1.Gatekeeper{}). WithEventFilter(predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { @@ -224,10 +251,8 @@ func (r *GatekeeperReconciler) SetupWithManager(mgr ctrl.Manager) error { return oldGeneration != newGeneration }, - DeleteFunc: func(e event.DeleteEvent) bool { - return false - }, }). + WatchesRawSource(fromCPSMgrSource, &handler.EnqueueRequestForObject{}). 
Complete(r) } @@ -268,7 +293,7 @@ func (r *GatekeeperReconciler) deleteAssets(assets []string, gatekeeper *operato return err } - if err = r.crudResource(obj, gatekeeper, delete); err != nil { + if err = r.crudResource(obj, gatekeeper, deleteCrud); err != nil { return err } } @@ -295,7 +320,7 @@ func (r *GatekeeperReconciler) applyAsset(gatekeeper *operatorv1alpha1.Gatekeepe return err } - if err = r.crudResource(obj, gatekeeper, apply); err != nil { + if err = r.crudResource(obj, gatekeeper, applyCrud); err != nil { return err } return nil @@ -425,7 +450,7 @@ func (r *GatekeeperReconciler) crudResource(obj *unstructured.Unstructured, gate switch { case err == nil: - if operation == apply { + if operation == applyCrud { err = merge.RetainClusterObjectFields(obj, clusterObj) if err != nil { return errors.Wrapf(err, "Unable to retain cluster object fields from %s", namespacedName) @@ -436,7 +461,7 @@ func (r *GatekeeperReconciler) crudResource(obj *unstructured.Unstructured, gate } logger.Info(fmt.Sprintf("Updated Gatekeeper resource")) - } else if operation == delete { + } else if operation == deleteCrud { if err = r.Delete(ctx, obj); err != nil { return errors.Wrapf(err, "Error attempting to delete resource %s", namespacedName) } @@ -444,7 +469,7 @@ func (r *GatekeeperReconciler) crudResource(obj *unstructured.Unstructured, gate } case apierrors.IsNotFound(err): - if operation == apply { + if operation == applyCrud { if err = r.Create(ctx, obj); err != nil { return errors.Wrapf(err, "Error attempting to create resource %s", namespacedName) } @@ -667,6 +692,7 @@ func removeMutatingRBACRules(obj *unstructured.Unstructured) error { return err } } + return nil } @@ -805,7 +831,8 @@ func setConstraintViolationLimit(obj *unstructured.Unstructured, constraintViola func setAuditFromCache(obj *unstructured.Unstructured, auditFromCache *operatorv1alpha1.AuditFromCacheMode) error { if auditFromCache != nil { auditFromCacheValue := "false" - if *auditFromCache == 
operatorv1alpha1.AuditFromCacheEnabled { + if *auditFromCache == operatorv1alpha1.AuditFromCacheEnabled || + *auditFromCache == operatorv1alpha1.AuditFromCacheAutomatic { auditFromCacheValue = "true" } return setContainerArg(obj, managerContainer, AuditFromCacheArg, auditFromCacheValue, false) diff --git a/controllers/gatekeeper_controller_test.go b/controllers/gatekeeper_controller_test.go index a9663cf6..5d269f01 100644 --- a/controllers/gatekeeper_controller_test.go +++ b/controllers/gatekeeper_controller_test.go @@ -31,7 +31,7 @@ import ( test "github.com/gatekeeper/gatekeeper-operator/test/e2e/util" ) -var namespace = "mygatekeeper" +var namespace = "gatekeeper-system" func TestDeployWebhookConfigs(t *testing.T) { g := NewWithT(t) diff --git a/deploy/gatekeeper-operator.yaml b/deploy/gatekeeper-operator.yaml index f4d1f2bf..1911cbe6 100644 --- a/deploy/gatekeeper-operator.yaml +++ b/deploy/gatekeeper-operator.yaml @@ -874,6 +874,7 @@ spec: enum: - Enabled - Disabled + - Automatic type: string auditInterval: type: string diff --git a/docs/upgrading-gatekeeper.md b/docs/upgrading-gatekeeper.md index 23959d47..0dda501c 100644 --- a/docs/upgrading-gatekeeper.md +++ b/docs/upgrading-gatekeeper.md @@ -112,3 +112,13 @@ imported Gatekeeper manifests. Make sure to add or modify any unit and e2e tests as a result of any operator controller changes. + +## 11. Updating the gatekeeper Go module + +### Update gatekeeper package according to gatekeeper version + +The Gatekeeper Go module version in `go.mod` should be updated to match the Gatekeeper version being deployed by the operator. 
+``` +sed -i "s/require github.com/open-policy-agent/gatekeeper/v3 .*/require github.com/open-policy-agent/gatekeeper/v3 ${GATEKEEPER_VERSION}/" go.mod +go mod tidy +``` \ No newline at end of file diff --git a/go.mod b/go.mod index 4d66ff33..c40c7c72 100644 --- a/go.mod +++ b/go.mod @@ -7,16 +7,25 @@ require ( github.com/go-logr/logr v1.2.4 github.com/onsi/ginkgo/v2 v2.13.0 github.com/onsi/gomega v1.29.0 + github.com/open-policy-agent/frameworks/constraint v0.0.0-20231030230613-2e0cb3d68575 + github.com/open-policy-agent/gatekeeper/v3 v3.13.4 github.com/pkg/errors v0.9.1 + golang.org/x/exp v0.0.0-20231006140011-7918f672742d k8s.io/api v0.28.3 k8s.io/apiextensions-apiserver v0.28.3 k8s.io/apimachinery v0.28.3 k8s.io/client-go v0.28.3 + k8s.io/klog v1.0.0 + k8s.io/utils v0.0.0-20230726121419-3b25d923346b sigs.k8s.io/controller-runtime v0.16.3 ) require ( + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect + github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect + github.com/aws/aws-sdk-go v1.44.23 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -30,6 +39,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/google/cel-go v0.16.1 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect @@ -48,11 +58,12 @@ require ( github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stoewer/go-strcase v1.2.0 // indirect go.uber.org/multierr v1.11.0 // 
indirect go.uber.org/zap v1.26.0 // indirect - golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.13.0 // indirect + golang.org/x/sync v0.4.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/term v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect @@ -60,14 +71,16 @@ require ( golang.org/x/tools v0.14.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiserver v0.28.3 // indirect k8s.io/component-base v0.28.3 // indirect k8s.io/klog/v2 v2.100.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/go.sum b/go.sum index 0852247c..8bf6b366 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,24 @@ +cloud.google.com/go/compute v1.21.0 h1:JNBsyXVoOoNJtTQcnEY5uYpZIbeCTYIeDe0Xh1bySMk= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/monitoring v1.15.1 h1:65JhLMd+JiYnXr6j5Z63dUYCuOg770p8a/VC+gil/58= +cloud.google.com/go/trace v1.10.1 h1:EwGdOLCNfYOOPtgqo+D2sDLZmRCEO1AagRTJCU6ztdg= +contrib.go.opencensus.io/exporter/ocagent v0.7.0 h1:BEfdCTXfMV30tLZD8c9n64V/tIZX5+9sXiuFLnrr1k8= +contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg= +contrib.go.opencensus.io/exporter/stackdriver v0.13.14 h1:zBakwHardp9Jcb8sQHcHpXy/0+JIb1M8KjigCJzx7+4= 
+github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.44.23 h1:oFvpKJk5qdprnCcuCWk2/CADdvfYtyduQ392bMXjlYI= +github.com/aws/aws-sdk-go v1.44.23/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -12,13 +30,18 @@ github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc= github.com/evanphx/json-patch/v5 v5.7.0/go.mod 
h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-bindata/go-bindata v3.1.2+incompatible h1:5vjJMVhowQdPzjE1LdxyFF7YFTXg5IgGVW4gBr5IbvE= github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= @@ -39,6 +62,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo= +github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 
h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -50,10 +75,18 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 
h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -82,6 +115,11 @@ github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4 github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/open-policy-agent/frameworks/constraint v0.0.0-20231030230613-2e0cb3d68575 h1:rhln22JjTgsJGL8gDK4qEM372Ei1PPQk4ZTIOKM9WvY= +github.com/open-policy-agent/frameworks/constraint v0.0.0-20231030230613-2e0cb3d68575/go.mod h1:AaCd/gbQ31R7btHO450Kdp18/Zmvn7hjEt7Qbp+MfJM= +github.com/open-policy-agent/gatekeeper/v3 v3.13.4 h1:WkrjM/15O8AxC/a3wYXImQX1UcpXCAnFYI8B07FEMso= +github.com/open-policy-agent/gatekeeper/v3 v3.13.4/go.mod h1:fFv+yG/u8UGFdShqRB2LInw+yBdZ2OXPFsmFlGIdq5I= +github.com/open-policy-agent/opa v0.57.1 h1:LAa4Z0UkpjV94nRLy6XCvgOacQ6N1jf8TJLMUIzFRqc= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -95,23 +133,40 @@ github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lne github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/prometheus v0.35.0 h1:N93oX6BrJ2iP3UuE2Uz4Lt+5BkUpaFer3L9CbADzesc= +github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= 
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= @@ -121,6 +176,7 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= @@ -141,6 +197,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -170,8 +228,15 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= @@ -182,6 +247,7 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -194,16 +260,21 @@ k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2E k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc= k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= +k8s.io/apiserver v0.28.3 h1:8Ov47O1cMyeDzTXz0rwcfIIGAP/dP7L8rWbEljRcg5w= +k8s.io/apiserver v0.28.3/go.mod h1:YIpM+9wngNAv8Ctt0rHG4vQuX/I5rvkEMtZtsxW2rNM= k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= k8s.io/component-base v0.28.3 h1:rDy68eHKxq/80RiMb2Ld/tbH8uAE75JdCqJyi6lXMzI= k8s.io/component-base v0.28.3/go.mod h1:fDJ6vpVNSk6cRo5wmDa6eKIG7UlIQkaFmZN2fYgIUD8= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod 
h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/main.go b/main.go index 204245b8..ca5b53f6 100644 --- a/main.go +++ b/main.go @@ -23,15 +23,13 @@ import ( // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
+ + "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth" - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/healthz" - "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/source" "sigs.k8s.io/controller-runtime/pkg/webhook" operatorv1alpha1 "github.com/gatekeeper/gatekeeper-operator/api/v1alpha1" @@ -39,7 +37,18 @@ import ( "github.com/gatekeeper/gatekeeper-operator/pkg/platform" "github.com/gatekeeper/gatekeeper-operator/pkg/util" "github.com/gatekeeper/gatekeeper-operator/pkg/version" + constraintV1 "github.com/open-policy-agent/frameworks/constraint/pkg/apis/templates/v1beta1" + gkv1alpha1 "github.com/open-policy-agent/gatekeeper/v3/apis/config/v1alpha1" + gkv1beta1 "github.com/open-policy-agent/gatekeeper/v3/apis/status/v1beta1" "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/dynamic" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" // +kubebuilder:scaffold:imports ) @@ -50,8 +59,10 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(operatorv1alpha1.AddToScheme(scheme)) + utilruntime.Must(gkv1beta1.AddToScheme(scheme)) + utilruntime.Must(gkv1alpha1.AddToScheme(scheme)) + utilruntime.Must(constraintV1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } @@ -59,6 +70,7 @@ func main() { var metricsAddr string var enableLeaderElection bool var probeAddr string + flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the 
metric endpoint binds to.") flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") flag.BoolVar(&enableLeaderElection, "leader-elect", false, @@ -73,6 +85,7 @@ func main() { ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) ctrl.Log.WithName("Gatekeeper Operator version").Info(fmt.Sprintf("%#v", version.Get())) + ctx := ctrl.SetupSignalHandler() metricsOptions := server.Options{ BindAddress: metricsAddr, @@ -110,16 +123,29 @@ func main() { os.Exit(1) } + dynamicClient := dynamic.NewForConfigOrDie(mgr.GetConfig()) + manualReconcileTrigger := make(chan event.GenericEvent, 1024) + fromCPSMgrSource := &source.Channel{Source: manualReconcileTrigger, DestBufferSize: 1024} + if err = (&controllers.GatekeeperReconciler{ - Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("Gatekeeper"), - Scheme: mgr.GetScheme(), - Namespace: namespace, - PlatformInfo: platformInfo, - }).SetupWithManager(mgr); err != nil { + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Gatekeeper"), + Scheme: mgr.GetScheme(), + Namespace: namespace, + PlatformInfo: platformInfo, + DynamicClient: dynamicClient, + KubeConfig: cfg, + EnableLeaderElection: enableLeaderElection, + ManualReconcileTrigger: manualReconcileTrigger, + DiscoveryStorage: &controllers.DiscoveryStorage{ + Log: ctrl.Log.WithName("discovery_storage"), + ClientSet: kubernetes.NewForConfigOrDie(mgr.GetConfig()), + }, + }).SetupWithManager(mgr, fromCPSMgrSource); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Gatekeeper") os.Exit(1) } + // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { @@ -132,8 +158,9 @@ func main() { } setupLog.Info("starting manager") - if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + if err := mgr.Start(ctx); err != nil { setupLog.Error(err, "problem running manager") + os.Exit(1) } } diff --git 
a/test/e2e/case1_audit_from_cache_test.go b/test/e2e/case1_audit_from_cache_test.go new file mode 100644 index 00000000..f43a7a33 --- /dev/null +++ b/test/e2e/case1_audit_from_cache_test.go @@ -0,0 +1,364 @@ +package e2e + +import ( + "time" + + gv1alpha1 "github.com/gatekeeper/gatekeeper-operator/api/v1alpha1" + . "github.com/gatekeeper/gatekeeper-operator/test/e2e/util" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/open-policy-agent/gatekeeper/v3/apis/config/v1alpha1" + "golang.org/x/exp/slices" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +const ( + case1GatekeeperYaml string = "../resources/case1_audit_from_cache/gatekeeper.yaml" + case1TemplateYaml string = "../resources/case1_audit_from_cache/template.yaml" + case1ConstraintPodYaml string = "../resources/case1_audit_from_cache/constraint-pod.yaml" + case1ConstraintPod2Yaml string = "../resources/case1_audit_from_cache/constraint-pod-2.yaml" + case1ConstraintIngressYaml string = "../resources/case1_audit_from_cache/constraint-ingress.yaml" + case1ConstraintStorageclassYaml string = "../resources/case1_audit_from_cache/constraint-storageclass.yaml" + case1PodYaml string = "../resources/case1_audit_from_cache/pod.yaml" + allowNamespace string = "case1-allow" + denyNamespace string = "case1-deny" + case1ConstraintUpdateYaml string = "../resources/case1_audit_from_cache/constraint-update.yaml" + case1ConstraintUpdateChangeYaml string = "../resources/case1_audit_from_cache/constraint-update-change.yaml" + case1ConstraintWrongYaml string = "../resources/case1_audit_from_cache/constraint-wrong.yaml" +) + +var constraintGVR = schema.GroupVersionResource{ + Group: "constraints.gatekeeper.sh", + Version: "v1beta1", + Resource: "case1template", +} + +var templateGVR = schema.GroupVersionResource{ + Group: "templates.gatekeeper.sh", + Version: "v1", + 
Resource: "constrainttemplates", +} + +var _ = Describe("Test auditFromCache", Ordered, func() { + BeforeAll(func() { + if !useExistingCluster() { + Skip("Test requires existing cluster. Set environment variable USE_EXISTING_CLUSTER=true and try again.") + } + + By("Create namespaces to compare") + Kubectl("create", "ns", allowNamespace) + Kubectl("create", "ns", denyNamespace) + + By("Create a gatekeeper resource") + _, err := KubectlWithOutput("apply", "-f", case1GatekeeperYaml) + Expect(err).ShouldNot(HaveOccurred()) + // Need enough time until gatekeeper is up + ctlDeployment := GetWithTimeout(clientHubDynamic, deploymentGVR, + "gatekeeper-controller-manager", gatekeeperNamespace, true, 150) + Expect(ctlDeployment).NotTo(BeNil()) + + Eventually(func(g Gomega) { + auditDeployment := GetWithTimeout(clientHubDynamic, deploymentGVR, + "gatekeeper-audit", gatekeeperNamespace, true, 60) + g.Expect(auditDeployment).NotTo(BeNil()) + + availableReplicas, _, err := unstructured.NestedInt64(auditDeployment.Object, "status", "availableReplicas") + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(availableReplicas).Should(BeNumerically(">", 0)) + }, 2*time.Minute, 2*time.Second).Should(Succeed()) + + _, err = KubectlWithOutput("apply", "-f", case1TemplateYaml) + Expect(err).ShouldNot(HaveOccurred()) + template := GetWithTimeout(clientHubDynamic, templateGVR, "case1template", "", true, 60) + Expect(template).NotTo(BeNil()) + + Eventually(func() error { + _, err = KubectlWithOutput("apply", "-f", case1ConstraintStorageclassYaml) + + return err + }, timeout).ShouldNot(HaveOccurred()) + storageclass := GetWithTimeout(clientHubDynamic, constraintGVR, "case1-storageclass-deny", "", true, 60) + Expect(storageclass).NotTo(BeNil()) + + Eventually(func() error { + _, err = KubectlWithOutput("apply", "-f", case1ConstraintPodYaml) + + return err + }, timeout).ShouldNot(HaveOccurred()) + pod := GetWithTimeout(clientHubDynamic, constraintGVR, "case1-pod-deny", "", true, 60) + 
Expect(pod).NotTo(BeNil()) + + Eventually(func() error { + _, err = KubectlWithOutput("apply", "-f", case1ConstraintPod2Yaml) + + return err + }, timeout).ShouldNot(HaveOccurred()) + pod2 := GetWithTimeout(clientHubDynamic, constraintGVR, "case1-pod-deny-2", "", true, 60) + Expect(pod2).NotTo(BeNil()) + + Eventually(func() error { + _, err = KubectlWithOutput("apply", "-f", case1ConstraintIngressYaml) + + return err + }, timeout).ShouldNot(HaveOccurred()) + ingress := GetWithTimeout(clientHubDynamic, constraintGVR, "case1-ingress-deny", "", true, 60) + Expect(ingress).NotTo(BeNil()) + }) + + Describe("Gatekeeper with auditFromCache=Automatic create syncOnly config", Ordered, func() { + It("should create config resource with syncOnly includes pod, ingress, storageclass", func() { + config := &v1alpha1.Config{} + + By("config syncOnly should have 3 elements, duplicates should be omitted") + Eventually(func(g Gomega) []v1alpha1.SyncOnlyEntry { + err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return config.Spec.Sync.SyncOnly + }, timeout).Should(HaveLen(3)) + + expectedSyncOnly := map[string]v1alpha1.SyncOnlyEntry{ + "Ingress": { + Group: "networking.k8s.io", + Kind: "Ingress", + Version: "v1", + }, + "Pod": { + Kind: "Pod", + Version: "v1", + }, + "StorageClass": { + Group: "storage.k8s.io", + Version: "v1", + Kind: "StorageClass", + }, + } + for key, val := range expectedSyncOnly { + foundSyncOnly := slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s v1alpha1.SyncOnlyEntry) bool { + return s.Kind == key + }) + Expect(foundSyncOnly).ShouldNot(Equal(-1)) + Expect(config.Spec.Sync.SyncOnly[foundSyncOnly]).Should(BeEquivalentTo(val)) + } + }) + It("Should have an error message with the cached pod list", func() { + _, err := KubectlWithOutput("apply", "-f", case1PodYaml, "-n", allowNamespace) + Expect(err).ShouldNot(HaveOccurred()) + output, err := 
KubectlWithOutput("apply", "-f", case1PodYaml, "-n", denyNamespace) + Expect(err).Should(HaveOccurred()) + Expect(output). + Should(ContainSubstring("cached data: {\"case1-pod\": {\"apiVersion\": \"v1\", \"kind\": \"Pod\"")) + }) + }) + + Describe("Gatekeeper with auditFromCache=Automatic delete syncOnly config", Ordered, func() { + It("Should have 3 syncOnly elements in config", func() { + config := &v1alpha1.Config{} + By("Config syncOnly should have 3 elements") + + Eventually(func(g Gomega) []v1alpha1.SyncOnlyEntry { + err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return config.Spec.Sync.SyncOnly + }, timeout).Should(HaveLen(3)) + }) + It("Should have 2 syncOnly elements in config", func() { + Kubectl("delete", "-f", case1ConstraintIngressYaml, "--ignore-not-found") + + config := &v1alpha1.Config{} + + Eventually(func(g Gomega) []v1alpha1.SyncOnlyEntry { + err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return config.Spec.Sync.SyncOnly + }, timeout).Should(HaveLen(2)) + + By("Ingress should not be in SyncOnly") + Expect(slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s v1alpha1.SyncOnlyEntry) bool { + return s.Kind == "Ingress" + })).Should(Equal(-1)) + }) + It("Should have 1 syncOnly elements in config", func() { + Kubectl("delete", "-f", case1ConstraintStorageclassYaml, "--ignore-not-found") + + config := &v1alpha1.Config{} + + Eventually(func(g Gomega) []v1alpha1.SyncOnlyEntry { + err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return config.Spec.Sync.SyncOnly + }, timeout).Should(HaveLen(1)) + + By("StorageClass should not be in SyncOnly") + Expect(slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s v1alpha1.SyncOnlyEntry) bool { + return s.Kind == 
"StorageClass" + })).Should(Equal(-1)) + }) + It("Should still have 1 syncOnly elements in config when Pod constraint is deleted", func() { + Kubectl("delete", "-f", case1ConstraintPodYaml, "--ignore-not-found") + config := &v1alpha1.Config{} + + Eventually(func(g Gomega) []v1alpha1.SyncOnlyEntry { + err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return config.Spec.Sync.SyncOnly + }, timeout).Should(HaveLen(1)) + + By("Pod should exist in SyncOnly because case1ConstraintPod2 still exists") + Expect(slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s v1alpha1.SyncOnlyEntry) bool { + return s.Kind == "Pod" + })).ShouldNot(Equal(-1)) + }) + It("Should have 0 syncOnly elements in config ", func() { + Kubectl("delete", "-f", case1ConstraintPod2Yaml, "--ignore-not-found") + config := &v1alpha1.Config{} + + Eventually(func(g Gomega) []v1alpha1.SyncOnlyEntry { + err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return config.Spec.Sync.SyncOnly + }, timeout).Should(BeNil()) + }) + }) + + Describe("Updating constraint should apply to config resource", Ordered, func() { + It("Should update the config resource", func() { + config := &v1alpha1.Config{} + + By("Add a new constraint") + Kubectl("apply", "-f", case1ConstraintUpdateYaml) + + By("Group name 'apps.StatefulSet' should exist in SyncOnly") + Eventually(func(g Gomega) int { + err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s v1alpha1.SyncOnlyEntry) bool { + return s.Group == "apps" && s.Kind == "StatefulSet" + }) + }, timeout).ShouldNot(Equal(-1)) + + By("Update the config") + Kubectl("apply", "-f", case1ConstraintUpdateChangeYaml) + + By("Group name 'batch.CronJob'
should exist in SyncOnly") + Eventually(func(g Gomega) int { + err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s v1alpha1.SyncOnlyEntry) bool { + return s.Group == "batch" && s.Kind == "CronJob" + }) + }, timeout).ShouldNot(Equal(-1)) + + By("Group name 'events.k8s.io.Event' should exist in SyncOnly") + Eventually(func(g Gomega) int { + err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s v1alpha1.SyncOnlyEntry) bool { + return s.Group == "events.k8s.io" && s.Kind == "Event" + }) + }, timeout).ShouldNot(Equal(-1)) + }) + }) + + Describe("Add wrong match kinds", Ordered, func() { + It("Should not add matches that are not found", func() { + config := &v1alpha1.Config{} + + By("Apply constraint") + Kubectl("apply", "-f", case1ConstraintWrongYaml) + + Eventually(func() error { + return K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + }, timeout).ShouldNot(HaveOccurred()) + + By("Group name 'ohmyhappy.sad.io' should not exist in SyncOnly") + Consistently(func(g Gomega) int { + err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s v1alpha1.SyncOnlyEntry) bool { + return s.Group == "ohmyhappy.sad.io" && s.Kind == "alien" + }) + }, 10).Should(Equal(-1)) + + By("Group name 'apps.StatefulSet' should still exist in SyncOnly") + Eventually(func(g Gomega) int { + err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s
v1alpha1.SyncOnlyEntry) bool { + return s.Group == "apps" && s.Kind == "StatefulSet" + }) + }, timeout).ShouldNot(Equal(-1)) + + By("Group name 'batch.CronJob' should still exist in SyncOnly") + Eventually(func(g Gomega) int { + err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s v1alpha1.SyncOnlyEntry) bool { + return s.Group == "batch" && s.Kind == "CronJob" + }) + }, timeout).ShouldNot(Equal(-1)) + + By("Group name 'events.k8s.io.Event' should still exist in SyncOnly") + Eventually(func(g Gomega) int { + err := K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, config) + g.Expect(err).ShouldNot(HaveOccurred()) + + return slices.IndexFunc(config.Spec.Sync.SyncOnly, func(s v1alpha1.SyncOnlyEntry) bool { + return s.Group == "events.k8s.io" && s.Kind == "Event" + }) + }, timeout).ShouldNot(Equal(-1)) + }) + }) + + AfterAll(func() { + Kubectl("delete", "ns", allowNamespace, "--ignore-not-found", "--grace-period=1") + Kubectl("delete", "ns", denyNamespace, "--ignore-not-found", "--grace-period=1") + Kubectl("delete", "-f", case1ConstraintPodYaml, "--ignore-not-found") + Kubectl("delete", "-f", case1ConstraintIngressYaml, "--ignore-not-found") + Kubectl("delete", "-f", case1ConstraintStorageclassYaml, "--ignore-not-found") + Kubectl("delete", "-f", case1ConstraintPod2Yaml, "--ignore-not-found") + Kubectl("delete", "-f", case1ConstraintUpdateYaml, "--ignore-not-found") + Kubectl("delete", "-f", case1ConstraintWrongYaml, "--ignore-not-found") + Kubectl("delete", "-f", case1GatekeeperYaml, "--ignore-not-found") + + Eventually(func() bool { + err := K8sClient.Get(ctx, gatekeeperName, &gv1alpha1.Gatekeeper{}) + if err == nil { + return false + } + + return apierrors.IsNotFound(err) + }, deleteTimeout, pollInterval).Should(BeTrue()) + + Eventually(func() bool { + err := 
K8sClient.Get(ctx, types.NamespacedName{Name: "config", Namespace: gatekeeperNamespace}, + &v1alpha1.Config{}) + if err == nil { + return false + } + + return apierrors.IsNotFound(err) + }, deleteTimeout, pollInterval).Should(BeTrue()) + + ctlDeployment := GetWithTimeout(clientHubDynamic, deploymentGVR, + "gatekeeper-controller-manager", gatekeeperNamespace, false, 60) + Expect(ctlDeployment).Should(BeNil()) + auditDeployment := GetWithTimeout(clientHubDynamic, deploymentGVR, + "gatekeeper-audit", gatekeeperNamespace, false, 60) + Expect(auditDeployment).Should(BeNil()) + }) +}) diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 3433c8ac..b8fcc821 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -18,25 +18,34 @@ package e2e import ( "context" + "fmt" "os" + "os/user" "path/filepath" "testing" + test "github.com/gatekeeper/gatekeeper-operator/test/e2e/util" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/klog" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" operatorv1alpha1 "github.com/gatekeeper/gatekeeper-operator/api/v1alpha1" + "github.com/open-policy-agent/gatekeeper/v3/apis/config/v1alpha1" // +kubebuilder:scaffold:imports ) @@ -44,16 +53,17 @@ import ( // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
var ( - cfg *rest.Config - K8sClient client.Client - testEnv *envtest.Environment - affinityPod *corev1.Pod - affinityNode *corev1.Node + cfg *rest.Config + K8sClient client.Client + testEnv *envtest.Environment + affinityPod *corev1.Pod + affinityNode *corev1.Node + clientHubDynamic dynamic.Interface + deploymentGVR schema.GroupVersionResource ) func TestE2e(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Controller Suite") } @@ -88,6 +98,26 @@ var _ = BeforeSuite(func() { Expect(labelNode(affinityNode)).Should(Succeed()) createAffinityPod() } + + err = v1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + clientHubDynamic = NewKubeClientDynamic("", "", "") + deploymentGVR = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} + + test.DefaultDeployment.NamespaceSelector = &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "admission.gatekeeper.sh/ignore", + Operator: metav1.LabelSelectorOpDoesNotExist, + }, + { + Key: "kubernetes.io/metadata.name", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{gatekeeperNamespace}, + }, + }, + } }) var _ = AfterSuite(func() { @@ -168,3 +198,60 @@ func loadAffinityPodFromFile(namespace string) (*corev1.Pod, error) { pod.ObjectMeta.Namespace = namespace return pod, err } + +func NewKubeClientDynamic(url, kubeconfig, context string) dynamic.Interface { + klog.V(5).Infof("Create kubeclient dynamic for url %s using kubeconfig path %s\n", url, kubeconfig) + + config, err := LoadConfig(url, kubeconfig, context) + if err != nil { + panic(err) + } + + clientset, err := dynamic.NewForConfig(config) + if err != nil { + panic(err) + } + + return clientset +} + +func LoadConfig(url, kubeconfig, context string) (*rest.Config, error) { + if kubeconfig == "" { + kubeconfig = os.Getenv("KUBECONFIG") + } + + klog.V(5).Infof("Kubeconfig path %s\n", kubeconfig) + + // If we have an explicit indication of where the kubernetes config lives, 
read that. + if kubeconfig != "" { + if context == "" { + return clientcmd.BuildConfigFromFlags(url, kubeconfig) + } + + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, + &clientcmd.ConfigOverrides{ + CurrentContext: context, + }).ClientConfig() + } + + // If not, try the in-cluster config. + if c, err := rest.InClusterConfig(); err == nil { + return c, nil + } + + // If no in-cluster config, try the default location in the user's home directory. + if usr, err := user.Current(); err == nil { + klog.V(5).Infof( + "clientcmd.BuildConfigFromFlags for url %s using %s\n", + url, + filepath.Join(usr.HomeDir, ".kube", "config"), + ) + + if c, err := clientcmd.BuildConfigFromFlags("", filepath.Join(usr.HomeDir, ".kube", "config")); err == nil { + return c, nil + } + } + + return nil, fmt.Errorf("could not create a valid kubeconfig") +} diff --git a/test/e2e/gatekeeper_controller_test.go b/test/e2e/gatekeeper_controller_test.go index d8b0ee9d..ac0c7cb1 100644 --- a/test/e2e/gatekeeper_controller_test.go +++ b/test/e2e/gatekeeper_controller_test.go @@ -102,11 +102,30 @@ var _ = Describe("Gatekeeper", func() { if err == nil { return false } + + return apierrors.IsNotFound(err) + }, deleteTimeout, pollInterval).Should(BeTrue()) + + Eventually(func() bool { + err := K8sClient.Get(ctx, auditName, &appsv1.Deployment{}) + if err == nil { + return false + } + + return apierrors.IsNotFound(err) + }, deleteTimeout, pollInterval).Should(BeTrue()) + + Eventually(func() bool { + err := K8sClient.Get(ctx, controllerManagerName, &appsv1.Deployment{}) + if err == nil { + return false + } + return apierrors.IsNotFound(err) }, deleteTimeout, pollInterval).Should(BeTrue()) }) - Describe("Overriding CR", func() { + Describe("Overriding CR", Ordered, func() { It("Creating an empty gatekeeper contains default values", func() { gatekeeper := emptyGatekeeper() err := loadGatekeeperFromFile(gatekeeper, 
"gatekeeper_empty.yaml") diff --git a/test/e2e/options.go b/test/e2e/options.go index 04bcaded..2ca05d98 100644 --- a/test/e2e/options.go +++ b/test/e2e/options.go @@ -23,10 +23,12 @@ import ( "github.com/gatekeeper/gatekeeper-operator/pkg/util" ) -var gatekeeperNamespace string -var pollInterval time.Duration -var timeout time.Duration -var deleteTimeout time.Duration +var ( + gatekeeperNamespace string + pollInterval time.Duration + timeout time.Duration + deleteTimeout time.Duration +) func init() { flag.StringVar(&gatekeeperNamespace, "namespace", util.DefaultGatekeeperNamespace, "The namespace to run tests") diff --git a/test/e2e/util/util.go b/test/e2e/util/util.go index de25a3f2..f09610c6 100644 --- a/test/e2e/util/util.go +++ b/test/e2e/util/util.go @@ -17,10 +17,20 @@ limitations under the License. package util import ( + "context" + "fmt" + "os/exec" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" admregv1 "k8s.io/api/admissionregistration/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" ) type defaultConfig struct { @@ -84,8 +94,69 @@ var DefaultDeployment = defaultConfig{ { Key: "kubernetes.io/metadata.name", Operator: metav1.LabelSelectorOpNotIn, - Values: []string{"mygatekeeper"}, + Values: []string{"gatekeeper-system"}, }, }, }, } + +// GetWithTimeout keeps polling to get the object for timeout seconds until wantFound is met +// (true for found, false for not found) +func GetWithTimeout( + clientHubDynamic dynamic.Interface, + gvr schema.GroupVersionResource, + name, namespace string, + wantFound bool, + timeout int, +) *unstructured.Unstructured { + if timeout < 1 { + timeout = 1 + } + + var obj *unstructured.Unstructured + + EventuallyWithOffset(1, func() error { + var err error + namespace := 
clientHubDynamic.Resource(gvr).Namespace(namespace) + + obj, err = namespace.Get(context.TODO(), name, metav1.GetOptions{}) + if wantFound && err != nil { + return err + } + + if !wantFound && err == nil { + return fmt.Errorf("expected to return IsNotFound error") + } + + if !wantFound && err != nil && !errors.IsNotFound(err) { + return err + } + + return nil + }, timeout, 1).Should(BeNil()) + + if wantFound { + return obj + } + + return nil +} + +// Kubectl execute kubectl cli +func Kubectl(args ...string) { + cmd := exec.Command("kubectl", args...) + + err := cmd.Start() + if err != nil { + Fail(fmt.Sprintf("Error: %v", err)) + } +} + +// KubectlWithOutput execute kubectl cli and return output and error +func KubectlWithOutput(args ...string) (string, error) { + output, err := exec.Command("kubectl", args...).CombinedOutput() + //nolint:forbidigo + fmt.Println(string(output)) + + return string(output), err +} diff --git a/test/resources/case1_audit_from_cache/constraint-ingress.yaml b/test/resources/case1_audit_from_cache/constraint-ingress.yaml new file mode 100644 index 00000000..085a73eb --- /dev/null +++ b/test/resources/case1_audit_from_cache/constraint-ingress.yaml @@ -0,0 +1,9 @@ +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: Case1Template +metadata: + name: case1-ingress-deny +spec: + match: + kinds: + - apiGroups: ["networking.k8s.io"] + kinds: ["Ingress"] \ No newline at end of file diff --git a/test/resources/case1_audit_from_cache/constraint-pod-2.yaml b/test/resources/case1_audit_from_cache/constraint-pod-2.yaml new file mode 100644 index 00000000..42547aa9 --- /dev/null +++ b/test/resources/case1_audit_from_cache/constraint-pod-2.yaml @@ -0,0 +1,10 @@ +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: Case1Template +metadata: + name: case1-pod-deny-2 +spec: + match: + excludedNamespaces: ["case1-allow"] + kinds: + - apiGroups: [""] + kinds: ["Pod"] \ No newline at end of file diff --git 
a/test/resources/case1_audit_from_cache/constraint-pod.yaml b/test/resources/case1_audit_from_cache/constraint-pod.yaml new file mode 100644 index 00000000..6350dac2 --- /dev/null +++ b/test/resources/case1_audit_from_cache/constraint-pod.yaml @@ -0,0 +1,10 @@ +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: Case1Template +metadata: + name: case1-pod-deny +spec: + match: + excludedNamespaces: ["case1-allow"] + kinds: + - apiGroups: [""] + kinds: ["Pod"] \ No newline at end of file diff --git a/test/resources/case1_audit_from_cache/constraint-storageclass.yaml b/test/resources/case1_audit_from_cache/constraint-storageclass.yaml new file mode 100644 index 00000000..711a56ee --- /dev/null +++ b/test/resources/case1_audit_from_cache/constraint-storageclass.yaml @@ -0,0 +1,9 @@ +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: Case1Template +metadata: + name: case1-storageclass-deny +spec: + match: + kinds: + - apiGroups: ["storage.k8s.io"] + kinds: ["StorageClass"] \ No newline at end of file diff --git a/test/resources/case1_audit_from_cache/constraint-update-change.yaml b/test/resources/case1_audit_from_cache/constraint-update-change.yaml new file mode 100644 index 00000000..a9e12b75 --- /dev/null +++ b/test/resources/case1_audit_from_cache/constraint-update-change.yaml @@ -0,0 +1,13 @@ +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: Case1Template +metadata: + name: case1-update +spec: + match: + kinds: + - apiGroups: ["apps"] + kinds: ["StatefulSet"] + - apiGroups: ["batch"] + kinds: ["CronJob"] + - apiGroups: ["events.k8s.io"] + kinds: ["Event"] \ No newline at end of file diff --git a/test/resources/case1_audit_from_cache/constraint-update.yaml b/test/resources/case1_audit_from_cache/constraint-update.yaml new file mode 100644 index 00000000..60a13afc --- /dev/null +++ b/test/resources/case1_audit_from_cache/constraint-update.yaml @@ -0,0 +1,9 @@ +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: Case1Template +metadata: + name: case1-update 
+spec: + match: + kinds: + - apiGroups: ["apps"] + kinds: ["StatefulSet"] diff --git a/test/resources/case1_audit_from_cache/constraint-wrong.yaml b/test/resources/case1_audit_from_cache/constraint-wrong.yaml new file mode 100644 index 00000000..080628c3 --- /dev/null +++ b/test/resources/case1_audit_from_cache/constraint-wrong.yaml @@ -0,0 +1,9 @@ +apiVersion: constraints.gatekeeper.sh/v1beta1 +kind: Case1Template +metadata: + name: case1-wrong +spec: + match: + kinds: + - apiGroups: ["ohmyhappy.sad.io"] + kinds: ["alien"] diff --git a/test/resources/case1_audit_from_cache/gatekeeper.yaml b/test/resources/case1_audit_from_cache/gatekeeper.yaml new file mode 100644 index 00000000..bdfbe89b --- /dev/null +++ b/test/resources/case1_audit_from_cache/gatekeeper.yaml @@ -0,0 +1,11 @@ +apiVersion: operator.gatekeeper.sh/v1alpha1 +kind: Gatekeeper +metadata: + name: gatekeeper +spec: + # Add fields here + audit: + replicas: 2 + logLevel: DEBUG + auditInterval: 20s + auditFromCache: Automatic diff --git a/test/resources/case1_audit_from_cache/pod.yaml b/test/resources/case1_audit_from_cache/pod.yaml new file mode 100644 index 00000000..0c61ff47 --- /dev/null +++ b/test/resources/case1_audit_from_cache/pod.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: case1-pod +spec: + containers: + - name: nginx + image: nginx:1.14.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 \ No newline at end of file diff --git a/test/resources/case1_audit_from_cache/template.yaml b/test/resources/case1_audit_from_cache/template.yaml new file mode 100644 index 00000000..937be06c --- /dev/null +++ b/test/resources/case1_audit_from_cache/template.yaml @@ -0,0 +1,20 @@ +apiVersion: templates.gatekeeper.sh/v1 +kind: ConstraintTemplate +metadata: + name: case1template +spec: + crd: + spec: + names: + kind: Case1Template + targets: + - target: admission.k8s.gatekeeper.sh + rego: | + package case1template + + violation[{"msg": msg}] { + 1 > 0 + msg := sprintf("This is
test %v",["hi"]) + }{ + msg := sprintf("cached data: %v",[data.inventory.namespace["case1-allow"]["v1"]["Pod"]]) + } \ No newline at end of file