diff --git a/changelog/fragments/1718953844-fix-namespace-filter-on-watchers.yaml b/changelog/fragments/1718953844-fix-namespace-filter-on-watchers.yaml
new file mode 100644
index 00000000000..1ac979f9d8a
--- /dev/null
+++ b/changelog/fragments/1718953844-fix-namespace-filter-on-watchers.yaml
@@ -0,0 +1,32 @@
+# Kind can be one of:
+# - breaking-change: a change to previously-documented behavior
+# - deprecation: functionality that is being removed in a later release
+# - bug-fix: fixes a problem in a previous version
+# - enhancement: extends functionality but does not break or fix existing behavior
+# - feature: new functionality
+# - known-issue: problems that we are aware of in a given version
+# - security: impacts on the security of a product or a user's deployment.
+# - upgrade: important information for someone upgrading from a prior version
+# - other: does not fit into any of the other categories
+kind: bug-fix
+
+# Change summary; an 80ish-character description of the change.
+summary: Fix namespace filter on watchers started by the pod and service eventers of the kubernetes provider
+
+# Long description; in case the summary is not enough to describe the change
+# this field accommodates a description without length limits.
+# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment.
+#description:
+
+# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc.
+component: elastic-agent
+
+# PR URL; optional; the PR number that added the changeset.
+# If not present, it is automatically filled by the tooling finding the PR where this changelog fragment has been added.
+# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number.
+# Please provide it if you are adding a fragment for a different PR.
+#pr: https://github.com/owner/repo/1234
+
+# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of).
+# If not present, it is automatically filled by the tooling with the issue linked to the PR number.
+#issue: https://github.com/owner/repo/1234
diff --git a/internal/pkg/composable/providers/kubernetes/pod.go b/internal/pkg/composable/providers/kubernetes/pod.go
index 4722a6adc18..daf3423cd60 100644
--- a/internal/pkg/composable/providers/kubernetes/pod.go
+++ b/internal/pkg/composable/providers/kubernetes/pod.go
@@ -75,14 +75,14 @@ func NewPodEventer(
 
 	var replicaSetWatcher, jobWatcher, namespaceWatcher, nodeWatcher kubernetes.Watcher
 
-	options := kubernetes.WatchOptions{
-		SyncTimeout: cfg.SyncPeriod,
-		Node:        cfg.Node,
-	}
 	metaConf := cfg.AddResourceMetadata
 
 	if metaConf.Node.Enabled() || cfg.Hints.Enabled {
-		nodeWatcher, err = kubernetes.NewNamedWatcher("agent-node", client, &kubernetes.Node{}, options, nil)
+		nodeWatcher, err = kubernetes.NewNamedWatcher("agent-node", client, &kubernetes.Node{}, kubernetes.WatchOptions{
+			SyncTimeout:  cfg.SyncPeriod,
+			Node:         cfg.Node,
+			HonorReSyncs: true,
+		}, nil)
 		if err != nil {
 			logger.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Node{}, err)
 		}
@@ -90,20 +90,24 @@ func NewPodEventer(
 
 	if metaConf.Namespace.Enabled() || cfg.Hints.Enabled {
 		namespaceWatcher, err = kubernetes.NewNamedWatcher("agent-namespace", client, &kubernetes.Namespace{}, kubernetes.WatchOptions{
-			SyncTimeout: cfg.SyncPeriod,
+			SyncTimeout:  cfg.SyncPeriod,
+			Namespace:    cfg.Namespace,
+			HonorReSyncs: true,
 		}, nil)
 		if err != nil {
 			logger.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Namespace{}, err)
 		}
 	}
 
-	// Resource is Pod so we need to create watchers for Replicasets and Jobs that it might belong to
+	// Resource is Pod, so we need to create watchers for Replicasets and Jobs that it might belong to
 	// in order to be able to retrieve 2nd layer Owner metadata like in case of:
 	// Deployment -> Replicaset -> Pod
 	// CronJob -> job -> Pod
 	if metaConf.Deployment {
 		replicaSetWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_rs", client, &kubernetes.ReplicaSet{}, kubernetes.WatchOptions{
-			SyncTimeout: cfg.SyncPeriod,
+			SyncTimeout:  cfg.SyncPeriod,
+			Namespace:    cfg.Namespace,
+			HonorReSyncs: true,
 		}, nil)
 		if err != nil {
 			logger.Errorf("Error creating watcher for %T due to error %+v", &kubernetes.Namespace{}, err)
@@ -111,7 +115,9 @@ func NewPodEventer(
 	}
 	if metaConf.CronJob {
 		jobWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_job", client, &kubernetes.Job{}, kubernetes.WatchOptions{
-			SyncTimeout: cfg.SyncPeriod,
+			SyncTimeout:  cfg.SyncPeriod,
+			Namespace:    cfg.Namespace,
+			HonorReSyncs: true,
 		}, nil)
 		if err != nil {
 			logger.Errorf("Error creating watcher for %T due to error %+v", &kubernetes.Job{}, err)
diff --git a/internal/pkg/composable/providers/kubernetes/service.go b/internal/pkg/composable/providers/kubernetes/service.go
index 96a93ed0c9a..665ed857e65 100644
--- a/internal/pkg/composable/providers/kubernetes/service.go
+++ b/internal/pkg/composable/providers/kubernetes/service.go
@@ -49,6 +49,7 @@ func NewServiceEventer(
 	watcher, err := kubernetes.NewNamedWatcher("agent-service", client, &kubernetes.Service{}, kubernetes.WatchOptions{
 		SyncTimeout:  cfg.SyncPeriod,
 		Node:         cfg.Node,
+		Namespace:    cfg.Namespace,
 		HonorReSyncs: true,
 	}, nil)
 	if err != nil {
@@ -62,8 +63,9 @@ func NewServiceEventer(
 
 	if metaConf.Namespace.Enabled() || cfg.Hints.Enabled {
 		namespaceWatcher, err = kubernetes.NewNamedWatcher("agent-namespace", client, &kubernetes.Namespace{}, kubernetes.WatchOptions{
-			SyncTimeout: cfg.SyncPeriod,
-			Namespace:   cfg.Namespace,
+			SyncTimeout:  cfg.SyncPeriod,
+			Namespace:    cfg.Namespace,
+			HonorReSyncs: true,
 		}, nil)
 		if err != nil {
 			return nil, fmt.Errorf("couldn't create watcher for %T due to error %w", &kubernetes.Namespace{}, err)
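
Reviewer note: below is a minimal sketch of what the added WatchOptions fields do, built on the same elastic-agent-autodiscover kubernetes package this patch touches. The helper name, the hard-coded namespace, and the standalone main are hypothetical and not part of the change; only NewNamedWatcher, WatchOptions, GetKubernetesClient, and the Watcher interface come from the library itself.

```go
package main

import (
	"log"
	"time"

	"github.com/elastic/elastic-agent-autodiscover/kubernetes"
	k8s "k8s.io/client-go/kubernetes"
)

// newScopedPodWatcher (hypothetical helper) mirrors the options this patch now
// passes to every watcher. With Namespace set, the underlying informer only
// lists/watches that namespace; left empty (the unfiltered behavior this PR
// fixes), the watcher receives objects from the whole cluster.
func newScopedPodWatcher(client k8s.Interface, namespace string) (kubernetes.Watcher, error) {
	return kubernetes.NewNamedWatcher("example-pod", client, &kubernetes.Pod{}, kubernetes.WatchOptions{
		SyncTimeout:  10 * time.Minute, // placeholder; the provider uses cfg.SyncPeriod
		Namespace:    namespace,        // server-side filter: list/watch a single namespace
		HonorReSyncs: true,             // also forward periodic informer resyncs to handlers
	}, nil)
}

func main() {
	// An empty kubeconfig path falls back to in-cluster configuration.
	client, err := kubernetes.GetKubernetesClient("", kubernetes.KubeClientOptions{})
	if err != nil {
		log.Fatal(err)
	}
	watcher, err := newScopedPodWatcher(client, "kube-system") // example namespace
	if err != nil {
		log.Fatal(err)
	}
	if err := watcher.Start(); err != nil { // non-blocking; spawns the informer
		log.Fatal(err)
	}
	defer watcher.Stop()
	time.Sleep(30 * time.Second) // keep the process alive briefly for the demo
}
```

Applying the namespace filter server-side means out-of-namespace objects are never delivered to or cached by the agent, rather than being dropped after the fact, which keeps the watcher's cache from growing with unrelated objects on large clusters.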