diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 19b746796f82..2a648eb71d95 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -74,6 +74,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff]
 
 *Affecting all Beats*
 
+- Fix `namespace` filter option on `add_kubernetes_metadata` processor. {pull}39934[39934]
 - Support for multiline zookeeper logs {issue}2496[2496]
 - Add checks to ensure reloading of units if the configuration actually changed. {pull}34346[34346]
 - Fix namespacing on self-monitoring {pull}32336[32336]
@@ -152,6 +153,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff]
 
 *Metricbeat*
 
+- Fix `namespace` filter option on metricset `state_namespace` enricher. {pull}39934[39934]
 - Fix `namespace` filter option at Kubernetes provider level. {pull}39881[39881]
 - Fix Azure Monitor 429 error by causing metricbeat to retry the request again. {pull}38294[38294]
 - Fix fields not being parsed correctly in postgresql/database {issue}25301[25301] {pull}37720[37720]
diff --git a/libbeat/processors/add_kubernetes_metadata/kubernetes.go b/libbeat/processors/add_kubernetes_metadata/kubernetes.go
index f9143cdf289b..e8600b6d85c5 100644
--- a/libbeat/processors/add_kubernetes_metadata/kubernetes.go
+++ b/libbeat/processors/add_kubernetes_metadata/kubernetes.go
@@ -187,9 +187,10 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) {
 	}
 
 	watcher, err := kubernetes.NewNamedWatcher("add_kubernetes_metadata_pod", client, &kubernetes.Pod{}, kubernetes.WatchOptions{
-		SyncTimeout: config.SyncPeriod,
-		Node:        config.Node,
-		Namespace:   config.Namespace,
+		SyncTimeout:  config.SyncPeriod,
+		Node:         config.Node,
+		Namespace:    config.Namespace,
+		HonorReSyncs: true,
 	}, nil)
 	if err != nil {
 		k.log.Errorf("Couldn't create kubernetes watcher for %T", &kubernetes.Pod{})
@@ -198,14 +199,12 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) {
 
 	metaConf := config.AddResourceMetadata
 
-	options := kubernetes.WatchOptions{
-		SyncTimeout: config.SyncPeriod,
-		Node:        config.Node,
-		Namespace:   config.Namespace,
-	}
-
 	if metaConf.Node.Enabled() {
-		nodeWatcher, err = kubernetes.NewNamedWatcher("add_kubernetes_metadata_node", client, &kubernetes.Node{}, options, nil)
+		nodeWatcher, err = kubernetes.NewNamedWatcher("add_kubernetes_metadata_node", client, &kubernetes.Node{}, kubernetes.WatchOptions{
+			SyncTimeout:  config.SyncPeriod,
+			Node:         config.Node,
+			HonorReSyncs: true,
+		}, nil)
 		if err != nil {
 			k.log.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Node{}, err)
 		}
@@ -213,20 +212,24 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) {
 
 	if metaConf.Namespace.Enabled() {
 		namespaceWatcher, err = kubernetes.NewNamedWatcher("add_kubernetes_metadata_namespace", client, &kubernetes.Namespace{}, kubernetes.WatchOptions{
-			SyncTimeout: config.SyncPeriod,
+			SyncTimeout:  config.SyncPeriod,
+			Namespace:    config.Namespace,
+			HonorReSyncs: true,
 		}, nil)
 		if err != nil {
 			k.log.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Namespace{}, err)
 		}
 	}
 
-	// Resource is Pod so we need to create watchers for Replicasets and Jobs that it might belongs to
+	// Resource is Pod, so we need to create watchers for Replicasets and Jobs that it might belong to
 	// in order to be able to retrieve 2nd layer Owner metadata like in case of:
 	// Deployment -> Replicaset -> Pod
 	// CronJob -> job -> Pod
 	if metaConf.Deployment {
 		replicaSetWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_rs", client, &kubernetes.ReplicaSet{}, kubernetes.WatchOptions{
-			SyncTimeout: config.SyncPeriod,
+			SyncTimeout:  config.SyncPeriod,
+			Namespace:    config.Namespace,
+			HonorReSyncs: true,
 		}, nil)
 		if err != nil {
 			k.log.Errorf("Error creating watcher for %T due to error %+v", &kubernetes.ReplicaSet{}, err)
@@ -235,7 +238,9 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) {
 	}
 	if metaConf.CronJob {
 		jobWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_job", client, &kubernetes.Job{}, kubernetes.WatchOptions{
-			SyncTimeout: config.SyncPeriod,
+			SyncTimeout:  config.SyncPeriod,
+			Namespace:    config.Namespace,
+			HonorReSyncs: true,
 		}, nil)
 		if err != nil {
 			k.log.Errorf("Error creating watcher for %T due to error %+v", &kubernetes.Job{}, err)
diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go
index a0c409ca14e3..f94d424ec0e4 100644
--- a/metricbeat/module/kubernetes/util/kubernetes.go
+++ b/metricbeat/module/kubernetes/util/kubernetes.go
@@ -29,13 +29,14 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/api/resource"
 
-	kubernetes2 "github.com/elastic/beats/v7/libbeat/autodiscover/providers/kubernetes"
-	"github.com/elastic/beats/v7/metricbeat/mb"
 	"github.com/elastic/elastic-agent-autodiscover/kubernetes"
 	"github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata"
 	conf "github.com/elastic/elastic-agent-libs/config"
 	"github.com/elastic/elastic-agent-libs/logp"
 	"github.com/elastic/elastic-agent-libs/mapstr"
+
+	kubernetes2 "github.com/elastic/beats/v7/libbeat/autodiscover/providers/kubernetes"
+	"github.com/elastic/beats/v7/metricbeat/mb"
 )
 
 type kubernetesConfig struct {
@@ -281,8 +282,7 @@ func getWatchOptions(config *kubernetesConfig, nodeScope bool, client k8sclient.
 }
 
 func isNamespaced(resourceName string) bool {
-	if resourceName == NodeResource || resourceName == PersistentVolumeResource || resourceName == StorageClassResource ||
-		resourceName == NamespaceResource {
+	if resourceName == NodeResource || resourceName == PersistentVolumeResource || resourceName == StorageClassResource {
 		return false
 	}
 	return true
@@ -604,11 +604,11 @@ func NewResourceMetadataEnricher(
 		return &nilEnricher{}
 	}
 
-	// updateFunc to be used as the resource watcher's add and update handler.
+	// updateFunc to be used as the resource watchers add and update handler.
 	// The handler function is executed when a watcher is triggered(i.e. new/updated resource).
-	// It is responsible for generating the metadata for a detected resource by executing the metadata generator's Generate method.
+	// It is responsible for generating the metadata for a detected resource by executing the metadata generators Generate method.
 	// It is a common handler for all resource watchers. The kind of resource(e.g. pod or deployment) is checked inside the function.
-	// It returns a map of a resourse identifier(i.e namespace-resource_name) as key and the metadata as value.
+	// It returns a map of a resource identifier(i.e. namespace-resource_name) as key and the metadata as value.
 	updateFunc := func(r kubernetes.Resource) map[string]mapstr.M {
 		accessor, _ := meta.Accessor(r)
 		id := accessor.GetName()
@@ -691,7 +691,7 @@ func NewResourceMetadataEnricher(
 	indexFunc := func(e mapstr.M) string {
 		name := getString(e, "name")
 		namespace := getString(e, mb.ModuleDataKey+".namespace")
-		id := ""
+		var id string
 		if name != "" && namespace != "" {
 			id = join(namespace, name)
 		} else if namespace != "" {
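
For context, a minimal sketch of the watcher-construction pattern this patch converges on: every watcher created in the processor's init path now carries the configured Namespace plus HonorReSyncs: true, instead of a shared WatchOptions value that dropped the namespace for the secondary watchers. The package and the helper name newScopedPodWatcher below are hypothetical (not part of the PR); the imports and the kubernetes.NewNamedWatcher / kubernetes.WatchOptions calls are the ones visible in the diff above.

// Reviewer sketch, not part of the PR: illustrates the namespace-scoped
// WatchOptions pattern used throughout the patch. Package and helper name
// are hypothetical; the API calls mirror those in the diff.
package kubewatchsketch

import (
	"fmt"
	"time"

	"github.com/elastic/elastic-agent-autodiscover/kubernetes"
	k8sclient "k8s.io/client-go/kubernetes"
)

// newScopedPodWatcher (hypothetical helper) builds a Pod watcher the way the
// patched init path now does: the configured namespace is propagated so a
// `namespace` filter in the processor config actually restricts what is
// watched, and HonorReSyncs is enabled as it is for every watcher in the patch.
func newScopedPodWatcher(client k8sclient.Interface, node, namespace string, syncPeriod time.Duration) (kubernetes.Watcher, error) {
	w, err := kubernetes.NewNamedWatcher("add_kubernetes_metadata_pod", client, &kubernetes.Pod{}, kubernetes.WatchOptions{
		SyncTimeout:  syncPeriod,
		Node:         node,
		Namespace:    namespace, // previously omitted on the secondary watchers
		HonorReSyncs: true,
	}, nil)
	if err != nil {
		return nil, fmt.Errorf("creating pod watcher: %w", err)
	}
	return w, nil
}

The metricbeat change follows the same idea: with NamespaceResource no longer short-circuited in isNamespaced, the namespace resource is treated as namespaced, which, per the changelog entry, is what lets the `namespace` filter reach the `state_namespace` enricher.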