Skip to content

Commit

Permalink
[add_kubernetes_metadata][Metricset state_namespace] Fix namespace filter (#39934)
Browse files Browse the repository at this point in the history

* Fix namespace filter.

Signed-off-by: constanca <[email protected]>

* Fix namespace filter.

Signed-off-by: constanca <[email protected]>

* Fix namespace filter.

Signed-off-by: constanca <[email protected]>

* Fix namespace filter.

Signed-off-by: constanca <[email protected]>

* Ignore lint error on metagen variables

Signed-off-by: constanca <[email protected]>

* make update

Signed-off-by: constanca <[email protected]>

* lint errors

Signed-off-by: constanca <[email protected]>

* lint errors

Signed-off-by: constanca <[email protected]>

* Update changelog

Signed-off-by: constanca <[email protected]>

---------

Signed-off-by: constanca <[email protected]>
  • Loading branch information
constanca-m authored Jun 26, 2024
1 parent 463dc4e commit 7ec2dd8
Show file tree
Hide file tree
Showing 3 changed files with 29 additions and 22 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.next.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff]

*Affecting all Beats*

- Fix `namespace` filter option on `add_kubernetes_metadata` processor. {pull}39934[39934]
- Support for multiline zookeeper logs {issue}2496[2496]
- Add checks to ensure reloading of units if the configuration actually changed. {pull}34346[34346]
- Fix namespacing on self-monitoring {pull}32336[32336]
Expand Down Expand Up @@ -152,6 +153,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff]

*Metricbeat*

- Fix `namespace` filter option on metricset `state_namespace` enricher. {pull}39934[39934]
- Fix `namespace` filter option at Kubernetes provider level. {pull}39881[39881]
- Fix Azure Monitor 429 error by causing metricbeat to retry the request again. {pull}38294[38294]
- Fix fields not being parsed correctly in postgresql/database {issue}25301[25301] {pull}37720[37720]
Expand Down
33 changes: 19 additions & 14 deletions libbeat/processors/add_kubernetes_metadata/kubernetes.go
Original file line number Diff line number Diff line change
Expand Up @@ -187,9 +187,10 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) {
}

watcher, err := kubernetes.NewNamedWatcher("add_kubernetes_metadata_pod", client, &kubernetes.Pod{}, kubernetes.WatchOptions{
SyncTimeout: config.SyncPeriod,
Node: config.Node,
Namespace: config.Namespace,
SyncTimeout: config.SyncPeriod,
Node: config.Node,
Namespace: config.Namespace,
HonorReSyncs: true,
}, nil)
if err != nil {
k.log.Errorf("Couldn't create kubernetes watcher for %T", &kubernetes.Pod{})
Expand All @@ -198,35 +199,37 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) {

metaConf := config.AddResourceMetadata

options := kubernetes.WatchOptions{
SyncTimeout: config.SyncPeriod,
Node: config.Node,
Namespace: config.Namespace,
}

if metaConf.Node.Enabled() {
nodeWatcher, err = kubernetes.NewNamedWatcher("add_kubernetes_metadata_node", client, &kubernetes.Node{}, options, nil)
nodeWatcher, err = kubernetes.NewNamedWatcher("add_kubernetes_metadata_node", client, &kubernetes.Node{}, kubernetes.WatchOptions{
SyncTimeout: config.SyncPeriod,
Node: config.Node,
HonorReSyncs: true,
}, nil)
if err != nil {
k.log.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Node{}, err)
}
}

if metaConf.Namespace.Enabled() {
namespaceWatcher, err = kubernetes.NewNamedWatcher("add_kubernetes_metadata_namespace", client, &kubernetes.Namespace{}, kubernetes.WatchOptions{
SyncTimeout: config.SyncPeriod,
SyncTimeout: config.SyncPeriod,
Namespace: config.Namespace,
HonorReSyncs: true,
}, nil)
if err != nil {
k.log.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Namespace{}, err)
}
}

// Resource is Pod so we need to create watchers for Replicasets and Jobs that it might belongs to
// Resource is Pod, so we need to create watchers for Replicasets and Jobs that it might belong to
// in order to be able to retrieve 2nd layer Owner metadata like in case of:
// Deployment -> Replicaset -> Pod
// CronJob -> job -> Pod
if metaConf.Deployment {
replicaSetWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_rs", client, &kubernetes.ReplicaSet{}, kubernetes.WatchOptions{
SyncTimeout: config.SyncPeriod,
SyncTimeout: config.SyncPeriod,
Namespace: config.Namespace,
HonorReSyncs: true,
}, nil)
if err != nil {
k.log.Errorf("Error creating watcher for %T due to error %+v", &kubernetes.ReplicaSet{}, err)
Expand All @@ -235,7 +238,9 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) {
}
if metaConf.CronJob {
jobWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_job", client, &kubernetes.Job{}, kubernetes.WatchOptions{
SyncTimeout: config.SyncPeriod,
SyncTimeout: config.SyncPeriod,
Namespace: config.Namespace,
HonorReSyncs: true,
}, nil)
if err != nil {
k.log.Errorf("Error creating watcher for %T due to error %+v", &kubernetes.Job{}, err)
Expand Down
16 changes: 8 additions & 8 deletions metricbeat/module/kubernetes/util/kubernetes.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,13 +29,14 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"

kubernetes2 "github.com/elastic/beats/v7/libbeat/autodiscover/providers/kubernetes"
"github.com/elastic/beats/v7/metricbeat/mb"
"github.com/elastic/elastic-agent-autodiscover/kubernetes"
"github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata"
conf "github.com/elastic/elastic-agent-libs/config"
"github.com/elastic/elastic-agent-libs/logp"
"github.com/elastic/elastic-agent-libs/mapstr"

kubernetes2 "github.com/elastic/beats/v7/libbeat/autodiscover/providers/kubernetes"
"github.com/elastic/beats/v7/metricbeat/mb"
)

type kubernetesConfig struct {
Expand Down Expand Up @@ -281,8 +282,7 @@ func getWatchOptions(config *kubernetesConfig, nodeScope bool, client k8sclient.
}

func isNamespaced(resourceName string) bool {
if resourceName == NodeResource || resourceName == PersistentVolumeResource || resourceName == StorageClassResource ||
resourceName == NamespaceResource {
if resourceName == NodeResource || resourceName == PersistentVolumeResource || resourceName == StorageClassResource {
return false
}
return true
Expand Down Expand Up @@ -604,11 +604,11 @@ func NewResourceMetadataEnricher(
return &nilEnricher{}
}

// updateFunc to be used as the resource watcher's add and update handler.
// updateFunc to be used as the resource watchers add and update handler.
// The handler function is executed when a watcher is triggered(i.e. new/updated resource).
// It is responsible for generating the metadata for a detected resource by executing the metadata generator's Generate method.
// It is responsible for generating the metadata for a detected resource by executing the metadata generators Generate method.
// It is a common handler for all resource watchers. The kind of resource(e.g. pod or deployment) is checked inside the function.
// It returns a map of a resourse identifier(i.e namespace-resource_name) as key and the metadata as value.
// It returns a map of a resource identifier(i.e. namespace-resource_name) as key and the metadata as value.
updateFunc := func(r kubernetes.Resource) map[string]mapstr.M {
accessor, _ := meta.Accessor(r)
id := accessor.GetName()
Expand Down Expand Up @@ -691,7 +691,7 @@ func NewResourceMetadataEnricher(
indexFunc := func(e mapstr.M) string {
name := getString(e, "name")
namespace := getString(e, mb.ModuleDataKey+".namespace")
id := ""
var id string
if name != "" && namespace != "" {
id = join(namespace, name)
} else if namespace != "" {
Expand Down

0 comments on commit 7ec2dd8

Please sign in to comment.