From cc81fe7c86cb695fbbf3f0c7765a07daf7ed3a51 Mon Sep 17 00:00:00 2001
From: Dzmitry Pasiukevich
Date: Thu, 18 Jul 2019 11:34:44 +0300
Subject: [PATCH] get data from multiple clusters

---
 .promu.yml                     |   2 +-
 Dockerfile                     |   6 +-
 collector/cluster_health.go    |  40 ++---
 collector/cluster_settings.go  |  21 ++-
 collector/const_labels.go      |  15 ++
 collector/indices.go           | 162 ++++++++++----
 collector/indices_settings.go  |  21 ++-
 collector/nodes.go             | 268 +++++++++++++++++----------
 collector/snapshots.go         |  34 +++--
 main.go                        | 104 +++++++------
 pkg/clusterinfo/clusterinfo.go |  25 ++-
 vendor/vendor.json             |   2 +-
 12 files changed, 381 insertions(+), 319 deletions(-)
 create mode 100644 collector/const_labels.go

diff --git a/.promu.yml b/.promu.yml
index 9a17044b..a05c9318 100644
--- a/.promu.yml
+++ b/.promu.yml
@@ -1,7 +1,7 @@
 go:
   cgo: false
 repository:
-  path: github.com/justwatchcom/elasticsearch_exporter
+  path: github.com/gojuno/elasticsearch_exporter
 build:
   flags: -a -tags netgo
   ldflags: |
diff --git a/Dockerfile b/Dockerfile
index 5ba6f65a..0f71b238 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,14 +1,14 @@
 FROM quay.io/prometheus/golang-builder as builder

-ADD . /go/src/github.com/justwatchcom/elasticsearch_exporter
-WORKDIR /go/src/github.com/justwatchcom/elasticsearch_exporter
+ADD . /go/src/github.com/gojuno/elasticsearch_exporter
+WORKDIR /go/src/github.com/gojuno/elasticsearch_exporter

 RUN make

 FROM quay.io/prometheus/busybox:latest
 MAINTAINER The Prometheus Authors

-COPY --from=builder /go/src/github.com/justwatchcom/elasticsearch_exporter/elasticsearch_exporter /bin/elasticsearch_exporter
+COPY --from=builder /go/src/github.com/gojuno/elasticsearch_exporter/elasticsearch_exporter /bin/elasticsearch_exporter

 EXPOSE 9114
 ENTRYPOINT [ "/bin/elasticsearch_exporter" ]
diff --git a/collector/cluster_health.go b/collector/cluster_health.go
index 5321adce..ec665f24 100644
--- a/collector/cluster_health.go
+++ b/collector/cluster_health.go
@@ -50,6 +50,7 @@ type ClusterHealth struct {
 // NewClusterHealth returns a new Collector exposing ClusterHealth stats.
func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *ClusterHealth { subsystem := "cluster_health" + constLabels := ConstLabelsFromURL(url) return &ClusterHealth{ logger: logger, @@ -57,16 +58,19 @@ func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *Clu url: url, up: prometheus.NewGauge(prometheus.GaugeOpts{ - Name: prometheus.BuildFQName(namespace, subsystem, "up"), - Help: "Was the last scrape of the ElasticSearch cluster health endpoint successful.", + Name: prometheus.BuildFQName(namespace, subsystem, "up"), + Help: "Was the last scrape of the ElasticSearch cluster health endpoint successful.", + ConstLabels: constLabels, }), totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{ - Name: prometheus.BuildFQName(namespace, subsystem, "total_scrapes"), - Help: "Current total ElasticSearch cluster health scrapes.", + Name: prometheus.BuildFQName(namespace, subsystem, "total_scrapes"), + Help: "Current total ElasticSearch cluster health scrapes.", + ConstLabels: constLabels, }), jsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{ - Name: prometheus.BuildFQName(namespace, subsystem, "json_parse_failures"), - Help: "Number of errors while parsing JSON.", + Name: prometheus.BuildFQName(namespace, subsystem, "json_parse_failures"), + Help: "Number of errors while parsing JSON.", + ConstLabels: constLabels, }), metrics: []*clusterHealthMetric{ @@ -75,7 +79,7 @@ func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *Clu Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "active_primary_shards"), "The number of primary shards in your cluster. This is an aggregate total across all indices.", - defaultClusterHealthLabels, nil, + defaultClusterHealthLabels, constLabels, ), Value: func(clusterHealth clusterHealthResponse) float64 { return float64(clusterHealth.ActivePrimaryShards) @@ -86,7 +90,7 @@ func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *Clu Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "active_shards"), "Aggregate total of all shards across all indices, which includes replica shards.", - defaultClusterHealthLabels, nil, + defaultClusterHealthLabels, constLabels, ), Value: func(clusterHealth clusterHealthResponse) float64 { return float64(clusterHealth.ActiveShards) @@ -97,7 +101,7 @@ func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *Clu Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "delayed_unassigned_shards"), "Shards delayed to reduce reallocation overhead", - defaultClusterHealthLabels, nil, + defaultClusterHealthLabels, constLabels, ), Value: func(clusterHealth clusterHealthResponse) float64 { return float64(clusterHealth.DelayedUnassignedShards) @@ -108,7 +112,7 @@ func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *Clu Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "initializing_shards"), "Count of shards that are being freshly created.", - defaultClusterHealthLabels, nil, + defaultClusterHealthLabels, constLabels, ), Value: func(clusterHealth clusterHealthResponse) float64 { return float64(clusterHealth.InitializingShards) @@ -119,7 +123,7 @@ func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *Clu Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "number_of_data_nodes"), "Number of data nodes in the cluster.", - defaultClusterHealthLabels, nil, + defaultClusterHealthLabels, 
constLabels, ), Value: func(clusterHealth clusterHealthResponse) float64 { return float64(clusterHealth.NumberOfDataNodes) @@ -130,7 +134,7 @@ func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *Clu Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "number_of_in_flight_fetch"), "The number of ongoing shard info requests.", - defaultClusterHealthLabels, nil, + defaultClusterHealthLabels, constLabels, ), Value: func(clusterHealth clusterHealthResponse) float64 { return float64(clusterHealth.NumberOfInFlightFetch) @@ -141,7 +145,7 @@ func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *Clu Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "task_max_waiting_in_queue_millis"), "Tasks max time waiting in queue.", - defaultClusterHealthLabels, nil, + defaultClusterHealthLabels, constLabels, ), Value: func(clusterHealth clusterHealthResponse) float64 { return float64(clusterHealth.TaskMaxWaitingInQueueMillis) @@ -152,7 +156,7 @@ func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *Clu Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "number_of_nodes"), "Number of nodes in the cluster.", - defaultClusterHealthLabels, nil, + defaultClusterHealthLabels, constLabels, ), Value: func(clusterHealth clusterHealthResponse) float64 { return float64(clusterHealth.NumberOfNodes) @@ -163,7 +167,7 @@ func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *Clu Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "number_of_pending_tasks"), "Cluster level changes which have not yet been executed", - defaultClusterHealthLabels, nil, + defaultClusterHealthLabels, constLabels, ), Value: func(clusterHealth clusterHealthResponse) float64 { return float64(clusterHealth.NumberOfPendingTasks) @@ -174,7 +178,7 @@ func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *Clu Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "relocating_shards"), "The number of shards that are currently moving from one node to another node.", - defaultClusterHealthLabels, nil, + defaultClusterHealthLabels, constLabels, ), Value: func(clusterHealth clusterHealthResponse) float64 { return float64(clusterHealth.RelocatingShards) @@ -185,7 +189,7 @@ func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *Clu Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "unassigned_shards"), "The number of shards that exist in the cluster state, but cannot be found in the cluster itself.", - defaultClusterHealthLabels, nil, + defaultClusterHealthLabels, constLabels, ), Value: func(clusterHealth clusterHealthResponse) float64 { return float64(clusterHealth.UnassignedShards) @@ -197,7 +201,7 @@ func NewClusterHealth(logger log.Logger, client *http.Client, url *url.URL) *Clu Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "status"), "Whether all primary and replica shards are allocated.", - []string{"cluster", "color"}, nil, + []string{"cluster", "color"}, constLabels, ), Value: func(clusterHealth clusterHealthResponse, color string) float64 { if clusterHealth.Status == color { diff --git a/collector/cluster_settings.go b/collector/cluster_settings.go index 978abddd..70ecc35b 100644 --- a/collector/cluster_settings.go +++ b/collector/cluster_settings.go @@ -26,26 +26,31 @@ type ClusterSettings struct { // NewClusterSettings defines Cluster Settings Prometheus metrics func 
NewClusterSettings(logger log.Logger, client *http.Client, url *url.URL) *ClusterSettings { + constLabels := ConstLabelsFromURL(url) return &ClusterSettings{ logger: logger, client: client, url: url, up: prometheus.NewGauge(prometheus.GaugeOpts{ - Name: prometheus.BuildFQName(namespace, "clustersettings_stats", "up"), - Help: "Was the last scrape of the ElasticSearch cluster settings endpoint successful.", + Name: prometheus.BuildFQName(namespace, "clustersettings_stats", "up"), + Help: "Was the last scrape of the ElasticSearch cluster settings endpoint successful.", + ConstLabels: constLabels, }), totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{ - Name: prometheus.BuildFQName(namespace, "clustersettings_stats", "total_scrapes"), - Help: "Current total ElasticSearch cluster settings scrapes.", + Name: prometheus.BuildFQName(namespace, "clustersettings_stats", "total_scrapes"), + Help: "Current total ElasticSearch cluster settings scrapes.", + ConstLabels: constLabels, }), shardAllocationEnabled: prometheus.NewGauge(prometheus.GaugeOpts{ - Name: prometheus.BuildFQName(namespace, "clustersettings_stats", "shard_allocation_enabled"), - Help: "Current mode of cluster wide shard routing allocation settings.", + Name: prometheus.BuildFQName(namespace, "clustersettings_stats", "shard_allocation_enabled"), + Help: "Current mode of cluster wide shard routing allocation settings.", + ConstLabels: constLabels, }), jsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{ - Name: prometheus.BuildFQName(namespace, "clustersettings_stats", "json_parse_failures"), - Help: "Number of errors while parsing JSON.", + Name: prometheus.BuildFQName(namespace, "clustersettings_stats", "json_parse_failures"), + Help: "Number of errors while parsing JSON.", + ConstLabels: constLabels, }), } } diff --git a/collector/const_labels.go b/collector/const_labels.go new file mode 100644 index 00000000..05801fbb --- /dev/null +++ b/collector/const_labels.go @@ -0,0 +1,15 @@ +package collector + +import ( + "net/url" + + "github.com/prometheus/client_golang/prometheus" +) + +func ConstLabelsFromURL(url *url.URL) prometheus.Labels { + u := *url + u.User = nil + return map[string]string{ + "cluster_url": u.String(), + } +} diff --git a/collector/indices.go b/collector/indices.go index d71b07b1..51eac9c6 100644 --- a/collector/indices.go +++ b/collector/indices.go @@ -9,7 +9,7 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" - "github.com/justwatchcom/elasticsearch_exporter/pkg/clusterinfo" + "github.com/gojuno/elasticsearch_exporter/pkg/clusterinfo" "github.com/prometheus/client_golang/prometheus" ) @@ -51,6 +51,7 @@ type Indices struct { // NewIndices defines Indices Prometheus metrics func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards bool) *Indices { + constLabels := ConstLabelsFromURL(url) indexLabels := labels{ keys: func(...string) []string { @@ -91,16 +92,19 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo }, up: prometheus.NewGauge(prometheus.GaugeOpts{ - Name: prometheus.BuildFQName(namespace, "index_stats", "up"), - Help: "Was the last scrape of the ElasticSearch index endpoint successful.", + Name: prometheus.BuildFQName(namespace, "index_stats", "up"), + Help: "Was the last scrape of the ElasticSearch index endpoint successful.", + ConstLabels: constLabels, }), totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{ - Name: prometheus.BuildFQName(namespace, "index_stats", "total_scrapes"), - Help: "Current 
total ElasticSearch index scrapes.", + Name: prometheus.BuildFQName(namespace, "index_stats", "total_scrapes"), + Help: "Current total ElasticSearch index scrapes.", + ConstLabels: constLabels, }), jsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{ - Name: prometheus.BuildFQName(namespace, "index_stats", "json_parse_failures"), - Help: "Number of errors while parsing JSON.", + Name: prometheus.BuildFQName(namespace, "index_stats", "json_parse_failures"), + Help: "Number of errors while parsing JSON.", + ConstLabels: constLabels, }), indexMetrics: []*indexMetric{ @@ -109,7 +113,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "docs_primary"), "Count of documents with only primary shards", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Primaries.Docs.Count) @@ -121,7 +125,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "deleted_docs_primary"), "Count of deleted documents with only primary shards", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Primaries.Docs.Deleted) @@ -133,7 +137,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "docs_total"), "Total count of documents", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Docs.Count) @@ -145,7 +149,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "deleted_docs_total"), "Total count of deleted documents", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Docs.Deleted) @@ -157,7 +161,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "store_size_bytes_primary"), "Current total size of stored index data in bytes with only primary shards on all nodes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Primaries.Store.SizeInBytes) @@ -169,7 +173,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "store_size_bytes_total"), "Current total size of stored index data in bytes with all shards on all nodes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Store.SizeInBytes) @@ -181,7 +185,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_count_primary"), "Current number of segments with only primary shards on all nodes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return 
float64(indexStats.Primaries.Segments.Count) @@ -193,7 +197,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_count_total"), "Current number of segments with all shards on all nodes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Segments.Count) @@ -205,7 +209,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_memory_bytes_primary"), "Current size of segments with only primary shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Primaries.Segments.MemoryInBytes) @@ -217,7 +221,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_memory_bytes_total"), "Current size of segments with all shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Segments.MemoryInBytes) @@ -229,7 +233,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_terms_memory_primary"), "Current size of terms with only primary shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Primaries.Segments.TermsMemoryInBytes) @@ -241,7 +245,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_terms_memory_total"), "Current number of terms with all shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Segments.TermsMemoryInBytes) @@ -253,7 +257,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_fields_memory_bytes_primary"), "Current size of fields with only primary shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Primaries.Segments.StoredFieldsMemoryInBytes) @@ -265,7 +269,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_fields_memory_bytes_total"), "Current size of fields with all shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Segments.StoredFieldsMemoryInBytes) @@ -277,7 +281,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_term_vectors_memory_primary_bytes"), "Current size of term vectors with only primary shards on all nodes in bytes", - 
indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Primaries.Segments.TermVectorsMemoryInBytes) @@ -289,7 +293,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_term_vectors_memory_total_bytes"), "Current size of term vectors with all shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Segments.TermVectorsMemoryInBytes) @@ -301,7 +305,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_norms_memory_bytes_primary"), "Current size of norms with only primary shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Primaries.Segments.NormsMemoryInBytes) @@ -313,7 +317,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_norms_memory_bytes_total"), "Current size of norms with all shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Segments.NormsMemoryInBytes) @@ -325,7 +329,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_points_memory_bytes_primary"), "Current size of points with only primary shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Primaries.Segments.PointsMemoryInBytes) @@ -337,7 +341,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_points_memory_bytes_total"), "Current size of points with all shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Segments.PointsMemoryInBytes) @@ -349,7 +353,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_doc_values_memory_bytes_primary"), "Current size of doc values with only primary shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Primaries.Segments.DocValuesMemoryInBytes) @@ -361,7 +365,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_doc_values_memory_bytes_total"), "Current size of doc values with all shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Segments.DocValuesMemoryInBytes) @@ -373,7 +377,7 @@ func NewIndices(logger log.Logger, client 
*http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_index_writer_memory_bytes_primary"), "Current size of index writer with only primary shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Primaries.Segments.IndexWriterMemoryInBytes) @@ -385,7 +389,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_index_writer_memory_bytes_total"), "Current size of index writer with all shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Segments.IndexWriterMemoryInBytes) @@ -397,7 +401,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_version_map_memory_bytes_primary"), "Current size of version map with only primary shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Primaries.Segments.VersionMapMemoryInBytes) @@ -409,7 +413,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_version_map_memory_bytes_total"), "Current size of version map with all shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Segments.VersionMapMemoryInBytes) @@ -421,7 +425,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_fixed_bit_set_memory_bytes_primary"), "Current size of fixed bit with only primary shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Primaries.Segments.FixedBitSetMemoryInBytes) @@ -433,7 +437,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segment_fixed_bit_set_memory_bytes_total"), "Current size of fixed bit with all shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Segments.FixedBitSetMemoryInBytes) @@ -445,7 +449,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "completion_bytes_primary"), "Current size of completion with only primary shards on all nodes in bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Primaries.Completion.SizeInBytes) @@ -457,7 +461,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "completion_bytes_total"), "Current size of completion with all shards on all nodes in 
bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Completion.SizeInBytes) @@ -469,7 +473,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "search_query_time_seconds_total"), "Total search query time in seconds", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Search.QueryTimeInMillis) / 1000 @@ -481,7 +485,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "search_query_total"), "Total number of queries", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Search.QueryTotal) @@ -493,7 +497,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "search_fetch_time_seconds_total"), "Total search fetch time in seconds", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Search.FetchTimeInMillis) / 1000 @@ -505,7 +509,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "search_fetch_total"), "Total search fetch count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Search.FetchTotal) @@ -517,7 +521,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "search_scroll_time_seconds_total"), "Total search scroll time in seconds", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Search.ScrollTimeInMillis) / 1000 @@ -529,7 +533,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "search_scroll_current"), "Current search scroll count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Search.ScrollCurrent) @@ -541,7 +545,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "search_scroll_total"), "Total search scroll count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Search.ScrollTotal) @@ -553,7 +557,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "search_suggest_time_seconds_total"), "Total search suggest time in seconds", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return 
float64(indexStats.Total.Search.SuggestTimeInMillis) / 1000 @@ -565,7 +569,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "search_suggest_total"), "Total search suggest count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Search.SuggestTotal) @@ -577,7 +581,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "indexing_index_time_seconds_total"), "Total indexing index time in seconds", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Indexing.IndexTimeInMillis) / 1000 @@ -589,7 +593,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "indexing_index_total"), "Total indexing index count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Indexing.IndexTotal) @@ -601,7 +605,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "indexing_delete_time_seconds_total"), "Total indexing delete time in seconds", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Indexing.DeleteTimeInMillis) / 1000 @@ -613,7 +617,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "indexing_delete_total"), "Total indexing delete count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Indexing.DeleteTotal) @@ -625,7 +629,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "indexing_noop_update_total"), "Total indexing no-op update count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Indexing.NoopUpdateTotal) @@ -637,7 +641,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "indexing_throttle_time_seconds_total"), "Total indexing throttle time in seconds", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Indexing.ThrottleTimeInMillis) / 1000 @@ -649,7 +653,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "get_time_seconds_total"), "Total get time in seconds", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Get.TimeInMillis) / 1000 @@ -661,7 +665,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, 
shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "get_total"), "Total get count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Get.Total) @@ -673,7 +677,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "merge_time_seconds_total"), "Total merge time in seconds", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Merges.TotalTimeInMillis) / 1000 @@ -685,7 +689,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "merge_total"), "Total merge count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Merges.Total) @@ -697,7 +701,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "merge_throttle_time_seconds_total"), "Total merge I/O throttle time in seconds", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Merges.TotalThrottledTimeInMillis) / 1000 @@ -709,7 +713,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "merge_stopped_time_seconds_total"), "Total large merge stopped time in seconds, allowing smaller merges to complete", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Merges.TotalStoppedTimeInMillis) / 1000 @@ -721,7 +725,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "merge_auto_throttle_bytes_total"), "Total bytes that were auto-throttled during merging", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Merges.TotalAutoThrottleInBytes) @@ -733,7 +737,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "refresh_time_seconds_total"), "Total refresh time in seconds", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Refresh.TotalTimeInMillis) / 1000 @@ -745,7 +749,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "refresh_total"), "Total refresh count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Refresh.Total) @@ -757,7 +761,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "flush_time_seconds_total"), "Total flush time in seconds", - 
indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Flush.TotalTimeInMillis) / 1000 @@ -769,7 +773,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "flush_total"), "Total flush count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Flush.Total) @@ -781,7 +785,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "warmer_time_seconds_total"), "Total warmer time in seconds", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Warmer.TotalTimeInMillis) / 1000 @@ -793,7 +797,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "warmer_total"), "Total warmer count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Warmer.Total) @@ -805,7 +809,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "query_cache_memory_bytes_total"), "Total query cache memory bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.QueryCache.MemorySizeInBytes) @@ -817,7 +821,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "query_cache_size"), "Total query cache size", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.QueryCache.CacheSize) @@ -829,7 +833,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "query_cache_hits_total"), "Total query cache hits count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.QueryCache.HitCount) @@ -841,7 +845,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "query_cache_misses_total"), "Total query cache misses count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.QueryCache.MissCount) @@ -853,7 +857,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "query_cache_caches_total"), "Total query cache caches count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.QueryCache.CacheCount) @@ -865,7 +869,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo 
Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "query_cache_evictions_total"), "Total query cache evictions count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.QueryCache.Evictions) @@ -877,7 +881,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "request_cache_memory_bytes_total"), "Total request cache memory bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.RequestCache.MemorySizeInBytes) @@ -889,7 +893,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "request_cache_hits_total"), "Total request cache hits count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.RequestCache.HitCount) @@ -901,7 +905,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "request_cache_misses_total"), "Total request cache misses count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.RequestCache.MissCount) @@ -913,7 +917,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "request_cache_evictions_total"), "Total request cache evictions count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.RequestCache.Evictions) @@ -925,7 +929,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "fielddata_memory_bytes_total"), "Total fielddata memory bytes", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Fielddata.MemorySizeInBytes) @@ -937,7 +941,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "index_stats", "fielddata_evictions_total"), "Total fielddata evictions count", - indexLabels.keys(), nil, + indexLabels.keys(), constLabels, ), Value: func(indexStats IndexStatsIndexResponse) float64 { return float64(indexStats.Total.Fielddata.Evictions) @@ -951,7 +955,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "shared_docs"), "Count of documents on this shard", - shardLabels.keys(), nil, + shardLabels.keys(), constLabels, ), Value: func(data IndexStatsIndexShardsDetailResponse) float64 { return float64(data.Docs.Count) @@ -963,7 +967,7 @@ func NewIndices(logger log.Logger, client *http.Client, url *url.URL, shards boo Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "shards_docs_deleted"), "Count of deleted documents on this shard", - shardLabels.keys(), nil, + shardLabels.keys(), constLabels, 
), Value: func(data IndexStatsIndexShardsDetailResponse) float64 { return float64(data.Docs.Deleted) diff --git a/collector/indices_settings.go b/collector/indices_settings.go index 7fce2a68..9da2c7f0 100644 --- a/collector/indices_settings.go +++ b/collector/indices_settings.go @@ -25,26 +25,31 @@ type IndicesSettings struct { // NewIndicesSettings defines Indices Settings Prometheus metrics func NewIndicesSettings(logger log.Logger, client *http.Client, url *url.URL) *IndicesSettings { + constLabels := ConstLabelsFromURL(url) return &IndicesSettings{ logger: logger, client: client, url: url, up: prometheus.NewGauge(prometheus.GaugeOpts{ - Name: prometheus.BuildFQName(namespace, "indices_settings_stats", "up"), - Help: "Was the last scrape of the ElasticSearch Indices Settings endpoint successful.", + Name: prometheus.BuildFQName(namespace, "indices_settings_stats", "up"), + Help: "Was the last scrape of the ElasticSearch Indices Settings endpoint successful.", + ConstLabels: constLabels, }), totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{ - Name: prometheus.BuildFQName(namespace, "indices_settings_stats", "total_scrapes"), - Help: "Current total ElasticSearch Indices Settings scrapes.", + Name: prometheus.BuildFQName(namespace, "indices_settings_stats", "total_scrapes"), + Help: "Current total ElasticSearch Indices Settings scrapes.", + ConstLabels: constLabels, }), readOnlyIndices: prometheus.NewGauge(prometheus.GaugeOpts{ - Name: prometheus.BuildFQName(namespace, "indices_settings_stats", "read_only_indices"), - Help: "Current number of read only indices within cluster", + Name: prometheus.BuildFQName(namespace, "indices_settings_stats", "read_only_indices"), + Help: "Current number of read only indices within cluster", + ConstLabels: constLabels, }), jsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{ - Name: prometheus.BuildFQName(namespace, "indices_settings_stats", "json_parse_failures"), - Help: "Number of errors while parsing JSON.", + Name: prometheus.BuildFQName(namespace, "indices_settings_stats", "json_parse_failures"), + Help: "Number of errors while parsing JSON.", + ConstLabels: constLabels, }), } } diff --git a/collector/nodes.go b/collector/nodes.go index 43ffa0e8..0c115924 100644 --- a/collector/nodes.go +++ b/collector/nodes.go @@ -170,6 +170,7 @@ type Nodes struct { // NewNodes defines Nodes Prometheus metrics func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, node string) *Nodes { + constLabels := ConstLabelsFromURL(url) return &Nodes{ logger: logger, client: client, @@ -178,16 +179,19 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no node: node, up: prometheus.NewGauge(prometheus.GaugeOpts{ - Name: prometheus.BuildFQName(namespace, "node_stats", "up"), - Help: "Was the last scrape of the ElasticSearch nodes endpoint successful.", + Name: prometheus.BuildFQName(namespace, "node_stats", "up"), + Help: "Was the last scrape of the ElasticSearch nodes endpoint successful.", + ConstLabels: constLabels, }), totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{ - Name: prometheus.BuildFQName(namespace, "node_stats", "total_scrapes"), - Help: "Current total ElasticSearch node scrapes.", + Name: prometheus.BuildFQName(namespace, "node_stats", "total_scrapes"), + Help: "Current total ElasticSearch node scrapes.", + ConstLabels: constLabels, }), jsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{ - Name: prometheus.BuildFQName(namespace, "node_stats", "json_parse_failures"), 
- Help: "Number of errors while parsing JSON.", + Name: prometheus.BuildFQName(namespace, "node_stats", "json_parse_failures"), + Help: "Number of errors while parsing JSON.", + ConstLabels: constLabels, }), nodeMetrics: []*nodeMetric{ @@ -196,7 +200,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "os", "load1"), "Shortterm load average", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return node.OS.CPU.LoadAvg.Load1 @@ -208,7 +212,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "os", "load5"), "Midterm load average", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return node.OS.CPU.LoadAvg.Load5 @@ -220,7 +224,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "os", "load15"), "Longterm load average", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return node.OS.CPU.LoadAvg.Load15 @@ -232,7 +236,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "os", "cpu_percent"), "Percent CPU used by OS", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.OS.CPU.Percent) @@ -244,7 +248,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "os", "mem_free_bytes"), "Amount of free physical memory in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.OS.Mem.Free) @@ -256,7 +260,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "os", "mem_used_bytes"), "Amount of used physical memory in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.OS.Mem.Used) @@ -268,7 +272,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "os", "mem_actual_free_bytes"), "Amount of free physical memory in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.OS.Mem.ActualFree) @@ -280,7 +284,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "os", "mem_actual_used_bytes"), "Amount of used physical memory in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.OS.Mem.ActualUsed) @@ -292,7 +296,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "fielddata_memory_size_bytes"), "Field data cache memory usage in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return 
float64(node.Indices.FieldData.MemorySize) @@ -304,7 +308,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "fielddata_evictions"), "Evictions from field data", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.FieldData.Evictions) @@ -316,7 +320,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "completion_size_in_bytes"), "Completion in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Completion.Size) @@ -328,7 +332,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "filter_cache_memory_size_bytes"), "Filter cache memory usage in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.FilterCache.MemorySize) @@ -340,7 +344,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "filter_cache_evictions"), "Evictions from filter cache", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.FilterCache.Evictions) @@ -352,7 +356,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "query_cache_memory_size_bytes"), "Query cache memory usage in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.QueryCache.MemorySize) @@ -364,7 +368,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "query_cache_evictions"), "Evictions from query cache", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.QueryCache.Evictions) @@ -376,7 +380,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "query_cache_total"), "Query cache total count", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.QueryCache.TotalCount) @@ -388,7 +392,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "query_cache_cache_size"), "Query cache cache size", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.QueryCache.CacheSize) @@ -400,7 +404,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "query_cache_cache_total"), "Query cache cache count", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return 
float64(node.Indices.QueryCache.CacheCount) @@ -412,7 +416,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "query_cache_count"), "Query cache count", - defaultCacheLabels, nil, + defaultCacheLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.QueryCache.HitCount) @@ -424,7 +428,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "query_miss_count"), "Query miss count", - defaultCacheLabels, nil, + defaultCacheLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.QueryCache.MissCount) @@ -436,7 +440,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "request_cache_memory_size_bytes"), "Request cache memory usage in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.RequestCache.MemorySize) @@ -448,7 +452,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "request_cache_evictions"), "Evictions from request cache", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.RequestCache.Evictions) @@ -460,7 +464,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "request_cache_count"), "Request cache count", - defaultCacheLabels, nil, + defaultCacheLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.RequestCache.HitCount) @@ -472,7 +476,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "request_miss_count"), "Request miss count", - defaultCacheLabels, nil, + defaultCacheLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.RequestCache.MissCount) @@ -484,7 +488,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "translog_operations"), "Total translog operations", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Translog.Operations) @@ -496,7 +500,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "translog_size_in_bytes"), "Total translog size in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Translog.Size) @@ -508,7 +512,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "get_time_seconds"), "Total get time in seconds", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Get.Time) / 1000 @@ -520,7 +524,7 @@ 
func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "get_total"), "Total get", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Get.Total) @@ -532,7 +536,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "get_missing_time_seconds"), "Total time of get missing in seconds", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Get.MissingTime) / 1000 @@ -544,7 +548,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "get_missing_total"), "Total get missing", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Get.MissingTotal) @@ -556,7 +560,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "get_exists_time_seconds"), "Total time get exists in seconds", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Get.ExistsTime) / 1000 @@ -568,7 +572,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "get_exists_total"), "Total get exists operations", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Get.ExistsTotal) @@ -580,7 +584,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices_refresh", "time_seconds_total"), "Total time spent refreshing in seconds", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Refresh.TotalTime) / 1000 @@ -592,7 +596,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices_refresh", "total"), "Total refreshes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Refresh.Total) @@ -604,7 +608,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "search_query_time_seconds"), "Total search query time in seconds", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Search.QueryTime) / 1000 @@ -616,7 +620,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "search_query_total"), "Total number of queries", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Search.QueryTotal) @@ -628,7 +632,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all 
bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "search_fetch_time_seconds"), "Total search fetch time in seconds", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Search.FetchTime) / 1000 @@ -640,7 +644,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "search_fetch_total"), "Total number of fetches", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Search.FetchTotal) @@ -652,7 +656,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "search_suggest_total"), "Total number of suggests", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Search.SuggestTotal) @@ -664,7 +668,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "search_suggest_time_seconds"), "Total suggest time in seconds", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Search.SuggestTime) / 1000 @@ -676,7 +680,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "search_scroll_total"), "Total number of scrolls", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Search.ScrollTotal) @@ -688,7 +692,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "search_scroll_time_seconds"), "Total scroll time in seconds", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Search.ScrollTime) / 1000 @@ -700,7 +704,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "docs"), "Count of documents on this node", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Docs.Count) @@ -712,7 +716,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "docs_deleted"), "Count of deleted documents on this node", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Docs.Deleted) @@ -724,7 +728,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "store_size_bytes"), "Current size of stored index data in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Store.Size) @@ -736,7 +740,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( 
prometheus.BuildFQName(namespace, "indices", "store_throttle_time_seconds_total"), "Throttle time for index store in seconds", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Store.ThrottleTime) / 1000 @@ -748,7 +752,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segments_memory_bytes"), "Current memory size of segments in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Segments.Memory) @@ -760,7 +764,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segments_count"), "Count of index segments on this node", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Segments.Count) @@ -772,7 +776,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segments_terms_memory_in_bytes"), "Count of terms in memory for this node", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Segments.TermsMemory) @@ -784,7 +788,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segments_index_writer_memory_in_bytes"), "Count of memory for index writer on this node", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Segments.IndexWriterMemory) @@ -796,7 +800,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segments_norms_memory_in_bytes"), "Count of memory used by norms", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Segments.NormsMemory) @@ -808,7 +812,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segments_stored_fields_memory_in_bytes"), "Count of stored fields memory", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Segments.StoredFieldsMemory) @@ -820,7 +824,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segments_doc_values_memory_in_bytes"), "Count of doc values memory", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Segments.DocValuesMemory) @@ -832,7 +836,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segments_fixed_bit_set_memory_in_bytes"), "Count of fixed bit set", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return 
float64(node.Indices.Segments.FixedBitSet) @@ -844,7 +848,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segments_term_vectors_memory_in_bytes"), "Term vectors memory usage in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Segments.TermVectorsMemory) @@ -856,7 +860,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segments_points_memory_in_bytes"), "Point values memory usage in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Segments.PointsMemory) @@ -868,7 +872,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "segments_version_map_memory_in_bytes"), "Version map memory usage in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Segments.VersionMapMemory) @@ -880,7 +884,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "flush_total"), "Total flushes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Flush.Total) @@ -892,7 +896,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "flush_time_seconds"), "Cumulative flush time in seconds", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Flush.Time) / 1000 @@ -904,7 +908,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "warmer_total"), "Total warmer count", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Warmer.Total) @@ -916,7 +920,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices", "warmer_time_seconds_total"), "Total warmer time in seconds", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Warmer.TotalTime) / 1000 @@ -928,7 +932,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices_indexing", "index_time_seconds_total"), "Cumulative index time in seconds", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Indexing.IndexTime) / 1000 @@ -940,7 +944,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices_indexing", "index_total"), "Total index calls", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { 
return float64(node.Indices.Indexing.IndexTotal) @@ -952,7 +956,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices_indexing", "delete_time_seconds_total"), "Total time indexing delete in seconds", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Indexing.DeleteTime) / 1000 @@ -964,7 +968,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices_indexing", "delete_total"), "Total indexing deletes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Indexing.DeleteTotal) @@ -976,7 +980,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices_indexing", "is_throttled"), "Indexing throttling", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { if node.Indices.Indexing.IsThrottled { @@ -991,7 +995,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices_indexing", "throttle_time_seconds_total"), "Cumulative indexing throttling time", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Indexing.ThrottleTime) / 1000 @@ -1003,7 +1007,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices_merges", "total"), "Total merges", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Merges.Total) @@ -1015,7 +1019,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices_merges", "current"), "Current merges", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Merges.Current) @@ -1027,7 +1031,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices_merges", "current_size_in_bytes"), "Size of a current merges in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Merges.CurrentSize) @@ -1039,7 +1043,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices_merges", "docs_total"), "Cumulative docs merged", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Merges.TotalDocs) @@ -1051,7 +1055,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices_merges", "total_size_bytes_total"), "Total merge size in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return 
float64(node.Indices.Merges.TotalSize) @@ -1063,7 +1067,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices_merges", "total_time_seconds_total"), "Total time spent merging in seconds", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Merges.TotalTime) / 1000 @@ -1075,7 +1079,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "indices_merges", "total_throttled_time_seconds_total"), "Total throttled time of merges in seconds", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Indices.Merges.TotalThrottledTime) / 1000 @@ -1087,7 +1091,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory", "used_bytes"), "JVM memory currently used by area", - append(defaultNodeLabels, "area"), nil, + append(defaultNodeLabels, "area"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.HeapUsed) @@ -1101,7 +1105,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory", "used_bytes"), "JVM memory currently used by area", - append(defaultNodeLabels, "area"), nil, + append(defaultNodeLabels, "area"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.NonHeapUsed) @@ -1115,7 +1119,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory", "max_bytes"), "JVM memory max", - append(defaultNodeLabels, "area"), nil, + append(defaultNodeLabels, "area"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.HeapMax) @@ -1129,7 +1133,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory", "committed_bytes"), "JVM memory currently committed by area", - append(defaultNodeLabels, "area"), nil, + append(defaultNodeLabels, "area"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.HeapCommitted) @@ -1143,7 +1147,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory", "committed_bytes"), "JVM memory currently committed by area", - append(defaultNodeLabels, "area"), nil, + append(defaultNodeLabels, "area"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.NonHeapCommitted) @@ -1157,7 +1161,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory_pool", "used_bytes"), "JVM memory currently used by pool", - append(defaultNodeLabels, "pool"), nil, + append(defaultNodeLabels, "pool"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.Pools["young"].Used) @@ -1171,7 +1175,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( 
prometheus.BuildFQName(namespace, "jvm_memory_pool", "max_bytes"), "JVM memory max by pool", - append(defaultNodeLabels, "pool"), nil, + append(defaultNodeLabels, "pool"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.Pools["young"].Max) @@ -1185,7 +1189,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory_pool", "peak_used_bytes"), "JVM memory peak used by pool", - append(defaultNodeLabels, "pool"), nil, + append(defaultNodeLabels, "pool"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.Pools["young"].PeakUsed) @@ -1199,7 +1203,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory_pool", "peak_max_bytes"), "JVM memory peak max by pool", - append(defaultNodeLabels, "pool"), nil, + append(defaultNodeLabels, "pool"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.Pools["young"].PeakMax) @@ -1213,7 +1217,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory_pool", "used_bytes"), "JVM memory currently used by pool", - append(defaultNodeLabels, "pool"), nil, + append(defaultNodeLabels, "pool"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.Pools["survivor"].Used) @@ -1227,7 +1231,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory_pool", "max_bytes"), "JVM memory max by pool", - append(defaultNodeLabels, "pool"), nil, + append(defaultNodeLabels, "pool"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.Pools["survivor"].Max) @@ -1241,7 +1245,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory_pool", "peak_used_bytes"), "JVM memory peak used by pool", - append(defaultNodeLabels, "pool"), nil, + append(defaultNodeLabels, "pool"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.Pools["survivor"].PeakUsed) @@ -1255,7 +1259,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory_pool", "peak_max_bytes"), "JVM memory peak max by pool", - append(defaultNodeLabels, "pool"), nil, + append(defaultNodeLabels, "pool"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.Pools["survivor"].PeakMax) @@ -1269,7 +1273,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory_pool", "used_bytes"), "JVM memory currently used by pool", - append(defaultNodeLabels, "pool"), nil, + append(defaultNodeLabels, "pool"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.Pools["old"].Used) @@ -1283,7 +1287,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory_pool", "max_bytes"), "JVM memory max by pool", - append(defaultNodeLabels, "pool"), nil, + 
append(defaultNodeLabels, "pool"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.Pools["old"].Max) @@ -1297,7 +1301,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory_pool", "peak_used_bytes"), "JVM memory peak used by pool", - append(defaultNodeLabels, "pool"), nil, + append(defaultNodeLabels, "pool"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.Pools["old"].PeakUsed) @@ -1311,7 +1315,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_memory_pool", "peak_max_bytes"), "JVM memory peak max by pool", - append(defaultNodeLabels, "pool"), nil, + append(defaultNodeLabels, "pool"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.Mem.Pools["old"].PeakMax) @@ -1325,7 +1329,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_buffer_pool", "used_bytes"), "JVM buffer currently used", - append(defaultNodeLabels, "type"), nil, + append(defaultNodeLabels, "type"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.BufferPools["direct"].Used) @@ -1339,7 +1343,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_buffer_pool", "used_bytes"), "JVM buffer currently used", - append(defaultNodeLabels, "type"), nil, + append(defaultNodeLabels, "type"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.JVM.BufferPools["mapped"].Used) @@ -1353,7 +1357,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "process", "cpu_percent"), "Percent CPU used by process", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Process.CPU.Percent) @@ -1365,7 +1369,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "process", "mem_resident_size_bytes"), "Resident memory in use by process in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Process.Memory.Resident) @@ -1377,7 +1381,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "process", "mem_share_size_bytes"), "Shared memory in use by process in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Process.Memory.Share) @@ -1389,7 +1393,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "process", "mem_virtual_size_bytes"), "Total virtual memory used in bytes", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Process.Memory.TotalVirtual) @@ -1401,7 +1405,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: 
prometheus.NewDesc( prometheus.BuildFQName(namespace, "process", "open_files_count"), "Open file descriptors", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Process.OpenFD) @@ -1413,7 +1417,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "process", "max_files_descriptors"), "Max file descriptors", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Process.MaxFD) @@ -1425,7 +1429,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "process", "cpu_time_seconds_sum"), "Process CPU time in seconds", - append(defaultNodeLabels, "type"), nil, + append(defaultNodeLabels, "type"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Process.CPU.Total) / 1000 @@ -1439,7 +1443,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "process", "cpu_time_seconds_sum"), "Process CPU time in seconds", - append(defaultNodeLabels, "type"), nil, + append(defaultNodeLabels, "type"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Process.CPU.Sys) / 1000 @@ -1453,7 +1457,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "process", "cpu_time_seconds_sum"), "Process CPU time in seconds", - append(defaultNodeLabels, "type"), nil, + append(defaultNodeLabels, "type"), constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Process.CPU.User) / 1000 @@ -1467,7 +1471,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "transport", "rx_packets_total"), "Count of packets received", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Transport.RxCount) @@ -1479,7 +1483,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "transport", "rx_size_bytes_total"), "Total number of bytes received", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Transport.RxSize) @@ -1491,7 +1495,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "transport", "tx_packets_total"), "Count of packets sent", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Transport.TxCount) @@ -1503,7 +1507,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "transport", "tx_size_bytes_total"), "Total number of bytes sent", - defaultNodeLabels, nil, + defaultNodeLabels, constLabels, ), Value: func(node NodeStatsNodeResponse) float64 { return float64(node.Transport.TxSize) @@ -1517,7 +1521,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( 
prometheus.BuildFQName(namespace, "jvm_gc", "collection_seconds_count"), "Count of JVM GC runs", - append(defaultNodeLabels, "gc"), nil, + append(defaultNodeLabels, "gc"), constLabels, ), Value: func(gcStats NodeStatsJVMGCCollectorResponse) float64 { return float64(gcStats.CollectionCount) @@ -1531,7 +1535,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "jvm_gc", "collection_seconds_sum"), "GC run time in seconds", - append(defaultNodeLabels, "gc"), nil, + append(defaultNodeLabels, "gc"), constLabels, ), Value: func(gcStats NodeStatsJVMGCCollectorResponse) float64 { return float64(gcStats.CollectionTime) / 1000 @@ -1547,7 +1551,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "breakers", "estimated_size_bytes"), "Estimated size in bytes of breaker", - defaultBreakerLabels, nil, + defaultBreakerLabels, constLabels, ), Value: func(breakerStats NodeStatsBreakersResponse) float64 { return float64(breakerStats.EstimatedSize) @@ -1561,7 +1565,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "breakers", "limit_size_bytes"), "Limit size in bytes for breaker", - defaultBreakerLabels, nil, + defaultBreakerLabels, constLabels, ), Value: func(breakerStats NodeStatsBreakersResponse) float64 { return float64(breakerStats.LimitSize) @@ -1575,7 +1579,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "breakers", "tripped"), "tripped for breaker", - defaultBreakerLabels, nil, + defaultBreakerLabels, constLabels, ), Value: func(breakerStats NodeStatsBreakersResponse) float64 { return float64(breakerStats.Tripped) @@ -1589,7 +1593,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "breakers", "overhead"), "Overhead of circuit breakers", - defaultBreakerLabels, nil, + defaultBreakerLabels, constLabels, ), Value: func(breakerStats NodeStatsBreakersResponse) float64 { return breakerStats.Overhead @@ -1605,7 +1609,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "thread_pool", "completed_count"), "Thread Pool operations completed", - defaultThreadPoolLabels, nil, + defaultThreadPoolLabels, constLabels, ), Value: func(threadPoolStats NodeStatsThreadPoolPoolResponse) float64 { return float64(threadPoolStats.Completed) @@ -1617,7 +1621,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "thread_pool", "rejected_count"), "Thread Pool operations rejected", - defaultThreadPoolLabels, nil, + defaultThreadPoolLabels, constLabels, ), Value: func(threadPoolStats NodeStatsThreadPoolPoolResponse) float64 { return float64(threadPoolStats.Rejected) @@ -1629,7 +1633,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "thread_pool", "active_count"), "Thread Pool threads active", - defaultThreadPoolLabels, nil, + defaultThreadPoolLabels, constLabels, ), Value: func(threadPoolStats NodeStatsThreadPoolPoolResponse) float64 { return float64(threadPoolStats.Active) @@ -1641,7 
+1645,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "thread_pool", "largest_count"), "Thread Pool largest threads count", - defaultThreadPoolLabels, nil, + defaultThreadPoolLabels, constLabels, ), Value: func(threadPoolStats NodeStatsThreadPoolPoolResponse) float64 { return float64(threadPoolStats.Largest) @@ -1653,7 +1657,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "thread_pool", "queue_count"), "Thread Pool operations queued", - defaultThreadPoolLabels, nil, + defaultThreadPoolLabels, constLabels, ), Value: func(threadPoolStats NodeStatsThreadPoolPoolResponse) float64 { return float64(threadPoolStats.Queue) @@ -1665,7 +1669,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "thread_pool", "threads_count"), "Thread Pool current threads count", - defaultThreadPoolLabels, nil, + defaultThreadPoolLabels, constLabels, ), Value: func(threadPoolStats NodeStatsThreadPoolPoolResponse) float64 { return float64(threadPoolStats.Threads) @@ -1679,7 +1683,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "filesystem_data", "available_bytes"), "Available space on block device in bytes", - defaultFilesystemDataLabels, nil, + defaultFilesystemDataLabels, constLabels, ), Value: func(fsStats NodeStatsFSDataResponse) float64 { return float64(fsStats.Available) @@ -1691,7 +1695,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "filesystem_data", "free_bytes"), "Free space on block device in bytes", - defaultFilesystemDataLabels, nil, + defaultFilesystemDataLabels, constLabels, ), Value: func(fsStats NodeStatsFSDataResponse) float64 { return float64(fsStats.Free) @@ -1703,7 +1707,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "filesystem_data", "size_bytes"), "Size of block device in bytes", - defaultFilesystemDataLabels, nil, + defaultFilesystemDataLabels, constLabels, ), Value: func(fsStats NodeStatsFSDataResponse) float64 { return float64(fsStats.Total) @@ -1717,7 +1721,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "filesystem_io_stats_device", "operations_count"), "Count of disk operations", - defaultFilesystemIODeviceLabels, nil, + defaultFilesystemIODeviceLabels, constLabels, ), Value: func(fsIODeviceStats NodeStatsFSIOStatsDeviceResponse) float64 { return float64(fsIODeviceStats.Operations) @@ -1729,7 +1733,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "filesystem_io_stats_device", "read_operations_count"), "Count of disk read operations", - defaultFilesystemIODeviceLabels, nil, + defaultFilesystemIODeviceLabels, constLabels, ), Value: func(fsIODeviceStats NodeStatsFSIOStatsDeviceResponse) float64 { return float64(fsIODeviceStats.ReadOperations) @@ -1741,7 +1745,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, 
"filesystem_io_stats_device", "write_operations_count"), "Count of disk write operations", - defaultFilesystemIODeviceLabels, nil, + defaultFilesystemIODeviceLabels, constLabels, ), Value: func(fsIODeviceStats NodeStatsFSIOStatsDeviceResponse) float64 { return float64(fsIODeviceStats.WriteOperations) @@ -1753,7 +1757,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "filesystem_io_stats_device", "read_size_kilobytes_sum"), "Total kilobytes read from disk", - defaultFilesystemIODeviceLabels, nil, + defaultFilesystemIODeviceLabels, constLabels, ), Value: func(fsIODeviceStats NodeStatsFSIOStatsDeviceResponse) float64 { return float64(fsIODeviceStats.ReadSize) @@ -1765,7 +1769,7 @@ func NewNodes(logger log.Logger, client *http.Client, url *url.URL, all bool, no Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "filesystem_io_stats_device", "write_size_kilobytes_sum"), "Total kilobytes written to disk", - defaultFilesystemIODeviceLabels, nil, + defaultFilesystemIODeviceLabels, constLabels, ), Value: func(fsIODeviceStats NodeStatsFSIOStatsDeviceResponse) float64 { return float64(fsIODeviceStats.WriteSize) diff --git a/collector/snapshots.go b/collector/snapshots.go index f5785889..79045baa 100644 --- a/collector/snapshots.go +++ b/collector/snapshots.go @@ -52,22 +52,26 @@ type Snapshots struct { // NewSnapshots defines Snapshots Prometheus metrics func NewSnapshots(logger log.Logger, client *http.Client, url *url.URL) *Snapshots { + constLabels := ConstLabelsFromURL(url) return &Snapshots{ logger: logger, client: client, url: url, up: prometheus.NewGauge(prometheus.GaugeOpts{ - Name: prometheus.BuildFQName(namespace, "snapshot_stats", "up"), - Help: "Was the last scrape of the ElasticSearch snapshots endpoint successful.", + Name: prometheus.BuildFQName(namespace, "snapshot_stats", "up"), + Help: "Was the last scrape of the ElasticSearch snapshots endpoint successful.", + ConstLabels: constLabels, }), totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{ - Name: prometheus.BuildFQName(namespace, "snapshot_stats", "total_scrapes"), - Help: "Current total ElasticSearch snapshots scrapes.", + Name: prometheus.BuildFQName(namespace, "snapshot_stats", "total_scrapes"), + Help: "Current total ElasticSearch snapshots scrapes.", + ConstLabels: constLabels, }), jsonParseFailures: prometheus.NewCounter(prometheus.CounterOpts{ - Name: prometheus.BuildFQName(namespace, "snapshot_stats", "json_parse_failures"), - Help: "Number of errors while parsing JSON.", + Name: prometheus.BuildFQName(namespace, "snapshot_stats", "json_parse_failures"), + Help: "Number of errors while parsing JSON.", + ConstLabels: constLabels, }), snapshotMetrics: []*snapshotMetric{ { @@ -75,7 +79,7 @@ func NewSnapshots(logger log.Logger, client *http.Client, url *url.URL) *Snapsho Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "snapshot_stats", "snapshot_number_of_indices"), "Number of indices in the last snapshot", - defaultSnapshotLabels, nil, + defaultSnapshotLabels, constLabels, ), Value: func(snapshotStats SnapshotStatDataResponse) float64 { return float64(len(snapshotStats.Indices)) @@ -87,7 +91,7 @@ func NewSnapshots(logger log.Logger, client *http.Client, url *url.URL) *Snapsho Desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "snapshot_stats", "snapshot_start_time_timestamp"), "Last snapshot start timestamp", - defaultSnapshotLabels, nil, + defaultSnapshotLabels, constLabels, ), Value: 
func(snapshotStats SnapshotStatDataResponse) float64 {
 				return float64(snapshotStats.StartTimeInMillis / 1000)
@@ -99,7 +103,7 @@ func NewSnapshots(logger log.Logger, client *http.Client, url *url.URL) *Snapsho
 			Desc: prometheus.NewDesc(
 				prometheus.BuildFQName(namespace, "snapshot_stats", "snapshot_end_time_timestamp"),
 				"Last snapshot end timestamp",
-				defaultSnapshotLabels, nil,
+				defaultSnapshotLabels, constLabels,
 			),
 			Value: func(snapshotStats SnapshotStatDataResponse) float64 {
 				return float64(snapshotStats.EndTimeInMillis / 1000)
@@ -111,7 +115,7 @@ func NewSnapshots(logger log.Logger, client *http.Client, url *url.URL) *Snapsho
 			Desc: prometheus.NewDesc(
 				prometheus.BuildFQName(namespace, "snapshot_stats", "snapshot_number_of_failures"),
 				"Last snapshot number of failures",
-				defaultSnapshotLabels, nil,
+				defaultSnapshotLabels, constLabels,
 			),
 			Value: func(snapshotStats SnapshotStatDataResponse) float64 {
 				return float64(len(snapshotStats.Failures))
@@ -123,7 +127,7 @@ func NewSnapshots(logger log.Logger, client *http.Client, url *url.URL) *Snapsho
 			Desc: prometheus.NewDesc(
 				prometheus.BuildFQName(namespace, "snapshot_stats", "snapshot_total_shards"),
 				"Last snapshot total shards",
-				defaultSnapshotLabels, nil,
+				defaultSnapshotLabels, constLabels,
 			),
 			Value: func(snapshotStats SnapshotStatDataResponse) float64 {
 				return float64(snapshotStats.Shards.Total)
@@ -135,7 +139,7 @@ func NewSnapshots(logger log.Logger, client *http.Client, url *url.URL) *Snapsho
 			Desc: prometheus.NewDesc(
 				prometheus.BuildFQName(namespace, "snapshot_stats", "snapshot_failed_shards"),
 				"Last snapshot failed shards",
-				defaultSnapshotLabels, nil,
+				defaultSnapshotLabels, constLabels,
 			),
 			Value: func(snapshotStats SnapshotStatDataResponse) float64 {
 				return float64(snapshotStats.Shards.Failed)
@@ -147,7 +151,7 @@ func NewSnapshots(logger log.Logger, client *http.Client, url *url.URL) *Snapsho
 			Desc: prometheus.NewDesc(
 				prometheus.BuildFQName(namespace, "snapshot_stats", "snapshot_successful_shards"),
 				"Last snapshot successful shards",
-				defaultSnapshotLabels, nil,
+				defaultSnapshotLabels, constLabels,
 			),
 			Value: func(snapshotStats SnapshotStatDataResponse) float64 {
 				return float64(snapshotStats.Shards.Successful)
@@ -161,7 +165,7 @@ func NewSnapshots(logger log.Logger, client *http.Client, url *url.URL) *Snapsho
 			Desc: prometheus.NewDesc(
 				prometheus.BuildFQName(namespace, "snapshot_stats", "number_of_snapshots"),
 				"Number of snapshots in a repository",
-				defaultSnapshotRepositoryLabels, nil,
+				defaultSnapshotRepositoryLabels, constLabels,
 			),
 			Value: func(snapshotsStats SnapshotStatsResponse) float64 {
 				return float64(len(snapshotsStats.Snapshots))
@@ -173,7 +177,7 @@ func NewSnapshots(logger log.Logger, client *http.Client, url *url.URL) *Snapsho
 			Desc: prometheus.NewDesc(
 				prometheus.BuildFQName(namespace, "snapshot_stats", "oldest_snapshot_timestamp"),
 				"Timestamp of the oldest snapshot",
-				defaultSnapshotRepositoryLabels, nil,
+				defaultSnapshotRepositoryLabels, constLabels,
 			),
 			Value: func(snapshotsStats SnapshotStatsResponse) float64 {
 				if len(snapshotsStats.Snapshots) == 0 {
diff --git a/main.go b/main.go
index 9e39eedc..aaf1cb4c 100644
--- a/main.go
+++ b/main.go
@@ -5,13 +5,14 @@ import (
 	"net/url"
 	"os"
 	"os/signal"
+	"strings"
 	"time"
 
 	"context"
 
 	"github.com/go-kit/kit/log/level"
-	"github.com/justwatchcom/elasticsearch_exporter/collector"
-	"github.com/justwatchcom/elasticsearch_exporter/pkg/clusterinfo"
+	"github.com/gojuno/elasticsearch_exporter/collector"
+	"github.com/gojuno/elasticsearch_exporter/pkg/clusterinfo"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/version"
 	"gopkg.in/alecthomas/kingpin.v2"
@@ -85,13 +86,17 @@ func main() {
 
 	logger := getLogger(*logLevel, *logOutput, *logFormat)
 
-	esURL, err := url.Parse(*esURI)
-	if err != nil {
-		_ = level.Error(logger).Log(
-			"msg", "failed to parse es.uri",
-			"err", err,
-		)
-		os.Exit(1)
+	var esURLs []*url.URL
+	for _, esURL := range strings.Split(*esURI, ",") {
+		u, err := url.Parse(esURL)
+		if err != nil {
+			_ = level.Error(logger).Log(
+				"msg", "failed to parse es.uri",
+				"err", err,
+			)
+			os.Exit(1)
+		}
+		esURLs = append(esURLs, u)
 	}
 
 	// returns nil if not provided and falls back to simple TCP.
@@ -109,31 +114,36 @@ func main() {
 	versionMetric := version.NewCollector(Name)
 	prometheus.MustRegister(versionMetric)
 
-	// cluster info retriever
-	clusterInfoRetriever := clusterinfo.New(logger, httpClient, esURL, *esClusterInfoInterval)
+	retrievers := make(map[*url.URL]*clusterinfo.Retriever)
+	for _, esURL := range esURLs {
+		// cluster info retriever
+		clusterInfoRetriever := clusterinfo.New(logger, httpClient, esURL, *esClusterInfoInterval)
 
-	prometheus.MustRegister(collector.NewClusterHealth(logger, httpClient, esURL))
-	prometheus.MustRegister(collector.NewNodes(logger, httpClient, esURL, *esAllNodes, *esNode))
+		retrievers[esURL] = clusterInfoRetriever
 
-	if *esExportIndices || *esExportShards {
-		iC := collector.NewIndices(logger, httpClient, esURL, *esExportShards)
-		prometheus.MustRegister(iC)
-		if registerErr := clusterInfoRetriever.RegisterConsumer(iC); registerErr != nil {
-			_ = level.Error(logger).Log("msg", "failed to register indices collector in cluster info")
-			os.Exit(1)
+		prometheus.MustRegister(collector.NewClusterHealth(logger, httpClient, esURL))
+		prometheus.MustRegister(collector.NewNodes(logger, httpClient, esURL, *esAllNodes, *esNode))
+
+		if *esExportIndices || *esExportShards {
+			iC := collector.NewIndices(logger, httpClient, esURL, *esExportShards)
+			prometheus.MustRegister(iC)
+			if registerErr := clusterInfoRetriever.RegisterConsumer(iC); registerErr != nil {
+				_ = level.Error(logger).Log("msg", "failed to register indices collector in cluster info")
+				os.Exit(1)
+			}
 		}
-	}
 
-	if *esExportSnapshots {
-		prometheus.MustRegister(collector.NewSnapshots(logger, httpClient, esURL))
-	}
+		if *esExportSnapshots {
+			prometheus.MustRegister(collector.NewSnapshots(logger, httpClient, esURL))
+		}
 
-	if *esExportClusterSettings {
-		prometheus.MustRegister(collector.NewClusterSettings(logger, httpClient, esURL))
-	}
+		if *esExportClusterSettings {
+			prometheus.MustRegister(collector.NewClusterSettings(logger, httpClient, esURL))
+		}
 
-	if *esExportIndicesSettings {
-		prometheus.MustRegister(collector.NewIndicesSettings(logger, httpClient, esURL))
+		if *esExportIndicesSettings {
+			prometheus.MustRegister(collector.NewIndicesSettings(logger, httpClient, esURL))
+		}
 	}
 
 	// create a http server
@@ -142,34 +152,36 @@ func main() {
 	// create a context that is cancelled on SIGKILL
 	ctx, cancel := context.WithCancel(context.Background())
 
-	// start the cluster info retriever
-	switch runErr := clusterInfoRetriever.Run(ctx); runErr {
-	case nil:
-		_ = level.Info(logger).Log(
-			"msg", "started cluster info retriever",
-			"interval", (*esClusterInfoInterval).String(),
-		)
-	case clusterinfo.ErrInitialCallTimeout:
-		_ = level.Info(logger).Log("msg", "initial cluster info call timed out")
-	default:
-		_ = level.Error(logger).Log("msg", "failed to run cluster info retriever", "err", err)
-		os.Exit(1)
-	}
+	for esURL, retriever := range retrievers {
+		// start the cluster info retriever
+		switch err := retriever.Run(ctx); err {
+		case nil:
+			_ = level.Info(logger).Log(
+				"msg", "started cluster info retriever",
+				"interval", (*esClusterInfoInterval).String(),
+				"url", esURL.String(),
+			)
+		case clusterinfo.ErrInitialCallTimeout:
+			_ = level.Info(logger).Log("msg", "initial cluster info call timed out")
+		default:
+			_ = level.Error(logger).Log("msg", "failed to run cluster info retriever", "err", err)
+			os.Exit(1)
+		}
 
-	// register cluster info retriever as prometheus collector
-	prometheus.MustRegister(clusterInfoRetriever)
+		// register cluster info retriever as prometheus collector
+		prometheus.MustRegister(retriever)
+	}
 
 	mux := http.DefaultServeMux
 	mux.Handle(*metricsPath, prometheus.Handler())
 
 	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
-		_, err = w.Write([]byte(`<html>
+		if _, err := w.Write([]byte(`<html>
 			<head><title>Elasticsearch Exporter</title></head>
 			<body>
 			<h1>Elasticsearch Exporter</h1>
 			<p><a href="` + *metricsPath + `">Metrics</a></p>
 			</body>
-			</html>`))
-		if err != nil {
+			</html>`)); err != nil {
 			_ = level.Error(logger).Log(
 				"msg", "failed handling writer",
 				"err", err,
diff --git a/pkg/clusterinfo/clusterinfo.go b/pkg/clusterinfo/clusterinfo.go
index e021949e..c4eb6b65 100644
--- a/pkg/clusterinfo/clusterinfo.go
+++ b/pkg/clusterinfo/clusterinfo.go
@@ -52,6 +52,11 @@ type Retriever struct {
 
 // New creates a new Retriever
 func New(logger log.Logger, client *http.Client, u *url.URL, interval time.Duration) *Retriever {
+	esURL := *u
+	esURL.User = nil
+	constLabels := map[string]string{
+		"cluster_url": esURL.String(),
+	}
 	return &Retriever{
 		consumerChannels: make(map[string]*chan *Response),
 		logger:           logger,
@@ -61,8 +66,9 @@ func New(logger log.Logger, client *http.Client, u *url.URL, interval time.Durat
 		sync:             make(chan struct{}, 1),
 		versionMetric: prometheus.NewGaugeVec(
 			prometheus.GaugeOpts{
-				Name: prometheus.BuildFQName(namespace, subsystem, "version_info"),
-				Help: "Constant metric with ES version information as labels",
+				Name:        prometheus.BuildFQName(namespace, subsystem, "version_info"),
+				Help:        "Constant metric with ES version information as labels",
+				ConstLabels: constLabels,
 			},
 			[]string{
 				"cluster",
@@ -75,22 +81,25 @@ func New(logger log.Logger, client *http.Client, u *url.URL, interval time.Durat
 		),
 		up: prometheus.NewGaugeVec(
 			prometheus.GaugeOpts{
-				Name: prometheus.BuildFQName(namespace, subsystem, "up"),
-				Help: "Up metric for the cluster info collector",
+				Name:        prometheus.BuildFQName(namespace, subsystem, "up"),
+				Help:        "Up metric for the cluster info collector",
+				ConstLabels: constLabels,
 			},
 			[]string{"url"},
 		),
 		lastUpstreamSuccessTs: prometheus.NewGaugeVec(
 			prometheus.GaugeOpts{
-				Name: prometheus.BuildFQName(namespace, subsystem, "last_retrieval_success_ts"),
-				Help: "Timestamp of the last successful cluster info retrieval",
+				Name:        prometheus.BuildFQName(namespace, subsystem, "last_retrieval_success_ts"),
+				Help:        "Timestamp of the last successful cluster info retrieval",
+				ConstLabels: constLabels,
 			},
 			[]string{"url"},
 		),
 		lastUpstreamErrorTs: prometheus.NewGaugeVec(
 			prometheus.GaugeOpts{
-				Name: prometheus.BuildFQName(namespace, subsystem, "last_retrieval_failure_ts"),
-				Help: "Timestamp of the last failed cluster info retrieval",
+				Name:        prometheus.BuildFQName(namespace, subsystem, "last_retrieval_failure_ts"),
+				Help:        "Timestamp of the last failed cluster info retrieval",
+				ConstLabels: constLabels,
 			},
 			[]string{"url"},
 		),
diff --git a/vendor/vendor.json b/vendor/vendor.json
index bc6c9abd..9bb79244 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -129,5 +129,5 @@
 			"revisionTime": "2017-12-17T18:08:21Z"
 		}
 	],
-	"rootPath": "github.com/justwatchcom/elasticsearch_exporter"
+	"rootPath": "github.com/gojuno/elasticsearch_exporter"
 }
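
Note on ConstLabelsFromURL: every collector above now passes ConstLabelsFromURL(url) both to prometheus.NewDesc and to the ConstLabels field of its gauge and counter options, so that series scraped from different clusters remain distinguishable. The helper itself lives in the new collector/const_labels.go. Judging from how clusterinfo.New builds its "cluster_url" label (copy the URL, drop any credentials), a minimal sketch of that helper could look like the following; the exact contents of the shipped file may differ.

package collector

import (
	"net/url"

	"github.com/prometheus/client_golang/prometheus"
)

// ConstLabelsFromURL derives the constant labels that identify which cluster
// a metric was scraped from. The URL is copied and its user info dropped so
// credentials never appear in label values. (Sketch, not the shipped file.)
func ConstLabelsFromURL(u *url.URL) prometheus.Labels {
	esURL := *u      // work on a copy, leave the caller's URL untouched
	esURL.User = nil // strip user:password before exposing the URL
	return prometheus.Labels{
		"cluster_url": esURL.String(),
	}
}

With that in place, the exporter can be pointed at several clusters at once through a comma-separated es.uri, for example --es.uri=http://es-a:9200,http://es-b:9200 (placeholder host names): main registers one set of collectors and one cluster info retriever per URL, and every exported time series then carries a cluster_url constant label naming the cluster it was scraped from, e.g. elasticsearch_cluster_health_up{cluster_url="http://es-a:9200"}.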