From 82e6cea0d2eb56bf92123963e38eed4b2245f1b6 Mon Sep 17 00:00:00 2001 From: Stefan Kurek Date: Thu, 5 Dec 2024 13:48:45 -0500 Subject: [PATCH] MongoDB Atlas Receiver adds additional disk & process metrics --- .../mongodbatlasreceiver-metric-adds.yaml | 31 +++ receiver/mongodbatlasreceiver/README.md | 2 +- .../mongodbatlasreceiver/documentation.md | 56 +++- .../generated_component_test.go | 12 +- .../generated_package_test.go | 3 +- .../internal/metadata/generated_config.go | 12 + .../metadata/generated_config_test.go | 16 +- .../internal/metadata/generated_metrics.go | 259 +++++++++++++++--- .../metadata/generated_metrics_test.go | 197 ++++++++----- .../metadata/generated_resource_test.go | 18 +- .../internal/metadata/generated_status.go | 3 +- .../internal/metadata/metric_name_mapping.go | 39 ++- .../internal/metadata/testdata/config.yaml | 12 + .../internal/metric_conversion.go | 68 ++++- receiver/mongodbatlasreceiver/metadata.yaml | 33 ++- 15 files changed, 624 insertions(+), 137 deletions(-) create mode 100644 .chloggen/mongodbatlasreceiver-metric-adds.yaml diff --git a/.chloggen/mongodbatlasreceiver-metric-adds.yaml b/.chloggen/mongodbatlasreceiver-metric-adds.yaml new file mode 100644 index 000000000000..7c4dabadd869 --- /dev/null +++ b/.chloggen/mongodbatlasreceiver-metric-adds.yaml @@ -0,0 +1,31 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: mongodbatlasreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Adds additional metrics to the MongoDB Atlas receiver" + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36525] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + Adds a number of new default disabled metrics to the MongoDB Atlas receiver. These metrics are: + - mongodbatlas.disk.partition.queue.depth + - mongodbatlas.disk.partition.throughput + - mongodbatlas.process.cache.ratio + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
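+#
+# For reference, a sketch of enabling the new disabled-by-default metrics in a
+# collector config (hypothetical credentials via env vars; the `metrics` section
+# is the receiver's standard generated per-metric toggle):
+#
+#   receivers:
+#     mongodbatlas:
+#       public_key: ${env:MONGODB_ATLAS_PUBLIC_KEY}
+#       private_key: ${env:MONGODB_ATLAS_PRIVATE_KEY}
+#       metrics:
+#         mongodbatlas.disk.partition.queue.depth:
+#           enabled: true
+#         mongodbatlas.disk.partition.throughput:
+#           enabled: true
+#         mongodbatlas.process.cache.ratio:
+#           enabled: true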
+# Default: '[user]' +change_logs: [] diff --git a/receiver/mongodbatlasreceiver/README.md b/receiver/mongodbatlasreceiver/README.md index 3f9cc38b55f8..641de98fe1ec 100644 --- a/receiver/mongodbatlasreceiver/README.md +++ b/receiver/mongodbatlasreceiver/README.md @@ -8,7 +8,7 @@ | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fmongodbatlas%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fmongodbatlas) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fmongodbatlas%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fmongodbatlas) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@schmikei](https://www.github.com/schmikei) \| Seeking more code owners! | -[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta +[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib diff --git a/receiver/mongodbatlasreceiver/documentation.md b/receiver/mongodbatlasreceiver/documentation.md index ae5120d4b7a3..251f950865ae 100644 --- a/receiver/mongodbatlasreceiver/documentation.md +++ b/receiver/mongodbatlasreceiver/documentation.md @@ -424,7 +424,7 @@ Aggregate of MongoDB Metrics DOCUMENT_METRICS_UPDATED, DOCUMENT_METRICS_DELETED, DB Operation Rates -Aggregate of MongoDB Metrics OPCOUNTER_GETMORE, OPERATIONS_SCAN_AND_ORDER, OPCOUNTER_UPDATE, OPCOUNTER_REPL_UPDATE, OPCOUNTER_CMD, OPCOUNTER_DELETE, OPCOUNTER_REPL_DELETE, OPCOUNTER_REPL_CMD, OPCOUNTER_QUERY, OPCOUNTER_REPL_INSERT, OPCOUNTER_INSERT +Aggregate of MongoDB Metrics OPCOUNTER_GETMORE, OPERATIONS_SCAN_AND_ORDER, OPCOUNTER_UPDATE, OPCOUNTER_REPL_UPDATE, OPCOUNTER_CMD, OPCOUNTER_DELETE, OPCOUNTER_REPL_DELETE, OPCOUNTER_REPL_CMD, OPCOUNTER_QUERY, OPCOUNTER_REPL_INSERT, OPCOUNTER_INSERT, OPCOUNTER_TTL_DELETED | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | @@ -434,7 +434,7 @@ Aggregate of MongoDB Metrics OPCOUNTER_GETMORE, OPERATIONS_SCAN_AND_ORDER, OPCOU | Name | Description | Values | | ---- | ----------- | ------ | -| operation | Type of database operation | Str: ``cmd``, ``query``, ``update``, ``delete``, ``getmore``, ``insert``, ``scan_and_order`` | +| operation | Type of database operation | Str: ``cmd``, ``query``, ``update``, ``delete``, ``getmore``, ``insert``, ``scan_and_order``, ``ttl_deleted`` | | cluster_role | Whether process is acting as replica or primary | Str: ``primary``, ``replica`` | ### mongodbatlas.process.db.operations.time @@ -933,6 +933,58 @@ Aggregate of MongoDB Metrics MAX_SWAP_USAGE_FREE, MAX_SWAP_USAGE_USED | ---- | ----------- | ------ | | memory_state | Memory usage type | Str: ``resident``, ``virtual``, ``mapped``, ``computed``, ``shared``, ``free``, ``used`` | +## Optional Metrics + +The following metrics are not emitted by default. 
Each of them can be enabled by applying the following configuration:
+
+```yaml
+metrics:
+  <metric_name>:
+    enabled: true
+```
+
+### mongodbatlas.disk.partition.queue.depth
+
+Disk queue depth
+
+Aggregate of MongoDB Metrics DISK_QUEUE_DEPTH
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Double |
+
+### mongodbatlas.disk.partition.throughput
+
+Disk throughput
+
+Aggregate of MongoDB Metrics DISK_PARTITION_THROUGHPUT_READ, DISK_PARTITION_THROUGHPUT_WRITE
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By/s | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| disk_direction | Measurement type for disk operation | Str: ``read``, ``write``, ``total`` |
+
+### mongodbatlas.process.cache.ratio
+
+Cache ratios represented as (%)
+
+Aggregate of MongoDB Metrics CACHE_FILL_RATIO, DIRTY_FILL_RATIO
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| % | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| cache_ratio_type | Cache ratio type | Str: ``cache_fill``, ``dirty_fill`` |
+
 ## Resource Attributes

 | Name | Description | Values | Enabled |
diff --git a/receiver/mongodbatlasreceiver/generated_component_test.go b/receiver/mongodbatlasreceiver/generated_component_test.go
index bd1da92074f4..77549ce4663e 100644
--- a/receiver/mongodbatlasreceiver/generated_component_test.go
+++ b/receiver/mongodbatlasreceiver/generated_component_test.go
@@ -53,21 +53,21 @@ func TestComponentLifecycle(t *testing.T) {
 	require.NoError(t, err)
 	require.NoError(t, sub.Unmarshal(&cfg))

-	for _, tt := range tests {
-		t.Run(tt.name+"-shutdown", func(t *testing.T) {
-			c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg)
+	for _, test := range tests {
+		t.Run(test.name+"-shutdown", func(t *testing.T) {
+			c, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg)
 			require.NoError(t, err)
 			err = c.Shutdown(context.Background())
 			require.NoError(t, err)
 		})
-		t.Run(tt.name+"-lifecycle", func(t *testing.T) {
-			firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg)
+		t.Run(test.name+"-lifecycle", func(t *testing.T) {
+			firstRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg)
 			require.NoError(t, err)
 			host := componenttest.NewNopHost()
 			require.NoError(t, err)
 			require.NoError(t, firstRcvr.Start(context.Background(), host))
 			require.NoError(t, firstRcvr.Shutdown(context.Background()))
-			secondRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg)
+			secondRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg)
 			require.NoError(t, err)
 			require.NoError(t, secondRcvr.Start(context.Background(), host))
 			require.NoError(t, secondRcvr.Shutdown(context.Background()))
diff --git a/receiver/mongodbatlasreceiver/generated_package_test.go b/receiver/mongodbatlasreceiver/generated_package_test.go
index 1b0508a1d163..3ae9737b91ec 100644
--- a/receiver/mongodbatlasreceiver/generated_package_test.go
+++ b/receiver/mongodbatlasreceiver/generated_package_test.go
@@ -3,9 +3,8 @@
 package mongodbatlasreceiver

 import (
-	"testing"
-
 	"go.uber.org/goleak"
+	"testing"
 )

 func TestMain(m *testing.M) {
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_config.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_config.go
index 46cdab5cd211..9670ae4fe76e 100644
---
a/receiver/mongodbatlasreceiver/internal/metadata/generated_config.go +++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_config.go @@ -34,8 +34,10 @@ type MetricsConfig struct { MongodbatlasDiskPartitionIopsMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.iops.max"` MongodbatlasDiskPartitionLatencyAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.latency.average"` MongodbatlasDiskPartitionLatencyMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.latency.max"` + MongodbatlasDiskPartitionQueueDepth MetricConfig `mapstructure:"mongodbatlas.disk.partition.queue.depth"` MongodbatlasDiskPartitionSpaceAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.space.average"` MongodbatlasDiskPartitionSpaceMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.space.max"` + MongodbatlasDiskPartitionThroughput MetricConfig `mapstructure:"mongodbatlas.disk.partition.throughput"` MongodbatlasDiskPartitionUsageAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.usage.average"` MongodbatlasDiskPartitionUsageMax MetricConfig `mapstructure:"mongodbatlas.disk.partition.usage.max"` MongodbatlasDiskPartitionUtilizationAverage MetricConfig `mapstructure:"mongodbatlas.disk.partition.utilization.average"` @@ -43,6 +45,7 @@ type MetricsConfig struct { MongodbatlasProcessAsserts MetricConfig `mapstructure:"mongodbatlas.process.asserts"` MongodbatlasProcessBackgroundFlush MetricConfig `mapstructure:"mongodbatlas.process.background_flush"` MongodbatlasProcessCacheIo MetricConfig `mapstructure:"mongodbatlas.process.cache.io"` + MongodbatlasProcessCacheRatio MetricConfig `mapstructure:"mongodbatlas.process.cache.ratio"` MongodbatlasProcessCacheSize MetricConfig `mapstructure:"mongodbatlas.process.cache.size"` MongodbatlasProcessConnections MetricConfig `mapstructure:"mongodbatlas.process.connections"` MongodbatlasProcessCPUChildrenNormalizedUsageAverage MetricConfig `mapstructure:"mongodbatlas.process.cpu.children.normalized.usage.average"` @@ -112,12 +115,18 @@ func DefaultMetricsConfig() MetricsConfig { MongodbatlasDiskPartitionLatencyMax: MetricConfig{ Enabled: true, }, + MongodbatlasDiskPartitionQueueDepth: MetricConfig{ + Enabled: false, + }, MongodbatlasDiskPartitionSpaceAverage: MetricConfig{ Enabled: true, }, MongodbatlasDiskPartitionSpaceMax: MetricConfig{ Enabled: true, }, + MongodbatlasDiskPartitionThroughput: MetricConfig{ + Enabled: false, + }, MongodbatlasDiskPartitionUsageAverage: MetricConfig{ Enabled: true, }, @@ -139,6 +148,9 @@ func DefaultMetricsConfig() MetricsConfig { MongodbatlasProcessCacheIo: MetricConfig{ Enabled: true, }, + MongodbatlasProcessCacheRatio: MetricConfig{ + Enabled: false, + }, MongodbatlasProcessCacheSize: MetricConfig{ Enabled: true, }, diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_config_test.go index 8c67cb277f9b..05673dcc2d78 100644 --- a/receiver/mongodbatlasreceiver/internal/metadata/generated_config_test.go +++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_config_test.go @@ -31,8 +31,10 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbatlasDiskPartitionIopsMax: MetricConfig{Enabled: true}, MongodbatlasDiskPartitionLatencyAverage: MetricConfig{Enabled: true}, MongodbatlasDiskPartitionLatencyMax: MetricConfig{Enabled: true}, + MongodbatlasDiskPartitionQueueDepth: MetricConfig{Enabled: true}, MongodbatlasDiskPartitionSpaceAverage: MetricConfig{Enabled: true}, 
MongodbatlasDiskPartitionSpaceMax: MetricConfig{Enabled: true}, + MongodbatlasDiskPartitionThroughput: MetricConfig{Enabled: true}, MongodbatlasDiskPartitionUsageAverage: MetricConfig{Enabled: true}, MongodbatlasDiskPartitionUsageMax: MetricConfig{Enabled: true}, MongodbatlasDiskPartitionUtilizationAverage: MetricConfig{Enabled: true}, @@ -40,6 +42,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbatlasProcessAsserts: MetricConfig{Enabled: true}, MongodbatlasProcessBackgroundFlush: MetricConfig{Enabled: true}, MongodbatlasProcessCacheIo: MetricConfig{Enabled: true}, + MongodbatlasProcessCacheRatio: MetricConfig{Enabled: true}, MongodbatlasProcessCacheSize: MetricConfig{Enabled: true}, MongodbatlasProcessConnections: MetricConfig{Enabled: true}, MongodbatlasProcessCPUChildrenNormalizedUsageAverage: MetricConfig{Enabled: true}, @@ -115,8 +118,10 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbatlasDiskPartitionIopsMax: MetricConfig{Enabled: false}, MongodbatlasDiskPartitionLatencyAverage: MetricConfig{Enabled: false}, MongodbatlasDiskPartitionLatencyMax: MetricConfig{Enabled: false}, + MongodbatlasDiskPartitionQueueDepth: MetricConfig{Enabled: false}, MongodbatlasDiskPartitionSpaceAverage: MetricConfig{Enabled: false}, MongodbatlasDiskPartitionSpaceMax: MetricConfig{Enabled: false}, + MongodbatlasDiskPartitionThroughput: MetricConfig{Enabled: false}, MongodbatlasDiskPartitionUsageAverage: MetricConfig{Enabled: false}, MongodbatlasDiskPartitionUsageMax: MetricConfig{Enabled: false}, MongodbatlasDiskPartitionUtilizationAverage: MetricConfig{Enabled: false}, @@ -124,6 +129,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbatlasProcessAsserts: MetricConfig{Enabled: false}, MongodbatlasProcessBackgroundFlush: MetricConfig{Enabled: false}, MongodbatlasProcessCacheIo: MetricConfig{Enabled: false}, + MongodbatlasProcessCacheRatio: MetricConfig{Enabled: false}, MongodbatlasProcessCacheSize: MetricConfig{Enabled: false}, MongodbatlasProcessConnections: MetricConfig{Enabled: false}, MongodbatlasProcessCPUChildrenNormalizedUsageAverage: MetricConfig{Enabled: false}, @@ -193,8 +199,9 @@ func TestMetricsBuilderConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadMetricsBuilderConfig(t, tt.name) - diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})) - require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } }) } } @@ -258,8 +265,9 @@ func TestResourceAttributesConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadResourceAttributesConfig(t, tt.name) - diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})) - require.Emptyf(t, diff, "Config mismatch (-expected +actual):\n%s", diff) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } }) } } diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go index 1e2d40f96785..316e97687cb4 100644 --- a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go +++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go @@ -102,6 +102,32 @@ var 
MapAttributeCacheDirection = map[string]AttributeCacheDirection{ "written_from": AttributeCacheDirectionWrittenFrom, } +// AttributeCacheRatioType specifies the a value cache_ratio_type attribute. +type AttributeCacheRatioType int + +const ( + _ AttributeCacheRatioType = iota + AttributeCacheRatioTypeCacheFill + AttributeCacheRatioTypeDirtyFill +) + +// String returns the string representation of the AttributeCacheRatioType. +func (av AttributeCacheRatioType) String() string { + switch av { + case AttributeCacheRatioTypeCacheFill: + return "cache_fill" + case AttributeCacheRatioTypeDirtyFill: + return "dirty_fill" + } + return "" +} + +// MapAttributeCacheRatioType is a helper map of string to AttributeCacheRatioType attribute value. +var MapAttributeCacheRatioType = map[string]AttributeCacheRatioType{ + "cache_fill": AttributeCacheRatioTypeCacheFill, + "dirty_fill": AttributeCacheRatioTypeDirtyFill, +} + // AttributeCacheStatus specifies the a value cache_status attribute. type AttributeCacheStatus int @@ -582,6 +608,7 @@ const ( AttributeOperationGetmore AttributeOperationInsert AttributeOperationScanAndOrder + AttributeOperationTTLDeleted ) // String returns the string representation of the AttributeOperation. @@ -601,6 +628,8 @@ func (av AttributeOperation) String() string { return "insert" case AttributeOperationScanAndOrder: return "scan_and_order" + case AttributeOperationTTLDeleted: + return "ttl_deleted" } return "" } @@ -614,6 +643,7 @@ var MapAttributeOperation = map[string]AttributeOperation{ "getmore": AttributeOperationGetmore, "insert": AttributeOperationInsert, "scan_and_order": AttributeOperationScanAndOrder, + "ttl_deleted": AttributeOperationTTLDeleted, } // AttributeOplogType specifies the a value oplog_type attribute. @@ -1038,6 +1068,55 @@ func newMetricMongodbatlasDiskPartitionLatencyMax(cfg MetricConfig) metricMongod return m } +type metricMongodbatlasDiskPartitionQueueDepth struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.disk.partition.queue.depth metric with initial data. +func (m *metricMongodbatlasDiskPartitionQueueDepth) init() { + m.data.SetName("mongodbatlas.disk.partition.queue.depth") + m.data.SetDescription("Disk queue depth") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbatlasDiskPartitionQueueDepth) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasDiskPartitionQueueDepth) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
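+// A disabled or empty metric is skipped entirely; otherwise the buffered metric is
+// moved into the output slice and the internal buffer re-initialized, so each scrape
+// emits at most one instance of this metric.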
+func (m *metricMongodbatlasDiskPartitionQueueDepth) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasDiskPartitionQueueDepth(cfg MetricConfig) metricMongodbatlasDiskPartitionQueueDepth { + m := metricMongodbatlasDiskPartitionQueueDepth{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbatlasDiskPartitionSpaceAverage struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -1140,6 +1219,57 @@ func newMetricMongodbatlasDiskPartitionSpaceMax(cfg MetricConfig) metricMongodba return m } +type metricMongodbatlasDiskPartitionThroughput struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.disk.partition.throughput metric with initial data. +func (m *metricMongodbatlasDiskPartitionThroughput) init() { + m.data.SetName("mongodbatlas.disk.partition.throughput") + m.data.SetDescription("Disk throughput") + m.data.SetUnit("By/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasDiskPartitionThroughput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("disk_direction", diskDirectionAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasDiskPartitionThroughput) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasDiskPartitionThroughput) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasDiskPartitionThroughput(cfg MetricConfig) metricMongodbatlasDiskPartitionThroughput { + m := metricMongodbatlasDiskPartitionThroughput{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbatlasDiskPartitionUsageAverage struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -1491,6 +1621,57 @@ func newMetricMongodbatlasProcessCacheIo(cfg MetricConfig) metricMongodbatlasPro return m } +type metricMongodbatlasProcessCacheRatio struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.cache.ratio metric with initial data. 
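+// Unlike the attribute-less queue-depth metric above, this init pre-allocates the
+// data-point slice with EnsureCapacity, reusing the high-water mark recorded by
+// updateCapacity; mdatagen appears to add that call only for metrics that carry
+// attributes.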
+func (m *metricMongodbatlasProcessCacheRatio) init() { + m.data.SetName("mongodbatlas.process.cache.ratio") + m.data.SetDescription("Cache ratios represented as (%)") + m.data.SetUnit("%") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessCacheRatio) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cacheRatioTypeAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("cache_ratio_type", cacheRatioTypeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessCacheRatio) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessCacheRatio) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessCacheRatio(cfg MetricConfig) metricMongodbatlasProcessCacheRatio { + m := metricMongodbatlasProcessCacheRatio{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbatlasProcessCacheSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -3897,8 +4078,10 @@ type MetricsBuilder struct { metricMongodbatlasDiskPartitionIopsMax metricMongodbatlasDiskPartitionIopsMax metricMongodbatlasDiskPartitionLatencyAverage metricMongodbatlasDiskPartitionLatencyAverage metricMongodbatlasDiskPartitionLatencyMax metricMongodbatlasDiskPartitionLatencyMax + metricMongodbatlasDiskPartitionQueueDepth metricMongodbatlasDiskPartitionQueueDepth metricMongodbatlasDiskPartitionSpaceAverage metricMongodbatlasDiskPartitionSpaceAverage metricMongodbatlasDiskPartitionSpaceMax metricMongodbatlasDiskPartitionSpaceMax + metricMongodbatlasDiskPartitionThroughput metricMongodbatlasDiskPartitionThroughput metricMongodbatlasDiskPartitionUsageAverage metricMongodbatlasDiskPartitionUsageAverage metricMongodbatlasDiskPartitionUsageMax metricMongodbatlasDiskPartitionUsageMax metricMongodbatlasDiskPartitionUtilizationAverage metricMongodbatlasDiskPartitionUtilizationAverage @@ -3906,6 +4089,7 @@ type MetricsBuilder struct { metricMongodbatlasProcessAsserts metricMongodbatlasProcessAsserts metricMongodbatlasProcessBackgroundFlush metricMongodbatlasProcessBackgroundFlush metricMongodbatlasProcessCacheIo metricMongodbatlasProcessCacheIo + metricMongodbatlasProcessCacheRatio metricMongodbatlasProcessCacheRatio metricMongodbatlasProcessCacheSize metricMongodbatlasProcessCacheSize metricMongodbatlasProcessConnections metricMongodbatlasProcessConnections metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage @@ -3955,25 +4139,17 @@ type MetricsBuilder struct { metricMongodbatlasSystemPagingUsageMax metricMongodbatlasSystemPagingUsageMax } -// MetricBuilderOption applies changes to default metrics builder. 
-type MetricBuilderOption interface { - apply(*MetricsBuilder) -} - -type metricBuilderOptionFunc func(mb *MetricsBuilder) - -func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) { - mbof(mb) -} +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { - return metricBuilderOptionFunc(func(mb *MetricsBuilder) { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { mb.startTime = startTime - }) + } } -func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { +func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ config: mbc, startTime: pcommon.NewTimestampFromTime(time.Now()), @@ -3985,8 +4161,10 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricMongodbatlasDiskPartitionIopsMax: newMetricMongodbatlasDiskPartitionIopsMax(mbc.Metrics.MongodbatlasDiskPartitionIopsMax), metricMongodbatlasDiskPartitionLatencyAverage: newMetricMongodbatlasDiskPartitionLatencyAverage(mbc.Metrics.MongodbatlasDiskPartitionLatencyAverage), metricMongodbatlasDiskPartitionLatencyMax: newMetricMongodbatlasDiskPartitionLatencyMax(mbc.Metrics.MongodbatlasDiskPartitionLatencyMax), + metricMongodbatlasDiskPartitionQueueDepth: newMetricMongodbatlasDiskPartitionQueueDepth(mbc.Metrics.MongodbatlasDiskPartitionQueueDepth), metricMongodbatlasDiskPartitionSpaceAverage: newMetricMongodbatlasDiskPartitionSpaceAverage(mbc.Metrics.MongodbatlasDiskPartitionSpaceAverage), metricMongodbatlasDiskPartitionSpaceMax: newMetricMongodbatlasDiskPartitionSpaceMax(mbc.Metrics.MongodbatlasDiskPartitionSpaceMax), + metricMongodbatlasDiskPartitionThroughput: newMetricMongodbatlasDiskPartitionThroughput(mbc.Metrics.MongodbatlasDiskPartitionThroughput), metricMongodbatlasDiskPartitionUsageAverage: newMetricMongodbatlasDiskPartitionUsageAverage(mbc.Metrics.MongodbatlasDiskPartitionUsageAverage), metricMongodbatlasDiskPartitionUsageMax: newMetricMongodbatlasDiskPartitionUsageMax(mbc.Metrics.MongodbatlasDiskPartitionUsageMax), metricMongodbatlasDiskPartitionUtilizationAverage: newMetricMongodbatlasDiskPartitionUtilizationAverage(mbc.Metrics.MongodbatlasDiskPartitionUtilizationAverage), @@ -3994,6 +4172,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricMongodbatlasProcessAsserts: newMetricMongodbatlasProcessAsserts(mbc.Metrics.MongodbatlasProcessAsserts), metricMongodbatlasProcessBackgroundFlush: newMetricMongodbatlasProcessBackgroundFlush(mbc.Metrics.MongodbatlasProcessBackgroundFlush), metricMongodbatlasProcessCacheIo: newMetricMongodbatlasProcessCacheIo(mbc.Metrics.MongodbatlasProcessCacheIo), + metricMongodbatlasProcessCacheRatio: newMetricMongodbatlasProcessCacheRatio(mbc.Metrics.MongodbatlasProcessCacheRatio), metricMongodbatlasProcessCacheSize: newMetricMongodbatlasProcessCacheSize(mbc.Metrics.MongodbatlasProcessCacheSize), metricMongodbatlasProcessConnections: newMetricMongodbatlasProcessConnections(mbc.Metrics.MongodbatlasProcessConnections), metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage: newMetricMongodbatlasProcessCPUChildrenNormalizedUsageAverage(mbc.Metrics.MongodbatlasProcessCPUChildrenNormalizedUsageAverage), @@ -4124,7 +4303,7 @@ func 
NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt } for _, op := range options { - op.apply(mb) + op(mb) } return mb } @@ -4142,28 +4321,20 @@ func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { } // ResourceMetricsOption applies changes to provided resource metrics. -type ResourceMetricsOption interface { - apply(pmetric.ResourceMetrics) -} - -type resourceMetricsOptionFunc func(pmetric.ResourceMetrics) - -func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) { - rmof(rm) -} +type ResourceMetricsOption func(pmetric.ResourceMetrics) // WithResource sets the provided resource on the emitted ResourceMetrics. // It's recommended to use ResourceBuilder to create the resource. func WithResource(res pcommon.Resource) ResourceMetricsOption { - return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { + return func(rm pmetric.ResourceMetrics) { res.CopyTo(rm.Resource()) - }) + } } // WithStartTimeOverride overrides start time for all the resource metrics data points. // This option should be only used if different start time has to be set on metrics coming from different resources. func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { - return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { + return func(rm pmetric.ResourceMetrics) { var dps pmetric.NumberDataPointSlice metrics := rm.ScopeMetrics().At(0).Metrics() for i := 0; i < metrics.Len(); i++ { @@ -4177,7 +4348,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { dps.At(j).SetStartTimestamp(start) } } - }) + } } // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for @@ -4185,7 +4356,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. // Resource attributes should be provided as ResourceMetricsOption arguments. 
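// Both option types below are plain function values applied by direct invocation
// (op(mb), op(rm)) rather than through the removed interface's apply method. A usage
// sketch, assuming an existing builder mb, resource res, and timestamp start:
//
//	metrics := mb.Emit(WithResource(res), WithStartTimeOverride(start))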
-func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { +func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { rm := pmetric.NewResourceMetrics() ils := rm.ScopeMetrics().AppendEmpty() ils.Scope().SetName("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver") @@ -4197,8 +4368,10 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricMongodbatlasDiskPartitionIopsMax.emit(ils.Metrics()) mb.metricMongodbatlasDiskPartitionLatencyAverage.emit(ils.Metrics()) mb.metricMongodbatlasDiskPartitionLatencyMax.emit(ils.Metrics()) + mb.metricMongodbatlasDiskPartitionQueueDepth.emit(ils.Metrics()) mb.metricMongodbatlasDiskPartitionSpaceAverage.emit(ils.Metrics()) mb.metricMongodbatlasDiskPartitionSpaceMax.emit(ils.Metrics()) + mb.metricMongodbatlasDiskPartitionThroughput.emit(ils.Metrics()) mb.metricMongodbatlasDiskPartitionUsageAverage.emit(ils.Metrics()) mb.metricMongodbatlasDiskPartitionUsageMax.emit(ils.Metrics()) mb.metricMongodbatlasDiskPartitionUtilizationAverage.emit(ils.Metrics()) @@ -4206,6 +4379,7 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricMongodbatlasProcessAsserts.emit(ils.Metrics()) mb.metricMongodbatlasProcessBackgroundFlush.emit(ils.Metrics()) mb.metricMongodbatlasProcessCacheIo.emit(ils.Metrics()) + mb.metricMongodbatlasProcessCacheRatio.emit(ils.Metrics()) mb.metricMongodbatlasProcessCacheSize.emit(ils.Metrics()) mb.metricMongodbatlasProcessConnections.emit(ils.Metrics()) mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage.emit(ils.Metrics()) @@ -4254,8 +4428,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricMongodbatlasSystemPagingUsageAverage.emit(ils.Metrics()) mb.metricMongodbatlasSystemPagingUsageMax.emit(ils.Metrics()) - for _, op := range options { - op.apply(rm) + for _, op := range rmo { + op(rm) } for attr, filter := range mb.resourceAttributeIncludeFilter { if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) { @@ -4277,8 +4451,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user config, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics { - mb.EmitForResource(options...) +func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { + mb.EmitForResource(rmo...) metrics := mb.metricsBuffer mb.metricsBuffer = pmetric.NewMetrics() return metrics @@ -4314,6 +4488,11 @@ func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts mb.metricMongodbatlasDiskPartitionLatencyMax.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String()) } +// RecordMongodbatlasDiskPartitionQueueDepthDataPoint adds a data point to mongodbatlas.disk.partition.queue.depth metric. +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionQueueDepthDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbatlasDiskPartitionQueueDepth.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbatlasDiskPartitionSpaceAverageDataPoint adds a data point to mongodbatlas.disk.partition.space.average metric. 
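// Each Record*DataPoint helper stamps the builder's startTime alongside the caller's
// timestamp and is a silent no-op when the metric is disabled in config. A scraper-side
// sketch for the new queue-depth metric (now and queueDepth are assumed names):
//
//	mb.RecordMongodbatlasDiskPartitionQueueDepthDataPoint(now, queueDepth)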
func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) { mb.metricMongodbatlasDiskPartitionSpaceAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String()) @@ -4324,6 +4503,11 @@ func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts pc mb.metricMongodbatlasDiskPartitionSpaceMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String()) } +// RecordMongodbatlasDiskPartitionThroughputDataPoint adds a data point to mongodbatlas.disk.partition.throughput metric. +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionThroughputDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue AttributeDiskDirection) { + mb.metricMongodbatlasDiskPartitionThroughput.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue.String()) +} + // RecordMongodbatlasDiskPartitionUsageAverageDataPoint adds a data point to mongodbatlas.disk.partition.usage.average metric. func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue AttributeDiskStatus) { mb.metricMongodbatlasDiskPartitionUsageAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue.String()) @@ -4359,6 +4543,11 @@ func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheIoDataPoint(ts pcommon.T mb.metricMongodbatlasProcessCacheIo.recordDataPoint(mb.startTime, ts, val, cacheDirectionAttributeValue.String()) } +// RecordMongodbatlasProcessCacheRatioDataPoint adds a data point to mongodbatlas.process.cache.ratio metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheRatioDataPoint(ts pcommon.Timestamp, val float64, cacheRatioTypeAttributeValue AttributeCacheRatioType) { + mb.metricMongodbatlasProcessCacheRatio.recordDataPoint(mb.startTime, ts, val, cacheRatioTypeAttributeValue.String()) +} + // RecordMongodbatlasProcessCacheSizeDataPoint adds a data point to mongodbatlas.process.cache.size metric. func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheSizeDataPoint(ts pcommon.Timestamp, val float64, cacheStatusAttributeValue AttributeCacheStatus) { mb.metricMongodbatlasProcessCacheSize.recordDataPoint(mb.startTime, ts, val, cacheStatusAttributeValue.String()) @@ -4596,9 +4785,9 @@ func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts pco // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
-func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { +func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { - op.apply(mb) + op(mb) } } diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_test.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_test.go index 6a8d5c246a0e..396332cb5bee 100644 --- a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_test.go @@ -52,14 +52,14 @@ func TestMetricsBuilder(t *testing.T) { expectEmpty: true, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { start := pcommon.Timestamp(1_000_000_000) ts := pcommon.Timestamp(1_000_001_000) observedZapCore, observedLogs := observer.New(zap.WarnLevel) settings := receivertest.NewNopSettings() settings.Logger = zap.New(observedZapCore) - mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, tt.name), settings, WithStartTime(start)) + mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) expectedWarnings := 0 @@ -92,6 +92,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts, 1, AttributeDiskDirectionRead) + allMetricsCount++ + mb.RecordMongodbatlasDiskPartitionQueueDepthDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts, 1, AttributeDiskStatusFree) @@ -100,6 +103,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts, 1, AttributeDiskStatusFree) + allMetricsCount++ + mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, 1, AttributeDiskDirectionRead) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts, 1, AttributeDiskStatusFree) @@ -128,6 +134,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbatlasProcessCacheIoDataPoint(ts, 1, AttributeCacheDirectionReadInto) + allMetricsCount++ + mb.RecordMongodbatlasProcessCacheRatioDataPoint(ts, 1, AttributeCacheRatioTypeCacheFill) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, 1, AttributeCacheStatusDirty) @@ -333,7 +342,7 @@ func TestMetricsBuilder(t *testing.T) { res := rb.Emit() metrics := mb.Emit(WithResource(res)) - if tt.expectEmpty { + if test.expectEmpty { assert.Equal(t, 0, metrics.ResourceMetrics().Len()) return } @@ -343,10 +352,10 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, res, rm.Resource()) assert.Equal(t, 1, rm.ScopeMetrics().Len()) ms := rm.ScopeMetrics().At(0).Metrics() - if tt.metricsSet == testDataSetDefault { + if test.metricsSet == testDataSetDefault { assert.Equal(t, defaultMetricsCount, ms.Len()) } - if tt.metricsSet == testDataSetAll { + if test.metricsSet == testDataSetAll { assert.Equal(t, allMetricsCount, ms.Len()) } validatedMetrics := make(map[string]bool) @@ -363,7 +372,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("object_type") assert.True(t, ok) 
assert.EqualValues(t, "collection", attrVal.Str()) @@ -378,7 +387,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("object_type") assert.True(t, ok) assert.EqualValues(t, "collection", attrVal.Str()) @@ -393,7 +402,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("disk_direction") assert.True(t, ok) assert.EqualValues(t, "read", attrVal.Str()) @@ -408,7 +417,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("disk_direction") assert.True(t, ok) assert.EqualValues(t, "read", attrVal.Str()) @@ -423,7 +432,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("disk_direction") assert.True(t, ok) assert.EqualValues(t, "read", attrVal.Str()) @@ -438,10 +447,22 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("disk_direction") assert.True(t, ok) assert.EqualValues(t, "read", attrVal.Str()) + case "mongodbatlas.disk.partition.queue.depth": + assert.False(t, validatedMetrics["mongodbatlas.disk.partition.queue.depth"], "Found a duplicate in the metrics slice: mongodbatlas.disk.partition.queue.depth") + validatedMetrics["mongodbatlas.disk.partition.queue.depth"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Disk queue depth", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) case "mongodbatlas.disk.partition.space.average": assert.False(t, validatedMetrics["mongodbatlas.disk.partition.space.average"], "Found a duplicate in the metrics slice: mongodbatlas.disk.partition.space.average") validatedMetrics["mongodbatlas.disk.partition.space.average"] = true @@ -453,7 +474,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), 
dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("disk_status") assert.True(t, ok) assert.EqualValues(t, "free", attrVal.Str()) @@ -468,10 +489,25 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("disk_status") assert.True(t, ok) assert.EqualValues(t, "free", attrVal.Str()) + case "mongodbatlas.disk.partition.throughput": + assert.False(t, validatedMetrics["mongodbatlas.disk.partition.throughput"], "Found a duplicate in the metrics slice: mongodbatlas.disk.partition.throughput") + validatedMetrics["mongodbatlas.disk.partition.throughput"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Disk throughput", ms.At(i).Description()) + assert.Equal(t, "By/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("disk_direction") + assert.True(t, ok) + assert.EqualValues(t, "read", attrVal.Str()) case "mongodbatlas.disk.partition.usage.average": assert.False(t, validatedMetrics["mongodbatlas.disk.partition.usage.average"], "Found a duplicate in the metrics slice: mongodbatlas.disk.partition.usage.average") validatedMetrics["mongodbatlas.disk.partition.usage.average"] = true @@ -483,7 +519,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("disk_status") assert.True(t, ok) assert.EqualValues(t, "free", attrVal.Str()) @@ -498,7 +534,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("disk_status") assert.True(t, ok) assert.EqualValues(t, "free", attrVal.Str()) @@ -513,7 +549,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) case "mongodbatlas.disk.partition.utilization.max": assert.False(t, validatedMetrics["mongodbatlas.disk.partition.utilization.max"], "Found a duplicate in the metrics slice: mongodbatlas.disk.partition.utilization.max") validatedMetrics["mongodbatlas.disk.partition.utilization.max"] = true @@ -525,7 +561,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) case "mongodbatlas.process.asserts": assert.False(t, 
validatedMetrics["mongodbatlas.process.asserts"], "Found a duplicate in the metrics slice: mongodbatlas.process.asserts") validatedMetrics["mongodbatlas.process.asserts"] = true @@ -537,7 +573,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("assert_type") assert.True(t, ok) assert.EqualValues(t, "regular", attrVal.Str()) @@ -552,7 +588,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) case "mongodbatlas.process.cache.io": assert.False(t, validatedMetrics["mongodbatlas.process.cache.io"], "Found a duplicate in the metrics slice: mongodbatlas.process.cache.io") validatedMetrics["mongodbatlas.process.cache.io"] = true @@ -564,10 +600,25 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("cache_direction") assert.True(t, ok) assert.EqualValues(t, "read_into", attrVal.Str()) + case "mongodbatlas.process.cache.ratio": + assert.False(t, validatedMetrics["mongodbatlas.process.cache.ratio"], "Found a duplicate in the metrics slice: mongodbatlas.process.cache.ratio") + validatedMetrics["mongodbatlas.process.cache.ratio"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Cache ratios represented as (%)", ms.At(i).Description()) + assert.Equal(t, "%", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("cache_ratio_type") + assert.True(t, ok) + assert.EqualValues(t, "cache_fill", attrVal.Str()) case "mongodbatlas.process.cache.size": assert.False(t, validatedMetrics["mongodbatlas.process.cache.size"], "Found a duplicate in the metrics slice: mongodbatlas.process.cache.size") validatedMetrics["mongodbatlas.process.cache.size"] = true @@ -575,13 +626,13 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Cache sizes", ms.At(i).Description()) assert.Equal(t, "By", ms.At(i).Unit()) - assert.False(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("cache_status") assert.True(t, ok) assert.EqualValues(t, "dirty", attrVal.Str()) @@ -592,13 
+643,13 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
 			assert.Equal(t, "Number of current connections", ms.At(i).Description())
 			assert.Equal(t, "{connections}", ms.At(i).Unit())
-			assert.False(t, ms.At(i).Sum().IsMonotonic())
+			assert.Equal(t, false, ms.At(i).Sum().IsMonotonic())
 			assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
 			dp := ms.At(i).Sum().DataPoints().At(0)
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 		case "mongodbatlas.process.cpu.children.normalized.usage.average":
 			assert.False(t, validatedMetrics["mongodbatlas.process.cpu.children.normalized.usage.average"], "Found a duplicate in the metrics slice: mongodbatlas.process.cpu.children.normalized.usage.average")
 			validatedMetrics["mongodbatlas.process.cpu.children.normalized.usage.average"] = true
@@ -610,7 +661,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("cpu_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "kernel", attrVal.Str())
@@ -625,7 +676,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("cpu_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "kernel", attrVal.Str())
@@ -640,7 +691,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("cpu_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "kernel", attrVal.Str())
@@ -655,7 +706,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("cpu_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "kernel", attrVal.Str())
@@ -670,7 +721,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("cpu_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "kernel", attrVal.Str())
@@ -685,7 +736,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("cpu_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "kernel", attrVal.Str())
@@ -700,7 +751,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("cpu_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "kernel", attrVal.Str())
@@ -715,7 +766,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("cpu_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "kernel", attrVal.Str())
@@ -730,7 +781,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("cursor_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "timed_out", attrVal.Str())
@@ -745,7 +796,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("document_status")
 			assert.True(t, ok)
 			assert.EqualValues(t, "returned", attrVal.Str())
@@ -760,7 +811,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("operation")
 			assert.True(t, ok)
 			assert.EqualValues(t, "cmd", attrVal.Str())
@@ -774,13 +825,13 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
 			assert.Equal(t, "DB Operation Times", ms.At(i).Description())
 			assert.Equal(t, "ms", ms.At(i).Unit())
-			assert.True(t, ms.At(i).Sum().IsMonotonic())
+			assert.Equal(t, true, ms.At(i).Sum().IsMonotonic())
 			assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
 			dp := ms.At(i).Sum().DataPoints().At(0)
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("execution_type")
 			assert.True(t, ok)
 			assert.EqualValues(t, "reads", attrVal.Str())
@@ -795,7 +846,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("scanned_type")
 			assert.True(t, ok)
 			assert.EqualValues(t, "index_items", attrVal.Str())
@@ -810,7 +861,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("scanned_type")
 			assert.True(t, ok)
 			assert.EqualValues(t, "index_items", attrVal.Str())
@@ -825,7 +876,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("storage_status")
 			assert.True(t, ok)
 			assert.EqualValues(t, "total", attrVal.Str())
@@ -840,7 +891,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("global_lock_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "current_queue_total", attrVal.Str())
@@ -855,7 +906,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 		case "mongodbatlas.process.index.counters":
 			assert.False(t, validatedMetrics["mongodbatlas.process.index.counters"], "Found a duplicate in the metrics slice: mongodbatlas.process.index.counters")
 			validatedMetrics["mongodbatlas.process.index.counters"] = true
@@ -867,7 +918,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("btree_counter_type")
 			assert.True(t, ok)
 			assert.EqualValues(t, "accesses", attrVal.Str())
@@ -882,7 +933,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 		case "mongodbatlas.process.journaling.data_files":
 			assert.False(t, validatedMetrics["mongodbatlas.process.journaling.data_files"], "Found a duplicate in the metrics slice: mongodbatlas.process.journaling.data_files")
 			validatedMetrics["mongodbatlas.process.journaling.data_files"] = true
@@ -894,7 +945,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 		case "mongodbatlas.process.journaling.written":
 			assert.False(t, validatedMetrics["mongodbatlas.process.journaling.written"], "Found a duplicate in the metrics slice: mongodbatlas.process.journaling.written")
 			validatedMetrics["mongodbatlas.process.journaling.written"] = true
@@ -906,7 +957,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 		case "mongodbatlas.process.memory.usage":
 			assert.False(t, validatedMetrics["mongodbatlas.process.memory.usage"], "Found a duplicate in the metrics slice: mongodbatlas.process.memory.usage")
 			validatedMetrics["mongodbatlas.process.memory.usage"] = true
@@ -918,7 +969,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("memory_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "resident", attrVal.Str())
@@ -933,7 +984,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("direction")
 			assert.True(t, ok)
 			assert.EqualValues(t, "receive", attrVal.Str())
@@ -944,13 +995,13 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
 			assert.Equal(t, "Network requests", ms.At(i).Description())
 			assert.Equal(t, "{requests}", ms.At(i).Unit())
-			assert.True(t, ms.At(i).Sum().IsMonotonic())
+			assert.Equal(t, true, ms.At(i).Sum().IsMonotonic())
 			assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
 			dp := ms.At(i).Sum().DataPoints().At(0)
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 		case "mongodbatlas.process.oplog.rate":
 			assert.False(t, validatedMetrics["mongodbatlas.process.oplog.rate"], "Found a duplicate in the metrics slice: mongodbatlas.process.oplog.rate")
 			validatedMetrics["mongodbatlas.process.oplog.rate"] = true
@@ -962,7 +1013,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 		case "mongodbatlas.process.oplog.time":
 			assert.False(t, validatedMetrics["mongodbatlas.process.oplog.time"], "Found a duplicate in the metrics slice: mongodbatlas.process.oplog.time")
 			validatedMetrics["mongodbatlas.process.oplog.time"] = true
@@ -974,7 +1025,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("oplog_type")
 			assert.True(t, ok)
 			assert.EqualValues(t, "slave_lag_master_time", attrVal.Str())
@@ -989,7 +1040,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("memory_issue_type")
 			assert.True(t, ok)
 			assert.EqualValues(t, "extra_info", attrVal.Str())
@@ -1004,7 +1055,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 		case "mongodbatlas.process.tickets":
 			assert.False(t, validatedMetrics["mongodbatlas.process.tickets"], "Found a duplicate in the metrics slice: mongodbatlas.process.tickets")
 			validatedMetrics["mongodbatlas.process.tickets"] = true
@@ -1016,7 +1067,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("ticket_type")
 			assert.True(t, ok)
 			assert.EqualValues(t, "available_reads", attrVal.Str())
@@ -1031,7 +1082,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("cpu_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "kernel", attrVal.Str())
@@ -1046,7 +1097,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("cpu_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "kernel", attrVal.Str())
@@ -1061,7 +1112,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("cpu_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "kernel", attrVal.Str())
@@ -1076,7 +1127,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("cpu_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "kernel", attrVal.Str())
@@ -1091,7 +1142,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("cpu_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "kernel", attrVal.Str())
@@ -1106,7 +1157,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("cpu_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "kernel", attrVal.Str())
@@ -1121,7 +1172,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 		case "mongodbatlas.system.fts.memory.usage":
 			assert.False(t, validatedMetrics["mongodbatlas.system.fts.memory.usage"], "Found a duplicate in the metrics slice: mongodbatlas.system.fts.memory.usage")
 			validatedMetrics["mongodbatlas.system.fts.memory.usage"] = true
@@ -1129,13 +1180,13 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
 			assert.Equal(t, "Full-text search", ms.At(i).Description())
 			assert.Equal(t, "MiBy", ms.At(i).Unit())
-			assert.True(t, ms.At(i).Sum().IsMonotonic())
+			assert.Equal(t, true, ms.At(i).Sum().IsMonotonic())
 			assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
 			dp := ms.At(i).Sum().DataPoints().At(0)
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("memory_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "resident", attrVal.Str())
@@ -1150,7 +1201,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("memory_status")
 			assert.True(t, ok)
 			assert.EqualValues(t, "available", attrVal.Str())
@@ -1165,7 +1216,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("memory_status")
 			assert.True(t, ok)
 			assert.EqualValues(t, "available", attrVal.Str())
@@ -1180,7 +1231,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("direction")
 			assert.True(t, ok)
 			assert.EqualValues(t, "receive", attrVal.Str())
@@ -1195,7 +1246,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("direction")
 			assert.True(t, ok)
 			assert.EqualValues(t, "receive", attrVal.Str())
@@ -1210,7 +1261,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("direction")
 			assert.True(t, ok)
 			assert.EqualValues(t, "receive", attrVal.Str())
@@ -1225,7 +1276,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("direction")
 			assert.True(t, ok)
 			assert.EqualValues(t, "receive", attrVal.Str())
@@ -1240,7 +1291,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("memory_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "resident", attrVal.Str())
@@ -1255,7 +1306,7 @@ func TestMetricsBuilder(t *testing.T) {
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
 			assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType())
-			assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01)
+			assert.Equal(t, float64(1), dp.DoubleValue())
 			attrVal, ok := dp.Attributes().Get("memory_state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "resident", attrVal.Str())
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_resource_test.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_resource_test.go
index ba8a50aae9cc..5f75e77055eb 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/generated_resource_test.go
+++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_resource_test.go
@@ -9,9 +9,9 @@ import (
 )
 
 func TestResourceBuilder(t *testing.T) {
-	for _, tt := range []string{"default", "all_set", "none_set"} {
-		t.Run(tt, func(t *testing.T) {
-			cfg := loadResourceAttributesConfig(t, tt)
+	for _, test := range []string{"default", "all_set", "none_set"} {
+		t.Run(test, func(t *testing.T) {
+			cfg := loadResourceAttributesConfig(t, test)
 			rb := NewResourceBuilder(cfg)
 			rb.SetMongodbAtlasClusterName("mongodb_atlas.cluster.name-val")
 			rb.SetMongodbAtlasDbName("mongodb_atlas.db.name-val")
@@ -30,7 +30,7 @@ func TestResourceBuilder(t *testing.T) {
 			res := rb.Emit()
 			assert.Equal(t, 0, rb.Emit().Attributes().Len()) // Second call should return empty Resource
 
-			switch tt {
+			switch test {
 			case "default":
 				assert.Equal(t, 9, res.Attributes().Len())
 			case "all_set":
@@ -39,11 +39,11 @@ func TestResourceBuilder(t *testing.T) {
 				assert.Equal(t, 0, res.Attributes().Len())
 				return
 			default:
-				assert.Failf(t, "unexpected test case: %s", tt)
+				assert.Failf(t, "unexpected test case: %s", test)
 			}
 
 			val, ok := res.Attributes().Get("mongodb_atlas.cluster.name")
-			assert.Equal(t, tt == "all_set", ok)
+			assert.Equal(t, test == "all_set", ok)
 			if ok {
 				assert.EqualValues(t, "mongodb_atlas.cluster.name-val", val.Str())
 			}
@@ -93,17 +93,17 @@ func TestResourceBuilder(t *testing.T) {
 				assert.EqualValues(t, "mongodb_atlas.project.name-val", val.Str())
 			}
 			val, ok = res.Attributes().Get("mongodb_atlas.provider.name")
-			assert.Equal(t, tt == "all_set", ok)
+			assert.Equal(t, test == "all_set", ok)
 			if ok {
 				assert.EqualValues(t, "mongodb_atlas.provider.name-val", val.Str())
 			}
 			val, ok = res.Attributes().Get("mongodb_atlas.region.name")
-			assert.Equal(t, tt == "all_set", ok)
+			assert.Equal(t, test == "all_set", ok)
 			if ok {
 				assert.EqualValues(t, "mongodb_atlas.region.name-val", val.Str())
 			}
 			val, ok = res.Attributes().Get("mongodb_atlas.user.alias")
-			assert.Equal(t, tt == "all_set", ok)
+			assert.Equal(t, test == "all_set", ok)
 			if ok {
 				assert.EqualValues(t, "mongodb_atlas.user.alias-val", val.Str())
 			}
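The assertion churn in the two generated test files above is mechanical: every `mongodbatlas.*` metric case builds one data point and asserts on the same handful of fields. For reviewers unfamiliar with the generated layout, here is a minimal standalone sketch of that pattern using only the public pdata API (the metric name and value are illustrative, not part of this change):

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	// Build one gauge metric with a single double data point, the same
	// shape the generated tests validate for each metric case.
	m := pmetric.NewMetrics()
	metric := m.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	metric.SetName("mongodbatlas.disk.partition.queue.depth") // illustrative name
	dp := metric.SetEmptyGauge().DataPoints().AppendEmpty()
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetDoubleValue(1)

	// The generated tests assert on exactly these accessors.
	fmt.Println(metric.Name(), dp.ValueType(), dp.DoubleValue())
}
```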
assert.EqualValues(t, "mongodb_atlas.provider.name-val", val.Str()) } val, ok = res.Attributes().Get("mongodb_atlas.region.name") - assert.Equal(t, tt == "all_set", ok) + assert.Equal(t, test == "all_set", ok) if ok { assert.EqualValues(t, "mongodb_atlas.region.name-val", val.Str()) } val, ok = res.Attributes().Get("mongodb_atlas.user.alias") - assert.Equal(t, tt == "all_set", ok) + assert.Equal(t, test == "all_set", ok) if ok { assert.EqualValues(t, "mongodb_atlas.user.alias-val", val.Str()) } diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_status.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_status.go index ce6c7db3bcba..4d4e038d9873 100644 --- a/receiver/mongodbatlasreceiver/internal/metadata/generated_status.go +++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_status.go @@ -7,8 +7,7 @@ import ( ) var ( - Type = component.MustNewType("mongodbatlas") - ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver" + Type = component.MustNewType("mongodbatlas") ) const ( diff --git a/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go b/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go index e8793b609b3c..6ddb5bd840d8 100644 --- a/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go +++ b/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go @@ -152,6 +152,16 @@ func getRecordFunc(metricName string) metricRecordFunc { mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, float64(*dp.Value), AttributeCacheStatusUsed) } + case "CACHE_FILL_RATIO": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { + mb.RecordMongodbatlasProcessCacheRatioDataPoint(ts, float64(*dp.Value), AttributeCacheRatioTypeCacheFill) + } + + case "DIRTY_FILL_RATIO": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { + mb.RecordMongodbatlasProcessCacheRatioDataPoint(ts, float64(*dp.Value), AttributeCacheRatioTypeDirtyFill) + } + case "TICKETS_AVAILABLE_READS": return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessTicketsDataPoint(ts, float64(*dp.Value), AttributeTicketTypeAvailableReads) @@ -337,6 +347,10 @@ func getRecordFunc(metricName string) metricRecordFunc { return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationInsert, AttributeClusterRolePrimary) } + case "OPCOUNTER_TTL_DELETED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { + mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperationTTLDeleted, AttributeClusterRolePrimary) + } // Rate of database operations on MongoDB secondaries found in the opcountersRepl document that the serverStatus command collects. case "OPCOUNTER_REPL_CMD": @@ -735,6 +749,29 @@ func getRecordFunc(metricName string) metricRecordFunc { mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionTotal) } + // Measures throughput of data read and written to the disk partition (not cache) used by MongoDB. 
+ case "DISK_PARTITION_THROUGHPUT_READ": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { + mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionRead) + } + + case "DISK_PARTITION_THROUGHPUT_WRITE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { + mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionWrite) + } + + // This is a calculated metric that is the sum of the read and write throughput. + case "DISK_PARTITION_THROUGHPUT_TOTAL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { + mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(ts, float64(*dp.Value), AttributeDiskDirectionTotal) + } + + // Measures the queue depth of the disk partition used by MongoDB. + case "DISK_QUEUE_DEPTH": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { + mb.RecordMongodbatlasDiskPartitionQueueDepthDataPoint(ts, float64(*dp.Value)) + } + // Measures latency per operation type of the disk partition used by MongoDB. case "DISK_PARTITION_LATENCY_READ": return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { @@ -849,7 +886,7 @@ func getRecordFunc(metricName string) metricRecordFunc { } } -func MeasurementsToMetric(mb *MetricsBuilder, meas *mongodbatlas.Measurements, _ bool) error { +func MeasurementsToMetric(mb *MetricsBuilder, meas *mongodbatlas.Measurements) error { recordFunc := getRecordFunc(meas.Name) if recordFunc == nil { return nil diff --git a/receiver/mongodbatlasreceiver/internal/metadata/testdata/config.yaml b/receiver/mongodbatlasreceiver/internal/metadata/testdata/config.yaml index cba17b3284e7..6a88579926fc 100644 --- a/receiver/mongodbatlasreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/mongodbatlasreceiver/internal/metadata/testdata/config.yaml @@ -13,10 +13,14 @@ all_set: enabled: true mongodbatlas.disk.partition.latency.max: enabled: true + mongodbatlas.disk.partition.queue.depth: + enabled: true mongodbatlas.disk.partition.space.average: enabled: true mongodbatlas.disk.partition.space.max: enabled: true + mongodbatlas.disk.partition.throughput: + enabled: true mongodbatlas.disk.partition.usage.average: enabled: true mongodbatlas.disk.partition.usage.max: @@ -31,6 +35,8 @@ all_set: enabled: true mongodbatlas.process.cache.io: enabled: true + mongodbatlas.process.cache.ratio: + enabled: true mongodbatlas.process.cache.size: enabled: true mongodbatlas.process.connections: @@ -166,10 +172,14 @@ none_set: enabled: false mongodbatlas.disk.partition.latency.max: enabled: false + mongodbatlas.disk.partition.queue.depth: + enabled: false mongodbatlas.disk.partition.space.average: enabled: false mongodbatlas.disk.partition.space.max: enabled: false + mongodbatlas.disk.partition.throughput: + enabled: false mongodbatlas.disk.partition.usage.average: enabled: false mongodbatlas.disk.partition.usage.max: @@ -184,6 +194,8 @@ none_set: enabled: false mongodbatlas.process.cache.io: enabled: false + mongodbatlas.process.cache.ratio: + enabled: false mongodbatlas.process.cache.size: enabled: false mongodbatlas.process.connections: diff --git a/receiver/mongodbatlasreceiver/internal/metric_conversion.go b/receiver/mongodbatlasreceiver/internal/metric_conversion.go index b1c5639d6660..afe80c62f323 100644 --- a/receiver/mongodbatlasreceiver/internal/metric_conversion.go +++ 
diff --git a/receiver/mongodbatlasreceiver/internal/metric_conversion.go b/receiver/mongodbatlasreceiver/internal/metric_conversion.go
index b1c5639d6660..afe80c62f323 100644
--- a/receiver/mongodbatlasreceiver/internal/metric_conversion.go
+++ b/receiver/mongodbatlasreceiver/internal/metric_conversion.go
@@ -19,15 +19,81 @@ func processMeasurements(
 	var errs error
 
 	for _, meas := range measurements {
-		err := metadata.MeasurementsToMetric(mb, meas, false)
+		err := metadata.MeasurementsToMetric(mb, meas)
 		if err != nil {
 			errs = multierr.Append(errs, err)
 		}
 	}
+	err := calculateTotalMetrics(mb, measurements)
+	if err != nil {
+		errs = multierr.Append(errs, err)
+	}
 
 	if errs != nil {
 		return fmt.Errorf("errors occurred while processing measurements: %w", errs)
 	}
 
 	return nil
 }
+
+func calculateTotalMetrics(
+	mb *metadata.MetricsBuilder,
+	measurements []*mongodbatlas.Measurements,
+) error {
+	var err error
+	dptTotalMeasCombined := false
+	var dptTotalMeas *mongodbatlas.Measurements
+
+	for _, meas := range measurements {
+		switch meas.Name {
+		case "DISK_PARTITION_THROUGHPUT_READ":
+			fallthrough
+		case "DISK_PARTITION_THROUGHPUT_WRITE":
+			if dptTotalMeas == nil {
+				dptTotalMeas = cloneMeasurement(meas)
+				dptTotalMeas.Name = "DISK_PARTITION_THROUGHPUT_TOTAL"
+				continue
+			}
+
+			// Combine data point values with matching timestamps
+			for j, totalMeas := range dptTotalMeas.DataPoints {
+				if totalMeas.Timestamp != meas.DataPoints[j].Timestamp ||
+					(totalMeas.Value == nil && meas.DataPoints[j].Value == nil) {
+					continue
+				}
+				if totalMeas.Value == nil {
+					totalMeas.Value = new(float32)
+				}
+				// Treat a nil value on the incoming side as zero; only
+				// dereference it after the nil check.
+				var addValue float32
+				if meas.DataPoints[j].Value != nil {
+					addValue = *meas.DataPoints[j].Value
+				}
+				*totalMeas.Value += addValue
+				dptTotalMeasCombined = true
+			}
+		default:
+		}
+	}
+
+	if dptTotalMeasCombined {
+		err = metadata.MeasurementsToMetric(mb, dptTotalMeas)
+	}
+	return err
+}
+
+func cloneMeasurement(meas *mongodbatlas.Measurements) *mongodbatlas.Measurements {
+	clone := &mongodbatlas.Measurements{
+		Name:       meas.Name,
+		Units:      meas.Units,
+		DataPoints: make([]*mongodbatlas.DataPoints, len(meas.DataPoints)),
+	}
+
+	for i, dp := range meas.DataPoints {
+		if dp != nil {
+			newDP := *dp
+			clone.DataPoints[i] = &newDP
+		}
+	}
+
+	return clone
+}
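The synthetic `DISK_PARTITION_THROUGHPUT_TOTAL` series produced above is an index-aligned, nil-tolerant sum of the read and write series: the read series is cloned, the write series is folded into it, and a point where both sides are nil stays nil. A standalone illustration of that combining step, with plain `*float32` slices standing in for `mongodbatlas.DataPoints` (the bounds check is added here for safety in the sketch):

```go
package main

import "fmt"

// combine adds src into dst index by index, mirroring how the receiver folds
// the write series into a clone of the read series. A nil on one side is
// treated as zero; a nil on both sides stays nil.
func combine(dst, src []*float32) {
	for i := range dst {
		if i >= len(src) {
			return
		}
		if dst[i] == nil && src[i] == nil {
			continue
		}
		if dst[i] == nil {
			dst[i] = new(float32)
		}
		if src[i] != nil {
			*dst[i] += *src[i]
		}
	}
}

func f(v float32) *float32 { return &v }

func main() {
	read := []*float32{f(10), nil, f(5)}
	write := []*float32{f(2), f(3), nil}
	combine(read, write) // read now holds the "total" series: 12, 3, 5
	for _, v := range read {
		if v != nil {
			fmt.Println(*v)
		} else {
			fmt.Println("nil")
		}
	}
}
```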
diff --git a/receiver/mongodbatlasreceiver/metadata.yaml b/receiver/mongodbatlasreceiver/metadata.yaml
index 74e8b97e1d78..d2c84af480d0 100644
--- a/receiver/mongodbatlasreceiver/metadata.yaml
+++ b/receiver/mongodbatlasreceiver/metadata.yaml
@@ -90,6 +90,12 @@ attributes:
     enum:
       - read_into
       - written_from
+  cache_ratio_type:
+    description: Cache ratio type
+    type: string
+    enum:
+      - cache_fill
+      - dirty_fill
   cache_status:
     description: Cache status
     type: string
@@ -165,6 +171,7 @@ attributes:
       - getmore
       - insert
       - scan_and_order
+      - ttl_deleted
   cluster_role:
     description: Whether process is acting as replica or primary
     type: string
@@ -258,6 +265,14 @@ metrics:
     attributes: [cache_direction]
     gauge:
       value_type: double
+  mongodbatlas.process.cache.ratio:
+    enabled: false
+    description: Cache ratios represented as (%)
+    extended_documentation: Aggregate of MongoDB Metrics CACHE_FILL_RATIO, DIRTY_FILL_RATIO
+    unit: "%"
+    attributes: [cache_ratio_type]
+    gauge:
+      value_type: double
   mongodbatlas.process.cache.size:
     enabled: true
     description: Cache sizes
@@ -452,7 +467,7 @@ metrics:
   mongodbatlas.process.db.operations.rate:
     enabled: true
     description: DB Operation Rates
-    extended_documentation: Aggregate of MongoDB Metrics OPCOUNTER_GETMORE, OPERATIONS_SCAN_AND_ORDER, OPCOUNTER_UPDATE, OPCOUNTER_REPL_UPDATE, OPCOUNTER_CMD, OPCOUNTER_DELETE, OPCOUNTER_REPL_DELETE, OPCOUNTER_REPL_CMD, OPCOUNTER_QUERY, OPCOUNTER_REPL_INSERT, OPCOUNTER_INSERT
+    extended_documentation: Aggregate of MongoDB Metrics OPCOUNTER_GETMORE, OPERATIONS_SCAN_AND_ORDER, OPCOUNTER_UPDATE, OPCOUNTER_REPL_UPDATE, OPCOUNTER_CMD, OPCOUNTER_DELETE, OPCOUNTER_REPL_DELETE, OPCOUNTER_REPL_CMD, OPCOUNTER_QUERY, OPCOUNTER_REPL_INSERT, OPCOUNTER_INSERT, OPCOUNTER_TTL_DELETED
     unit: "{operations}/s"
     attributes: [operation, cluster_role]
     gauge:
@@ -618,6 +633,14 @@ metrics:
     attributes: [disk_direction]
     gauge:
       value_type: double
+  mongodbatlas.disk.partition.throughput:
+    enabled: false
+    description: Disk throughput
+    extended_documentation: Aggregate of MongoDB Metrics DISK_PARTITION_THROUGHPUT_READ, DISK_PARTITION_THROUGHPUT_WRITE
+    unit: By/s
+    attributes: [disk_direction]
+    gauge:
+      value_type: double
   mongodbatlas.disk.partition.usage.max:
     enabled: true
     description: Disk partition usage (%)
@@ -680,6 +703,14 @@ metrics:
     attributes: [disk_status]
     gauge:
       value_type: double
+  mongodbatlas.disk.partition.queue.depth:
+    enabled: false
+    description: Disk queue depth
+    extended_documentation: Aggregate of MongoDB Metrics DISK_QUEUE_DEPTH
+    unit: "1"
+    attributes: []
+    gauge:
+      value_type: double
   mongodbatlas.db.size:
     enabled: true
     description: Database feature size
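Taken together, the metadata.yaml entries above regenerate typed record functions and attribute constants on the MetricsBuilder. A rough sketch of how those new functions are invoked; this is illustrative only (it could compile only inside the receiver module, since `internal/metadata` is not importable elsewhere, and the settings helper and values are assumptions, not part of this change):

```go
package mongodbatlasreceiver // sketch: only valid inside the receiver module

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/receiver/receivertest"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
)

func exampleNewRecordFuncs() {
	// With the default config the three new metrics are disabled, so these
	// record calls would be no-ops; flip them on via MetricsBuilderConfig
	// (or the user-facing `metrics:` section) to emit them.
	mb := metadata.NewMetricsBuilder(metadata.DefaultMetricsBuilderConfig(), receivertest.NewNopSettings())
	now := pcommon.NewTimestampFromTime(time.Now())

	// The record functions added by this change (values are illustrative).
	mb.RecordMongodbatlasDiskPartitionQueueDepthDataPoint(now, 4)
	mb.RecordMongodbatlasDiskPartitionThroughputDataPoint(now, 1024, metadata.AttributeDiskDirectionRead)
	mb.RecordMongodbatlasProcessCacheRatioDataPoint(now, 12.5, metadata.AttributeCacheRatioTypeCacheFill)

	_ = mb.Emit() // pmetric.Metrics containing any enabled series
}
```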