feat: do not send any histogram or summary metric (#3818)
* feat: do not send any histogram data

Signed-off-by: Dominik Rosiek <[email protected]>

* tests: remove not expected bucket metrics

Signed-off-by: Dominik Rosiek <[email protected]>

* feat: drop summary metrics

Signed-off-by: Dominik Rosiek <[email protected]>

* Update .changelog/3818.changed.txt

* test: update test for prometheus

Signed-off-by: Dominik Rosiek <[email protected]>

* chore: remove _bucket metrics

Signed-off-by: Dominik Rosiek <[email protected]>

---------

Signed-off-by: Dominik Rosiek <[email protected]>
sumo-drosiek authored Aug 2, 2024
1 parent 6704458 commit b6c474c
Showing 15 changed files with 35 additions and 33 deletions.
1 change: 1 addition & 0 deletions .changelog/3818.changed.txt
@@ -0,0 +1 @@
+ feat: do not send any histogram or summary metric
2 changes: 1 addition & 1 deletion deploy/helm/sumologic/README.md
@@ -147,7 +147,7 @@ The following table lists the configurable parameters of the Sumo Logic chart an
| `sumologic.metrics.collector.otelcol.config.merge` | Configuration for otelcol metrics collector, merged with defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} |
| `sumologic.metrics.collector.otelcol.config.override` | Configuration for otelcol metrics collector, replaces defaults. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/configuration.md. | {} |
| `sumologic.metrics.collector.otelcol.targetAllocator.resources` | Resource requests and limits for Metrics Collector Target Allocator. | {} |
- | `sumologic.metrics.dropHistogramBuckets` | Drop buckets from select high-cardinality histogram metrics, leaving only the sum and count components. | `true` |
+ | `sumologic.metrics.dropHistogramBuckets` | Drop buckets from histogram and summary metrics, leaving only the sum and count components. | `true` |
| `sumologic.metrics.sourceType` | The type of the Sumo Logic source being used for metrics ingestion. Can be `http` or `otlp`. | `otlp` |
| `sumologic.traces.enabled` | Set the enabled flag to true to enable tracing ingestion. _Tracing must be enabled for the account first. Please contact your Sumo representative for activation details_ | `true` |
| `sumologic.traces.spans_per_request` | Maximum number of spans sent in single batch | `100` |
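For illustration, the parameter documented above can be overridden in values.yaml. The fragment below is a minimal sketch based only on the keys listed in the table (it is not part of this commit); setting `dropHistogramBuckets` to `false` would keep full histogram and summary metrics instead of only their sum and count components.

    sumologic:
      metrics:
        # hypothetical override: the chart default documented above is true
        dropHistogramBuckets: false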
@@ -57,8 +57,8 @@ processors:
metric_statements:
- context: metric
statements:
- - extract_sum_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
- - extract_count_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
+ - extract_sum_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY
+ - extract_count_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY
{{- end }}

receivers:
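The two new statements above are OTTL statements in the metric context of the OpenTelemetry transform processor; `extract_sum_metric` and `extract_count_metric` emit `<metric>_sum` and `<metric>_count` metrics, and the boolean argument sets whether the extracted metric is marked monotonic. Rendered outside the Helm template, the block corresponds roughly to the following sketch (the processor name is assumed for illustration and is not taken from the chart):

    processors:
      transform/extract_sum_count:   # name assumed for illustration
        metric_statements:
          - context: metric
            statements:
              # extract sum and count series from every histogram, exponential
              # histogram, and summary metric
              - extract_sum_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY
              - extract_count_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY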
2 changes: 1 addition & 1 deletion deploy/helm/sumologic/conf/metrics/otelcol/processors.yaml
@@ -16,7 +16,7 @@ filter/drop_unnecessary_metrics:
- resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*")
{{- if .Values.sumologic.metrics.dropHistogramBuckets }}
# drop histograms we've extracted sums and counts from, but don't want the full thing
- - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
+ - type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket")
{{- end }}

# Prometheus receiver puts all labels in record-level attributes, and we need them in resource
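Taken together with the extraction statements, the filter processor now drops the original distribution metrics by data type once their sums and counts have been pulled out, rather than matching a fixed list of metric names, so any new histogram or summary picked up by a scrape target is handled the same way without further configuration. A minimal sketch of the resulting processor (the conditions are the ones from the diff; the surrounding structure follows the golden configuration files below):

    processors:
      filter/drop_unnecessary_metrics:
        metrics:
          metric:
            # drop synthetic scrape_* metrics except for the pod-annotations job
            - resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*")
            # drop whole histogram/summary metrics and any leftover *_bucket series;
            # only the extracted *_sum and *_count metrics continue down the pipeline
            - type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket")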
@@ -44,7 +44,8 @@ data:
metrics:
metric:
- resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*")
- - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
+ - type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM
+   or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket")
groupbyattrs:
keys:
- container
@@ -44,7 +44,8 @@ data:
metrics:
metric:
- resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*")
- - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
+ - type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM
+   or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket")
groupbyattrs:
keys:
- container
@@ -121,7 +121,8 @@ data:
metrics:
metric:
- resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*")
- - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
+ - type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM
+   or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket")
groupbyattrs:
keys:
- container
@@ -46,7 +46,8 @@ data:
metrics:
metric:
- resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*")
- - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
+ - type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM
+   or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket")
groupbyattrs:
keys:
- container
@@ -69,7 +69,8 @@ data:
metrics:
metric:
- resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*")
- - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
+ - type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM
+   or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket")
groupbyattrs:
keys:
- container
@@ -146,7 +146,8 @@ data:
metrics:
metric:
- resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*")
- - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
+ - type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM
+   or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket")
groupbyattrs:
keys:
- container
@@ -68,7 +68,8 @@ data:
metrics:
metric:
- resource.attributes["job"] != "pod-annotations" and IsMatch(name, "scrape_.*")
- - IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
+ - type == METRIC_DATA_TYPE_HISTOGRAM or type == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM
+   or type == METRIC_DATA_TYPE_SUMMARY or IsMatch(name, ".*_bucket")
groupbyattrs:
keys:
- container
@@ -115,8 +115,10 @@ spec:
metric_statements:
- context: metric
statements:
- - extract_sum_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
- - extract_count_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
+ - extract_sum_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type
+   == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY
+ - extract_count_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type
+   == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY
receivers:
prometheus:
config:
@@ -117,8 +117,10 @@ spec:
metric_statements:
- context: metric
statements:
- - extract_sum_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
- - extract_count_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
+ - extract_sum_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type
+   == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY
+ - extract_count_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type
+   == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY
receivers:
prometheus:
config:
@@ -115,8 +115,10 @@ spec:
metric_statements:
- context: metric
statements:
- - extract_sum_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
- - extract_count_metric(true) where IsMatch(name, "^(apiserver_request_duration_seconds|coredns_dns_request_duration_seconds|kubelet_runtime_operations_duration_seconds)$")
+ - extract_sum_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type
+   == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY
+ - extract_count_metric(true) where type == METRIC_DATA_TYPE_HISTOGRAM or type
+   == METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM or type == METRIC_DATA_TYPE_SUMMARY
receivers:
prometheus:
config:
20 changes: 4 additions & 16 deletions tests/integration/internal/constants.go
@@ -130,12 +130,10 @@ var (
"kubelet_runtime_operations_duration_seconds_sum",
}
KubeSchedulerMetrics = []string{
"scheduler_scheduling_algorithm_duration_seconds_count", // not used by any App
"scheduler_scheduling_algorithm_duration_seconds_sum", // used by Kubernetes - Control Plane
"scheduler_scheduling_algorithm_duration_seconds_bucket", // not used by any App
"scheduler_framework_extension_point_duration_seconds_bucket", // not used by any App, probably will be used by Kubernetes - Control Plane
"scheduler_framework_extension_point_duration_seconds_count", // not used by any App, probably will be used by Kubernetes - Control Plane
"scheduler_framework_extension_point_duration_seconds_sum", // not used by any App, probably will be used by Kubernetes - Control Plane
"scheduler_scheduling_algorithm_duration_seconds_count", // not used by any App
"scheduler_scheduling_algorithm_duration_seconds_sum", // used by Kubernetes - Control Plane
"scheduler_framework_extension_point_duration_seconds_count", // not used by any App, probably will be used by Kubernetes - Control Plane
"scheduler_framework_extension_point_duration_seconds_sum", // not used by any App, probably will be used by Kubernetes - Control Plane
}
KubeApiServerMetrics = []string{
"apiserver_request_total", // used by Kubernetes - Control Plane
@@ -146,10 +144,8 @@ var (
"etcd_mvcc_db_total_size_in_bytes", // not used by any App
"etcd_debugging_store_expires_total",
"etcd_debugging_store_watchers",
"etcd_disk_backend_commit_duration_seconds_bucket",
"etcd_disk_backend_commit_duration_seconds_count",
"etcd_disk_backend_commit_duration_seconds_sum",
"etcd_disk_wal_fsync_duration_seconds_bucket",
"etcd_disk_wal_fsync_duration_seconds_count",
"etcd_disk_wal_fsync_duration_seconds_sum",
"etcd_grpc_proxy_cache_hits_total",
@@ -237,12 +233,10 @@
"otelcol_otelsvc_k8s_pod_table_size",
"otelcol_otelsvc_k8s_pod_updated",
"otelcol_processor_accepted_metric_points",
"otelcol_processor_batch_batch_send_size_bucket",
"otelcol_processor_batch_batch_send_size_count",
"otelcol_processor_batch_batch_send_size_sum",
"otelcol_processor_batch_timeout_trigger_send",
"otelcol_processor_dropped_metric_points",
"otelcol_processor_groupbyattrs_metric_groups_bucket",
"otelcol_processor_groupbyattrs_metric_groups_count",
"otelcol_processor_groupbyattrs_metric_groups_sum",
"otelcol_processor_groupbyattrs_num_non_grouped_metrics",
@@ -262,7 +256,6 @@ var (
"otelcol_processor_refused_log_records",
"otelcol_processor_dropped_log_records",
"otelcol_processor_groupbyattrs_num_grouped_logs",
"otelcol_processor_groupbyattrs_log_groups_bucket",
"otelcol_processor_groupbyattrs_log_groups_count",
"otelcol_processor_groupbyattrs_log_groups_sum",
"otelcol_fileconsumer_reading_files",
@@ -300,7 +293,6 @@ var (
"prometheus_remote_storage_samples_pending",
"prometheus_remote_storage_samples_retried_total",
"prometheus_remote_storage_samples_total",
"prometheus_remote_storage_sent_batch_duration_seconds_bucket",
"prometheus_remote_storage_sent_batch_duration_seconds_count",
"prometheus_remote_storage_sent_batch_duration_seconds_sum",
"prometheus_remote_storage_shard_capacity",
@@ -322,7 +314,6 @@
"otelcol_http_server_response_content_length",
"otelcol_http_server_request_content_length",
"otelcol_http_server_duration_count",
"otelcol_http_server_duration_bucket",
"otelcol_processor_batch_batch_size_trigger_send",
"otelcol_processor_filter_datapoints_filtered",
"otelcol_otelsvc_k8s_ip_lookup_miss",
@@ -338,10 +329,8 @@ var (
// scheduler_scheduling_attempt_duration_seconds is present for K8s >=1.23
"scheduler_e2e_scheduling_duration_seconds_count",
"scheduler_e2e_scheduling_duration_seconds_sum",
"scheduler_e2e_scheduling_duration_seconds_bucket",
"scheduler_scheduling_attempt_duration_seconds_count",
"scheduler_scheduling_attempt_duration_seconds_sum",
"scheduler_scheduling_attempt_duration_seconds_bucket",
"cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile",
"cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile",
"target_info",
@@ -376,7 +365,6 @@
},
after: []string{
"coredns_proxy_request_duration_seconds_count",
"coredns_proxy_request_duration_seconds_bucket",
"coredns_proxy_request_duration_seconds_sum",
},
},
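As background for the test updates above: Prometheus exposes a histogram as `_bucket`, `_sum`, and `_count` series (a summary exposes quantiles plus `_sum` and `_count`). After this change only the extracted sum and count survive the metrics pipeline, which is why the `_bucket` names were removed from the expected-metrics lists. Illustratively, with made-up sample values:

    # before: the full histogram as scraped
    coredns_proxy_request_duration_seconds_bucket{le="0.5"}  11
    coredns_proxy_request_duration_seconds_bucket{le="+Inf"} 12
    coredns_proxy_request_duration_seconds_sum 1.73
    coredns_proxy_request_duration_seconds_count 12

    # after: only the sum and count components are forwarded
    coredns_proxy_request_duration_seconds_sum 1.73
    coredns_proxy_request_duration_seconds_count 12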
